Diffstat (limited to 'drivers/infiniband')
52 files changed, 1121 insertions, 955 deletions
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 02971e239a18..ece6926fa2e6 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 		return ret;
 
 	rt = (struct rt6_info *)dst;
-	if (ipv6_addr_any(&fl6.saddr)) {
-		ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
-					 &fl6.daddr, 0, &fl6.saddr);
-		if (ret)
-			goto put;
-
+	if (ipv6_addr_any(&src_in->sin6_addr)) {
 		src_in->sin6_family = AF_INET6;
 		src_in->sin6_addr = fl6.saddr;
 	}
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 
 	*pdst = dst;
 	return 0;
-put:
-	dst_release(dst);
-	return ret;
 }
 #else
 static int addr6_resolve(struct sockaddr_in6 *src_in,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 1844770f3ae8..2b4d613a3474 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 	primary_path->packet_life_time =
 		cm_req_get_primary_local_ack_timeout(req_msg);
 	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
-	sa_path_set_service_id(primary_path, req_msg->service_id);
+	primary_path->service_id = req_msg->service_id;
 
 	if (req_msg->alt_local_lid) {
 		alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 		alt_path->packet_life_time =
 			cm_req_get_alt_local_ack_timeout(req_msg);
 		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
-		sa_path_set_service_id(alt_path, req_msg->service_id);
+		alt_path->service_id = req_msg->service_id;
 	}
 }
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 91b7a2fe5a55..31bb82d8ecd7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
 		ib->sib_pkey = path->pkey;
 		ib->sib_flowinfo = path->flow_label;
 		memcpy(&ib->sib_addr, &path->sgid, 16);
-		ib->sib_sid = sa_path_get_service_id(path);
+		ib->sib_sid = path->service_id;
 		ib->sib_scope_id = 0;
 	} else {
 		ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
 		memcpy(&req->local_gid, &req_param->primary_path->sgid,
 		       sizeof(req->local_gid));
 		req->has_gid = true;
-		req->service_id =
-			sa_path_get_service_id(req_param->primary_path);
+		req->service_id = req_param->primary_path->service_id;
 		req->pkey = be16_to_cpu(req_param->primary_path->pkey);
 		if (req->pkey != req_param->bth_pkey)
 			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	struct rdma_route *rt;
 	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
 	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
-	const __be64 service_id = sa_path_get_service_id(path);
+	const __be64 service_id =
+		ib_event->param.req_rcvd.primary_path->service_id;
 	int ret;
 
 	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	sa_path_set_service_id(&path_rec,
-			       rdma_get_service_id(&id_priv->id,
-						   cma_dst_addr(id_priv)));
+	path_rec.service_id = rdma_get_service_id(&id_priv->id,
+						  cma_dst_addr(id_priv));
 
 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
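The cm.c and cma.c hunks above can assign service_id directly because this series hoists that field out of the sa_path_rec union; the sa_query.c hunk below updates both the IB and OPA field tables to point at the same shared member. A minimal sketch of the refactor pattern, with hypothetical struct names rather than the kernel's actual definitions:

/*
 * Hedged sketch: when every arm of a union duplicates a field, hoisting
 * it into the enclosing struct lets callers drop type-dispatching
 * accessors such as sa_path_get_service_id()/sa_path_set_service_id().
 */
#include <linux/types.h>

struct path_ib_sketch  { __be16 dlid; };	/* hypothetical IB arm */
struct path_opa_sketch { __be32 dlid; };	/* hypothetical OPA arm */

struct sa_path_rec_sketch {
	__be64 service_id;	/* hoisted: shared by both path types */
	union {
		struct path_ib_sketch ib;
		struct path_opa_sketch opa;
	};
};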
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index cb7d372e4bdf..d92ab4eaa8f3 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 on success or a negative for no listeners.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
 			      struct netlink_callback *cb);
 int ib_nl_handle_set_timeout(struct sk_buff *skb,
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b784055423c8..94931c474d41 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -37,6 +37,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
+#include "core_priv.h"
 
 struct ibnl_client {
 	struct list_head list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
 		return -1;
 	return 0;
 }
-EXPORT_SYMBOL(ibnl_chk_listeners);
 
 int ibnl_add_client(int index, int nops,
 		    const struct ibnl_client_cbs cb_table[])
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e335b09c022e..fb7aec4047c8 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -194,7 +194,7 @@ static u32 tid;
 	.field_name = "sa_path_rec:" #field
 
 static const struct ib_field path_rec_table[] = {
-	{ PATH_REC_FIELD(ib.service_id),
+	{ PATH_REC_FIELD(service_id),
 	  .offset_words = 0,
 	  .offset_bits = 0,
 	  .size_bits = 64 },
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
 	.field_name = "sa_path_rec:" #field
 
 static const struct ib_field opa_path_rec_table[] = {
-	{ OPA_PATH_REC_FIELD(opa.service_id),
+	{ OPA_PATH_REC_FIELD(service_id),
 	  .offset_words = 0,
 	  .offset_bits = 0,
 	  .size_bits = 64 },
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 
 	/* Now build the attributes */
 	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
-		val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
+		val64 = be64_to_cpu(sa_rec->service_id);
 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
 			sizeof(val64), &val64);
 	}
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 3dbf811d3c51..21e60b1e2ff4 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
 		page = sg_page(sg);
-		if (umem->writable && dirty)
+		if (!PageDirty(page) && umem->writable && dirty)
 			set_page_dirty_lock(page);
 		put_page(page);
 	}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 0780b1afefa9..8c4ec564e495 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
 		struct vm_area_struct *vma;
 		struct hstate *h;
 
+		down_read(&mm->mmap_sem);
 		vma = find_vma(mm, ib_umem_start(umem));
-		if (!vma || !is_vm_hugetlb_page(vma))
+		if (!vma || !is_vm_hugetlb_page(vma)) {
+			up_read(&mm->mmap_sem);
 			return -EINVAL;
+		}
 		h = hstate_vma(vma);
 		umem->page_shift = huge_page_shift(h);
+		up_read(&mm->mmap_sem);
 		umem->hugetlb = 1;
 	} else {
 		umem->hugetlb = 0;
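The umem_odp.c hunk above is a locking fix: find_vma() walks the process VMA tree and may only be called with mmap_sem held for read, and the early-return path has to drop the lock too. A minimal sketch of the rule, assuming the pre-4.11-era mm API and a hypothetical helper that is not part of the kernel:

/*
 * Hedged sketch, not kernel code: bracket every find_vma() lookup with
 * mmap_sem, and make sure the error path unlocks as well.
 */
#include <linux/mm.h>

static int inspect_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	int ret = 0;

	down_read(&mm->mmap_sem);	/* protects the VMA tree walk */
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		ret = -EINVAL;		/* no early return while locked */
	up_read(&mm->mmap_sem);		/* single unlock on every path */
	return ret;
}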
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 8b9587fe2303..94fd989c9060 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
 
-void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
-				struct sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+				       struct sa_path_rec *src)
 {
-	memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
-	memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+	memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+	memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
 
 	dst->dlid = htons(ntohl(sa_path_get_dlid(src)));
 	dst->slid = htons(ntohl(sa_path_get_slid(src)));
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ebf7be8d4139..08772836fded 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,10 @@
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_CQ_COUNT		(64 * 1024)
 
+#define BNXT_RE_UD_QP_HW_STALL		0x400000
+
+#define BNXT_RE_RQ_WQE_THRESHOLD	32
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 7ba9e699d7ab..c7bd68311d0c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -61,6 +61,48 @@
 #include "ib_verbs.h"
 #include <rdma/bnxt_re-abi.h>
 
+static int __from_ib_access_flags(int iflags)
+{
+	int qflags = 0;
+
+	if (iflags & IB_ACCESS_LOCAL_WRITE)
+		qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+	if (iflags & IB_ACCESS_REMOTE_READ)
+		qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
+	if (iflags & IB_ACCESS_REMOTE_WRITE)
+		qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
+	if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+		qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
+	if (iflags & IB_ACCESS_MW_BIND)
+		qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
+	if (iflags & IB_ZERO_BASED)
+		qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
+	if (iflags & IB_ACCESS_ON_DEMAND)
+		qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
+	return qflags;
+};
+
+static enum ib_access_flags __to_ib_access_flags(int qflags)
+{
+	enum ib_access_flags iflags = 0;
+
+	if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
+		iflags |= IB_ACCESS_LOCAL_WRITE;
+	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
+		iflags |= IB_ACCESS_REMOTE_WRITE;
+	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
+		iflags |= IB_ACCESS_REMOTE_READ;
+	if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
+		iflags |= IB_ACCESS_REMOTE_ATOMIC;
+	if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
+		iflags |= IB_ACCESS_MW_BIND;
+	if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
+		iflags |= IB_ZERO_BASED;
+	if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
+		iflags |= IB_ACCESS_ON_DEMAND;
+	return iflags;
+};
+
 static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
 			     struct bnxt_qplib_sge *sg_list, int num)
 {
@@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
 	ib_attr->max_total_mcast_qp_attach = 0;
 	ib_attr->max_ah = dev_attr->max_ah;
 
-	ib_attr->max_fmr = dev_attr->max_fmr;
-	ib_attr->max_map_per_fmr = 1;	/* ?
*/ + ib_attr->max_fmr = 0; + ib_attr->max_map_per_fmr = 0; ib_attr->max_srq = dev_attr->max_srq; ib_attr->max_srq_wr = dev_attr->max_srq_wqes; @@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev, return IB_LINK_LAYER_ETHERNET; } +#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE) + +static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd) +{ + struct bnxt_re_fence_data *fence = &pd->fence; + struct ib_mr *ib_mr = &fence->mr->ib_mr; + struct bnxt_qplib_swqe *wqe = &fence->bind_wqe; + + memset(wqe, 0, sizeof(*wqe)); + wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW; + wqe->wr_id = BNXT_QPLIB_FENCE_WRID; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP; + wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE; + wqe->bind.zero_based = false; + wqe->bind.parent_l_key = ib_mr->lkey; + wqe->bind.va = (u64)(unsigned long)fence->va; + wqe->bind.length = fence->size; + wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ); + wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1; + + /* Save the initial rkey in fence structure for now; + * wqe->bind.r_key will be set at (re)bind time. + */ + fence->bind_rkey = ib_inc_rkey(fence->mw->rkey); +} + +static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp) +{ + struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp, + qplib_qp); + struct ib_pd *ib_pd = qp->ib_qp.pd; + struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe; + struct bnxt_qplib_swqe wqe; + int rc; + + memcpy(&wqe, fence_wqe, sizeof(wqe)); + wqe.bind.r_key = fence->bind_rkey; + fence->bind_rkey = ib_inc_rkey(fence->bind_rkey); + + dev_dbg(rdev_to_dev(qp->rdev), + "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n", + wqe.bind.r_key, qp->qplib_qp.id, pd); + rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe); + if (rc) { + dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n"); + return rc; + } + bnxt_qplib_post_send_db(&qp->qplib_qp); + + return rc; +} + +static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd) +{ + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = fence->mr; + + if (fence->mw) { + bnxt_re_dealloc_mw(fence->mw); + fence->mw = NULL; + } + if (mr) { + if (mr->ib_mr.rkey) + bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr, + true); + if (mr->ib_mr.lkey) + bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + kfree(mr); + fence->mr = NULL; + } + if (fence->dma_addr) { + dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + fence->dma_addr = 0; + } +} + +static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) +{ + int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND; + struct bnxt_re_fence_data *fence = &pd->fence; + struct bnxt_re_dev *rdev = pd->rdev; + struct device *dev = &rdev->en_dev->pdev->dev; + struct bnxt_re_mr *mr = NULL; + dma_addr_t dma_addr = 0; + struct ib_mw *mw; + u64 pbl_tbl; + int rc; + + dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES, + DMA_BIDIRECTIONAL); + rc = dma_mapping_error(dev, dma_addr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n"); + rc = -EIO; + fence->dma_addr = 0; + goto fail; + } + fence->dma_addr = dma_addr; + + /* Allocate a MR */ + mr = kzalloc(sizeof(*mr), GFP_KERNEL); + if (!mr) { + rc = -ENOMEM; + goto fail; + } + fence->mr = mr; + mr->rdev = 
rdev; + mr->qplib_mr.pd = &pd->qplib_pd; + mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n"); + goto fail; + } + + /* Register MR */ + mr->ib_mr.lkey = mr->qplib_mr.lkey; + mr->qplib_mr.va = (u64)(unsigned long)fence->va; + mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES; + pbl_tbl = dma_addr; + rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl, + BNXT_RE_FENCE_PBL_SIZE, false); + if (rc) { + dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n"); + goto fail; + } + mr->ib_mr.rkey = mr->qplib_mr.rkey; + + /* Create a fence MW only for kernel consumers */ + mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); + if (!mw) { + dev_err(rdev_to_dev(rdev), + "Failed to create fence-MW for PD: %p\n", pd); + rc = -EINVAL; + goto fail; + } + fence->mw = mw; + + bnxt_re_create_fence_wqe(pd); + return 0; + +fail: + bnxt_re_destroy_fence_mr(pd); + return rc; +} + /* Protection Domains */ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) { @@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) struct bnxt_re_dev *rdev = pd->rdev; int rc; + bnxt_re_destroy_fence_mr(pd); if (ib_pd->uobject && pd->dpi.dbr) { struct ib_ucontext *ib_uctx = ib_pd->uobject->context; struct bnxt_re_ucontext *ucntx; @@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, } } + if (!udata) + if (bnxt_re_create_fence_mr(pd)) + dev_warn(rdev_to_dev(rdev), + "Failed to create Fence-MR\n"); return &pd->ib_pd; dbfail: (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, @@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp /* Shadow QP SQ depth should be same as QP1 RQ depth */ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.sq.max_sge = 2; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.sq.q_full_delta = 1; qp->qplib_qp.scq = qp1_qp->scq; qp->qplib_qp.rcq = qp1_qp->rcq; qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; + /* Q full delta can be 1 since it is internal QP */ + qp->qplib_qp.rq.q_full_delta = 1; qp->qplib_qp.mtu = qp1_qp->mtu; @@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
true : false); - entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); - qp->qplib_qp.sq.max_wqe = min_t(u32, entries, - dev_attr->max_qp_wqes + 1); - qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; @@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_init_attr->cap.max_recv_wr; + qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); if (qp_init_attr->qp_type == IB_QPT_GSI) { + /* Allocate 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_init_attr->cap.max_send_wr; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; @@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, } } else { + /* Allocate 128 + 1 more than what's provided */ + entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.max_wqe = min_t(u32, entries, + dev_attr->max_qp_wqes + + BNXT_QPLIB_RESERVED_QP_WRS + 1); + qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1; + + /* + * Reserving one slot for Phantom WQE. Application can + * post one extra entry in this case. 
But allowing this to avoid + * unexpected Queue full condition + */ + + qp->qplib_qp.sq.q_full_delta -= 1; + qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; if (udata) { @@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd, qp->ib_qp.qp_num = qp->qplib_qp.id; spin_lock_init(&qp->sq_lock); + spin_lock_init(&qp->rq_lock); if (udata) { struct bnxt_re_qp_resp resp; @@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu) } } -static int __from_ib_access_flags(int iflags) -{ - int qflags = 0; - - if (iflags & IB_ACCESS_LOCAL_WRITE) - qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE; - if (iflags & IB_ACCESS_REMOTE_READ) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ; - if (iflags & IB_ACCESS_REMOTE_WRITE) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE; - if (iflags & IB_ACCESS_REMOTE_ATOMIC) - qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC; - if (iflags & IB_ACCESS_MW_BIND) - qflags |= BNXT_QPLIB_ACCESS_MW_BIND; - if (iflags & IB_ZERO_BASED) - qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED; - if (iflags & IB_ACCESS_ON_DEMAND) - qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND; - return qflags; -}; - -static enum ib_access_flags __to_ib_access_flags(int qflags) -{ - enum ib_access_flags iflags = 0; - - if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE) - iflags |= IB_ACCESS_LOCAL_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE) - iflags |= IB_ACCESS_REMOTE_WRITE; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ) - iflags |= IB_ACCESS_REMOTE_READ; - if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC) - iflags |= IB_ACCESS_REMOTE_ATOMIC; - if (qflags & BNXT_QPLIB_ACCESS_MW_BIND) - iflags |= IB_ACCESS_MW_BIND; - if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED) - iflags |= IB_ZERO_BASED; - if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND) - iflags |= IB_ACCESS_ON_DEMAND; - return iflags; -}; - static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp1_qp, int qp_attr_mask) @@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); qp->qplib_qp.sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe - + qp_attr->cap.max_send_wr; + /* + * Reserving one slot for Phantom WQE. Some application can + * post one extra entry in this case. 
Allowing this to avoid + * unexpected Queue full condition + */ + qp->qplib_qp.sq.q_full_delta -= 1; qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; if (qp->qplib_qp.rq.max_wqe) { entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); qp->qplib_qp.rq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1); + qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe - + qp_attr->cap.max_recv_wr; qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; } else { /* SRQ was used prior, just ignore the RQ caps */ @@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev, return payload_sz; } +static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp) +{ + if ((qp->ib_qp.qp_type == IB_QPT_UD || + qp->ib_qp.qp_type == IB_QPT_GSI || + qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) && + qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) { + int qp_attr_mask; + struct ib_qp_attr qp_attr; + + qp_attr_mask = IB_QP_STATE; + qp_attr.qp_state = IB_QPS_RTS; + bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL); + qp->qplib_qp.wqe_cnt = 0; + } +} + static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp, struct ib_send_wr *wr) @@ -1928,6 +2137,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; } @@ -2024,6 +2234,7 @@ bad: wr = wr->next; } bnxt_qplib_post_send_db(&qp->qplib_qp); + bnxt_ud_qp_hw_stall_workaround(qp); spin_unlock_irqrestore(&qp->sq_lock, flags); return rc; @@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_qplib_swqe wqe; int rc = 0, payload_sz = 0; + unsigned long flags; + u32 count = 0; + spin_lock_irqsave(&qp->rq_lock, flags); while (wr) { /* House keeping */ memset(&wqe, 0, sizeof(wqe)); @@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr, *bad_wr = wr; break; } + + /* Ring DB if the RQEs posted reaches a threshold value */ + if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) { + bnxt_qplib_post_recv_db(&qp->qplib_qp); + count = 0; + } + wr = wr->next; } - bnxt_qplib_post_recv_db(&qp->qplib_qp); + + if (count) + bnxt_qplib_post_recv_db(&qp->qplib_qp); + + spin_unlock_irqrestore(&qp->rq_lock, flags); + return rc; } @@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc, wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; } +static int send_phantom_wqe(struct bnxt_re_qp *qp) +{ + struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&qp->sq_lock, flags); + + rc = bnxt_re_bind_fence_mw(lib_qp); + if (!rc) { + lib_qp->sq.phantom_wqe_cnt++; + dev_dbg(&lib_qp->sq.hwq.pdev->dev, + "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n", + lib_qp->id, lib_qp->sq.hwq.prod, + HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq), + lib_qp->sq.phantom_wqe_cnt); + } + + spin_unlock_irqrestore(&qp->sq_lock, flags); + return rc; +} + int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) { struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); struct bnxt_re_qp *qp; struct bnxt_qplib_cqe *cqe; int i, ncqe, budget; + struct bnxt_qplib_q *sq; + struct bnxt_qplib_qp *lib_qp; u32 tbl_idx; struct bnxt_re_sqp_entries *sqp_entry = NULL; unsigned long flags; @@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) } cqe = &cq->cql[0]; while (budget) { - ncqe = 
bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); + lib_qp = NULL; + ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp); + if (lib_qp) { + sq = &lib_qp->sq; + if (sq->send_phantom) { + qp = container_of(lib_qp, + struct bnxt_re_qp, qplib_qp); + if (send_phantom_wqe(qp) == -ENOMEM) + dev_err(rdev_to_dev(cq->rdev), + "Phantom failed! Scheduled to send again\n"); + else + sq->send_phantom = false; + } + } + if (!ncqe) break; @@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) struct bnxt_re_dev *rdev = mr->rdev; int rc; + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); + if (rc) { + dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); + return rc; + } + if (mr->npages && mr->pages) { rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, &mr->qplib_frpl); @@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr) mr->npages = 0; mr->pages = NULL; } - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); - if (!IS_ERR_OR_NULL(mr->ib_umem)) ib_umem_release(mr->ib_umem); @@ -2914,97 +3182,52 @@ fail: return ERR_PTR(rc); } -/* Fast Memory Regions */ -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr) +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata) { struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_dev *rdev = pd->rdev; - struct bnxt_re_fmr *fmr; + struct bnxt_re_mw *mw; int rc; - if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || - fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { - dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit"); + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + if (!mw) return ERR_PTR(-ENOMEM); - } - fmr = kzalloc(sizeof(*fmr), GFP_KERNEL); - if (!fmr) - return ERR_PTR(-ENOMEM); - - fmr->rdev = rdev; - fmr->qplib_fmr.pd = &pd->qplib_pd; - fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; + mw->rdev = rdev; + mw->qplib_mw.pd = &pd->qplib_pd; - rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) + mw->qplib_mw.type = (type == IB_MW_TYPE_1 ? 
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 : + CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B); + rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Allocate MW failed!"); goto fail; + } + mw->ib_mw.rkey = mw->qplib_mw.rkey; - fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); - fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; - fmr->ib_fmr.rkey = fmr->ib_fmr.lkey; + atomic_inc(&rdev->mw_count); + return &mw->ib_mw; - atomic_inc(&rdev->mr_count); - return &fmr->ib_fmr; fail: - kfree(fmr); + kfree(mw); return ERR_PTR(rc); } -int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, - u64 iova) +int bnxt_re_dealloc_mw(struct ib_mw *ib_mw) { - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; + struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw); + struct bnxt_re_dev *rdev = mw->rdev; int rc; - fmr->qplib_fmr.va = iova; - fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; - - rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, - list_len, true); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!", - fmr->ib_fmr.lkey); - return rc; -} - -int bnxt_re_unmap_fmr(struct list_head *fmr_list) -{ - struct bnxt_re_dev *rdev; - struct bnxt_re_fmr *fmr; - struct ib_fmr *ib_fmr; - int rc = 0; - - /* Validate each FMRs inside the fmr_list */ - list_for_each_entry(ib_fmr, fmr_list, list) { - fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr); - rdev = fmr->rdev; - - if (rdev) { - rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res, - &fmr->qplib_fmr, true); - if (rc) - break; - } + rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw); + if (rc) { + dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc); + return rc; } - return rc; -} - -int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) -{ - struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, - ib_fmr); - struct bnxt_re_dev *rdev = fmr->rdev; - int rc; - rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr); - if (rc) - dev_err(rdev_to_dev(rdev), "Failed to free FMR"); - - kfree(fmr); - atomic_dec(&rdev->mr_count); + kfree(mw); + atomic_dec(&rdev->mw_count); return rc; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index 5c3d71765454..6c160f6a5398 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx { u32 refcnt; }; +#define BNXT_RE_FENCE_BYTES 64 +struct bnxt_re_fence_data { + u32 size; + u8 va[BNXT_RE_FENCE_BYTES]; + dma_addr_t dma_addr; + struct bnxt_re_mr *mr; + struct ib_mw *mw; + struct bnxt_qplib_swqe bind_wqe; + u32 bind_rkey; +}; + struct bnxt_re_pd { struct bnxt_re_dev *rdev; struct ib_pd ib_pd; struct bnxt_qplib_pd qplib_pd; struct bnxt_qplib_dpi dpi; + struct bnxt_re_fence_data fence; }; struct bnxt_re_ah { @@ -62,6 +74,7 @@ struct bnxt_re_qp { struct bnxt_re_dev *rdev; struct ib_qp ib_qp; spinlock_t sq_lock; /* protect sq */ + spinlock_t rq_lock; /* protect rq */ struct bnxt_qplib_qp qplib_qp; struct ib_umem *sumem; struct ib_umem *rumem; @@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents, struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, u32 max_num_sg); int bnxt_re_dereg_mr(struct ib_mr *mr); -struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr); -int 
bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, - u64 iova); -int bnxt_re_unmap_fmr(struct list_head *fmr_list); -int bnxt_re_dealloc_fmr(struct ib_fmr *fmr); +struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type, + struct ib_udata *udata); +int bnxt_re_dealloc_mw(struct ib_mw *mw); struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 5d355401179b..1fce5e73216b 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) ibdev->dereg_mr = bnxt_re_dereg_mr; ibdev->alloc_mr = bnxt_re_alloc_mr; ibdev->map_mr_sg = bnxt_re_map_mr_sg; - ibdev->alloc_fmr = bnxt_re_alloc_fmr; - ibdev->map_phys_fmr = bnxt_re_map_phys_fmr; - ibdev->unmap_fmr = bnxt_re_unmap_fmr; - ibdev->dealloc_fmr = bnxt_re_dealloc_fmr; ibdev->reg_user_mr = bnxt_re_reg_user_mr; ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 43d08b5e9085..f05500bcdcf1 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_qp1 req; - struct creq_create_qp1_resp *resp; + struct creq_create_qp1_resp resp; struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_q *sq = &qp->sq; struct bnxt_qplib_q *rq = &qp->rq; @@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp1_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed"); - rc = -EINVAL; - goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - qp->id = le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; struct cmdq_create_qp req; - struct creq_create_qp_resp *resp; + struct creq_create_qp_resp resp; struct bnxt_qplib_pbl *pbl; struct sq_psn_search **psn_search_ptr; unsigned long int psn_search, poff = 0; @@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) } req.pd_id = cpu_to_le32(qp->pd->id); - resp = (struct creq_create_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP 
send failed"); - rc = -EINVAL; - goto fail; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - qp->id = le32_to_cpu(resp->xid); + + qp->id = le32_to_cpu(resp.xid); qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; sq->flush_in_progress = false; rq->flush_in_progress = false; @@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; - struct creq_modify_qp_resp *resp; + struct creq_modify_qp_resp resp; u16 cmd_flags = 0, pkey; u32 temp32[4]; u32 bmask; + int rc; RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); @@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); - resp = (struct creq_modify_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; qp->cur_qp_state = qp->state; return 0; } @@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_query_qp req; - struct creq_query_qp_resp *resp; + struct creq_query_qp_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; struct creq_query_qp_resp_sb *sb; u16 cmd_flags = 0; u32 temp32[4]; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) + return -ENOMEM; + sb = sbuf->sb; + req.qp_cid = cpu_to_le32(qp->id); req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - (void **)&sb, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void 
*)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; /* Extract the context from the side buffer */ qp->state = sb->en_sqd_async_notify_state & CREQ_QUERY_QP_RESP_SB_STATE_MASK; @@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); memcpy(qp->smac, sb->src_mac, 6); qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); - return 0; +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) @@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_qp req; - struct creq_destroy_qp_resp *resp; + struct creq_destroy_qp_resp resp; unsigned long flags; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); req.qp_cid = cpu_to_le32(qp->id); - resp = (struct creq_destroy_qp_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; /* Must walk the associated CQs to nullified the QP ptr */ spin_lock_irqsave(&qp->scq->hwq.lock, flags); @@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) == - HWQ_CMP(sq->hwq.cons, &sq->hwq)) { + + if (bnxt_qplib_queue_full(sq)) { + dev_err(&sq->hwq.pdev->dev, + "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x", + sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements, + sq->q_full_delta); rc = -ENOMEM; goto done; } @@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, } sq->hwq.prod++; + + qp->wqe_cnt++; + done: return rc; } @@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, rc = -EINVAL; goto done; } - if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == - HWQ_CMP(rq->hwq.cons, &rq->hwq)) { + if (bnxt_qplib_queue_full(rq)) { dev_err(&rq->hwq.pdev->dev, "QPLIB: FP: QP (0x%x) RQ is full!", qp->id); rc = -EINVAL; @@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_cq req; - struct creq_create_cq_resp *resp; + struct creq_create_cq_resp resp; struct bnxt_qplib_pbl *pbl; u16 cmd_flags = 0; int rc; @@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << CMDQ_CREATE_CQ_CNQ_ID_SFT); - resp = (struct creq_create_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - 
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) goto fail; - } - cq->id = le32_to_cpu(resp->xid); + + cq->id = le32_to_cpu(resp.xid); cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; init_waitqueue_head(&cq->waitq); @@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_cq req; - struct creq_destroy_cq_resp *resp; + struct creq_destroy_cq_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); req.cq_cid = cpu_to_le32(cq->id); - resp = (struct creq_destroy_cq_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; bnxt_qplib_free_hwq(res->pdev, &cq->hwq); return 0; } @@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp, return rc; } +/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive) + * CQE is track from sw_cq_cons to max_element but valid only if VALID=1 + */ +static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq, + u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons) +{ + struct bnxt_qplib_q *sq = &qp->sq; + struct bnxt_qplib_swq *swq; + u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx; + struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr; + struct cq_req *peek_req_hwcqe; + struct bnxt_qplib_qp *peek_qp; + struct bnxt_qplib_q *peek_sq; + int i, rc = 0; + + /* Normal mode */ + /* Check for the psn_search marking before completing */ + swq = &sq->swq[sw_sq_cons]; + if (swq->psn_search && + le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) { + /* Unmark */ + swq->psn_search->flags_next_psn = cpu_to_le32 + (le32_to_cpu(swq->psn_search->flags_next_psn) + & ~0x80000000); + dev_dbg(&cq->hwq.pdev->dev, + "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + sq->condition = true; + sq->send_phantom = true; + + /* TODO: Only ARM if the previous SQE is ARMALL */ + bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL); + + rc = -EAGAIN; + goto out; + } + if (sq->condition) { + /* Peek at the completions */ + peek_raw_cq_cons = cq->hwq.cons; + peek_sw_cq_cons = cq_cons; + i = cq->hwq.max_elements; + while (i--) { + peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq); + peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; + peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)] + [CQE_IDX(peek_sw_cq_cons)]; + /* If the next hwcqe is VALID */ + if 
(CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons, + cq->hwq.max_elements)) { + /* If the next hwcqe is a REQ */ + if ((peek_hwcqe->cqe_type_toggle & + CQ_BASE_CQE_TYPE_MASK) == + CQ_BASE_CQE_TYPE_REQ) { + peek_req_hwcqe = (struct cq_req *) + peek_hwcqe; + peek_qp = (struct bnxt_qplib_qp *) + ((unsigned long) + le64_to_cpu + (peek_req_hwcqe->qp_handle)); + peek_sq = &peek_qp->sq; + peek_sq_cons_idx = HWQ_CMP(le16_to_cpu( + peek_req_hwcqe->sq_cons_idx) - 1 + , &sq->hwq); + /* If the hwcqe's sq's wr_id matches */ + if (peek_sq == sq && + sq->swq[peek_sq_cons_idx].wr_id == + BNXT_QPLIB_FENCE_WRID) { + /* + * Unbreak only if the phantom + * comes back + */ + dev_dbg(&cq->hwq.pdev->dev, + "FP:Got Phantom CQE"); + sq->condition = false; + sq->single = true; + rc = 0; + goto out; + } + } + /* Valid but not the phantom, so keep looping */ + } else { + /* Not valid yet, just exit and wait */ + rc = -EINVAL; + goto out; + } + peek_sw_cq_cons++; + peek_raw_cq_cons++; + } + dev_err(&cq->hwq.pdev->dev, + "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x", + cq_cons, qp->id, sw_sq_cons, cqe_sq_cons); + rc = -EINVAL; + } +out: + return rc; +} + static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, struct cq_req *hwcqe, - struct bnxt_qplib_cqe **pcqe, int *budget) + struct bnxt_qplib_cqe **pcqe, int *budget, + u32 cq_cons, struct bnxt_qplib_qp **lib_qp) { struct bnxt_qplib_qp *qp; struct bnxt_qplib_q *sq; struct bnxt_qplib_cqe *cqe; - u32 sw_cons, cqe_cons; + u32 sw_sq_cons, cqe_sq_cons; + struct bnxt_qplib_swq *swq; int rc = 0; qp = (struct bnxt_qplib_qp *)((unsigned long) @@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, } sq = &qp->sq; - cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); - if (cqe_cons > sq->hwq.max_elements) { + cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); + if (cqe_sq_cons > sq->hwq.max_elements) { dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Process req reported "); dev_err(&cq->hwq.pdev->dev, "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", - cqe_cons, sq->hwq.max_elements); + cqe_sq_cons, sq->hwq.max_elements); return -EINVAL; } /* If we were in the middle of flushing the SQ, continue */ @@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, /* Require to walk the sq's swq to fabricate CQEs for all previously * signaled SWQEs due to CQE aggregation from the current sq cons - * to the cqe_cons + * to the cqe_sq_cons */ cqe = *pcqe; while (*budget) { - sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); - if (sw_cons == cqe_cons) + sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); + if (sw_sq_cons == cqe_sq_cons) + /* Done */ break; + + swq = &sq->swq[sw_sq_cons]; memset(cqe, 0, sizeof(*cqe)); cqe->opcode = CQ_BASE_CQE_TYPE_REQ; cqe->qp_handle = (u64)(unsigned long)qp; cqe->src_qp = qp->id; - cqe->wr_id = sq->swq[sw_cons].wr_id; - cqe->type = sq->swq[sw_cons].type; + cqe->wr_id = swq->wr_id; + if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID) + goto skip; + cqe->type = swq->type; /* For the last CQE, check for status. 
For errors, regardless * of the request being signaled or not, it must complete with * the hwcqe error status */ - if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && + if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons && hwcqe->status != CQ_REQ_STATUS_OK) { cqe->status = hwcqe->status; dev_err(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Req "); dev_err(&cq->hwq.pdev->dev, "QPLIB: wr_id[%d] = 0x%llx with status 0x%x", - sw_cons, cqe->wr_id, cqe->status); + sw_sq_cons, cqe->wr_id, cqe->status); cqe++; (*budget)--; sq->flush_in_progress = true; /* Must block new posting of SQ and RQ */ qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; + sq->condition = false; + sq->single = false; } else { - if (sq->swq[sw_cons].flags & - SQ_SEND_FLAGS_SIGNAL_COMP) { + if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { + /* Before we complete, do WA 9060 */ + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, + cqe_sq_cons)) { + *lib_qp = qp; + goto out; + } cqe->status = CQ_REQ_STATUS_OK; cqe++; (*budget)--; } } +skip: sq->hwq.cons++; + if (sq->single) + break; } +out: *pcqe = cqe; - if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { + if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) { /* Out of budget */ rc = -EAGAIN; goto done; } + /* + * Back to normal completion mode only after it has completed all of + * the WC for this CQE + */ + sq->single = false; if (!sq->flush_in_progress) goto done; flush: @@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq, } int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, - int num_cqes) + int num_cqes, struct bnxt_qplib_qp **lib_qp) { struct cq_base *hw_cqe, **hw_cqe_ptr; unsigned long flags; @@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, case CQ_BASE_CQE_TYPE_REQ: rc = bnxt_qplib_cq_process_req(cq, (struct cq_req *)hw_cqe, - &cqe, &budget); + &cqe, &budget, + sw_cons, lib_qp); break; case CQ_BASE_CQE_TYPE_RES_RC: rc = bnxt_qplib_cq_process_res_rc(cq, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index f0150f8da1e3..36b7b7db0e3f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -88,6 +88,7 @@ struct bnxt_qplib_swq { struct bnxt_qplib_swqe { /* General */ +#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */ u64 wr_id; u8 reqs_type; u8 type; @@ -216,9 +217,16 @@ struct bnxt_qplib_q { struct scatterlist *sglist; u32 nmap; u32 max_wqe; + u16 q_full_delta; u16 max_sge; u32 psn; bool flush_in_progress; + bool condition; + bool single; + bool send_phantom; + u32 phantom_wqe_cnt; + u32 phantom_cqe_cnt; + u32 next_cq_cons; }; struct bnxt_qplib_qp { @@ -242,6 +250,7 @@ struct bnxt_qplib_qp { u8 timeout; u8 retry_cnt; u8 rnr_retry; + u64 wqe_cnt; u32 min_rnr_timer; u32 max_rd_atomic; u32 max_dest_rd_atomic; @@ -301,6 +310,13 @@ struct bnxt_qplib_qp { (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ !((raw_cons) & (cp_bit))) +static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q) +{ + return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta), + &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons, + &qplib_q->hwq); +} + struct bnxt_qplib_cqe { u8 status; u8 type; @@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe 
*cqe,
-		       int num);
+		       int num, struct bnxt_qplib_qp **qp);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
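The qplib_fp.h diff above introduces bnxt_qplib_queue_full(), which declares a queue full q_full_delta slots early so that slots reserved for driver-internal work requests (such as the fence/phantom WQE) stay available even when the application has posted its full quota. A minimal sketch of the same ring arithmetic, assuming a power-of-two depth and using hypothetical names:

/*
 * Hedged sketch of the q_full_delta idea, not the driver's exact code.
 * The classic one-open-slot ring uses delta = 1; a larger delta keeps
 * that many extra slots in reserve for internal WQEs.
 */
static inline int ring_full(unsigned int prod, unsigned int cons,
			    unsigned int depth, unsigned int delta)
{
	return ((prod + delta) & (depth - 1)) == (cons & (depth - 1));
}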
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 23fb7260662b..16e42754dbec 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -39,72 +39,55 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/prefetch.h>
+#include <linux/delay.h>
+
 #include "roce_hsi.h"
 #include "qplib_res.h"
 #include "qplib_rcfw.h"
 static void bnxt_qplib_service_creq(unsigned long data);
 
 /* Hardware communication channel */
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 {
 	u16 cbit;
 	int rc;
 
-	cookie &= RCFW_MAX_COOKIE_VALUE;
 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
-	if (!test_bit(cbit, rcfw->cmdq_bitmap))
-		dev_warn(&rcfw->pdev->dev,
-			 "QPLIB: CMD bit %d for cookie 0x%x is not set?",
-			 cbit, cookie);
-
 	rc = wait_event_timeout(rcfw->waitq,
				!test_bit(cbit, rcfw->cmdq_bitmap),
				msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
-	if (!rc) {
-		dev_warn(&rcfw->pdev->dev,
-			 "QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
-			 RCFW_CMD_WAIT_TIME_MS, cookie);
-	}
-
-	return rc;
+	return rc ? 0 : -ETIMEDOUT;
 };
 
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
 {
-	u32 count = -1;
+	u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
 	u16 cbit;
 
-	cookie &= RCFW_MAX_COOKIE_VALUE;
 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
 	if (!test_bit(cbit, rcfw->cmdq_bitmap))
 		goto done;
 	do {
+		mdelay(1); /* 1m sec */
 		bnxt_qplib_service_creq((unsigned long)rcfw);
 	} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
 done:
-	return count;
+	return count ? 0 : -ETIMEDOUT;
 };
 
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-				   struct cmdq_base *req, void **crsbe,
-				   u8 is_block)
+static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
+			  struct creq_base *resp, void *sb, u8 is_block)
 {
-	struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
 	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
 	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
-	struct bnxt_qplib_hwq *crsb = &rcfw->crsb;
-	struct bnxt_qplib_crsqe *crsqe = NULL;
-	struct bnxt_qplib_crsbe **crsb_ptr;
+	struct bnxt_qplib_crsq *crsqe;
 	u32 sw_prod, cmdq_prod;
-	u8 retry_cnt = 0xFF;
-	dma_addr_t dma_addr;
 	unsigned long flags;
 	u32 size, opcode;
 	u16 cookie, cbit;
 	int pg, idx;
 	u8 *preq;
 
-retry:
 	opcode = req->opcode;
 	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
 	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@@ -112,63 +95,50 @@ retry:
 		dev_err(&rcfw->pdev->dev,
 			"QPLIB: RCFW not initialized, reject opcode 0x%x",
 			opcode);
-		return NULL;
+		return -EINVAL;
 	}
 
 	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
 	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
 		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
-		return NULL;
+		return -EINVAL;
 	}
 
 	/* Cmdq are in 16-byte units, each request can consume 1 or more
 	 * cmdqe
 	 */
 	spin_lock_irqsave(&cmdq->lock, flags);
-	if (req->cmd_size > cmdq->max_elements -
-	    ((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
-	     (cmdq->max_elements - 1))) {
+	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
 		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
 		spin_unlock_irqrestore(&cmdq->lock, flags);
-
-		if (!retry_cnt--)
-			return NULL;
-		goto retry;
+		return -EAGAIN;
 	}
 
-	retry_cnt = 0xFF;
-	cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE;
+	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
 	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
 	if (is_block)
 		cookie |= RCFW_CMD_IS_BLOCKING;
+
+	set_bit(cbit, rcfw->cmdq_bitmap);
 	req->cookie = cpu_to_le16(cookie);
-	if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) {
-		dev_err(&rcfw->pdev->dev,
-			"QPLIB: RCFW MAX outstanding cmd reached!");
-		atomic_dec(&rcfw->seq_num);
+	crsqe = &rcfw->crsqe_tbl[cbit];
+	if (crsqe->resp) {
 		spin_unlock_irqrestore(&cmdq->lock, flags);
-
-		if (!retry_cnt--)
-			return NULL;
-		goto retry;
+		return -EBUSY;
 	}
-	/* Reserve a resp buffer slot if requested */
-	if (req->resp_size && crsbe) {
-		spin_lock(&crsb->lock);
-		sw_prod = HWQ_CMP(crsb->prod, crsb);
-		crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr;
-		*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)]
-					  [get_crsb_idx(sw_prod)];
-		bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
-		req->resp_addr = cpu_to_le64(dma_addr);
-		crsb->prod++;
-		spin_unlock(&crsb->lock);
-
-		req->resp_size = (sizeof(struct bnxt_qplib_crsbe) +
-				  BNXT_QPLIB_CMDQE_UNITS - 1) /
-				 BNXT_QPLIB_CMDQE_UNITS;
+	memset(resp, 0, sizeof(*resp));
+	crsqe->resp = (struct creq_qp_event *)resp;
+	crsqe->resp->cookie = req->cookie;
+	crsqe->req_size = req->cmd_size;
+	if (req->resp_size && sb) {
+		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
+
+		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
+		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+				 BNXT_QPLIB_CMDQE_UNITS;
 	}
+
 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
 	preq = (u8 *)req;
 	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@@ -190,23 +160,24 @@ retry:
 		preq += min_t(u32, size, sizeof(*cmdqe));
 		size -= min_t(u32, size, sizeof(*cmdqe));
 		cmdq->prod++;
+		rcfw->seq_num++;
 	} while (size > 0);
+	rcfw->seq_num++;
+
 	cmdq_prod = cmdq->prod;
 	if (rcfw->flags &
FIRMWARE_FIRST_FLAG) { - /* The very first doorbell write is required to set this flag - * which prompts the FW to reset its internal pointers + /* The very first doorbell write + * is required to set this flag + * which prompts the FW to reset + * its internal pointers */ cmdq_prod |= FIRMWARE_FIRST_FLAG; rcfw->flags &= ~FIRMWARE_FIRST_FLAG; } - sw_prod = HWQ_CMP(crsq->prod, crsq); - crsqe = &crsq->crsq[sw_prod]; - memset(crsqe, 0, sizeof(*crsqe)); - crsq->prod++; - crsqe->req_size = req->cmd_size; /* ring CMDQ DB */ + wmb(); writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + rcfw->cmdq_bar_reg_prod_off); writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + @@ -214,9 +185,56 @@ retry: done: spin_unlock_irqrestore(&cmdq->lock, flags); /* Return the CREQ response pointer */ - return crsqe ? &crsqe->qp_event : NULL; + return 0; } +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, + struct creq_base *resp, + void *sb, u8 is_block) +{ + struct creq_qp_event *evnt = (struct creq_qp_event *)resp; + u16 cookie; + u8 opcode, retry_cnt = 0xFF; + int rc = 0; + + do { + opcode = req->opcode; + rc = __send_message(rcfw, req, resp, sb, is_block); + cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE; + if (!rc) + break; + + if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) { + /* send failed */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed", + cookie, opcode); + return rc; + } + is_block ? mdelay(1) : usleep_range(500, 1000); + + } while (retry_cnt--); + + if (is_block) + rc = __block_for_resp(rcfw, cookie); + else + rc = __wait_for_resp(rcfw, cookie); + if (rc) { + /* timed out */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", + cookie, opcode, RCFW_CMD_WAIT_TIME_MS); + return rc; + } + + if (evnt->status) { + /* failed with status */ + dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x", + cookie, opcode, evnt->status); + rc = -EFAULT; + } + + return rc; +} /* Completions */ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, struct creq_func_event *func_event) @@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, struct creq_qp_event *qp_event) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_crsqe *crsqe; - u16 cbit, cookie, blocked = 0; + struct bnxt_qplib_crsq *crsqe; unsigned long flags; - u32 sw_cons; + u16 cbit, blocked = 0; + u16 cookie; + __le16 mcookie; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: @@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, default: /* Command Response */ spin_lock_irqsave(&cmdq->lock, flags); - sw_cons = HWQ_CMP(crsq->cons, crsq); - crsqe = &crsq->crsq[sw_cons]; - crsq->cons++; - memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); - - cookie = le16_to_cpu(crsqe->qp_event.cookie); + cookie = le16_to_cpu(qp_event->cookie); + mcookie = qp_event->cookie; blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp && + crsqe->resp->cookie == mcookie) { + memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); + crsqe->resp = NULL; + } else { + dev_err(&rcfw->pdev->dev, + "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", + crsqe->resp ? "mismatch" : "collision", + crsqe->resp ? 
/* Completions */ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, struct creq_func_event *func_event) @@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, struct creq_qp_event *qp_event) { - struct bnxt_qplib_crsq *crsq = &rcfw->crsq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; - struct bnxt_qplib_crsqe *crsqe; - u16 cbit, cookie, blocked = 0; + struct bnxt_qplib_crsq *crsqe; unsigned long flags; - u32 sw_cons; + u16 cbit, blocked = 0; + u16 cookie; + __le16 mcookie; switch (qp_event->event) { case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: @@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, default: /* Command Response */ spin_lock_irqsave(&cmdq->lock, flags); - sw_cons = HWQ_CMP(crsq->cons, crsq); - crsqe = &crsq->crsq[sw_cons]; - crsq->cons++; - memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event)); - - cookie = le16_to_cpu(crsqe->qp_event.cookie); + cookie = le16_to_cpu(qp_event->cookie); + mcookie = qp_event->cookie; blocked = cookie & RCFW_CMD_IS_BLOCKING; cookie &= RCFW_MAX_COOKIE_VALUE; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; + crsqe = &rcfw->crsqe_tbl[cbit]; + if (crsqe->resp && + crsqe->resp->cookie == mcookie) { + memcpy(crsqe->resp, qp_event, sizeof(*qp_event)); + crsqe->resp = NULL; + } else { + dev_err(&rcfw->pdev->dev, + "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x", + crsqe->resp ? "mismatch" : "collision", + crsqe->resp ? crsqe->resp->cookie : 0, mcookie); + } if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) dev_warn(&rcfw->pdev->dev, "QPLIB: CMD bit %d was not requested", cbit); - cmdq->cons += crsqe->req_size; - spin_unlock_irqrestore(&cmdq->lock, flags); + crsqe->req_size = 0; + if (!blocked) wake_up(&rcfw->waitq); - break; + spin_unlock_irqrestore(&cmdq->lock, flags); } return 0; } @@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data) struct creq_base *creqe, **creq_ptr; u32 sw_cons, raw_cons; unsigned long flags; - u32 type; + u32 type, budget = CREQ_ENTRY_POLL_BUDGET; - /* Service the CREQ until empty */ + /* Service the CREQ until the budget is exhausted */ spin_lock_irqsave(&creq->lock, flags); raw_cons = creq->cons; - while (1) { + while (budget > 0) { sw_cons = HWQ_CMP(raw_cons, creq); creq_ptr = (struct creq_base **)creq->pbl_ptr; creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; @@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data) type = creqe->type & CREQ_BASE_TYPE_MASK; switch (type) { case CREQ_BASE_TYPE_QP_EVENT: - if (!bnxt_qplib_process_qp_event - (rcfw, (struct creq_qp_event *)creqe) - rcfw->creq_qp_event_processed++; - else { - dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with"); - dev_warn(&rcfw->pdev->dev, - "QPLIB: type = 0x%x not handled", - type); - } + bnxt_qplib_process_qp_event + (rcfw, (struct creq_qp_event *)creqe); + rcfw->creq_qp_event_processed++; break; case CREQ_BASE_TYPE_FUNC_EVENT: if (!bnxt_qplib_process_func_event @@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data) break; } raw_cons++; + budget--; } + if (creq->cons != raw_cons) { creq->cons = raw_cons; CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, @@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance) /* RCFW */ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) { - struct creq_deinitialize_fw_resp *resp; struct cmdq_deinitialize_fw req; + struct creq_deinitialize_fw_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); - resp = (struct creq_deinitialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) - return -EINVAL; - - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) - return -ETIMEDOUT; - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) - return -EFAULT; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; @@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl) int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn) { - struct creq_initialize_fw_resp *resp; struct cmdq_initialize_fw req; + struct creq_initialize_fw_resp resp; u16 cmd_flags = 0, level; + int rc; RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); @@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, skip_ctx_setup: req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); - resp = (struct creq_initialize_fw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != 
le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: RCFW: INITIALIZE_FW failed"); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); return 0; } void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); - kfree(rcfw->crsq.crsq); + kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); - rcfw->pdev = NULL; } @@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev, goto fail; } - rcfw->crsq.max_elements = rcfw->cmdq.max_elements; - rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, - sizeof(*rcfw->crsq.crsq), GFP_KERNEL); - if (!rcfw->crsq.crsq) + rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements, + sizeof(*rcfw->crsqe_tbl), GFP_KERNEL); + if (!rcfw->crsqe_tbl) goto fail; - rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT; - if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0, - &rcfw->crsb.max_elements, - BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE, - HWQ_TYPE_CTX)) { - dev_err(&rcfw->pdev->dev, - "QPLIB: HW channel CRSB allocation failed"); - goto fail; - } return 0; fail: @@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, int rc; /* General */ - atomic_set(&rcfw->seq_num, 0); + rcfw->seq_num = 0; rcfw->flags = FIRMWARE_FIRST_FLAG; bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * sizeof(unsigned long)); @@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; - /* CRSQ */ - rcfw->crsq.prod = 0; - rcfw->crsq.cons = 0; - /* CREQ */ rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); @@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); return 0; } + +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size) +{ + struct bnxt_qplib_rcfw_sbuf *sbuf; + + sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); + if (!sbuf) + return NULL; + + sbuf->size = size; + sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size, + &sbuf->dma_addr, GFP_ATOMIC); + if (!sbuf->sb) + goto bail; + + return sbuf; +bail: + kfree(sbuf); + return NULL; +} + +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf) +{ + if (sbuf->sb) + dma_free_coherent(&rcfw->pdev->dev, sbuf->size, + sbuf->sb, sbuf->dma_addr); + kfree(sbuf); +} diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index d3567d75bf58..09ce121770cd 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -73,6 +73,7 @@ #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT #define RCFW_MAX_COOKIE_VALUE 0x7FFF #define RCFW_CMD_IS_BLOCKING 0x8000 +#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 /* Cmdq contains a fix number of a 16-Byte slots */ struct bnxt_qplib_cmdqe { @@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe { u8 data[1024]; }; -/* CRSQ SB */ -#define BNXT_QPLIB_CRSBE_MAX_CNT 4 -#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe) -#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS) - -#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1) -#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1) - -static inline u32 
get_crsb_pg(u32 val) -{ - return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG; -} - -static inline u32 get_crsb_idx(u32 val) -{ - return val & MAX_CRSB_IDX_PER_PG; -} - -static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr, - u32 prod, dma_addr_t *dma_addr) -{ - *dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG]; - *dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) * - BNXT_QPLIB_CRSBE_UNITS; -} - /* CREQ */ /* Allocate 1 per QP for async error notification for now */ #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) @@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val) #define CREQ_DB(db, raw_cons, cp_bit) \ writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) +#define CREQ_ENTRY_POLL_BUDGET 0x100 + /* HWQ */ -struct bnxt_qplib_crsqe { - struct creq_qp_event qp_event; + +struct bnxt_qplib_crsq { + struct creq_qp_event *resp; u32 req_size; }; -struct bnxt_qplib_crsq { - struct bnxt_qplib_crsqe *crsq; - u32 prod; - u32 cons; - u32 max_elements; +struct bnxt_qplib_rcfw_sbuf { + void *sb; + dma_addr_t dma_addr; + u32 size; }; /* RCFW Communication Channels */ @@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw { wait_queue_head_t waitq; int (*aeq_handler)(struct bnxt_qplib_rcfw *, struct creq_func_event *); - atomic_t seq_num; + u32 seq_num; /* Bar region info */ void __iomem *cmdq_bar_reg_iomem; @@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw { /* Actual Cmd and Resp Queues */ struct bnxt_qplib_hwq cmdq; - struct bnxt_qplib_crsq crsq; - struct bnxt_qplib_hwq crsb; + struct bnxt_qplib_crsq *crsqe_tbl; }; void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); @@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev, (struct bnxt_qplib_rcfw *, struct creq_func_event *)); -int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); -void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, - struct cmdq_base *req, void **crsbe, - u8 is_block); +struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( + struct bnxt_qplib_rcfw *rcfw, + u32 size); +void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw, + struct bnxt_qplib_rcfw_sbuf *sbuf); +int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, + struct cmdq_base *req, struct creq_base *resp, + void *sbuf, u8 is_block); int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 6277d802ca4b..2e4855509719 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero; #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1)) +#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \ + ((HWQ_CMP(hwq->prod, hwq)\ + - HWQ_CMP(hwq->cons, hwq))\ + & (hwq->max_elements - 1))) enum bnxt_qplib_hwq_type { HWQ_TYPE_CTX, HWQ_TYPE_QUEUE, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 7b31eccedf11..fde18cf0e406 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -55,37 +55,30 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr) { struct cmdq_query_func req; - struct creq_query_func_resp *resp; + struct creq_query_func_resp resp; + struct bnxt_qplib_rcfw_sbuf *sbuf; 
struct creq_query_func_resp_sb *sb; u16 cmd_flags = 0; u32 temp; u8 *tqm_alloc; - int i; + int i, rc = 0; RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags); - req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; - resp = (struct creq_query_func_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void **)&sb, - 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: QUERY_FUNC failed "); + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) { dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; + "QPLIB: SP: QUERY_FUNC alloc side buffer failed"); + return -ENOMEM; } + + sb = sbuf->sb; + req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + (void *)sbuf, 0); + if (rc) + goto bail; + /* Extract the context from the side buffer */ attr->max_qp = le32_to_cpu(sb->max_qp); attr->max_qp_rd_atom = @@ -95,6 +88,11 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); + /* + * 128 WQEs need to be reserved for the HW (8916). Prevent + * them from being reported in the max number + */ + attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS; attr->max_qp_sges = sb->max_sge; attr->max_cq = le32_to_cpu(sb->max_cq); attr->max_cq_wqes = le32_to_cpu(sb->max_cqe); @@ -130,7 +128,10 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc); attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); } - return 0; + +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; } /* SGID */ @@ -178,8 +179,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, /* Remove GID from the SGID table */ if (update) { struct cmdq_delete_gid req; - struct creq_delete_gid_resp *resp; + struct creq_delete_gid_resp resp; u16 cmd_flags = 0; + int rc; RCFW_CMD_PREP(req, DELETE_GID, cmd_flags); if (sgid_tbl->hw_id[index] == 0xFFFF) { @@ -188,31 +190,10 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return -EINVAL; } req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]); - resp = (struct creq_delete_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, - 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, - "QPLIB: SP: DELETE_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; } memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero, sizeof(bnxt_qplib_gid_zero)); @@ -234,7 +215,7 @@ int 
bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_res, sgid_tbl); struct bnxt_qplib_rcfw *rcfw = res->rcfw; - int i, free_idx, rc = 0; + int i, free_idx; if (!sgid_tbl) { dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated"); @@ -266,10 +247,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } if (update) { struct cmdq_add_gid req; - struct creq_add_gid_resp *resp; + struct creq_add_gid_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; RCFW_CMD_PREP(req, ADD_GID, cmd_flags); @@ -290,31 +272,11 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, req.src_mac[1] = cpu_to_be16(temp16[1]); req.src_mac[2] = cpu_to_be16(temp16[2]); - resp = (struct creq_add_gid_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, - "QPLIB: SP: ADD_GID send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, - "QPIB: SP: ADD_GID timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: ADD_GID failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid); } /* Add GID to the sgid_tbl */ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid)); @@ -325,7 +287,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, *index = free_idx; /* unlock */ - return rc; + return 0; } /* pkeys */ @@ -422,10 +384,11 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_create_ah req; - struct creq_create_ah_resp *resp; + struct creq_create_ah_resp resp; u16 cmd_flags = 0; u32 temp32[4]; u16 temp16[3]; + int rc; RCFW_CMD_PREP(req, CREATE_AH, cmd_flags); @@ -450,28 +413,12 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) req.dest_mac[1] = cpu_to_le16(temp16[1]); req.dest_mac[2] = cpu_to_le16(temp16[2]); - resp = (struct creq_create_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: CREATE_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - ah->id = le32_to_cpu(resp->xid); + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; + + ah->id = le32_to_cpu(resp.xid); return 0; } @@ -479,35 +426,19 @@ int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_destroy_ah req; - struct creq_destroy_ah_resp *resp; + struct creq_destroy_ah_resp resp; u16 cmd_flags = 0; + int rc; /* 
Clean up the AH table in the device */ RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags); req.ah_cid = cpu_to_le32(ah->id); - resp = (struct creq_destroy_ah_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 1); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DESTROY_AH failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 1); + if (rc) + return rc; return 0; } @@ -516,8 +447,9 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deallocate_key req; - struct creq_deallocate_key_resp *resp; + struct creq_deallocate_key_resp resp; u16 cmd_flags = 0; + int rc; if (mrw->lkey == 0xFFFFFFFF) { dev_info(&res->pdev->dev, @@ -536,27 +468,11 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) else req.key = cpu_to_le32(mrw->lkey); - resp = (struct creq_deallocate_key_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: FREE_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, + NULL, 0); + if (rc) + return rc; + /* Free the qplib's MRW memory */ if (mrw->hwq.max_elements) bnxt_qplib_free_hwq(res->pdev, &mrw->hwq); @@ -568,9 +484,10 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_allocate_mrw req; - struct creq_allocate_mrw_resp *resp; + struct creq_allocate_mrw_resp resp; u16 cmd_flags = 0; unsigned long tmp; + int rc; RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags); @@ -584,33 +501,17 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw) tmp = (unsigned long)mrw; req.mrw_handle = cpu_to_le64(tmp); - resp = (struct creq_allocate_mrw_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, 0); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW send failed"); - return -EINVAL; - } - if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { - /* Cmd timed out */ - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: ALLOC_MRW failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = 
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); + if (rc) + return rc; + if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) || (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)) - mrw->rkey = le32_to_cpu(resp->xid); + mrw->rkey = le32_to_cpu(resp.xid); else - mrw->lkey = le32_to_cpu(resp->xid); + mrw->lkey = le32_to_cpu(resp.xid); return 0; } @@ -619,40 +520,17 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_deregister_mr req; - struct creq_deregister_mr_resp *resp; + struct creq_deregister_mr_resp resp; u16 cmd_flags = 0; int rc; RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags); req.lkey = cpu_to_le32(mrw->lkey); - resp = (struct creq_deregister_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR send failed"); - return -EINVAL; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "QPLIB: SP: DEREG_MR timed out"); - return -ETIMEDOUT; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&rcfw->pdev->dev, "QPLIB: SP: DEREG_MR failed "); - dev_err(&rcfw->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) + return rc; /* Free the qplib's MR memory */ if (mrw->hwq.max_elements) { @@ -669,7 +547,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_register_mr req; - struct creq_register_mr_resp *resp; + struct creq_register_mr_resp resp; u16 cmd_flags = 0, level; int pg_ptrs, pages, i, rc; dma_addr_t **pbl_ptr; @@ -730,36 +608,11 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr, req.key = cpu_to_le32(mr->lkey); req.mr_size = cpu_to_le64(mr->total_size); - resp = (struct creq_register_mr_resp *) - bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, - NULL, block); - if (!resp) { - dev_err(&res->pdev->dev, "SP: REG_MR send failed"); - rc = -EINVAL; - goto fail; - } - if (block) - rc = bnxt_qplib_rcfw_block_for_resp(rcfw, - le16_to_cpu(req.cookie)); - else - rc = bnxt_qplib_rcfw_wait_for_resp(rcfw, - le16_to_cpu(req.cookie)); - if (!rc) { - /* Cmd timed out */ - dev_err(&res->pdev->dev, "SP: REG_MR timed out"); - rc = -ETIMEDOUT; - goto fail; - } - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: REG_MR failed "); - dev_err(&res->pdev->dev, - "QPLIB: SP: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - rc = -EINVAL; + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, block); + if (rc) goto fail; - } + return 0; fail: @@ -804,35 +657,15 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids) { struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_map_tc_to_cos req; - struct creq_map_tc_to_cos_resp *resp; + struct creq_map_tc_to_cos_resp resp; u16 cmd_flags = 0; - int tleft; + int rc = 0; RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags); req.cos0 = 
cpu_to_le16(cids[0]); req.cos1 = cpu_to_le16(cids[1]); - resp = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0); - if (!resp) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS send failed"); - return -EINVAL; - } - - tleft = bnxt_qplib_rcfw_block_for_resp(rcfw, le16_to_cpu(req.cookie)); - if (!tleft) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS timed out"); - return -ETIMEDOUT; - } - - if (resp->status || - le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) { - dev_err(&res->pdev->dev, "QPLIB: SP: MAP_TC2COS failed "); - dev_err(&res->pdev->dev, - "QPLIB: with status 0x%x cmdq 0x%x resp 0x%x", - resp->status, le16_to_cpu(req.cookie), - le16_to_cpu(resp->cookie)); - return -EINVAL; - } - + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, NULL, 0); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 1442a617e968..a543f959098b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -40,6 +40,8 @@ #ifndef __BNXT_QPLIB_SP_H__ #define __BNXT_QPLIB_SP_H__ +#define BNXT_QPLIB_RESERVED_QP_WRS 128 + struct bnxt_qplib_dev_attr { char fw_ver[32]; u16 max_sgid; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b6fe45924c6e..0910faf3587b 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); release_ep_resources(ep); + kfree_skb(skb); return 0; } @@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); c4iw_put_ep(&ep->parent_ep->com); release_ep_resources(ep); + kfree_skb(skb); return 0; } @@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb) pr_debug("%s rdev %p\n", __func__, rdev); req->cmd = CPL_ABORT_NO_RST; + skb_get(skb); ret = c4iw_ofld_send(rdev, skb); if (ret) { __state_set(&ep->com, DEAD); queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); - } + } else + kfree_skb(skb); } static int send_flowc(struct c4iw_ep *ep) @@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) goto reject; } - hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + + hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + + sizeof(struct tcphdr) + ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 
12 : 0); if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) child_ep->mtu = peer_mss + hdrs; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 329fb65e8fb0..ae0b79aeea2e 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, kfree(entry); } - list_for_each_safe(pos, nxt, &uctx->qpids) { + list_for_each_safe(pos, nxt, &uctx->cqids) { entry = list_entry(pos, struct c4iw_qid_list, entry); list_del_init(&entry->entry); kfree(entry); @@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); if (!rdev->free_workq) { err = -ENOMEM; - goto err_free_status_page; + goto err_free_status_page_and_wr_log; } rdev->status_page->db_off = 0; return 0; -err_free_status_page: +err_free_status_page_and_wr_log: + if (c4iw_wr_log && rdev->wr_log) + kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); destroy_ocqp_pool: c4iw_ocqp_pool_destroy(rdev); @@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) { destroy_workqueue(rdev->free_workq); kfree(rdev->wr_log); + c4iw_release_dev_ucontext(rdev, &rdev->uctx); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); + c4iw_ocqp_pool_destroy(rdev); c4iw_destroy_resource(&rdev->resource); } @@ -971,7 +975,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp->rdev.lldi.sge_egrstatuspagesize); devp->rdev.hw_queue.t4_eq_status_entries = - devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; + devp->rdev.lldi.sge_egrstatuspagesize / 64; devp->rdev.hw_queue.t4_max_eq_size = 65520; devp->rdev.hw_queue.t4_max_iq_size = 65520; devp->rdev.hw_queue.t4_max_rq_size = 8192 - diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 5d6b1eeaa9a0..2ba00b89df6a 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd) } } -static void write_global_credit(struct hfi1_devdata *dd, - u8 vau, u16 total, u16 shared) +/* + * Set up allocation unit value. + */ +void set_up_vau(struct hfi1_devdata *dd, u8 vau) { - write_csr(dd, SEND_CM_GLOBAL_CREDIT, - ((u64)total << - SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | - ((u64)shared << - SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | - ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); + + /* do not modify other values in the register */ + reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; + reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); }
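The point of the new helper is visible just above: instead of write_global_credit() rewriting the whole CSR, set_up_vau() read-modify-writes SEND_CM_GLOBAL_CREDIT, so the AU field changes without clobbering the total and shared credit limits (set_up_vl15() below does the mirror-image update on the limit fields). The SMASK is simply the field mask pre-shifted into position (0x7ull << 16 == 0x70000ull, per the chip_registers.h hunk below). A standalone sketch of the same update, using the register's documented reset value purely as example input:

	#include <stdint.h>
	#include <stdio.h>

	#define AU_SHIFT 16
	#define AU_SMASK 0x70000ull	/* SEND_CM_GLOBAL_CREDIT_AU_SMASK */

	int main(void)
	{
		/* SEND_CM_GLOBAL_CREDIT_RESETCSR: AU field currently 3 */
		uint64_t reg = 0x0000094000030000ull;
		uint8_t vau = 2;

		reg &= ~AU_SMASK;			/* clear only the AU field */
		reg |= (uint64_t)vau << AU_SHIFT;	/* splice in the new AU */

		/* prints 0x94000020000: credit limits untouched, AU now 2 */
		printf("0x%llx\n", (unsigned long long)reg);
		return 0;
	}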
/* * Set up initial VL15 credits of the remote. Assumes the rest of - * the CM credit registers are zero from a previous global or credit reset . + * the CM credit registers are zero from a previous global or credit reset. + * Shared limit for VL15 will always be 0. */ -void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) +void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) { - /* leave shared count at zero for both global and VL15 */ - write_global_credit(dd, vau, vl15buf, 0); + u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); + + /* set initial values for total and shared credit limit */ + reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | + SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); + + /* + * Set total limit to be equal to VL15 credits. + * Leave shared limit at 0. + */ + reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; + write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); @@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd) for (i = 0; i < TXE_NUM_DATA_VL; i++) write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); write_csr(dd, SEND_CM_CREDIT_VL15, 0); - write_global_credit(dd, 0, 0, 0); + write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); /* reset the CM block */ pio_send_control(dd, PSC_CM_RESET); + /* reset cached value */ + dd->vl15buf_cached = 0; } /* convert a vCU to a CU */ @@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work) { struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, link_up_work); + struct hfi1_devdata *dd = ppd->dd; + set_link_state(ppd, HLS_UP_INIT); /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ - read_ltp_rtt(ppd->dd); + read_ltp_rtt(dd); /* * OPA specifies that certain counters are cleared on a transition * to link up, so do that. */ - clear_linkup_counters(ppd->dd); + clear_linkup_counters(dd); /* * And (re)set link up default values. */ set_linkup_defaults(ppd); + /* + * Set VL15 credits. Use cached value from verify cap interrupt. + * In case of quick linkup or simulator, vl15 value will be set by + * handle_linkup_change. VerifyCap interrupt handler will not be + * called in those scenarios. + */ + if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) + set_up_vl15(dd, dd->vl15buf_cached); + /* enforce link speed enabled */ if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { /* oops - current speed is not enabled, bounce */ - dd_dev_err(ppd->dd, + dd_dev_err(dd, "Link speed active 0x%x is outside enabled 0x%x, downing link\n", ppd->link_speed_active, ppd->link_speed_enabled); set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, @@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work) */ if (vau == 0) vau = 1; - set_up_vl15(dd, vau, vl15buf); + set_up_vau(dd, vau); + + /* + * Set VL15 credits to 0 in global credit register. Cache remote VL15 + * credits value and wait for link-up interrupt to set it. 
+ */ + set_up_vl15(dd, 0); + dd->vl15buf_cached = vl15buf; /* set up the LCB CRC mode */ crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index 5bfa839d1c48..793514f1d15f 100644 --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h @@ -839,7 +839,9 @@ #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) +#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 +#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0 diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index da322e6668cc..414a04a481c2 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1045,6 +1045,14 @@ struct hfi1_devdata { /* initial vl15 credits to use */ u16 vl15_init; + /* + * Cached value for vl15buf, read during verify cap interrupt. VL15 + * credits are to be kept at 0 and set when handling the link-up + * interrupt. This removes the possibility of receiving VL15 MAD + * packets before this HFI is ready. + */ + u16 vl15buf_cached; + /* Misc small ints */ u8 n_krcv_queues; u8 qos_shift; @@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode); int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); -void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); +void set_up_vau(struct hfi1_devdata *dd, u8 vau); +void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf); void reset_link_credits(struct hfi1_devdata *dd); void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c index ba265d0ae93b..04a5082d5ac5 100644 --- a/drivers/infiniband/hw/hfi1/intr.c +++ b/drivers/infiniband/hw/hfi1/intr.c @@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) * the remote values. Both sides must be using the values. */ if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { - set_up_vl15(dd, dd->vau, dd->vl15_init); + set_up_vau(dd, dd->vau); + set_up_vl15(dd, dd->vl15_init); assign_remote_cm_au_table(dd, dd->vcu); } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 93faf86d54b6..6a9f6f9819e1 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) /* * Save BARs and command to rewrite after device reset. 
*/ - dd->pcibar0 = addr; - dd->pcibar1 = addr >> 32; + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0); + pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1); pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 069bdaf061ab..1080778a1f7c 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -2159,8 +2159,11 @@ send_last: ret = hfi1_rvt_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; - if (!ret) + if (!ret) { + /* peer will send again */ + rvt_put_ss(&qp->r_sge); goto rnr_nak; + } wc.ex.imm_data = ohdr->u.rc.imm_data; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 50d140d25e38..2f3bbcac1e34 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = { }; static struct attribute *port_cc_default_attributes[] = { - &cc_prescan_attr.attr + &cc_prescan_attr.attr, + NULL }; static struct kobj_type port_cc_ktype = { diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index f3bc01bce483..6ae98aa7f74e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node, } ctrl_ird |= IETF_PEER_TO_PEER; - ctrl_ird |= IETF_FLPDU_ZERO_LEN; switch (mpa_key) { case MPA_KEY_REQUEST: @@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node, } else { type = I40IW_CM_EVENT_CONNECTED; cm_node->state = I40IW_CM_STATE_OFFLOADED; - i40iw_send_ack(cm_node); } + i40iw_send_ack(cm_node); break; default: pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index f82483b3d1e7..a027e2072477 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa struct i40iw_sc_dev *dev = vsi->dev; struct i40iw_sc_qp *qp = NULL; bool qs_handle_change = false; - bool mss_change = false; unsigned long flags; u16 qs_handle; int i; - if (vsi->mss != l2params->mss) { - mss_change = true; - vsi->mss = l2params->mss; - } + vsi->mss = l2params->mss; i40iw_fill_qos_list(l2params->qs_handle_list); for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { qs_handle = l2params->qs_handle_list[i]; if (vsi->qos[i].qs_handle != qs_handle) qs_handle_change = true; - else if (!mss_change) - continue; /* no MSS nor qs handle change */ spin_lock_irqsave(&vsi->qos[i].lock, flags); qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); while (qp) { - if (mss_change) - i40iw_qp_mss_modify(dev, qp); if (qs_handle_change) { qp->qs_handle = qs_handle; /* issue cqp suspend command */ @@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( set_64bit_val(wqe, 8, - LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); @@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | LS_64(info->force_loopback, 
I40IW_CQPSQ_QP_FORCELOOPBACK) | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | - LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 2728af3103ce..a3f18a22f5ed 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); if (status) - goto exit; + goto error; info.fpm_query_buf_pa = mem.pa; info.fpm_query_buf = mem.va; status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); if (status) - goto exit; + goto error; info.fpm_commit_buf_pa = mem.pa; info.fpm_commit_buf = mem.va; info.hmc_fn_id = ldev->fid; @@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, info.exception_lan_queue = 1; info.vchnl_send = i40iw_virtchnl_send; status = i40iw_device_init(&iwdev->sc_dev, &info); -exit: - if (status) { - kfree(iwdev->hmc_info_mem); - iwdev->hmc_info_mem = NULL; - } + + if (status) + goto error; memset(&vsi_info, 0, sizeof(vsi_info)); vsi_info.dev = &iwdev->sc_dev; vsi_info.back_vsi = (void *)iwdev; @@ -1362,11 +1360,19 @@ exit: memset(&stats_info, 0, sizeof(stats_info)); stats_info.fcn_id = ldev->fid; stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); + if (!stats_info.pestat) { + status = I40IW_ERR_NO_MEMORY; + goto error; + } stats_info.stats_initialize = true; if (stats_info.pestat) i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); } return status; +error: + kfree(iwdev->hmc_info_mem); + iwdev->hmc_info_mem = NULL; + return status; } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h index aa66c1c63dfa..f27be3e7830b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h +++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h @@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); void *i40iw_remove_head(struct list_head *list); void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); -void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index 7b76259752b0..959ec81fba99 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -541,7 +541,6 @@ struct i40iw_create_qp_info { struct i40iw_modify_qp_info { u64 rx_win0; u64 rx_win1; - u16 new_mss; u8 next_iwarp_state; u8 termlen; bool ord_valid; @@ -554,7 +553,6 @@ struct i40iw_modify_qp_info { bool dont_send_term; bool dont_send_fin; bool cached_var_valid; - bool mss_change; bool force_loopback; }; diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 409a3781e735..56d986924a4c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -757,23 +757,6 @@ void 
i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b } /** - * i40iw_qp_mss_modify - modify mss for qp - * @dev: hardware control device structure - * @qp: hardware control qp - */ -void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) -{ - struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; - struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; - struct i40iw_modify_qp_info info; - - memset(&info, 0, sizeof(info)); - info.mss_change = true; - info.new_mss = qp->vsi->mss; - i40iw_hw_modify_qp(iwdev, iwqp, &info, false); -} - -/** * i40iw_term_modify_qp - modify qp for term message * @qp: hardware control qp * @next_state: qp's next state diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c index f4d13683a403..48fd327f876b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c @@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, if (!dev->vchnl_up) return I40IW_ERR_NOT_READY; if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { - if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); - else - vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); + vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); return I40IW_SUCCESS; } for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index b4694717f6f3..21d31cb1325f 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc if (port < 0) return; ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); + ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); mlx4_ib_query_ah(&ah.ibah, &ah_attr); if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d45772da0963..9ecc089d4529 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2979,6 +2979,18 @@ error_0: return ret; } +static u8 mlx5_get_umr_fence(u8 umr_fence_cap) +{ + switch (umr_fence_cap) { + case MLX5_CAP_UMR_FENCE_NONE: + return MLX5_FENCE_MODE_NONE; + case MLX5_CAP_UMR_FENCE_SMALL: + return MLX5_FENCE_MODE_INITIATOR_SMALL; + default: + return MLX5_FENCE_MODE_STRONG_ORDERING; + } +} + static int create_dev_resources(struct mlx5_ib_resources *devr) { struct ib_srq_init_attr attr; @@ -3680,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_port_immutable = mlx5_port_immutable; dev->ib_dev.get_dev_fw_str = get_dev_fw_str; - dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; - dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; + if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) { + dev->ib_dev.alloc_rdma_netdev = mlx5_ib_alloc_rdma_netdev; + dev->ib_dev.free_rdma_netdev = mlx5_ib_free_rdma_netdev; + } if (mlx5_core_is_pf(mdev)) { dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; @@ -3693,6 +3707,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) mlx5_ib_internal_fill_odp_caps(dev); + dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); + if (MLX5_CAP_GEN(mdev, imaicl)) { 
dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 38c877bc45e5..bdcf25410c99 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -349,7 +349,7 @@ struct mlx5_ib_qp { struct mlx5_ib_wq rq; u8 sq_signal_bits; - u8 fm_cache; + u8 next_fence; struct mlx5_ib_wq sq; /* serialize qp state modifications @@ -654,6 +654,7 @@ struct mlx5_ib_dev { struct mlx5_ib_port *port; struct mlx5_sq_bfreg bfreg; struct mlx5_sq_bfreg fp_bfreg; + u8 umr_fence; }; static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 93959e1e43a3..ebb6768684de 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) } } -static u8 get_fence(u8 fence, struct ib_send_wr *wr) -{ - if (unlikely(wr->opcode == IB_WR_LOCAL_INV && - wr->send_flags & IB_SEND_FENCE)) - return MLX5_FENCE_MODE_STRONG_ORDERING; - - if (unlikely(fence)) { - if (wr->send_flags & IB_SEND_FENCE) - return MLX5_FENCE_MODE_SMALL_AND_FENCE; - else - return fence; - } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { - return MLX5_FENCE_MODE_FENCE; - } - - return 0; -} - static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, struct mlx5_wqe_ctrl_seg **ctrl, struct ib_send_wr *wr, unsigned *idx, @@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, static void finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl, u8 size, unsigned idx, u64 wr_id, - int nreq, u8 fence, u8 next_fence, - u32 mlx5_opcode) + int nreq, u8 fence, u32 mlx5_opcode) { u8 opmod = 0; @@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp, mlx5_opcode | ((u32)opmod << 24)); ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); ctrl->fm_ce_se |= fence; - qp->fm_cache = next_fence; if (unlikely(qp->wq_sig)) ctrl->signature = wq_sig(ctrl); @@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - fence = qp->fm_cache; num_sge = wr->num_sge; if (unlikely(num_sge > qp->sq.max_gs)) { mlx5_ib_warn(dev, "\n"); @@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } + if (wr->opcode == IB_WR_LOCAL_INV || + wr->opcode == IB_WR_REG_MR) { + fence = dev->umr_fence; + next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; + } else if (wr->send_flags & IB_SEND_FENCE) { + if (qp->next_fence) + fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; + else + fence = MLX5_FENCE_MODE_FENCE; + } else { + fence = qp->next_fence; + } + switch (ibqp->qp_type) { case IB_QPT_XRC_INI: xrc = seg; @@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; case IB_WR_LOCAL_INV: - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); set_linv_wr(qp, &seg, &size); @@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, break; case IB_WR_REG_MR: - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; qp->sq.wr_data[idx] = IB_WR_REG_MR; ctrl->imm = cpu_to_be32(reg_wr(wr)->key); err = set_reg_wr(qp, reg_wr(wr), &seg, &size); @@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, - nreq, 
get_fence(fence, wr), - next_fence, MLX5_OPCODE_UMR); + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, + fence, MLX5_OPCODE_UMR); /* * SET_PSV WQEs are not signaled and solicited * on error @@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, - nreq, get_fence(fence, wr), - next_fence, MLX5_OPCODE_SET_PSV); + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, + fence, MLX5_OPCODE_SET_PSV); err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); if (err) { @@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, mr->sig->psv_wire.psv_idx, &seg, &size); @@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, goto out; } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, - nreq, get_fence(fence, wr), - next_fence, MLX5_OPCODE_SET_PSV); + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, + fence, MLX5_OPCODE_SET_PSV); + qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; num_sge = 0; goto skip_psv; @@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } } - finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, - get_fence(fence, wr), next_fence, + qp->next_fence = next_fence; + finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, mlx5_ib_opcode[wr->opcode]); skip_psv: if (0) diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index fb983df7c157..30b256a2c54e 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node, ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; } ctrl_ird |= IETF_PEER_TO_PEER; - ctrl_ird |= IETF_FLPDU_ZERO_LEN; switch (mpa_key) { case MPA_KEY_REQUEST: @@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) type = NES_CM_EVENT_CONNECTED; cm_node->state = NES_CM_STATE_TSA; } - + send_ack(cm_node, NULL); break; default: WARN_ON(1); diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index aa08c76a4245..d961f79b317c 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h @@ -58,7 +58,10 @@ #define QEDR_MSG_QP " QP" #define QEDR_MSG_GSI " GSI" -#define QEDR_CQ_MAGIC_NUMBER (0x11223344) +#define QEDR_CQ_MAGIC_NUMBER (0x11223344) + +#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE) +#define FW_PAGE_SHIFT (12) struct qedr_dev; diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 3d7705cec770..d86dbe814d98 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c @@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev, return rc; } - vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); - if (vlan_id < VLAN_CFI_MASK) - has_vlan = true; - if (sgid_attr.ndev) + if (sgid_attr.ndev) { + vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); + if (vlan_id < VLAN_CFI_MASK) + has_vlan = true; + dev_put(sgid_attr.ndev); + } if (!memcmp(&sgid, &zgid, sizeof(sgid))) { DP_ERR(dev, "gsi post send: GID not found GID index %d\n", diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 17685cfea6a2..d6723c365c7f 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -653,14 +653,15 @@ static int 
qedr_prepare_pbl_tbl(struct qedr_dev *dev, static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, struct qedr_pbl *pbl, - struct qedr_pbl_info *pbl_info) + struct qedr_pbl_info *pbl_info, u32 pg_shift) { int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0; + u32 fw_pg_cnt, fw_pg_per_umem_pg; struct qedr_pbl *pbl_tbl; struct scatterlist *sg; struct regpair *pbe; + u64 pg_addr; int entry; - u32 addr; if (!pbl_info->num_pbes) return; @@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem, shift = umem->page_shift; + fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift); + for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { pages = sg_dma_len(sg) >> shift; + pg_addr = sg_dma_address(sg); for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) { - /* store the page address in pbe */ - pbe->lo = cpu_to_le32(sg_dma_address(sg) + - (pg_cnt << shift)); - addr = upper_32_bits(sg_dma_address(sg) + - (pg_cnt << shift)); - pbe->hi = cpu_to_le32(addr); - pbe_cnt++; - total_num_pbes++; - pbe++; - - if (total_num_pbes == pbl_info->num_pbes) - return; - - /* If the given pbl is full storing the pbes, - * move to next pbl. - */ - if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) { - pbl_tbl++; - pbe = (struct regpair *)pbl_tbl->va; - pbe_cnt = 0; + for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) { + pbe->lo = cpu_to_le32(pg_addr); + pbe->hi = cpu_to_le32(upper_32_bits(pg_addr)); + + pg_addr += BIT(pg_shift); + pbe_cnt++; + total_num_pbes++; + pbe++; + + if (total_num_pbes == pbl_info->num_pbes) + return; + + /* If the given pbl is full storing the pbes, + * move to next pbl. + */ + if (pbe_cnt == + (pbl_info->pbl_size / sizeof(u64))) { + pbl_tbl++; + pbe = (struct regpair *)pbl_tbl->va; + pbe_cnt = 0; + } + + fw_pg_cnt++; } } } @@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, u64 buf_addr, size_t buf_len, int access, int dmasync) { - int page_cnt; + u32 fw_pages; int rc; q->buf_addr = buf_addr; @@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, return PTR_ERR(q->umem); } - page_cnt = ib_umem_page_count(q->umem); - rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0); + fw_pages = ib_umem_page_count(q->umem) << + (q->umem->page_shift - FW_PAGE_SHIFT); + + rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); if (rc) goto err0; @@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx, goto err0; } - qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info); + qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, + FW_PAGE_SHIFT); return 0; @@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, goto err1; qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table, - &mr->info.pbl_info); + &mr->info.pbl_info, mr->umem->page_shift); rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid); if (rc) { @@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp, case IB_WC_REG_MR: qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; break; + case IB_WC_RDMA_READ: + case IB_WC_SEND: + wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; + break; default: break; } diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index fc8b88514da5..4ddbcac5eabe 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -1956,8 +1956,10 @@ send_last: ret = qib_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; - if (!ret) + if (!ret) { 
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index fc8b88514da5..4ddbcac5eabe 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1956,8 +1956,10 @@ send_last:
 		ret = qib_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		hdrsize += 4;
 		wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ecdba2fce083..1ac5b8551a4d 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -68,6 +68,7 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
 			    u32 crc, void *next, size_t len)
 {
+	u32 retval;
 	int err;
 
 	SHASH_DESC_ON_STACK(shash, rxe->tfm);
@@ -81,7 +82,9 @@ static inline u32 rxe_crc32(struct rxe_dev *rxe,
 		return crc32_le(crc, next, len);
 	}
 
-	return *(u32 *)shash_desc_ctx(shash);
+	retval = *(u32 *)shash_desc_ctx(shash);
+	barrier_data(shash_desc_ctx(shash));
+	return retval;
 }
 
 int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 83d709e74dfb..073e66783f1d 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 
 	sge = ibwr->sg_list;
 	for (i = 0; i < num_sge; i++, sge++) {
-		if (qp->is_user && copy_from_user(p, (__user void *)
-				    (uintptr_t)sge->addr, sge->length))
-			return -EFAULT;
-
-		else if (!qp->is_user)
-			memcpy(p, (void *)(uintptr_t)sge->addr,
-			       sge->length);
+		memcpy(p, (void *)(uintptr_t)sge->addr,
+		       sge->length);
 
 		p += sge->length;
 	}
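[Editor's note] The rxe.h hunk changes rxe_crc32() to copy the digest out of the on-stack shash descriptor first and only then call barrier_data() on the context, so the compiler cannot elide or reorder its accesses to that stack buffer around the return; lib/libcrc32c.c uses the same read-then-barrier pattern. A userspace sketch of the idiom follows, assuming the kernel's definition of barrier_data() (an empty asm taking the pointer as input with a "memory" clobber); the demo_* names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* mirrors the kernel's barrier_data(): an empty asm that takes the
 * pointer as an input and clobbers memory, forcing the compiler to
 * treat the pointed-to stack buffer as live at this point */
#define demo_barrier_data(ptr) \
	__asm__ __volatile__("" : : "r" (ptr) : "memory")

static uint32_t demo_read_digest(void)
{
	/* stand-in for the shash descriptor context on the stack */
	uint8_t ctx[4] = { 0x78, 0x56, 0x34, 0x12 };
	uint32_t retval;

	memcpy(&retval, ctx, sizeof(retval));	/* copy the digest out... */
	demo_barrier_data(ctx);			/* ...then pin ctx as used */
	return retval;
}

int main(void)
{
	/* prints 0x12345678 on a little-endian machine */
	printf("digest: 0x%08x\n", demo_read_digest());
	return 0;
}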
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 874b24366e4d..7871379342f4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed)
 static int ipoib_get_link_ksettings(struct net_device *netdev,
 				    struct ethtool_link_ksettings *cmd)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(netdev);
+	struct ipoib_dev_priv *priv = ipoib_priv(netdev);
 	struct ib_port_attr attr;
 	int ret, speed, width;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 0060b2f9f659..efe7402f4885 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -863,7 +863,6 @@ dev_stop:
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-	napi_enable(&priv->napi);
 	ipoib_ib_dev_stop(dev);
 	return -1;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2869d1adb1de..1015a63de6ae 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1590,12 +1590,14 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	wait_for_completion(&priv->ntbl.deleted);
 }
 
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 
 	ipoib_transport_dev_cleanup(dev);
 
+	netif_napi_del(&priv->napi);
+
 	ipoib_cm_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@ out_rx_ring_cleanup:
 	kfree(priv->rx_ring);
 
 out:
+	netif_napi_del(&priv->napi);
 	return -ENOMEM;
 }
 
@@ -2237,6 +2240,7 @@ event_failed:
 
 device_init_failed:
 	free_netdev(priv->dev);
+	kfree(priv);
 
 alloc_mem_failed:
 	return ERR_PTR(result);
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)
 
 static void ipoib_remove_one(struct ib_device *device, void *client_data)
 {
-	struct ipoib_dev_priv *priv, *tmp;
+	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
 	struct list_head *dev_list = client_data;
 
 	if (!dev_list)
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 		flush_workqueue(priv->wq);
 
 		unregister_netdev(priv->dev);
-		free_netdev(priv->dev);
+		if (device->free_rdma_netdev)
+			device->free_rdma_netdev(priv->dev);
+		else
+			free_netdev(priv->dev);
+
+		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+			kfree(cpriv);
+		kfree(priv);
 	}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 36dc4fcaa3cd..081b33deff1b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
 	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
 	if (!priv)
 		return -ENOMEM;
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
 	down_write(&ppriv->vlan_rwsem);
 
 	/*
@@ -167,8 +167,10 @@ out:
 
 	rtnl_unlock();
 
-	if (result)
+	if (result) {
 		free_netdev(priv->dev);
+		kfree(priv);
+	}
 
 	return result;
 }
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
 	if (dev) {
 		free_netdev(dev);
+		kfree(priv);
 		return 0;
 	}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index def723a5df29..2354c742caa1 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
 	ch->path.sgid = target->sgid;
 	ch->path.dgid = target->orig_dgid;
 	ch->path.pkey = target->pkey;
-	sa_path_set_service_id(&ch->path, target->service_id);
+	ch->path.service_id = target->service_id;
 
 	return 0;
 }
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	return 0;
 
 err_qp:
-	srp_destroy_qp(ch, qp);
+	ib_destroy_qp(qp);
 
 err_send_cq:
 	ib_free_cq(send_cq);
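[Editor's note] In the ipoib_vlan.c hunk, ipoib_vlan_add() now takes the RTNL via rtnl_trylock() before calling ipoib_intf_alloc(), so a contended lock can bail out with restart_syscall() while there is still nothing to unwind, and the error path frees both the netdev and the separately allocated priv. A small pthread-based sketch of that "lock first, allocate second" ordering follows; the mutex stands in for the RTNL and the demo_* names are my own, not kernel API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_ERESTARTSYS 512	/* mirrors the kernel-internal restart code */

static pthread_mutex_t demo_rtnl = PTHREAD_MUTEX_INITIALIZER;

/* sketch of the reordered ipoib_vlan_add() flow: lock, then allocate */
static int demo_vlan_add(void)
{
	char *priv;

	/* failing the trylock costs nothing: no allocation to undo yet */
	if (pthread_mutex_trylock(&demo_rtnl))
		return -DEMO_ERESTARTSYS;	/* restart_syscall() analogue */

	priv = malloc(64);			/* ipoib_intf_alloc() analogue */
	if (!priv) {
		pthread_mutex_unlock(&demo_rtnl);
		return -ENOMEM;
	}

	/* ... configure the child interface under the lock ... */

	free(priv);
	pthread_mutex_unlock(&demo_rtnl);
	return 0;
}

int main(void)
{
	printf("demo_vlan_add: %d\n", demo_vlan_add());
	return 0;
}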