On Wed, Dec 21, 2016 at 03:41:59AM -0800, Selvin Xavier wrote:
> Implements support for create_cq, destroy_cq and req_notify_cq
> verbs.
>
> v3: Code cleanup based on errors reported by sparse on endianness check.
>     Removes unwanted macros.
>
> Signed-off-by: Eddie Wai <eddie....@broadcom.com>
> Signed-off-by: Devesh Sharma <devesh.sha...@broadcom.com>
> Signed-off-by: Somnath Kotur <somnath.ko...@broadcom.com>
> Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapa...@broadcom.com>
> Signed-off-by: Selvin Xavier <selvin.xav...@broadcom.com>
> ---
>  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 146 +++++++++++++++++++++++++
>  drivers/infiniband/hw/bnxt_re/ib_verbs.h |  19 ++++
>  drivers/infiniband/hw/bnxt_re/main.c     |   4 +
>  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 181 +++++++++++++++++++++++++++++++
>  drivers/infiniband/hw/bnxt_re/qplib_fp.h |  50 +++++++++
>  include/uapi/rdma/bnxt_re-abi.h          |  12 ++
>  6 files changed, 412 insertions(+)
>
> diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> index b09c2cb..e12e0c2 100644
> --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
> @@ -492,6 +492,152 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
>       return ERR_PTR(rc);
>  }
>
> +/* Completion Queues */
> +int bnxt_re_destroy_cq(struct ib_cq *ib_cq)
> +{
> +     struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
> +     struct bnxt_re_dev *rdev = cq->rdev;
> +     int rc;
> +
> +     rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
> +     if (rc) {
> +             dev_err(rdev_to_dev(rdev), "Failed to destroy HW CQ");
> +             return rc;
> +     }
> +     if (cq->umem && !IS_ERR(cq->umem))
> +             ib_umem_release(cq->umem);
> +
> +     if (cq) {
> +             kfree(cq->cql);
> +             kfree(cq);
> +     }
> +     atomic_dec(&rdev->cq_count);
> +     rdev->nq.budget--;
> +     return 0;
> +}
> +
> +struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
> +                             const struct ib_cq_init_attr *attr,
> +                             struct ib_ucontext *context,
> +                             struct ib_udata *udata)
> +{
> +     struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
> +     struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
> +     struct bnxt_re_cq *cq = NULL;
> +     int rc, entries;
> +     int cqe = attr->cqe;
> +
> +     /* Validate CQ fields */
> +     if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
> +             dev_err(rdev_to_dev(rdev), "Failed to create CQ -max exceeded");
> +             return ERR_PTR(-EINVAL);
> +     }
> +     cq = kzalloc(sizeof(*cq), GFP_KERNEL);
> +     if (!cq)
> +             return ERR_PTR(-ENOMEM);
> +
> +     cq->rdev = rdev;
> +     cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
> +
> +     entries = roundup_pow_of_two(cqe + 1);
> +     if (entries > dev_attr->max_cq_wqes + 1)
> +             entries = dev_attr->max_cq_wqes + 1;
> +
> +     if (context) {
> +             struct bnxt_re_cq_req req;
> +             struct bnxt_re_ucontext *uctx = container_of
> +                                             (context,
> +                                              struct bnxt_re_ucontext,
> +                                              ib_uctx);
> +             if (ib_copy_from_udata(&req, udata, sizeof(req))) {
> +                     rc = -EFAULT;
> +                     goto fail;
> +             }
> +
> +             cq->umem = ib_umem_get(context, req.cq_va,
> +                                    entries * sizeof(struct cq_base),
> +                                    IB_ACCESS_LOCAL_WRITE, 1);
> +             if (IS_ERR(cq->umem)) {
> +                     rc = PTR_ERR(cq->umem);
> +                     goto fail;
> +             }
> +             cq->qplib_cq.sghead = cq->umem->sg_head.sgl;
> +             cq->qplib_cq.nmap = cq->umem->nmap;
> +             cq->qplib_cq.dpi = uctx->dpi;
> +     } else {
> +             cq->max_cql = entries > MAX_CQL_PER_POLL ? MAX_CQL_PER_POLL :
> +                                     entries;

It is better to use the already existing min() macro here:

cq->max_cql = min(entries, MAX_CQL_PER_POLL);
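
And if the operand types ever differ (say, an int count against a u32 limit), the type-forcing variant can be used instead; a sketch, assuming MAX_CQL_PER_POLL stays a plain integer constant:

	/* clamp the CQ poll list depth to the per-poll maximum */
	cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);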

I'm afraid you can't avoid respinning this series; you still have more than a
month until the merge window.

Can you please remove the useless wrappers and try to reuse the existing kernel macros?

Thanks
