Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 68e326de authored by Leon Romanovsky, committed by Jason Gunthorpe
Browse files

RDMA: Handle SRQ allocations by IB/core



Convert SRQ allocation from drivers to be in the IB/core

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d3456914
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -2224,6 +2224,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)

	SET_OBJ_SIZE(dev_ops, ib_ah);
	SET_OBJ_SIZE(dev_ops, ib_pd);
	SET_OBJ_SIZE(dev_ops, ib_srq);
	SET_OBJ_SIZE(dev_ops, ib_ucontext);
}
EXPORT_SYMBOL(ib_set_device_ops);
+9 −3
Original line number Diff line number Diff line
@@ -3409,9 +3409,9 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->ops.create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
	srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
	if (!srq) {
		ret = -ENOMEM;
		goto err_put;
	}

@@ -3422,6 +3422,10 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	ret = pd->device->ops.create_srq(srq, &attr, udata);
	if (ret)
		goto err_free;

	if (ib_srq_has_cq(cmd->srq_type)) {
		srq->ext.cq       = attr.ext.cq;
		atomic_inc(&attr.ext.cq->usecnt);
@@ -3461,6 +3465,8 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
err_copy:
	ib_destroy_srq_user(srq, &attrs->driver_udata);

err_free:
	kfree(srq);
err_put:
	uobj_put_obj_read(pd);

+38 −40
Original line number Diff line number Diff line
@@ -964,19 +964,21 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;
	int ret;

	if (!pd->device->ops.create_srq)
		return ERR_PTR(-EOPNOTSUPP);

	srq = pd->device->ops.create_srq(pd, srq_init_attr, NULL);
	srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
	if (!srq)
		return ERR_PTR(-ENOMEM);

	if (!IS_ERR(srq)) {
	srq->device = pd->device;
	srq->pd = pd;
		srq->uobject       = NULL;
	srq->event_handler = srq_init_attr->event_handler;
	srq->srq_context = srq_init_attr->srq_context;
	srq->srq_type = srq_init_attr->srq_type;

	if (ib_srq_has_cq(srq->srq_type)) {
		srq->ext.cq = srq_init_attr->ext.cq;
		atomic_inc(&srq->ext.cq->usecnt);
@@ -986,7 +988,16 @@ struct ib_srq *ib_create_srq(struct ib_pd *pd,
		atomic_inc(&srq->ext.xrc.xrcd->usecnt);
	}
	atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);

	ret = pd->device->ops.create_srq(srq, srq_init_attr, NULL);
	if (ret) {
		atomic_dec(&srq->pd->usecnt);
		if (srq->srq_type == IB_SRQT_XRC)
			atomic_dec(&srq->ext.xrc.xrcd->usecnt);
		if (ib_srq_has_cq(srq->srq_type))
			atomic_dec(&srq->ext.cq->usecnt);
		kfree(srq);
		return ERR_PTR(ret);
	}

	return srq;
@@ -1013,32 +1024,19 @@ EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (ib_srq_has_cq(srq_type))
		cq = srq->ext.cq;
	if (srq_type == IB_SRQT_XRC)
		xrcd = srq->ext.xrc.xrcd;
	srq->device->ops.destroy_srq(srq, udata);

	ret = srq->device->ops.destroy_srq(srq, udata);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC)
			atomic_dec(&xrcd->usecnt);
		if (ib_srq_has_cq(srq_type))
			atomic_dec(&cq->usecnt);
	}
	atomic_dec(&srq->pd->usecnt);
	if (srq->srq_type == IB_SRQT_XRC)
		atomic_dec(&srq->ext.xrc.xrcd->usecnt);
	if (ib_srq_has_cq(srq->srq_type))
		atomic_dec(&srq->ext.cq->usecnt);
	kfree(srq);

	return ret;
	return 0;
}
EXPORT_SYMBOL(ib_destroy_srq_user);

+10 −22
Original line number Diff line number Diff line
@@ -1305,30 +1305,22 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
}

/* Shared Receive Queues */
int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
void bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
{
	struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
					       ib_srq);
	struct bnxt_re_dev *rdev = srq->rdev;
	struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
	struct bnxt_qplib_nq *nq = NULL;
	int rc;

	if (qplib_srq->cq)
		nq = qplib_srq->cq->nq;
	rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	if (rc) {
		dev_err(rdev_to_dev(rdev), "Destroy HW SRQ failed!");
		return rc;
	}

	bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
	if (srq->umem)
		ib_umem_release(srq->umem);
	kfree(srq);
	atomic_dec(&rdev->srq_count);
	if (nq)
		nq->budget--;
	return 0;
}

static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
@@ -1362,14 +1354,16 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
	return 0;
}

struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
int bnxt_re_create_srq(struct ib_srq *ib_srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_srq->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_srq *srq;
	struct bnxt_re_srq *srq =
		container_of(ib_srq, struct bnxt_re_srq, ib_srq);
	struct bnxt_qplib_nq *nq = NULL;
	int rc, entries;

@@ -1384,11 +1378,6 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
		goto exit;
	}

	srq = kzalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		rc = -ENOMEM;
		goto exit;
	}
	srq->rdev = rdev;
	srq->qplib_srq.pd = &pd->qplib_pd;
	srq->qplib_srq.dpi = &rdev->dpi_privileged;
@@ -1434,14 +1423,13 @@ struct ib_srq *bnxt_re_create_srq(struct ib_pd *ib_pd,
		nq->budget++;
	atomic_inc(&rdev->srq_count);

	return &srq->ib_srq;
	return 0;

fail:
	if (srq->umem)
		ib_umem_release(srq->umem);
	kfree(srq);
exit:
	return ERR_PTR(rc);
	return rc;
}

int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+5 −5
Original line number Diff line number Diff line
@@ -69,9 +69,9 @@ struct bnxt_re_ah {
};

struct bnxt_re_srq {
	struct ib_srq		ib_srq;
	struct bnxt_re_dev	*rdev;
	u32			srq_limit;
	struct ib_srq		ib_srq;
	struct bnxt_qplib_srq	qplib_srq;
	struct ib_umem		*umem;
	spinlock_t		lock;		/* protect srq */
@@ -170,14 +170,14 @@ int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, u32 flags,
int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
void bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
struct ib_srq *bnxt_re_create_srq(struct ib_pd *pd,
int bnxt_re_create_srq(struct ib_srq *srq,
		       struct ib_srq_init_attr *srq_init_attr,
		       struct ib_udata *udata);
int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
		       enum ib_srq_attr_mask srq_attr_mask,
		       struct ib_udata *udata);
int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
int bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
void bnxt_re_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int bnxt_re_post_srq_recv(struct ib_srq *srq, const struct ib_recv_wr *recv_wr,
			  const struct ib_recv_wr **bad_recv_wr);
struct ib_qp *bnxt_re_create_qp(struct ib_pd *pd,
Loading