Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 21a428a0 authored by Leon Romanovsky's avatar Leon Romanovsky Committed by Jason Gunthorpe
Browse files

RDMA: Handle PD allocations by IB/core



The PD allocations in IB/core allow us to simplify drivers and their
error flows in their .alloc_pd() paths. The changes in .alloc_pd() go hand
in hand with the relevant update in .dealloc_pd().

We take this opportunity to convert .dealloc_pd() so that it cannot fail,
as was suggested a long time ago; such failures do not happen in practice,
as we have never seen the corresponding WARN_ON print.

Signed-off-by: default avatarLeon Romanovsky <leonro@mellanox.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent 30471d4b
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -1319,6 +1319,8 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
	SET_DEVICE_OP(dev_ops, set_vf_guid);
	SET_DEVICE_OP(dev_ops, set_vf_link_state);
	SET_DEVICE_OP(dev_ops, unmap_fmr);

	SET_OBJ_SIZE(dev_ops, ib_pd);
}
EXPORT_SYMBOL(ib_set_device_ops);

+10 −5
Original line number Diff line number Diff line
@@ -407,9 +407,9 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	pd = ib_dev->ops.alloc_pd(ib_dev, uobj->context, &attrs->driver_udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
	pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto err;
	}

@@ -417,11 +417,15 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
	pd->uobject = uobj;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);
	pd->res.type = RDMA_RESTRACK_PD;

	ret = ib_dev->ops.alloc_pd(pd, uobj->context, &attrs->driver_udata);
	if (ret)
		goto err_alloc;

	uobj->object = pd;
	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;
	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_uadd(&pd->res);

	ret = uverbs_response(attrs, &resp, sizeof(resp));
@@ -432,7 +436,8 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)

err_copy:
	ib_dealloc_pd(pd);

err_alloc:
	kfree(pd);
err:
	uobj_alloc_abort(uobj);
	return ret;
+1 −1
Original line number Diff line number Diff line
@@ -188,7 +188,7 @@ static int uverbs_free_pd(struct ib_uobject *uobject,
	if (ret)
		return ret;

	ib_dealloc_pd((struct ib_pd *)uobject->object);
	ib_dealloc_pd(pd);
	return 0;
}

+16 −11
Original line number Diff line number Diff line
@@ -254,10 +254,11 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
{
	struct ib_pd *pd;
	int mr_access_flags = 0;
	int ret;

	pd = device->ops.alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;
	pd = rdma_zalloc_drv_obj(device, ib_pd);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->device = device;
	pd->uobject = NULL;
@@ -265,6 +266,16 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
	atomic_set(&pd->usecnt, 0);
	pd->flags = flags;

	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_set_task(&pd->res, caller);

	ret = device->ops.alloc_pd(pd, NULL, NULL);
	if (ret) {
		kfree(pd);
		return ERR_PTR(ret);
	}
	rdma_restrack_kadd(&pd->res);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
@@ -275,10 +286,6 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	pd->res.type = RDMA_RESTRACK_PD;
	rdma_restrack_set_task(&pd->res, caller);
	rdma_restrack_kadd(&pd->res);

	if (mr_access_flags) {
		struct ib_mr *mr;

@@ -329,10 +336,8 @@ void ib_dealloc_pd(struct ib_pd *pd)
	WARN_ON(atomic_read(&pd->usecnt));

	rdma_restrack_del(&pd->res);
	/* Making delalloc_pd a void return is a WIP, no driver should return
	   an error here. */
	ret = pd->device->ops.dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
	pd->device->ops.dealloc_pd(pd);
	kfree(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

+12 −25
Original line number Diff line number Diff line
@@ -563,41 +563,29 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
}

/* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
void bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	int rc;

	bnxt_re_destroy_fence_mr(pd);

	if (pd->qplib_pd.id) {
		rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
					   &rdev->qplib_res.pd_tbl,
	if (pd->qplib_pd.id)
		bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
				      &pd->qplib_pd);
		if (rc)
			dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD");
	}

	kfree(pd);
	return 0;
}

struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
			       struct ib_ucontext *ucontext,
int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *ucontext,
		     struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
	struct bnxt_re_ucontext *ucntx = container_of(ucontext,
						      struct bnxt_re_ucontext,
						      ib_uctx);
	struct bnxt_re_pd *pd;
	struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
	int rc;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->rdev = rdev;
	if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
		dev_err(rdev_to_dev(rdev), "Failed to allocate HW PD");
@@ -637,13 +625,12 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
		if (bnxt_re_create_fence_mr(pd))
			dev_warn(rdev_to_dev(rdev),
				 "Failed to create Fence-MR\n");
	return &pd->ib_pd;
	return 0;
dbfail:
	(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
	bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
			      &pd->qplib_pd);
fail:
	kfree(pd);
	return ERR_PTR(rc);
	return rc;
}

/* Address Handles */
Loading