
Commit add08d76 authored by Christoph Hellwig, committed by Doug Ledford

IB/mlx5: Convert UMR CQ to new CQ API



Simplifies the code, and makes it fairer to other users by using a
softirq for polling.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Haggai Eran <haggaie@mellanox.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent d2370e0a
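
For background, the "new CQ API" referred to here is the ib_alloc_cq()/ib_cqe infrastructure in drivers/infiniband/core/cq.c: the core owns the poll loop and dispatches each completion to the ib_cqe attached to the work request, and IB_POLL_SOFTIRQ runs that loop from softirq context. In rough outline (a paraphrase for orientation only, not the core's verbatim code; process_cq_sketch is an illustrative name):

/* Rough paraphrase of the core's dispatch loop: the consumer no longer
 * calls ib_poll_cq() or ib_req_notify_cq() itself. */
static int process_cq_sketch(struct ib_cq *cq, int budget)
{
	struct ib_wc wc;
	int completed = 0;

	while (completed < budget && ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.wr_cqe)
			wc.wr_cqe->done(cq, &wc);	/* e.g. mlx5_ib_umr_done() below */
		completed++;
	}

	return completed;
}

Because every consumer of a softirq-polled CQ goes through the same budgeted loop, no single user can monopolize completion processing, which is the fairness the commit message alludes to.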
+3 −7
@@ -1861,7 +1861,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
 	mlx5_ib_destroy_qp(dev->umrc.qp);
-	ib_destroy_cq(dev->umrc.cq);
+	ib_free_cq(dev->umrc.cq);
 	ib_dealloc_pd(dev->umrc.pd);
 }
 
@@ -1876,7 +1876,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 	struct ib_pd *pd;
 	struct ib_cq *cq;
 	struct ib_qp *qp;
-	struct ib_cq_init_attr cq_attr = {};
 	int ret;
 
 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -1893,15 +1892,12 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 		goto error_0;
 	}
 
-	cq_attr.cqe = 128;
-	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
-			  &cq_attr);
+	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 	if (IS_ERR(cq)) {
 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
 		ret = PTR_ERR(cq);
 		goto error_2;
 	}
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 
 	init_attr->send_cq = cq;
 	init_attr->recv_cq = cq;
@@ -1968,7 +1964,7 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 	mlx5_ib_destroy_qp(qp);
 
 error_3:
-	ib_destroy_cq(cq);
+	ib_free_cq(cq);
 
 error_2:
 	ib_dealloc_pd(pd);
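
The two hunks above are the whole driver-visible change on the setup side: the CQ is allocated with ib_alloc_cq() and a polling context instead of ib_create_cq() with a private completion handler, and is released with ib_free_cq(). A minimal sketch of that pairing (setup_umr_cq()/teardown_umr_cq() are hypothetical helpers; the 128-entry depth mirrors the UMR CQ above):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Allocate a CQ that the RDMA core polls from softirq context; no
 * comp_handler and no ib_req_notify_cq() re-arming is needed. */
static struct ib_cq *setup_umr_cq(struct ib_device *ibdev)
{
	return ib_alloc_cq(ibdev, NULL, 128, 0, IB_POLL_SOFTIRQ);
}

/* A CQ from ib_alloc_cq() is paired with ib_free_cq(), not ib_destroy_cq(). */
static void teardown_umr_cq(struct ib_cq *cq)
{
	if (!IS_ERR_OR_NULL(cq))
		ib_free_cq(cq);
}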
+1 −7
@@ -468,16 +468,11 @@ struct mlx5_ib_mw {
 };
 
 struct mlx5_ib_umr_context {
+	struct ib_cqe		cqe;
 	enum ib_wc_status	status;
 	struct completion	done;
 };
 
-static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
-{
-	context->status = -1;
-	init_completion(&context->done);
-}
-
 struct umr_common {
 	struct ib_pd	*pd;
 	struct ib_cq	*cq;
@@ -762,7 +757,6 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status);
 
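The struct ib_cqe member added above is how the new API ties a completion back to its request: instead of smuggling a pointer through the opaque wr_id, the request embeds an ib_cqe and the core invokes its ->done callback, from which the owning context is recovered with container_of(). A small sketch of the pattern in isolation (my_request/my_request_done are illustrative names; the ib_cqe layout is paraphrased from rdma/ib_verbs.h):

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

/* struct ib_cqe (rdma/ib_verbs.h) is essentially just:
 *	struct ib_cqe { void (*done)(struct ib_cq *cq, struct ib_wc *wc); };
 */

struct my_request {			/* illustrative consumer context */
	struct ib_cqe		cqe;	/* handed to the core via wr->wr_cqe */
	enum ib_wc_status	status;
	struct completion	done;
};

static void my_request_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* wc->wr_cqe points at the embedded member, so no
	 * (u64)(unsigned long) wr_id casting is required. */
	struct my_request *req =
		container_of(wc->wr_cqe, struct my_request, cqe);

	req->status = wc->status;
	complete(&req->done);
}

The mr.c hunks below apply exactly this shape to mlx5_ib_umr_context, which is why mlx5_ib_init_umr_context() moves out of the header and next to the new mlx5_ib_umr_done() callback.
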
+23 −26
@@ -836,26 +836,20 @@ static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	return umem;
 }
 
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct mlx5_ib_umr_context *context;
-	struct ib_wc wc;
-	int err;
+	struct mlx5_ib_umr_context *context =
+		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
 
-	while (1) {
-		err = ib_poll_cq(cq, 1, &wc);
-		if (err < 0) {
-			pr_warn("poll cq error %d\n", err);
-			return;
-		}
-		if (err == 0)
-			break;
+	context->status = wc->status;
+	complete(&context->done);
+}
 
-		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
-		context->status = wc.status;
-		complete(&context->done);
-	}
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+	context->cqe.done = mlx5_ib_umr_done;
+	context->status = -1;
+	init_completion(&context->done);
 }
 
 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
@@ -896,12 +890,13 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (err)
 		goto free_mr;
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&umrwr, 0, sizeof(umrwr));
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
 			 page_shift, virt_addr, len, access_flags);
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
@@ -1013,8 +1008,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 
 		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
+		mlx5_ib_init_umr_context(&umr_context);
+
 		memset(&wr, 0, sizeof(wr));
-		wr.wr.wr_id = (u64)(unsigned long)&umr_context;
+		wr.wr.wr_cqe = &umr_context.cqe;
 
 		sg.addr = dma;
 		sg.length = ALIGN(npages * sizeof(u64),
@@ -1031,7 +1028,6 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 		wr.mkey = mr->mmkey.key;
 		wr.target.offset = start_page_index;
 
-		mlx5_ib_init_umr_context(&umr_context);
 		down(&umrc->sem);
 		err = ib_post_send(umrc->qp, &wr.wr, &bad);
 		if (err) {
@@ -1204,11 +1200,12 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	struct ib_send_wr *bad;
 	int err;
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&umrwr.wr, 0, sizeof(umrwr));
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
@@ -1246,7 +1243,9 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 	int size;
 	int err;
 
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	mlx5_ib_init_umr_context(&umr_context);
+
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
 
 	if (flags & IB_MR_REREG_TRANS) {
@@ -1273,8 +1272,6 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
 	}
 
-	mlx5_ib_init_umr_context(&umr_context);
-
 	/* post send request to UMR QP */
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
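
Taken together, the mr.c call-site hunks converge on one post-and-wait idiom: initialize the context (which now also wires up cqe.done), point wr_cqe at the embedded ib_cqe, post, and sleep on the completion. A sketch of that flow, reusing the illustrative my_request/my_request_done names from the earlier sketch (the zero-SGE IB_WR_SEND is a placeholder work request, not what the UMR path actually posts):

static int my_post_and_wait(struct ib_qp *qp)
{
	struct my_request req;
	struct ib_send_wr wr = {}, *bad_wr;
	int err;

	/* Wire up the callback and completion before posting. */
	req.cqe.done = my_request_done;
	req.status = -1;
	init_completion(&req.done);

	wr.wr_cqe = &req.cqe;	/* replaces wr.wr_id = (u64)(unsigned long)&req */
	wr.opcode = IB_WR_SEND;

	err = ib_post_send(qp, &wr, &bad_wr);
	if (err)
		return err;

	wait_for_completion(&req.done);
	return req.status == IB_WC_SUCCESS ? 0 : -EIO;
}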