Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 87295b6c authored by Tom Tucker
Browse files

svcrdma: Add dma map count and WARN_ON



Add a dma map count in order to verify that all DMA mapping resources
have been freed when the transport is closed.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
parent e6ab9143
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -130,6 +130,7 @@ struct svcxprt_rdma {


	struct ib_pd         *sc_pd;
	struct ib_pd         *sc_pd;


	atomic_t	     sc_dma_used;
	atomic_t	     sc_ctxt_used;
	atomic_t	     sc_ctxt_used;
	struct list_head     sc_ctxt_free;
	struct list_head     sc_ctxt_free;
	int		     sc_ctxt_cnt;
	int		     sc_ctxt_cnt;
+1 −0
Original line number Original line Diff line number Diff line
@@ -222,6 +222,7 @@ static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
	ctxt->count = count;
	ctxt->count = count;
	ctxt->direction = DMA_FROM_DEVICE;
	ctxt->direction = DMA_FROM_DEVICE;
	for (i = 0; i < count; i++) {
	for (i = 0; i < count; i++) {
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[i].addr =
		ctxt->sge[i].addr =
			ib_dma_map_single(xprt->sc_cm_id->device,
			ib_dma_map_single(xprt->sc_cm_id->device,
					  vec[i].iov_base, vec[i].iov_len,
					  vec[i].iov_base, vec[i].iov_len,
+3 −0
Original line number Original line Diff line number Diff line
@@ -163,6 +163,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		sge_bytes = min((size_t)bc,
		sge_bytes = min((size_t)bc,
				(size_t)(vec->sge[xdr_sge_no].iov_len-sge_off));
				(size_t)(vec->sge[xdr_sge_no].iov_len-sge_off));
		sge[sge_no].length = sge_bytes;
		sge[sge_no].length = sge_bytes;
		atomic_inc(&xprt->sc_dma_used);
		sge[sge_no].addr =
		sge[sge_no].addr =
			ib_dma_map_single(xprt->sc_cm_id->device,
			ib_dma_map_single(xprt->sc_cm_id->device,
					  (void *)
					  (void *)
@@ -385,6 +386,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
	ctxt->count = 1;
	ctxt->count = 1;


	/* Prepare the SGE for the RPCRDMA Header */
	/* Prepare the SGE for the RPCRDMA Header */
	atomic_inc(&rdma->sc_dma_used);
	ctxt->sge[0].addr =
	ctxt->sge[0].addr =
		ib_dma_map_page(rdma->sc_cm_id->device,
		ib_dma_map_page(rdma->sc_cm_id->device,
				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
@@ -396,6 +398,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		byte_count -= sge_bytes;
		atomic_inc(&rdma->sc_dma_used);
		ctxt->sge[sge_no].addr =
		ctxt->sge[sge_no].addr =
			ib_dma_map_single(rdma->sc_cm_id->device,
			ib_dma_map_single(rdma->sc_cm_id->device,
					  vec->sge[sge_no].iov_base,
					  vec->sge[sge_no].iov_base,
+5 −0
Original line number Original line Diff line number Diff line
@@ -155,6 +155,7 @@ static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
	struct svcxprt_rdma *xprt = ctxt->xprt;
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;
	int i;
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_single(xprt->sc_cm_id->device,
		ib_dma_unmap_single(xprt->sc_cm_id->device,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].addr,
				    ctxt->sge[i].length,
				    ctxt->sge[i].length,
@@ -519,6 +520,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);


	if (!listener) {
	if (!listener) {
		int reqs = cma_xprt->sc_max_requests;
		int reqs = cma_xprt->sc_max_requests;
@@ -569,6 +571,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
		BUG_ON(sge_no >= xprt->sc_max_sge);
		BUG_ON(sge_no >= xprt->sc_max_sge);
		page = svc_rdma_get_page();
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		ctxt->pages[sge_no] = page;
		atomic_inc(&xprt->sc_dma_used);
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
				     DMA_FROM_DEVICE);
@@ -1049,6 +1052,7 @@ static void __svc_rdma_free(struct work_struct *work)


	/* Warn if we leaked a resource or under-referenced */
	/* Warn if we leaked a resource or under-referenced */
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
	WARN_ON(atomic_read(&rdma->sc_dma_used) != 0);


	/* Destroy the QP if present (not a listener) */
	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
@@ -1169,6 +1173,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);


	/* Prepare SGE for local address */
	/* Prepare SGE for local address */
	atomic_inc(&xprt->sc_dma_used);
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	sge.lkey = xprt->sc_phys_mr->lkey;
	sge.lkey = xprt->sc_phys_mr->lkey;