
Commit 4a84386f authored by Tom Tucker, committed by J. Bruce Fields

svcrdma: Cleanup DMA unmapping in error paths.



There are several error paths in the code that do not unmap DMA. This
patch adds calls to svc_rdma_unmap_dma to free these DMA contexts.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent b432e6b3
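
The patch routes each of these error paths through a single helper instead of open-coding ib_dma_unmap_page() at every failure site. The helper's own source is not part of this diff; the sketch below shows the shape such a routine takes, inferred from the fields the hunks rely on (ctxt->count, ctxt->sge[], ctxt->direction, xprt->sc_dma_used). The ctxt->xprt back-pointer and the exact loop bound are assumptions for illustration, not verbatim kernel code.

/*
 * Sketch only -- not the verbatim kernel helper. Unmap every SGE that
 * was DMA-mapped into this op context, using the count, sge[] array,
 * and direction recorded on the context before mapping (the fields
 * the hunks below fill in). The ctxt->xprt back-pointer is assumed.
 */
static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count; i++) {
		/* pairs with the atomic_inc done at map time */
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_page(xprt->sc_cm_id->device,
				  ctxt->sge[i].addr,
				  ctxt->sge[i].length,
				  ctxt->direction);
	}
}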
+1 −0
@@ -495,6 +495,7 @@ next_sge:
 			printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
 			       err);
 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+			svc_rdma_unmap_dma(ctxt);
 			svc_rdma_put_context(ctxt, 0);
 			goto out;
 		}
+2 −0
@@ -367,6 +367,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 		goto err;
 	return 0;
 err:
+	svc_rdma_unmap_dma(ctxt);
+	svc_rdma_put_frmr(xprt, vec->frmr);
 	svc_rdma_put_context(ctxt, 0);
 	/* Fatal error, close transport */
 	return -EIO;
+14 −15
@@ -512,9 +512,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		ctxt->sge[sge_no].addr = pa;
 		ctxt->sge[sge_no].length = PAGE_SIZE;
 		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
+		ctxt->count = sge_no + 1;
 		buflen += PAGE_SIZE;
 	}
-	ctxt->count = sge_no;
 	recv_wr.next = NULL;
 	recv_wr.sg_list = &ctxt->sge[0];
 	recv_wr.num_sge = ctxt->count;
@@ -530,6 +530,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 	return ret;
 
 err_put_ctxt:
+	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_context(ctxt, 1);
 	return -ENOMEM;
 }
@@ -1308,7 +1309,6 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 			 enum rpcrdma_errcode err)
 {
 	struct ib_send_wr err_wr;
-	struct ib_sge sge;
 	struct page *p;
 	struct svc_rdma_op_ctxt *ctxt;
 	u32 *va;
@@ -1321,26 +1321,27 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	/* XDR encode error */
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
+	ctxt = svc_rdma_get_context(xprt);
+	ctxt->direction = DMA_FROM_DEVICE;
+	ctxt->count = 1;
+	ctxt->pages[0] = p;
+
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
+	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
+					    p, 0, length, DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
 		put_page(p);
 		return;
 	}
 	atomic_inc(&xprt->sc_dma_used);
-	sge.lkey = xprt->sc_dma_lkey;
-	sge.length = length;
-
-	ctxt = svc_rdma_get_context(xprt);
-	ctxt->count = 1;
-	ctxt->pages[0] = p;
+	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
+	ctxt->sge[0].length = length;
 
 	/* Prepare SEND WR */
 	memset(&err_wr, 0, sizeof err_wr);
 	ctxt->wr_op = IB_WR_SEND;
 	err_wr.wr_id = (unsigned long)ctxt;
-	err_wr.sg_list = &sge;
+	err_wr.sg_list = ctxt->sge;
 	err_wr.num_sge = 1;
 	err_wr.opcode = IB_WR_SEND;
 	err_wr.send_flags = IB_SEND_SIGNALED;
@@ -1350,9 +1351,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
-				  sge.addr, PAGE_SIZE,
-				  DMA_FROM_DEVICE);
+		svc_rdma_unmap_dma(ctxt);
 		svc_rdma_put_context(ctxt, 1);
 	}
 }
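
Note the ordering change in svc_rdma_send_error(): the op context is now allocated and fully described (direction, count, page) before the DMA mapping is attempted, and the SGE lives in ctxt->sge[0] rather than on the stack. That is what lets the error path after a failed ib_post_send() call the generic svc_rdma_unmap_dma(ctxt) instead of an open-coded ib_dma_unmap_page() that had to repeat the address, size, and direction by hand; the mapping itself also shrinks from PAGE_SIZE to the actual encoded length.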