Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f016f305 authored by Chuck Lever, committed by J. Bruce Fields
Browse files

svcrdma: Refactor svc_rdma_dma_map_buf



Clean up: svc_rdma_dma_map_buf does mostly the same thing as
svc_rdma_dma_map_page, so let's fold these together.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent eb5d7a62
Loading
Loading
Loading
Loading
+0 −7
Original line number Diff line number Diff line
@@ -158,13 +158,6 @@ struct svc_rdma_recv_ctxt {
	struct page		*rc_pages[RPCSVC_MAXPAGES];
};

/* Record that @ctxt now owns one more live DMA mapping on this
 * transport; the counter is what the completion path walks when
 * unmapping.  @rdma is accepted but not read here.
 */
static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
					   struct svc_rdma_op_ctxt *ctxt)
{
	++ctxt->mapped_sges;
}

/* svc_rdma_backchannel.c */
extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
				    __be32 *rdma_resp,
+17 −33
Original line number Diff line number Diff line
@@ -302,59 +302,42 @@ static u32 svc_rdma_get_inv_rkey(__be32 *rdma_argp,
	return be32_to_cpup(p);
}

/* NOTE(review): this region is a commit-diff view whose +/- markers were
 * lost in extraction -- removed and added lines are interleaved.  Evidence:
 * two overlapping function signatures back-to-back (svc_rdma_dma_map_buf
 * then svc_rdma_dma_map_page, twice), duplicated ib_dma_map_page() calls,
 * and a second unreachable "return" after "return -EIO" near the bottom.
 * This is NOT compilable C as-is; recover the unified diff (or the
 * post-commit tree) before editing.  The net effect of the patch appears
 * to be: svc_rdma_dma_map_buf becomes a thin wrapper that converts
 * (base, len) to (page, offset) via virt_to_page()/offset_in_page() and
 * delegates to svc_rdma_dma_map_page -- TODO confirm against the commit.
 */
/* ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * is used during completion to DMA-unmap this memory, and
 * it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_op_ctxt *ctxt,
				 unsigned int sge_no,
				unsigned char *base,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	unsigned long offset = (unsigned long)base & ~PAGE_MASK;
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, virt_to_page(base),
				   offset, len, DMA_TO_DEVICE);
	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sge[sge_no].addr = dma_addr;
	ctxt->sge[sge_no].length = len;
	ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
	svc_rdma_count_mappings(rdma, ctxt);
	ctxt->mapped_sges++;
	return 0;

out_maperr:
	pr_err("svcrdma: failed to map buffer\n");
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
}

static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
/* ib_dma_map_page() is used here because svc_rdma_dma_unmap()
 * handles DMA-unmap and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_op_ctxt *ctxt,
				unsigned int sge_no,
				 struct page *page,
				 unsigned int offset,
				unsigned char *base,
				unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sge[sge_no].addr = dma_addr;
	ctxt->sge[sge_no].length = len;
	ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
	svc_rdma_count_mappings(rdma, ctxt);
	return 0;

out_maperr:
	trace_svcrdma_dma_map_page(rdma, page);
	return -EIO;
	return svc_rdma_dma_map_page(rdma, ctxt, sge_no, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
@@ -389,7 +372,8 @@ static int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
				  struct svc_rdma_op_ctxt *ctxt,
				  struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned int len, sge_no, remaining, page_off;
	unsigned int len, sge_no, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;