Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit dd6fd213 authored by Chuck Lever, committed by J. Bruce Fields
Browse files

svcrdma: Remove DMA map accounting



Clean up: sc_dma_used is not required for correct operation. It is
simply a debugging tool to report when svcrdma has leaked DMA maps.

However, manipulating an atomic has a measurable CPU cost, and DMA
map accounting specific to svcrdma will be meaningless once svcrdma
is converted to use the new generic r/w API.

A similar kind of debug accounting can be done simply by enabling
the IOMMU or by using CONFIG_DMA_API_DEBUG, CONFIG_IOMMU_DEBUG, and
CONFIG_IOMMU_LEAK.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent e4eb42ce
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -148,7 +148,6 @@ struct svcxprt_rdma {

	struct ib_pd         *sc_pd;

	atomic_t	     sc_dma_used;
	spinlock_t	     sc_ctxt_lock;
	struct list_head     sc_ctxts;
	int		     sc_ctxt_used;
@@ -200,7 +199,6 @@ static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
					   struct svc_rdma_op_ctxt *ctxt)
{
	ctxt->mapped_sges++;
	atomic_inc(&rdma->sc_dma_used);
}

/* svc_rdma_backchannel.c */
+0 −1
Original line number Diff line number Diff line
@@ -279,7 +279,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
		       frmr->sg);
		return -ENOMEM;
	}
	atomic_inc(&xprt->sc_dma_used);

	n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, NULL, PAGE_SIZE);
	if (unlikely(n != frmr->sg_nents)) {
+3 −10
Original line number Diff line number Diff line
@@ -224,25 +224,22 @@ void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
	struct svcxprt_rdma *xprt = ctxt->xprt;
	struct ib_device *device = xprt->sc_cm_id->device;
	u32 lkey = xprt->sc_pd->local_dma_lkey;
	unsigned int i, count;
	unsigned int i;

	for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
	for (i = 0; i < ctxt->mapped_sges; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the local_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == lkey) {
			count++;
		if (ctxt->sge[i].lkey == lkey)
			ib_dma_unmap_page(device,
					    ctxt->sge[i].addr,
					    ctxt->sge[i].length,
					    ctxt->direction);
	}
	}
	ctxt->mapped_sges = 0;
	atomic_sub(count, &xprt->sc_dma_used);
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
@@ -944,7 +941,6 @@ void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
	if (frmr) {
		ib_dma_unmap_sg(rdma->sc_cm_id->device,
				frmr->sg, frmr->sg_nents, frmr->direction);
		atomic_dec(&rdma->sc_dma_used);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
@@ -1256,9 +1252,6 @@ static void __svc_rdma_free(struct work_struct *work)
	if (rdma->sc_ctxt_used != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       rdma->sc_ctxt_used);
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* Final put of backchannel client transport */
	if (xprt->xpt_bc_xprt) {