Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 8031612d authored by Michal Schmidt, committed by David S. Miller
Browse files

bnx2x: fix DMA API usage



With CONFIG_DMA_API_DEBUG=y bnx2x triggers the error "DMA-API: device
driver frees DMA memory with wrong function".
On archs where PAGE_SIZE > SGE_PAGE_SIZE it also triggers "DMA-API:
device driver frees DMA memory with different size".

Fix this by making the mapping and unmapping symmetric:
 - Do not map the whole pool page at once. Instead map the
   SGE_PAGE_SIZE-sized pieces individually, so they can be unmapped in
   the same manner.
 - What's mapped using dma_map_page() must be unmapped using
   dma_unmap_page().

Tested on ppc64.

Fixes: 4cace675 ("bnx2x: Alloc 4k fragment for each rx ring buffer element")
Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0f8b6cea
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -530,7 +530,6 @@ enum bnx2x_tpa_mode_t {

/* Per-fastpath page pool backing SGE rx buffers: one allocated page is
 * handed out in SGE_PAGE_SIZE fragments (see bnx2x_alloc_rx_sge in the
 * diff below), tracked by 'offset'. */
struct bnx2x_alloc_pool {
	struct page	*page;	/* current pool page; NULL when none allocated */
	dma_addr_t	dma;	/* DMA mapping of the whole page — NOTE(review):
				 * this is the field the patch removes (the +0 −1
				 * hunk), since mapping now happens per-fragment */
	unsigned int	offset;	/* byte offset of the next free fragment in page */
};

+10 −13
Original line number Diff line number Diff line
@@ -563,23 +563,20 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			return -ENOMEM;
		}

		pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
					 PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&bp->pdev->dev,
					       pool->dma))) {
			__free_pages(pool->page, PAGES_PER_SGE_SHIFT);
			pool->page = NULL;
		pool->offset = 0;
	}

	mapping = dma_map_page(&bp->pdev->dev, pool->page,
			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		BNX2X_ERR("Can't map sge\n");
		return -ENOMEM;
	}
		pool->offset = 0;
	}

	get_page(pool->page);
	sw_buf->page = pool->page;
	sw_buf->offset = pool->offset;

	mapping = pool->dma + sw_buf->offset;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -648,7 +645,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			return err;
		}

		dma_unmap_single(&bp->pdev->dev,
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
		/* Add one frag and update the appropriate fields in the skb */
+2 −10
Original line number Diff line number Diff line
@@ -807,7 +807,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
	/* Since many fragments can share the same page, make sure to
	 * only unmap and free the page once.
	 */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE, DMA_FROM_DEVICE);

	put_page(page);
@@ -974,14 +974,6 @@ static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
	if (!pool->page)
		return;

	/* Page was not fully fragmented.  Unmap unused space */
	if (pool->offset < PAGE_SIZE) {
		dma_addr_t dma = pool->dma + pool->offset;
		int size = PAGE_SIZE - pool->offset;

		dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
	}

	put_page(pool->page);

	pool->page = NULL;