
Commit e50287be authored by Sathya Perla, committed by David S. Miller

be2net: dma_sync each RX frag before passing it to the stack



The driver currently maps a page for DMA, divides the page into multiple
frags and posts them to the HW. It un-maps the page after data is received
on all the frags of the page. This scheme doesn't work when bounce buffers
are used for DMA (swiotlb=force kernel param).
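
To see why, here is a minimal sketch of the old scheme (an illustrative
simplification, not the driver's exact code). With swiotlb, dma_map_page()
returns the bus address of a bounce buffer, and data the device writes there
is only copied back to the CPU-visible page when the mapping is synced or
unmapped:

	/* old per-page scheme, simplified */
	dma_addr_t bus = dma_map_page(dev, page, 0, big_page_size,
				      DMA_FROM_DEVICE);
	/* frags at bus, bus + rx_frag_size, ... are posted to the HW
	 * and handed to the stack as each one completes; with bounce
	 * buffers the CPU copy of the page is only refreshed by the
	 * final unmap below, so frags consumed before it can hold
	 * stale data */
	dma_unmap_page(dev, bus, big_page_size, DMA_FROM_DEVICE);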

This patch fixes this problem by calling dma_sync_single_for_cpu() for each
frag (except the last one) so that the data is copied from the bounce
buffers. The page is un-mapped only when DMA finishes on the last frag of
the page.
(Thanks Ben H. for suggesting the dma_sync API!)
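
In outline, the resulting ownership rule per completed frag looks like this
(a sketch of the pattern; the actual change is in the diff below):

	if (rx_page_info->last_frag) {
		/* bus stores the page address: tear down the whole-page
		 * mapping, which also syncs this final frag to the CPU */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
	} else {
		/* bus stores the frag address: copy just this frag out
		 * of the bounce buffer; the page mapping stays live */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}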

This patch also renames the "last_page_user" field of the be_rx_page_info
struct to "last_frag" to improve readability of the fixed code.

Reported-by: Li Fengmao <li.fengmao@zte.com.cn>
Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9e82e7f4
drivers/net/ethernet/emulex/benet/be.h  +2 −1

@@ -261,9 +261,10 @@ struct be_tx_obj {
 /* Struct to remember the pages posted for rx frags */
 struct be_rx_page_info {
 	struct page *page;
+	/* set to page-addr for last frag of the page & frag-addr otherwise */
 	DEFINE_DMA_UNMAP_ADDR(bus);
 	u16 page_offset;
-	bool last_page_user;
+	bool last_frag;		/* last frag of the page */
 };
 
 struct be_rx_stats {
drivers/net/ethernet/emulex/benet/be_main.c  +22 −10

@@ -1448,11 +1448,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
 	rx_page_info = &rxo->page_info_tbl[frag_idx];
 	BUG_ON(!rx_page_info->page);
 
-	if (rx_page_info->last_page_user) {
+	if (rx_page_info->last_frag) {
 		dma_unmap_page(&adapter->pdev->dev,
 			       dma_unmap_addr(rx_page_info, bus),
 			       adapter->big_page_size, DMA_FROM_DEVICE);
-		rx_page_info->last_page_user = false;
+		rx_page_info->last_frag = false;
+	} else {
+		dma_sync_single_for_cpu(&adapter->pdev->dev,
+					dma_unmap_addr(rx_page_info, bus),
+					rx_frag_size, DMA_FROM_DEVICE);
 	}
 
 	queue_tail_inc(rxq);
@@ -1786,17 +1790,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 				rx_stats(rxo)->rx_post_fail++;
 				break;
 			}
-			page_info->page_offset = 0;
+			page_offset = 0;
 		} else {
 			get_page(pagep);
-			page_info->page_offset = page_offset + rx_frag_size;
+			page_offset += rx_frag_size;
 		}
-		page_offset = page_info->page_offset;
+		page_info->page_offset = page_offset;
 		page_info->page = pagep;
-		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
 
 		rxd = queue_head_node(rxq);
-		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
 
@@ -1804,15 +1807,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
 		if ((page_offset + rx_frag_size + rx_frag_size) >
 					adapter->big_page_size) {
 			pagep = NULL;
-			page_info->last_page_user = true;
+			page_info->last_frag = true;
+			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+		} else {
+			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
 		}
 
 		prev_page_info = page_info;
 		queue_head_inc(rxq);
 		page_info = &rxo->page_info_tbl[rxq->head];
 	}
-	if (pagep)
-		prev_page_info->last_page_user = true;
+
+	/* Mark the last frag of a page when we break out of the above loop
+	 * with no more slots available in the RXQ
+	 */
+	if (pagep) {
+		prev_page_info->last_frag = true;
+		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
+	}
 
 	if (posted) {
 		atomic_add(posted, &rxq->used);