
Commit 42073d91 authored by Alexander Duyck, committed by Peter P Waskiewicz Jr

ixgbe: Have the CPU take ownership of the buffers sooner



This patch makes it so that we will always have ownership of the buffers by
the time we get to ixgbe_add_rx_frag. This is necessary as I am planning to
add a copy-break to ixgbe_add_rx_frag, and in order for that to function
correctly we need the CPU to have ownership of the buffer.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
parent 09816fbe
+38 −14
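Background note (not part of the patch): the "ownership" handoff the commit message refers to is the streaming-DMA sync performed by dma_sync_single_range_for_cpu() and dma_sync_single_range_for_device(). Below is a minimal sketch of that pattern for a copy-break style receive path; the my_rx_buffer and my_copy_break names are hypothetical illustrations, not ixgbe symbols, and the code is a sketch of the general DMA API usage rather than the driver's implementation.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Hypothetical receive buffer bookkeeping, loosely modeled on a NIC ring. */
struct my_rx_buffer {
	struct page *page;	/* page the NIC DMAs packet data into */
	dma_addr_t dma;		/* bus address of the mapped page */
	unsigned int offset;	/* offset of this packet within the page */
};

/*
 * Copy a small received frame into the skb.  The page stays mapped for the
 * device, so the CPU must take ownership before reading it and must hand it
 * back afterwards so the buffer can be recycled for another receive.
 */
static void my_copy_break(struct device *dev, struct my_rx_buffer *buf,
			  struct sk_buff *skb, unsigned int len)
{
	/* give the CPU ownership of the region the NIC just wrote */
	dma_sync_single_range_for_cpu(dev, buf->dma, buf->offset, len,
				      DMA_FROM_DEVICE);

	/* safe to read the packet data now; copy it into the skb */
	skb_copy_to_linear_data(skb, page_address(buf->page) + buf->offset,
				len);
	skb_put(skb, len);

	/* return ownership to the device so the page can be reused */
	dma_sync_single_range_for_device(dev, buf->dma, buf->offset, len,
					 DMA_FROM_DEVICE);
}

The CPU may only read the buffer between the for_cpu and for_device syncs; having that for_cpu sync done before ixgbe_add_rx_frag runs is the precondition the commit message says the planned copy-break needs.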
@@ -1457,6 +1457,36 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring,
 	return true;
 }
 
+/**
+ * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being updated
+ *
+ * This function provides a basic DMA sync up for the first fragment of an
+ * skb.  The reason for doing this is that the first fragment cannot be
+ * unmapped until we have reached the end of packet descriptor for a buffer
+ * chain.
+ */
+static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+				struct sk_buff *skb)
+{
+	/* if the page was released unmap it, else just sync our portion */
+	if (unlikely(IXGBE_CB(skb)->page_released)) {
+		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
+			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+		IXGBE_CB(skb)->page_released = false;
+	} else {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      IXGBE_CB(skb)->dma,
+					      frag->page_offset,
+					      ixgbe_rx_bufsz(rx_ring),
+					      DMA_FROM_DEVICE);
+	}
+	IXGBE_CB(skb)->dma = 0;
+}
+
 /**
  * ixgbe_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1484,20 +1514,6 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
 	unsigned char *va;
 	unsigned int pull_len;
 
-	/* if the page was released unmap it, else just sync our portion */
-	if (unlikely(IXGBE_CB(skb)->page_released)) {
-		dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-			       ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-		IXGBE_CB(skb)->page_released = false;
-	} else {
-		dma_sync_single_range_for_cpu(rx_ring->dev,
-					      IXGBE_CB(skb)->dma,
-					      frag->page_offset,
-					      ixgbe_rx_bufsz(rx_ring),
-					      DMA_FROM_DEVICE);
-	}
-	IXGBE_CB(skb)->dma = 0;
-
 	/* verify that the packet does not have any known errors */
 	if (unlikely(ixgbe_test_staterr(rx_desc,
 					IXGBE_RXDADV_ERR_FRAME_ERR_MASK) &&
@@ -1742,8 +1758,16 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			 * after the writeback.  Only unmap it when EOP is
 			 * reached
 			 */
+			if (likely(ixgbe_test_staterr(rx_desc,
+						      IXGBE_RXD_STAT_EOP)))
+				goto dma_sync;
+
 			IXGBE_CB(skb)->dma = rx_buffer->dma;
 		} else {
+			if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+				ixgbe_dma_sync_frag(rx_ring, skb);
+
+dma_sync:
 			/* we are reusing so sync this buffer for CPU use */
 			dma_sync_single_range_for_cpu(rx_ring->dev,
 						      rx_buffer->dma,