Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 47f44e40 authored by Auke Kok, committed by Jeff Garzik
Browse files

e1000e: Fix jumbo frame receive code.



Fix allocation and freeing of jumbo frames where several bugs
were recently introduced by cleanups after we forked this code
from e1000. This moves ps_pages to buffer_info where it really
belongs and makes it a dynamically allocated array. The penalty
is not that high since it's allocated outside of the buffer_info
struct anyway.

Without this patch all jumbo frames are completely broken and the
driver panics.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent e38c2c65
Loading
Loading
Loading
Loading
+2 −2
Original line number Original line Diff line number Diff line
@@ -123,6 +123,8 @@ struct e1000_buffer {
		};
		};
		/* RX */
		/* RX */
		struct page *page;
		struct page *page;
		/* arrays of page information for packet split */
		struct e1000_ps_page *ps_pages;
	};
	};


};
};
@@ -142,8 +144,6 @@ struct e1000_ring {
	/* array of buffer information structs */
	/* array of buffer information structs */
	struct e1000_buffer *buffer_info;
	struct e1000_buffer *buffer_info;


	/* arrays of page information for packet split */
	struct e1000_ps_page *ps_pages;
	struct sk_buff *rx_skb_top;
	struct sk_buff *rx_skb_top;


	struct e1000_queue_stats stats;
	struct e1000_queue_stats stats;
+52 −50
Original line number Original line Diff line number Diff line
@@ -245,9 +245,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);


		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
			ps_page = &buffer_info->ps_pages[j];
						     + j];
			if (j >= adapter->rx_ps_pages) {
			if (j < adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j+1] = ~0;
				continue;
			}
			if (!ps_page->page) {
			if (!ps_page->page) {
				ps_page->page = alloc_page(GFP_ATOMIC);
				ps_page->page = alloc_page(GFP_ATOMIC);
				if (!ps_page->page) {
				if (!ps_page->page) {
@@ -258,8 +261,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
						   ps_page->page,
						   ps_page->page,
						   0, PAGE_SIZE,
						   0, PAGE_SIZE,
						   PCI_DMA_FROMDEVICE);
						   PCI_DMA_FROMDEVICE);
					if (pci_dma_mapping_error(
				if (pci_dma_mapping_error(ps_page->dma)) {
							ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
					dev_err(&adapter->pdev->dev,
					  "RX DMA page map failed\n");
					  "RX DMA page map failed\n");
					adapter->rx_dma_failed++;
					adapter->rx_dma_failed++;
@@ -273,9 +275,6 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
			 */
			 */
			rx_desc->read.buffer_addr[j+1] =
			rx_desc->read.buffer_addr[j+1] =
			     cpu_to_le64(ps_page->dma);
			     cpu_to_le64(ps_page->dma);
			} else {
				rx_desc->read.buffer_addr[j+1] = ~0;
			}
		}
		}


		skb = netdev_alloc_skb(netdev,
		skb = netdev_alloc_skb(netdev,
@@ -953,7 +952,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
		    ((length + l1) <= adapter->rx_ps_bsize0)) {
			u8 *vaddr;
			u8 *vaddr;


			ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
			ps_page = &buffer_info->ps_pages[0];


			/* there is no documentation about how to call
			/* there is no documentation about how to call
			 * kmap_atomic, so we can't hold the mapping
			 * kmap_atomic, so we can't hold the mapping
@@ -977,7 +976,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
			if (!length)
			if (!length)
				break;
				break;


			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
			ps_page = &buffer_info->ps_pages[j];
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
				       PCI_DMA_FROMDEVICE);
			ps_page->dma = 0;
			ps_page->dma = 0;
@@ -1043,7 +1042,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
	struct e1000_buffer *buffer_info;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i, j;
	unsigned int i, j;


	/* Free all the Rx ring sk_buffs */
	/* Free all the Rx ring sk_buffs */
@@ -1075,8 +1073,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
		}
		}


		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
			ps_page = &buffer_info->ps_pages[j];
						     + j];
			if (!ps_page->page)
			if (!ps_page->page)
				break;
				break;
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
@@ -1093,12 +1090,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
		rx_ring->rx_skb_top = NULL;
		rx_ring->rx_skb_top = NULL;
	}
	}


	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);
	size = sizeof(struct e1000_ps_page)
	       * (rx_ring->count * PS_PAGE_BUFFERS);
	memset(rx_ring->ps_pages, 0, size);

	/* Zero out the descriptor ring */
	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
	memset(rx_ring->desc, 0, rx_ring->size);


@@ -1421,7 +1412,8 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
{
{
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int size, desc_len, err = -ENOMEM;
	struct e1000_buffer *buffer_info;
	int i, size, desc_len, err = -ENOMEM;


	size = sizeof(struct e1000_buffer) * rx_ring->count;
	size = sizeof(struct e1000_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	rx_ring->buffer_info = vmalloc(size);
@@ -1429,11 +1421,14 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
		goto err;
		goto err;
	memset(rx_ring->buffer_info, 0, size);
	memset(rx_ring->buffer_info, 0, size);


	rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
						sizeof(struct e1000_ps_page),
						sizeof(struct e1000_ps_page),
						GFP_KERNEL);
						GFP_KERNEL);
	if (!rx_ring->ps_pages)
		if (!buffer_info->ps_pages)
		goto err;
			goto err_pages;
	}


	desc_len = sizeof(union e1000_rx_desc_packet_split);
	desc_len = sizeof(union e1000_rx_desc_packet_split);


@@ -1443,16 +1438,21 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)


	err = e1000_alloc_ring_dma(adapter, rx_ring);
	err = e1000_alloc_ring_dma(adapter, rx_ring);
	if (err)
	if (err)
		goto err;
		goto err_pages;


	rx_ring->next_to_clean = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->next_to_use = 0;
	rx_ring->rx_skb_top = NULL;
	rx_ring->rx_skb_top = NULL;


	return 0;
	return 0;

err_pages:
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		kfree(buffer_info->ps_pages);
	}
err:
err:
	vfree(rx_ring->buffer_info);
	vfree(rx_ring->buffer_info);
	kfree(rx_ring->ps_pages);
	ndev_err(adapter->netdev,
	ndev_err(adapter->netdev,
	"Unable to allocate memory for the transmit descriptor ring\n");
	"Unable to allocate memory for the transmit descriptor ring\n");
	return err;
	return err;
@@ -1518,15 +1518,17 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
{
{
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	int i;


	e1000_clean_rx_ring(adapter);
	e1000_clean_rx_ring(adapter);


	for (i = 0; i < rx_ring->count; i++) {
		kfree(rx_ring->buffer_info[i].ps_pages);
	}

	vfree(rx_ring->buffer_info);
	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;
	rx_ring->buffer_info = NULL;


	kfree(rx_ring->ps_pages);
	rx_ring->ps_pages = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);
			  rx_ring->dma);
	rx_ring->desc = NULL;
	rx_ring->desc = NULL;