
Commit ea0d7d91 authored by Scott Feldman, committed by David S. Miller

enic: bug fix: split TSO fragments larger than 16K into multiple descs



enic WQ desc supports a maximum 16K buf size, so split any send fragments
larger than 16K into several descs.

Signed-off-by: Scott Feldman <scofeldm@cisco.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4badc385
+69 −18
@@ -44,10 +44,15 @@
 #include "enic.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
+#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
+#define MAX_TSO				(1 << 16)
+#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
+#define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
 
 /* Supported devices */
 static struct pci_device_id enic_id_table[] = {
-	{ PCI_VDEVICE(CISCO, 0x0043) },
+	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
 	{ 0, }	/* end of table */
 };
 
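
For a sense of scale, the new macro works out to five descriptors per fragment in the worst case: assuming WQ_ENET_LEN_BITS is 14 (the width of the length field in the vnic WQ descriptor header), a descriptor carries at most 16384 bytes, so a full 64K TSO payload splits into at most 65536/16384 + 1 = 5 pieces. A standalone user-space sketch of that arithmetic, not part of the patch:

/* Sketch only: mirrors the new macros, assuming WQ_ENET_LEN_BITS == 14. */
#include <stdio.h>

#define WQ_ENET_LEN_BITS	14	/* assumed value from the WQ descriptor header */
#define WQ_ENET_MAX_DESC_LEN	(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO			(1 << 16)
#define ENIC_DESC_MAX_SPLITS	(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

int main(void)
{
	printf("max desc len: %d bytes\n", WQ_ENET_MAX_DESC_LEN);	/* 16384 */
	printf("max TSO size: %d bytes\n", MAX_TSO);			/* 65536 */
	printf("max splits:   %d descs per fragment\n",
		ENIC_DESC_MAX_SPLITS);					/* 5 */
	return 0;
}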


@@ -310,7 +315,8 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 		opaque);
 
 	if (netif_queue_stopped(enic->netdev) &&
-	    vnic_wq_desc_avail(&enic->wq[q_number]) >= MAX_SKB_FRAGS + 1)
+	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
+	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
 		netif_wake_queue(enic->netdev);
 
 	spin_unlock(&enic->wq_lock[q_number]);
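
The wake threshold changes for the same reason as the stop threshold later in the patch: with splitting, a single skb may need up to MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS descriptors, so the queue is only restarted once at least that many are free. A minimal sketch of the headroom check, assuming MAX_SKB_FRAGS is 18 (65536 / 4096 + 2 with 4K pages) and ENIC_DESC_MAX_SPLITS is 5 as above:

/* Sketch only: the wake condition from the hunk above, with assumed values. */
#include <stdio.h>

#define MAX_SKB_FRAGS		18	/* assumed: 4K pages */
#define ENIC_DESC_MAX_SPLITS	5

static int enic_may_wake(unsigned int descs_avail)
{
	return descs_avail >= (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS);
}

int main(void)
{
	printf("22 free descs -> wake? %d\n", enic_may_wake(22));	/* 0 */
	printf("23 free descs -> wake? %d\n", enic_may_wake(23));	/* 1 */
	return 0;
}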
@@ -525,7 +531,11 @@ static inline void enic_queue_wq_skb_vlan(struct enic *enic,
 	unsigned int len_left = skb->len - head_len;
 	int eop = (len_left == 0);
 
-	/* Queue the main skb fragment */
+	/* Queue the main skb fragment. The fragments are no larger
+	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
+	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
+	 * per fragment is queued.
+	 */
 	enic_queue_wq_desc(wq, skb,
 		pci_map_single(enic->pdev, skb->data,
 			head_len, PCI_DMA_TODEVICE),
@@ -547,7 +557,11 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
 	unsigned int csum_offset = hdr_len + skb->csum_offset;
 	int eop = (len_left == 0);
 
-	/* Queue the main skb fragment */
+	/* Queue the main skb fragment. The fragments are no larger
+	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
+	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
+	 * per fragment is queued.
+	 */
 	enic_queue_wq_desc_csum_l4(wq, skb,
 		pci_map_single(enic->pdev, skb->data,
 			head_len, PCI_DMA_TODEVICE),
@@ -565,10 +579,14 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
 	int vlan_tag_insert, unsigned int vlan_tag)
 {
-	unsigned int head_len = skb_headlen(skb);
-	unsigned int len_left = skb->len - head_len;
+	unsigned int frag_len_left = skb_headlen(skb);
+	unsigned int len_left = skb->len - frag_len_left;
 	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int eop = (len_left == 0);
+	unsigned int len;
+	dma_addr_t dma_addr;
+	unsigned int offset = 0;
+	skb_frag_t *frag;
 
 	/* Preload TCP csum field with IP pseudo hdr calculated
 	 * with IP length set to zero.  HW will later add in length
@@ -584,17 +602,49 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
 	}
 
-	/* Queue the main skb fragment */
-	enic_queue_wq_desc_tso(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		mss, hdr_len,
-		vlan_tag_insert, vlan_tag,
-		eop);
+	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
+	 * for the main skb fragment
+	 */
+	while (frag_len_left) {
+		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
+		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
+				len, PCI_DMA_TODEVICE);
+		enic_queue_wq_desc_tso(wq, skb,
+			dma_addr,
+			len,
+			mss, hdr_len,
+			vlan_tag_insert, vlan_tag,
+			eop && (len == frag_len_left));
+		frag_len_left -= len;
+		offset += len;
+	}
 
-	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left);
+	if (eop)
+		return;
+
+	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
+	 * for additional data fragments
+	 */
+	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
+		len_left -= frag->size;
+		frag_len_left = frag->size;
+		offset = frag->page_offset;
+
+		while (frag_len_left) {
+			len = min(frag_len_left,
+				(unsigned int)WQ_ENET_MAX_DESC_LEN);
+			dma_addr = pci_map_page(enic->pdev, frag->page,
+				offset, len,
+				PCI_DMA_TODEVICE);
+			enic_queue_wq_desc_cont(wq, skb,
+				dma_addr,
+				len,
+				(len_left == 0) &&
+				(len == frag_len_left));	/* EOP? */
+			frag_len_left -= len;
+			offset += len;
+		}
+	}
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
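
This hunk is the core of the fix: the linear head and then every page fragment are chopped into chunks of at most WQ_ENET_MAX_DESC_LEN bytes, each chunk mapped and queued as its own descriptor, with EOP set only on the last chunk of the last fragment. A user-space sketch of the same chopping logic (queue_split() is a hypothetical stand-in for the descriptor-queueing calls), assuming the 16K limit:

/* Sketch only: split one fragment into <= 16K chunks, EOP on the last
 * chunk when this fragment is the skb's last. */
#include <stdio.h>

#define WQ_ENET_MAX_DESC_LEN	(1 << 14)	/* assumed 16K limit */

static unsigned int queue_split(unsigned int frag_len, int last_frag)
{
	unsigned int offset = 0, len, descs = 0;

	while (frag_len) {
		len = frag_len < WQ_ENET_MAX_DESC_LEN ?
			frag_len : WQ_ENET_MAX_DESC_LEN;
		printf("  desc %u: offset=%u len=%u eop=%d\n",
			descs, offset, len, last_frag && len == frag_len);
		frag_len -= len;
		offset += len;
		descs++;
	}

	return descs;
}

int main(void)
{
	/* e.g. a 60000-byte linear TSO head with no page fragments:
	 * 16384 + 16384 + 16384 + 10848 bytes, i.e. 4 descriptors */
	printf("descs used: %u\n", queue_split(60000, 1));
	return 0;
}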
@@ -648,7 +698,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	spin_lock_irqsave(&enic->wq_lock[0], flags);
 
-	if (vnic_wq_desc_avail(wq) < skb_shinfo(skb)->nr_frags + 1) {
+	if (vnic_wq_desc_avail(wq) <
+	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
 		netif_stop_queue(netdev);
 		/* This is a hard error, log it */
 		printk(KERN_ERR PFX "%s: BUG! Tx ring full when "
@@ -659,7 +710,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	enic_queue_wq_skb(enic, wq, skb);
 
-	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + 1)
+	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
 		netif_stop_queue(netdev);
 
 	spin_unlock_irqrestore(&enic->wq_lock[0], flags);