Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit da057fb7 authored by Stephen Hemminger, committed by David S. Miller
Browse files

skge: add byte queue limit support



This also changes the cleanup logic slightly to aggregate
completed notifications for multiple packets.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 302476c9
Loading
Loading
Loading
Loading
+24 −13
Original line number Diff line number Diff line
@@ -2817,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
	td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
	wmb();

	netdev_sent_queue(dev, skb->len);

	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);

	netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2858,11 +2860,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,


/* Free resources associated with this ring element */
static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
				 u32 control)
{
	struct pci_dev *pdev = skge->hw->pdev;

	/* skb header vs. fragment */
	if (control & BMU_STF)
		pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2872,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
		pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
			       dma_unmap_len(e, maplen),
			       PCI_DMA_TODEVICE);

	if (control & BMU_EOF) {
		netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
			     "tx done slot %td\n", e - skge->tx_ring.start);

		dev_kfree_skb(e->skb);
	}
}

/* Free all buffers in transmit ring */
@@ -2889,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)

	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
		struct skge_tx_desc *td = e->desc;
		skge_tx_free(skge, e, td->control);

		skge_tx_unmap(skge->hw->pdev, e, td->control);

		if (td->control & BMU_EOF)
			dev_kfree_skb(e->skb);
		td->control = 0;
	}

	netdev_reset_queue(dev);
	skge->tx_ring.to_clean = e;
}

@@ -3157,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
	struct skge_port *skge = netdev_priv(dev);
	struct skge_ring *ring = &skge->tx_ring;
	struct skge_element *e;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);

@@ -3166,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
		if (control & BMU_OWN)
			break;

		skge_tx_free(skge, e, control);
		skge_tx_unmap(skge->hw->pdev, e, control);

		if (control & BMU_EOF) {
			netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
				     "tx done slot %td\n",
				     e - skge->tx_ring.start);

			pkts_compl++;
			bytes_compl += e->skb->len;

			dev_kfree_skb(e->skb);
		}
	}
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	skge->tx_ring.to_clean = e;

	/* Can run lockless until we need to synchronize to restart queue. */