Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bd092ad1 authored by David S. Miller
Browse files

Merge branch 'remove-__napi_complete_done'



Eric Dumazet says:

====================
net: get rid of __napi_complete()

This patch series removes __napi_complete() calls, in an effort
to make NAPI API simpler and generalize GRO and napi_complete_done()
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3976001c 02c1602e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1008,7 +1008,7 @@ static int greth_poll(struct napi_struct *napi, int budget)
			spin_unlock_irqrestore(&greth->devlock, flags);
			goto restart_txrx_poll;
		} else {
			__napi_complete(napi);
			napi_complete_done(napi, work_done);
			spin_unlock_irqrestore(&greth->devlock, flags);
		}
	}
+72 −92
Original line number Diff line number Diff line
@@ -695,23 +695,13 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
	void __iomem *mmio = lp->mmio;
	struct sk_buff *skb,*new_skb;
	int min_pkt_len, status;
	unsigned int intr0;
	int num_rx_pkt = 0;
	short pkt_len;
#if AMD8111E_VLAN_TAG_USED
	short vtag;
#endif
	int rx_pkt_limit = budget;
	unsigned long flags;

	if (rx_pkt_limit <= 0)
		goto rx_not_empty;

	do{
		/* process receive packets until we use the quota.
		 * If we own the next entry, it's a new packet. Send it up.
		 */
		while(1) {
	while (num_rx_pkt < budget) {
		status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
		if (status & OWN_BIT)
			break;
@@ -749,8 +739,6 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
			lp->drv_rx_errors++;
			goto err_next_pkt;
		}
			if(--rx_pkt_limit < 0)
				goto rx_not_empty;
		new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
		if (!new_skb) {
			/* if allocation fail,
@@ -780,7 +768,7 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		}
#endif
			netif_receive_skb(skb);
		napi_gro_receive(napi, skb);
		/* COAL update rx coalescing parameters */
		lp->coal_conf.rx_packets++;
		lp->coal_conf.rx_bytes += pkt_len;
@@ -795,25 +783,17 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
		lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
		rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
	}
		/* Check the interrupt status register for more packets in the
		 * mean time. Process them since we have not used up our quota.
		 */
		intr0 = readl(mmio + INT0);
		/*Ack receive packets */
		writel(intr0 & RINT0,mmio + INT0);

	} while(intr0 & RINT0);
	if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
		unsigned long flags;

	if (rx_pkt_limit > 0) {
		/* Receive descriptor is empty now */
		spin_lock_irqsave(&lp->lock, flags);
		__napi_complete(napi);
		writel(VAL0|RINTEN0, mmio + INTEN0);
		writel(VAL2 | RDMD0, mmio + CMD0);
		spin_unlock_irqrestore(&lp->lock, flags);
	}

rx_not_empty:
	return num_rx_pkt;
}

+3 −8
Original line number Diff line number Diff line
@@ -1350,13 +1350,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
		pcnet32_restart(dev, CSR0_START);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&lp->lock, flags);

	if (work_done < budget) {
		spin_lock_irqsave(&lp->lock, flags);

		__napi_complete(napi);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* clear interrupt masks */
		val = lp->a->read_csr(ioaddr, CSR3);
		val &= 0x00ff;
@@ -1364,9 +1359,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)

		/* Set interrupt enable. */
		lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
	}
	return work_done;
}

+6 −23
Original line number Diff line number Diff line
@@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
		pr_info("mdio write timed out\n");
}

static int ep93xx_rx(struct net_device *dev, int processed, int budget)
static int ep93xx_rx(struct net_device *dev, int budget)
{
	struct ep93xx_priv *ep = netdev_priv(dev);
	int processed = 0;

	while (processed < budget) {
		int entry;
@@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, dev);

			netif_receive_skb(skb);
			napi_gro_receive(&ep->napi, skb);

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
@@ -310,35 +311,17 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
	return processed;
}

static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
{
	struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
	return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
}

static int ep93xx_poll(struct napi_struct *napi, int budget)
{
	struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
	struct net_device *dev = ep->dev;
	int rx = 0;

poll_some_more:
	rx = ep93xx_rx(dev, rx, budget);
	if (rx < budget) {
		int more = 0;
	int rx;

	rx = ep93xx_rx(dev, budget);
	if (rx < budget && napi_complete_done(napi, rx)) {
		spin_lock_irq(&ep->rx_lock);
		__napi_complete(napi);
		wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
		if (ep93xx_have_more_rx(ep)) {
			wrl(ep, REG_INTEN, REG_INTEN_TX);
			wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
			more = 1;
		}
		spin_unlock_irq(&ep->rx_lock);

		if (more && napi_reschedule(napi))
			goto poll_some_more;
	}

	if (rx) {
+9 −9
Original line number Diff line number Diff line
@@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget)
		int n;
		if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
			continue;
		n = mc->ops->poll_rx(mc->dev, budget);
		n = mc->ops->poll_rx(mc->dev, budget - received);
		if (n) {
			received += n;
			budget -= n;
			if (budget <= 0)
				goto more_work; // XXX What if this is the last one ?
			if (received >= budget)
				return budget;
		}
	}

	if (napi_complete_done(napi, received)) {
		/* We need to disable IRQs to protect from RXDE IRQ here */
		spin_lock_irqsave(&mal->lock, flags);
	__napi_complete(napi);
		mal_enable_eob_irq(mal);
		spin_unlock_irqrestore(&mal->lock, flags);
	}

	/* Check for "rotting" packet(s) */
	list_for_each(l, &mal->poll_list) {
Loading