Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b6bacd55 authored by Eric W. Biederman's avatar Eric W. Biederman Committed by David S. Miller
Browse files

netpoll: Don't drop all received packets.



Change the strategy of netpoll from dropping all packets received
during netpoll_poll_dev to calling napi poll with a budget of 0
(to avoid processing drivers rx queue), and to ignore packets received
with netif_rx (those will safely be placed on the backlog queue).

All of the netpoll supporting drivers have been reviewed to ensure
either they use netif_rx or that a budget of 0 is supported by their
napi poll routine and that a budget of 0 will not process the drivers
rx queues.

Not dropping packets makes NETPOLL_RX_DROP unnecessary so it is removed.

npinfo->rx_flags is removed as rx_flags with just the NETPOLL_RX_ENABLED
flag becomes just a redundant mirror of list_empty(&npinfo->rx_np).

Signed-off-by: default avatar"Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent ff607631
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -39,7 +39,6 @@ struct netpoll {
struct netpoll_info {
	atomic_t refcnt;

	unsigned long rx_flags;
	spinlock_t rx_lock;
	struct semaphore dev_lock;
	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
@@ -99,7 +98,7 @@ static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	return npinfo && (netpoll_rx_processing(npinfo) || npinfo->rx_flags);
	return npinfo && netpoll_rx_processing(npinfo);
}

static inline bool netpoll_rx(struct sk_buff *skb)
+6 −11
Original line number Diff line number Diff line
@@ -51,8 +51,6 @@ static atomic_t trapped;
DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
@@ -193,7 +191,8 @@ static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	int budget = 16;
	bool rx_processing = netpoll_rx_processing(ni);
	int budget = rx_processing? 16 : 0;

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
@@ -207,7 +206,7 @@ static void netpoll_poll_dev(struct net_device *dev)
		return;
	}

	ni->rx_flags |= NETPOLL_RX_DROP;
	if (rx_processing)
		atomic_inc(&trapped);

	ops = dev->netdev_ops;
@@ -221,8 +220,8 @@ static void netpoll_poll_dev(struct net_device *dev)

	poll_napi(dev, budget);

	if (rx_processing)
		atomic_dec(&trapped);
	ni->rx_flags &= ~NETPOLL_RX_DROP;

	up(&ni->dev_lock);

@@ -1050,7 +1049,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
@@ -1076,7 +1074,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)

	if (np->rx_skb_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}
@@ -1258,8 +1255,6 @@ void __netpoll_cleanup(struct netpoll *np)
	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}