Commit 44345724 authored by Octavian Purdila, committed by David S. Miller

net: factorize sync-rcu call in unregister_netdevice_many



Add dev_close_many and dev_deactivate_many to factorize another
sync-rcu operation on the netdevice unregister path.

$ modprobe dummy numdummies=10000
$ ip link set dev dummy* up
$ time rmmod dummy

Without the patch           With the patch

real    0m 24.63s           real    0m 5.15s
user    0m 0.00s            user    0m 0.00s
sys     0m 6.05s            sys     0m 5.14s

Signed-off-by: Octavian Purdila <opurdila@ixiacom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c6c8fea2
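
The speedup comes from paying for one RCU grace period per batch of devices instead of one per device. A minimal sketch of the pattern, not the kernel code itself; quiesce() and finish() are hypothetical stand-ins for the per-device teardown steps:

/* Sketch only: quiesce() and finish() are hypothetical stand-ins
 * for the per-device teardown steps around synchronize_rcu().
 */

/* Before: each device waits out its own RCU grace period. */
void teardown_each(struct net_device **devs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		quiesce(devs[i]);
		synchronize_rcu();	/* one grace period per device */
		finish(devs[i]);
	}
}

/* After: quiesce all devices, wait once, then finish all of them. */
void teardown_many(struct net_device **devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		quiesce(devs[i]);
	synchronize_rcu();	/* one grace period for the whole batch */
	for (i = 0; i < n; i++)
		finish(devs[i]);
}

With 10000 dummy devices, this turns 10000 back-to-back grace periods into one, which is what the timings above measure.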
include/net/sch_generic.h +1 −0
@@ -321,6 +321,7 @@ extern void dev_init_scheduler(struct net_device *dev);
 extern void dev_shutdown(struct net_device *dev);
 extern void dev_activate(struct net_device *dev);
 extern void dev_deactivate(struct net_device *dev);
+extern void dev_deactivate_many(struct list_head *head);
 extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 				     struct Qdisc *qdisc);
 extern void qdisc_reset(struct Qdisc *qdisc);
net/core/dev.c +76 −42
@@ -1222,13 +1222,14 @@ int dev_open(struct net_device *dev)
 }
 EXPORT_SYMBOL(dev_open);
 
-static int __dev_close(struct net_device *dev)
+static int __dev_close_many(struct list_head *head)
 {
-	const struct net_device_ops *ops = dev->netdev_ops;
+	struct net_device *dev;
 
 	ASSERT_RTNL();
 	might_sleep();
 
-	/*
-	 *	Tell people we are going down, so that they can
-	 *	prepare to death, when device is still operating.
+	list_for_each_entry(dev, head, unreg_list) {
+		/*
+		 *	Tell people we are going down, so that they can
+		 *	prepare to death, when device is still operating.
@@ -1237,15 +1238,19 @@ static int __dev_close(struct net_device *dev)
 
-	clear_bit(__LINK_STATE_START, &dev->state);
+		clear_bit(__LINK_STATE_START, &dev->state);
 
-	/* Synchronize to scheduled poll. We cannot touch poll list,
-	 * it can be even on different cpu. So just clear netif_running().
-	 *
-	 * dev->stop() will invoke napi_disable() on all of it's
-	 * napi_struct instances on this device.
-	 */
-	smp_mb__after_clear_bit(); /* Commit netif_running(). */
+		/* Synchronize to scheduled poll. We cannot touch poll list, it
+		 * can be even on different cpu. So just clear netif_running().
+		 *
+		 * dev->stop() will invoke napi_disable() on all of it's
+		 * napi_struct instances on this device.
+		 */
+		smp_mb__after_clear_bit(); /* Commit netif_running(). */
+	}
 
-	dev_deactivate(dev);
+	dev_deactivate_many(head);
+
+	list_for_each_entry(dev, head, unreg_list) {
+		const struct net_device_ops *ops = dev->netdev_ops;
 
-	/*
-	 *	Call the device specific close. This cannot fail.
+		/*
+		 *	Call the device specific close. This cannot fail.
@@ -1267,7 +1272,40 @@ static int __dev_close(struct net_device *dev)
-	/*
-	 *	Shutdown NET_DMA
-	 */
-	net_dmaengine_put();
+		/*
+		 *	Shutdown NET_DMA
+		 */
+		net_dmaengine_put();
+	}
 
 	return 0;
 }
+
+static int __dev_close(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	return __dev_close_many(&single);
+}
+
+int dev_close_many(struct list_head *head)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(dev, tmp, head, unreg_list)
+		if (!(dev->flags & IFF_UP))
+			list_move(&dev->unreg_list, &tmp_list);
+
+	__dev_close_many(head);
+
+	/*
+	 * Tell people we are down
+	 */
+	list_for_each_entry(dev, head, unreg_list) {
+		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+		call_netdevice_notifiers(NETDEV_DOWN, dev);
+	}
+
+	/* rollback_registered_many needs the complete original list */
+	list_splice(&tmp_list, head);
+	return 0;
+}

@@ -1282,16 +1320,10 @@ static int __dev_close(struct net_device *dev)
  */
 int dev_close(struct net_device *dev)
 {
-	if (!(dev->flags & IFF_UP))
-		return 0;
-
-	__dev_close(dev);
+	LIST_HEAD(single);
 
-	/*
-	 * Tell people we are down
-	 */
-	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
-	call_netdevice_notifiers(NETDEV_DOWN, dev);
+	list_add(&dev->unreg_list, &single);
+	dev_close_many(&single);
 
 	return 0;
 }
@@ -4963,10 +4995,12 @@ static void rollback_registered_many(struct list_head *head)
 		}
 
 		BUG_ON(dev->reg_state != NETREG_REGISTERED);
+	}
 
-		/* If device is running, close it first. */
-		dev_close(dev);
+	/* If device is running, close it first. */
+	dev_close_many(head);
 
+	list_for_each_entry(dev, head, unreg_list) {
 		/* And unlink it from device chain. */
 		unlist_netdevice(dev);
 
net/sched/sch_generic.c +22 −7
@@ -810,22 +810,37 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-	if (dev_ingress_queue(dev))
-		dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+	struct net_device *dev;
 
-	dev_watchdog_down(dev);
+	list_for_each_entry(dev, head, unreg_list) {
+		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+					 &noop_qdisc);
+		if (dev_ingress_queue(dev))
+			dev_deactivate_queue(dev, dev_ingress_queue(dev),
+					     &noop_qdisc);
+
+		dev_watchdog_down(dev);
+	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
 	synchronize_rcu();
 
 	/* Wait for outstanding qdisc_run calls. */
-	while (some_qdisc_is_busy(dev))
-		yield();
+	list_for_each_entry(dev, head, unreg_list)
+		while (some_qdisc_is_busy(dev))
+			yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+	LIST_HEAD(single);
+
+	list_add(&dev->unreg_list, &single);
+	dev_deactivate_many(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_qdisc)
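
Callers that tear down several devices at once chain them through dev->unreg_list and hand over the whole list, so every device in the batch shares the same synchronize_rcu(). A hedged usage sketch; close_all() is hypothetical, and dev_close_many() must be called under the RTNL lock, as __dev_close_many() asserts:

/* Usage sketch: close_all() is a hypothetical caller. It batches
 * devices onto a list via dev->unreg_list, exactly as dev_close()
 * does for the single-device case. Caller must hold rtnl_lock().
 */
static void close_all(struct net_device *devs[], int n)
{
	LIST_HEAD(many);
	int i;

	for (i = 0; i < n; i++)
		list_add_tail(&devs[i]->unreg_list, &many);

	dev_close_many(&many);	/* one synchronize_rcu() for all n devices */
}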