Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 65b7b5b9 authored by Kirill Tkhai's avatar Kirill Tkhai Committed by David S. Miller
Browse files

net: Make cleanup_list and net::cleanup_list of llist type



This simplifies cleanup queueing and makes cleanup lists
use llist primitives. Since llist has its own cmpxchg()
ordering, cleanup_list_lock is no longer needed.

Also, struct llist_node is smaller than struct list_head,
so we save some bytes in struct net with this patch.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 19efbd93
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -59,12 +59,13 @@ struct net {
	atomic64_t		cookie_gen;

	struct list_head	list;		/* list of network namespaces */
	struct list_head	cleanup_list;	/* namespaces on death row */
	struct list_head	exit_list;	/* To linked to call pernet exit
						 * methods on dead net (net_sem
						 * read locked), or to unregister
						 * pernet ops (net_sem wr locked).
						 */
	struct llist_node	cleanup_list;	/* namespaces on death row */

	struct user_namespace   *user_ns;	/* Owning user namespace */
	struct ucounts		*ucounts;
	spinlock_t		nsid_lock;
+6 −14
Original line number Diff line number Diff line
@@ -481,21 +481,18 @@ static void unhash_nsid(struct net *net, struct net *last)
	spin_unlock_bh(&net->nsid_lock);
}

static DEFINE_SPINLOCK(cleanup_list_lock);
static LIST_HEAD(cleanup_list);  /* Must hold cleanup_list_lock to touch */
static LLIST_HEAD(cleanup_list);

static void cleanup_net(struct work_struct *work)
{
	const struct pernet_operations *ops;
	struct net *net, *tmp, *last;
	struct list_head net_kill_list;
	struct llist_node *net_kill_list;
	LIST_HEAD(net_exit_list);
	unsigned write;

	/* Atomically snapshot the list of namespaces to cleanup */
	spin_lock_irq(&cleanup_list_lock);
	list_replace_init(&cleanup_list, &net_kill_list);
	spin_unlock_irq(&cleanup_list_lock);
	net_kill_list = llist_del_all(&cleanup_list);
again:
	write = READ_ONCE(nr_sync_pernet_ops);
	if (write)
@@ -510,7 +507,7 @@ static void cleanup_net(struct work_struct *work)

	/* Don't let anyone else find us. */
	rtnl_lock();
	list_for_each_entry(net, &net_kill_list, cleanup_list)
	llist_for_each_entry(net, net_kill_list, cleanup_list)
		list_del_rcu(&net->list);
	/* Cache last net. After we unlock rtnl, no one new net
	 * added to net_namespace_list can assign nsid pointer
@@ -525,7 +522,7 @@ static void cleanup_net(struct work_struct *work)
	last = list_last_entry(&net_namespace_list, struct net, list);
	rtnl_unlock();

	list_for_each_entry(net, &net_kill_list, cleanup_list) {
	llist_for_each_entry(net, net_kill_list, cleanup_list) {
		unhash_nsid(net, last);
		list_add_tail(&net->exit_list, &net_exit_list);
	}
@@ -585,12 +582,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
/*
 * __put_net - schedule a network namespace for destruction.
 * @net: namespace whose last reference has been dropped.
 *
 * Called when the refcount of @net reaches zero. Destruction must run in
 * process context (pernet exit methods may sleep), so the namespace is
 * queued on the global cleanup_list and the cleanup worker is kicked.
 *
 * The scraped diff rendering had lost its +/- markers, leaving both the
 * old spin_lock_irqsave()/list_add() enqueue and the new llist_add() in
 * the body (which would enqueue twice and leave `flags` unused). This is
 * the resolved post-patch form: llist_add() is lock-free (it orders via
 * its own cmpxchg() loop), so no spinlock and no saved irq flags are
 * needed, and the function is safe to call from any context.
 */
void __put_net(struct net *net)
{
	/* Cleanup the network namespace in process context */
	llist_add(&net->cleanup_list, &cleanup_list);
	queue_work(netns_wq, &net_cleanup_work);
}
EXPORT_SYMBOL_GPL(__put_net);