Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2c80cd57 authored by Sahitya Tummala's avatar Sahitya Tummala Committed by Linus Torvalds
Browse files

mm/list_lru.c: fix list_lru_count_node() to be race free

list_lru_count_node() iterates over all memcgs to get the total number of
entries on the node but it can race with memcg_drain_all_list_lrus(),
which migrates the entries from a dead cgroup to another.  This can cause
list_lru_count_node() to return an incorrect number of entries.

Fix this by keeping track of the number of entries per node and simply
returning it in list_lru_count_node().

Link: http://lkml.kernel.org/r/1498707555-30525-1-git-send-email-stummala@codeaurora.org


Signed-off-by: default avatarSahitya Tummala <stummala@codeaurora.org>
Acked-by: default avatarVladimir Davydov <vdavydov.dev@gmail.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Alexander Polakov <apolyakov@beget.ru>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: <stable@vger.kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 32e4e6d5
Loading
Loading
Loading
Loading
+1 −0
Original line number Original line Diff line number Diff line
@@ -44,6 +44,7 @@ struct list_lru_node {
	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
	struct list_lru_memcg	*memcg_lrus;
	struct list_lru_memcg	*memcg_lrus;
#endif
#endif
	long nr_items;
} ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;


struct list_lru {
struct list_lru {
+6 −8
Original line number Original line Diff line number Diff line
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
		l = list_lru_from_kmem(nlru, item);
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		list_add_tail(item, &l->list);
		l->nr_items++;
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		spin_unlock(&nlru->lock);
		return true;
		return true;
	}
	}
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
		l = list_lru_from_kmem(nlru, item);
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		list_del_init(item);
		l->nr_items--;
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		spin_unlock(&nlru->lock);
		return true;
		return true;
	}
	}
@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);


unsigned long list_lru_count_node(struct list_lru *lru, int nid)
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
{
	long count = 0;
	struct list_lru_node *nlru;
	int memcg_idx;


	count += __list_lru_count_one(lru, nid, -1);
	nlru = &lru->node[nid];
	if (list_lru_memcg_aware(lru)) {
	return nlru->nr_items;
		for_each_memcg_cache_index(memcg_idx)
			count += __list_lru_count_one(lru, nid, memcg_idx);
	}
	return count;
}
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
EXPORT_SYMBOL_GPL(list_lru_count_node);


@@ -226,6 +223,7 @@ __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
			assert_spin_locked(&nlru->lock);
			assert_spin_locked(&nlru->lock);
		case LRU_REMOVED:
		case LRU_REMOVED:
			isolated++;
			isolated++;
			nlru->nr_items--;
			/*
			/*
			 * If the lru lock has been dropped, our list
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * traversal is now invalid and so we have to