
Commit 6290df54 authored by Johannes Weiner, committed by Linus Torvalds

mm: collect LRU list heads into struct lruvec

Having a unified structure with an LRU list set for both global zones and
per-memcg zones allows the code that deals with LRU lists, and does not
care about the container itself, to stay simple.
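
As an illustrative sketch only (not part of the patch text; the field names are taken from the diff below, unrelated members elided), both the global zone and the per-memcg zone end up embedding the same list-head array:

	struct lruvec {
		struct list_head lists[NR_LRU_LISTS];
	};

	struct zone {
		/* ... */
		spinlock_t	lru_lock;
		struct lruvec	lruvec;
		/* ... */
	};

	struct mem_cgroup_per_zone {
		struct lruvec	lruvec;
		unsigned long	count[NR_LRU_LISTS];
		/* ... */
	};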

Once the per-memcg LRU lists directly link struct pages, the isolation
function and all other list manipulations are shared between the memcg
case and the global LRU case.
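
For illustration only, a hypothetical helper (lruvec_rotate_page is not part of this patch or the kernel tree) showing the kind of container-agnostic list manipulation the shared lruvec enables once pages are linked into it directly:

	static inline void lruvec_rotate_page(struct lruvec *lruvec,
					      struct page *page,
					      enum lru_list lru)
	{
		/* Same operation whether the lruvec lives in a zone or a memcg. */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
	}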

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b95a2f2d
include/linux/mm_inline.h  +1 −1
@@ -33,7 +33,7 @@ __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
static inline void
add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
{
	__add_page_to_lru_list(zone, page, l, &zone->lru[l].list);
	__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
}

static inline void
include/linux/mmzone.h  +6 −4
@@ -159,6 +159,10 @@ static inline int is_unevictable_lru(enum lru_list l)
	return (l == LRU_UNEVICTABLE);
}

struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
@@ -365,9 +369,7 @@ struct zone {

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct zone_lru {
		struct list_head list;
	} lru[NR_LRU_LISTS];
	struct lruvec		lruvec;

	struct zone_reclaim_stat reclaim_stat;

mm/memcontrol.c  +7 −10
@@ -134,10 +134,7 @@ struct mem_cgroup_reclaim_iter {
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	struct list_head	lists[NR_LRU_LISTS];
	struct lruvec		lruvec;
	unsigned long		count[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
@@ -1061,7 +1058,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move_tail(&pc->lru, &mz->lists[lru]);
	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
}

void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
@@ -1079,7 +1076,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
	smp_rmb();
	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
	list_move(&pc->lru, &mz->lists[lru]);
	list_move(&pc->lru, &mz->lruvec.lists[lru]);
}

void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
@@ -1109,7 +1106,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
	/* huge page split is done under lru_lock. so, we have no races. */
	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
	SetPageCgroupAcctLRU(pc);
	list_add(&pc->lru, &mz->lists[lru]);
	list_add(&pc->lru, &mz->lruvec.lists[lru]);
}

/*
@@ -1307,7 +1304,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	src = &mz->lists[lru];
	src = &mz->lruvec.lists[lru];

	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
@@ -3738,7 +3735,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(memcg, node, zid);
	list = &mz->lists[lru];
	list = &mz->lruvec.lists[lru];

	loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against EBUSY etc...*/
@@ -4864,7 +4861,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
		mz->mem = memcg;
mm/page_alloc.c  +1 −1
@@ -4288,7 +4288,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,

		zone_pcp_init(zone);
		for_each_lru(l)
			INIT_LIST_HEAD(&zone->lru[l].list);
			INIT_LIST_HEAD(&zone->lruvec.lists[l]);
		zone->reclaim_stat.recent_rotated[0] = 0;
		zone->reclaim_stat.recent_rotated[1] = 0;
		zone->reclaim_stat.recent_scanned[0] = 0;
mm/swap.c  +5 −6
@@ -236,7 +236,7 @@ static void pagevec_move_tail_fn(struct page *page, void *arg)

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &zone->lru[lru].list);
		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
		mem_cgroup_rotate_reclaimable_page(page);
		(*pgmoved)++;
	}
@@ -480,7 +480,7 @@ static void lru_deactivate_fn(struct page *page, void *arg)
		 * The page's writeback ends up during pagevec
		 * We moves tha page into tail of inactive.
		 */
		list_move_tail(&page->lru, &zone->lru[lru].list);
		list_move_tail(&page->lru, &zone->lruvec.lists[lru]);
		mem_cgroup_rotate_reclaimable_page(page);
		__count_vm_event(PGROTATED);
	}
@@ -654,7 +654,6 @@ void lru_add_page_tail(struct zone* zone,
	int active;
	enum lru_list lru;
	const int file = 0;
	struct list_head *head;

	VM_BUG_ON(!PageHead(page));
	VM_BUG_ON(PageCompound(page_tail));
@@ -674,10 +673,10 @@ void lru_add_page_tail(struct zone* zone,
		}
		update_page_reclaim_stat(zone, page_tail, file, active);
		if (likely(PageLRU(page)))
			head = page->lru.prev;
			__add_page_to_lru_list(zone, page_tail, lru,
					       page->lru.prev);
		else
			head = &zone->lru[lru].list;
		__add_page_to_lru_list(zone, page_tail, lru, head);
			add_page_to_lru_list(zone, page_tail, lru);
	} else {
		SetPageUnevictable(page_tail);
		add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);