
Commit 074291fe authored by Konstantin Khlebnikov, committed by Linus Torvalds

mm/vmscan: replace zone_nr_lru_pages() with get_lruvec_size()



If the memory cgroup is enabled, we always use lruvecs, which are embedded
into struct mem_cgroup_per_zone, so we can reach the lru_size counters via
container_of().
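
As a reference for the container_of() trick mentioned above: given a pointer to a structure member, container_of() subtracts the member's offset to recover a pointer to the enclosing structure. Below is a minimal, self-contained sketch of the same pattern in userspace C, using simplified stand-in types rather than the real kernel definitions:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct demo_lruvec {
	unsigned long dummy;
};

struct demo_mem_cgroup_per_zone {
	unsigned long lru_size[5];
	struct demo_lruvec lruvec;	/* embedded, like in mem_cgroup_per_zone */
};

/* Same idea as the kernel's container_of(): step back from a member
 * pointer to the structure that contains it. */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static unsigned long demo_get_lruvec_size(struct demo_lruvec *lruvec, int lru)
{
	struct demo_mem_cgroup_per_zone *mz;

	mz = demo_container_of(lruvec, struct demo_mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

int main(void)
{
	struct demo_mem_cgroup_per_zone mz = { .lru_size = { 10, 20, 30, 40, 50 } };

	/* Only the embedded lruvec pointer is handed around, yet the
	 * per-zone counters stay reachable. Prints 30. */
	printf("%lu\n", demo_get_lruvec_size(&mz.lruvec, 2));
	return 0;
}

The mem_cgroup_get_lruvec_size() helper added in this commit applies the same pattern to the real struct mem_cgroup_per_zone.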

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 27ac81d8
include/linux/memcontrol.h  +2 −4
@@ -123,8 +123,7 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
 				    struct zone *zone);
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
-					int nid, int zid, unsigned int lrumask);
+unsigned long mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list);
 struct zone_reclaim_stat*
 mem_cgroup_get_reclaim_stat_from_page(struct page *page);
 extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
@@ -343,8 +342,7 @@ mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
 }
 
 static inline unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
-				unsigned int lru_mask)
+mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	return 0;
 }
mm/memcontrol.c  +9 −0
@@ -723,6 +723,15 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 }
 
+unsigned long
+mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
+{
+	struct mem_cgroup_per_zone *mz;
+
+	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
+	return mz->lru_size[lru];
+}
+
 static unsigned long
 mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
 			unsigned int lru_mask)
 {
mm/vmscan.c  +16 −15
@@ -155,19 +155,14 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
 	return &mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup)->reclaim_stat;
 }
 
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
-				       enum lru_list lru)
+static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	if (!mem_cgroup_disabled())
-		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
-						    zone_to_nid(mz->zone),
-						    zone_idx(mz->zone),
-						    BIT(lru));
+		return mem_cgroup_get_lruvec_size(lruvec, lru);
 
-	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
+	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
-
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -1603,6 +1598,9 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 	enum lru_list lru;
 	int noswap = 0;
 	bool force_scan = false;
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 
 	/*
 	 * If the zone or memcg is small, nr[l] can be 0.  This
@@ -1628,10 +1626,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 		goto out;
 	}
 
-	anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
-	file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	anon  = get_lruvec_size(lruvec, LRU_ACTIVE_ANON) +
+		get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
+	file  = get_lruvec_size(lruvec, LRU_ACTIVE_FILE) +
+		get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 
 	if (global_reclaim(sc)) {
 		free  = zone_page_state(mz->zone, NR_FREE_PAGES);
@@ -1694,7 +1692,7 @@ out:
 		int file = is_file_lru(lru);
 		unsigned long scan;
 
-		scan = zone_nr_lru_pages(mz, lru);
+		scan = get_lruvec_size(lruvec, lru);
 		if (sc->priority || noswap || !vmscan_swappiness(sc)) {
 			scan >>= sc->priority;
 			if (!scan && force_scan)
@@ -1730,6 +1728,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 {
 	unsigned long pages_for_compaction;
 	unsigned long inactive_lru_pages;
+	struct lruvec *lruvec;
 
 	/* If not in reclaim/compaction mode, stop */
 	if (!in_reclaim_compaction(sc))
@@ -1762,10 +1761,12 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 	 * If we have not reclaimed enough pages for compaction and the
 	 * inactive lists are large enough, continue reclaiming
 	 */
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 	pages_for_compaction = (2UL << sc->order);
-	inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
-		inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+		inactive_lru_pages += get_lruvec_size(lruvec,
+						      LRU_INACTIVE_ANON);
 	if (sc->nr_reclaimed < pages_for_compaction &&
 			inactive_lru_pages > pages_for_compaction)
 		return true;