Commit f3fd4a61 authored by Konstantin Khlebnikov, committed by Linus Torvalds

mm: remove lru type checks from __isolate_lru_page()



After the patch "mm: forbid lumpy-reclaim in shrink_active_list()" we can
completely remove the anon/file and active/inactive lru type filters from
__isolate_lru_page(), because isolation for 0-order reclaim always takes
pages from the right lru list, while page isolation for lumpy
shrink_inactive_list() and for memory compaction is allowed to isolate
pages from all evictable lru lists anyway.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 014483bc
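
For illustration only, here is a minimal userspace sketch of the idea behind this change; it is not part of the commit, and the toy_page / toy_isolate_lru_page names are invented for the example. Because the caller has already decided which lru list it is scanning, the only per-page filtering left for __isolate_lru_page() is what the surviving flags describe (clean, unmapped, suitable for async migration), so the active/inactive and anon/file checks can go away and callers can start from mode = 0.

/*
 * Toy model (plain C, not kernel code): after the patch the isolation
 * helper no longer asks "is this page active?" or "is this file cache?";
 * it only honours the page-state flags that remain in mmzone.h.
 */
#include <stdio.h>

typedef unsigned int isolate_mode_t;

#define ISOLATE_CLEAN		((isolate_mode_t)0x1)
#define ISOLATE_UNMAPPED	((isolate_mode_t)0x2)
#define ISOLATE_ASYNC_MIGRATE	((isolate_mode_t)0x4)

struct toy_page {
	int dirty;	/* stands in for PageDirty()/PageWriteback() */
	int mapped;	/* stands in for page_mapped() */
};

/* No active/inactive or anon/file filtering here any more. */
static int toy_isolate_lru_page(const struct toy_page *page, isolate_mode_t mode)
{
	if ((mode & ISOLATE_CLEAN) && page->dirty)
		return -1;	/* roughly -EBUSY in the kernel */
	if ((mode & ISOLATE_UNMAPPED) && page->mapped)
		return -1;
	return 0;
}

int main(void)
{
	struct toy_page dirty_mapped = { .dirty = 1, .mapped = 1 };
	struct toy_page clean = { .dirty = 0, .mapped = 0 };

	/* The lru list was chosen by the caller, so mode starts at 0. */
	isolate_mode_t mode = ISOLATE_CLEAN | ISOLATE_UNMAPPED;

	printf("dirty, mapped page: %d\n", toy_isolate_lru_page(&dirty_mapped, mode));
	printf("clean page:         %d\n", toy_isolate_lru_page(&clean, mode));
	return 0;
}

This mirrors why the hunks below drop ISOLATE_ACTIVE and ISOLATE_INACTIVE, renumber the surviving flags, and initialise the various isolate_mode variables to 0.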
include/linux/mmzone.h  +3 −7
@@ -209,16 +209,12 @@ struct lruvec {
 #define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
 #define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)
 
-/* Isolate inactive pages */
-#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
-/* Isolate active pages */
-#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
 /* Isolate clean file */
-#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
+#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
 /* Isolate unmapped file */
-#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
 /* Isolate for asynchronous migration */
-#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
 
 /* LRU Isolation modes. */
 typedef unsigned __bitwise__ isolate_mode_t;
include/linux/swap.h  +1 −1
@@ -251,7 +251,7 @@ static inline void lru_cache_add_file(struct page *page)
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 						  gfp_t gfp_mask, bool noswap);
 extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
mm/compaction.c  +2 −2
@@ -226,7 +226,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long last_pageblock_nr = 0, pageblock_nr;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
-	isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
+	isolate_mode_t mode = 0;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -329,7 +329,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
 		/* Try isolate the page */
-		if (__isolate_lru_page(page, mode, 0) != 0)
+		if (__isolate_lru_page(page, mode) != 0)
 			continue;
 
 		VM_BUG_ON(PageTransCompound(page));
mm/vmscan.c  +4 −19
@@ -949,29 +949,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
  *
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode)
 {
-	bool all_lru_mode;
 	int ret = -EINVAL;
 
 	/* Only take pages on the LRU. */
 	if (!PageLRU(page))
 		return ret;
 
-	all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
-		(ISOLATE_ACTIVE|ISOLATE_INACTIVE);
-
-	/*
-	 * When checking the active state, we need to be sure we are
-	 * dealing with comparible boolean values.  Take the logical not
-	 * of each.
-	 */
-	if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
-		return ret;
-
-	if (!all_lru_mode && !!page_is_file_cache(page) != file)
-		return ret;
-
 	/* Do not give back unevictable pages for compaction */
 	if (PageUnevictable(page))
 		return ret;
@@ -1070,7 +1055,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
 		VM_BUG_ON(!PageLRU(page));
 
-		switch (__isolate_lru_page(page, mode, file)) {
+		switch (__isolate_lru_page(page, mode)) {
 		case 0:
 			mem_cgroup_lru_del(page);
 			list_move(&page->lru, dst);
@@ -1282,7 +1267,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	unsigned long nr_file;
 	unsigned long nr_dirty = 0;
 	unsigned long nr_writeback = 0;
-	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
+	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct zone *zone = mz->zone;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
@@ -1452,7 +1437,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	struct page *page;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 	unsigned long nr_rotated = 0;
-	isolate_mode_t isolate_mode = ISOLATE_ACTIVE;
+	isolate_mode_t isolate_mode = 0;
 	int file = is_file_lru(lru);
 	struct zone *zone = mz->zone;