Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a43960d3 authored by Bob Liu's avatar Bob Liu Committed by Gerrit - the friendly Code Review server
Browse files

mm: add WasActive page flag



Zcache could be ineffective if the compressed memory pool is full of
compressed inactive file pages, most of which will never be used again.

So we pick up pages from the active file list only, since those pages
will probably be accessed again. Compressing them in memory can reduce
the latency significantly compared with rereading from disk.

When a file page is moved from the active file list to the inactive file
list, the PageActive flag is also cleared.
So add an extra WasActive page flag so that zcache can know whether the
file page was shrunk from the active list.

Change-Id: Ida1f4db17075d1f6f825ef7ce2b3bae4eb799e3f
Signed-off-by: default avatarBob Liu <bob.liu@oracle.com>
Patch-mainline: linux-mm @ 2013-08-06 11:36:17
[vinmenon@codeaurora.org: trivial merge conflict fixes, checkpatch fixes,
fix the definitions of was_active page flag so that it does not create
compile time errors with CONFIG_CLEANCACHE disabled. Also remove the
unnecessary use of PG_was_active in PAGE_FLAGS_CHECK_AT_PREP. Since
was_active is a requirement for zcache, make the definitions dependent on
CONFIG_ZCACHE rather than CONFIG_CLEANCACHE.]
Signed-off-by: default avatarVinayak Menon <vinmenon@codeaurora.org>
parent 2cbb7a3a
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -108,6 +108,9 @@ enum pageflags {
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	PG_compound_lock,
#endif
#ifdef CONFIG_ZCACHE
	PG_was_active,
#endif
	__NR_PAGEFLAGS,

@@ -220,6 +223,11 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
	__SETPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)
#ifdef CONFIG_ZCACHE
PAGEFLAG(WasActive, was_active)
#else
PAGEFLAG_FALSE(WasActive)
#endif

/*
 * Private page markings that may be used by the filesystem that owns the page
+3 −0
Original line number Diff line number Diff line
@@ -48,6 +48,9 @@ static const struct trace_print_flags pageflag_names[] = {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{1UL << PG_compound_lock,	"compound_lock"	},
#endif
#ifdef CONFIG_ZCACHE
	{1UL << PG_was_active,		"was_active"	},
#endif
};

static void dump_flags(unsigned long flags,
+11 −1
Original line number Diff line number Diff line
@@ -1556,6 +1556,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;
		int file;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		list_del(&page->lru);
@@ -1572,8 +1573,11 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
		lru = page_lru(page);
		add_page_to_lru_list(page, lruvec, lru);

		file = is_file_lru(lru);
		if (IS_ENABLED(CONFIG_ZCACHE))
			if (file)
				SetPageWasActive(page);
		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
@@ -1898,6 +1902,12 @@ static void shrink_active_list(unsigned long nr_to_scan,
		}

		ClearPageActive(page);	/* we are de-activating */
		if (IS_ENABLED(CONFIG_ZCACHE))
			/*
			 * For zcache to know whether the page is from active
			 * file list
			 */
			SetPageWasActive(page);
		list_add(&page->lru, &l_inactive);
	}

+15 −0
Original line number Diff line number Diff line
@@ -67,6 +67,7 @@ static u64 zcache_zbud_alloc_fail;
static u64 zcache_pool_pages;
static u64 zcache_evict_zpages;
static u64 zcache_evict_filepages;
static u64 zcache_inactive_pages_refused;
static u64 zcache_reclaim_fail;
static atomic_t zcache_stored_pages = ATOMIC_INIT(0);

@@ -496,6 +497,17 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,

	struct zcache_pool *zpool = zcache.pools[pool_id];

	/*
	 * Zcache will be ineffective if the compressed memory pool is full with
	 * compressed inactive file pages and most of them will never be used
	 * again.
	 * So we refuse to compress pages that are not from active file list.
	 */
	if (!PageWasActive(page)) {
		zcache_inactive_pages_refused++;
		return;
	}

	if (zcache_is_full()) {
		zcache_pool_limit_hit++;
		if (zbud_reclaim_page(zpool->pool, 8)) {
@@ -587,6 +599,7 @@ static int zcache_load_page(int pool_id, struct cleancache_filekey key,
	/* update stats */
	atomic_dec(&zcache_stored_pages);
	zcache_pool_pages = zbud_get_pool_size(zpool->pool);
	SetPageWasActive(page);
	return ret;
}

@@ -872,6 +885,8 @@ static int __init zcache_debugfs_init(void)
			&zcache_evict_filepages);
	debugfs_create_u64("reclaim_fail", S_IRUGO, zcache_debugfs_root,
			&zcache_reclaim_fail);
	debugfs_create_u64("inactive_pages_refused", S_IRUGO,
			zcache_debugfs_root, &zcache_inactive_pages_refused);
	return 0;
}