Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a2f1039c authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: add WasActive page flag"

parents 87af9318 a43960d3
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -108,6 +108,9 @@ enum pageflags {
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	PG_compound_lock,
#endif
#ifdef CONFIG_ZCACHE
	PG_was_active,
#endif
	__NR_PAGEFLAGS,

@@ -220,6 +223,11 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
	__SETPAGEFLAG(SwapBacked, swapbacked)

__PAGEFLAG(SlobFree, slob_free)
#ifdef CONFIG_ZCACHE
PAGEFLAG(WasActive, was_active)
#else
PAGEFLAG_FALSE(WasActive)
#endif

/*
 * Private page markings that may be used by the filesystem that owns the page
+3 −0
Original line number Diff line number Diff line
@@ -48,6 +48,9 @@ static const struct trace_print_flags pageflag_names[] = {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{1UL << PG_compound_lock,	"compound_lock"	},
#endif
#ifdef CONFIG_ZCACHE
	{1UL << PG_was_active,		"was_active"	},
#endif
};

static void dump_flags(unsigned long flags,
+11 −1
Original line number Diff line number Diff line
@@ -1556,6 +1556,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;
		int file;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		list_del(&page->lru);
@@ -1572,8 +1573,11 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
		lru = page_lru(page);
		add_page_to_lru_list(page, lruvec, lru);

		file = is_file_lru(lru);
		if (IS_ENABLED(CONFIG_ZCACHE))
			if (file)
				SetPageWasActive(page);
		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
@@ -1898,6 +1902,12 @@ static void shrink_active_list(unsigned long nr_to_scan,
		}

		ClearPageActive(page);	/* we are de-activating */
		if (IS_ENABLED(CONFIG_ZCACHE))
			/*
			 * For zcache to know whether the page is from active
			 * file list
			 */
			SetPageWasActive(page);
		list_add(&page->lru, &l_inactive);
	}

+61 −6
Original line number Diff line number Diff line
@@ -65,6 +65,10 @@ static u64 zcache_pool_limit_hit;
static u64 zcache_dup_entry;
static u64 zcache_zbud_alloc_fail;
static u64 zcache_pool_pages;
static u64 zcache_evict_zpages;
static u64 zcache_evict_filepages;
static u64 zcache_inactive_pages_refused;
static u64 zcache_reclaim_fail;
static atomic_t zcache_stored_pages = ATOMIC_INIT(0);

/*
@@ -129,6 +133,7 @@ struct zcache_ra_handle {
	int rb_index;			/* Redblack tree index */
	int ra_index;			/* Radix tree index */
	int zlen;			/* Compressed page size */
	struct zcache_pool *zpool;	/* Finding zcache_pool during evict */
};

static struct kmem_cache *zcache_rbnode_cache;
@@ -492,10 +497,29 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,

	struct zcache_pool *zpool = zcache.pools[pool_id];

	/*
	 * Zcache will be ineffective if the compressed memory pool is full with
	 * compressed inactive file pages and most of them will never be used
	 * again.
	 * So we refuse to compress pages that are not from active file list.
	 */
	if (!PageWasActive(page)) {
		zcache_inactive_pages_refused++;
		return;
	}

	if (zcache_is_full()) {
		zcache_pool_limit_hit++;
		if (zbud_reclaim_page(zpool->pool, 8)) {
			zcache_reclaim_fail++;
			return;
		}
		/*
		 * Continue if a page frame was reclaimed successfully.
		 */
		zcache_evict_filepages++;
		zcache_pool_pages = zbud_get_pool_size(zpool->pool);
	}

	/* compress */
	dst = get_cpu_var(zcache_dstmem);
@@ -522,6 +546,8 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,
	zhandle->ra_index = index;
	zhandle->rb_index = key.u.ino;
	zhandle->zlen = zlen;
	zhandle->zpool = zpool;

	/* Compressed page data stored at the end of zcache_ra_handle */
	zpage = (u8 *)(zhandle + 1);
	memcpy(zpage, dst, zlen);
@@ -573,6 +599,7 @@ static int zcache_load_page(int pool_id, struct cleancache_filekey key,
	/* update stats */
	atomic_dec(&zcache_stored_pages);
	zcache_pool_pages = zbud_get_pool_size(zpool->pool);
	SetPageWasActive(page);
	return ret;
}

@@ -692,16 +719,36 @@ static void zcache_flush_fs(int pool_id)
}

/*
 * Evict pages from zcache pool on an LRU basis after the compressed pool is
 * full.
 * Evict compressed pages from zcache pool on an LRU basis after the compressed
 * pool is full.
 */
static int zcache_evict_entry(struct zbud_pool *pool, unsigned long zaddr)
static int zcache_evict_zpage(struct zbud_pool *pool, unsigned long zaddr)
{
	return -EINVAL;
	struct zcache_pool *zpool;
	struct zcache_ra_handle *zhandle;
	void *zaddr_intree;

	zhandle = (struct zcache_ra_handle *)zbud_map(pool, zaddr);

	zpool = zhandle->zpool;
	BUG_ON(!zpool);
	BUG_ON(pool != zpool->pool);

	zaddr_intree = zcache_load_delete_zaddr(zpool, zhandle->rb_index,
			zhandle->ra_index);
	if (zaddr_intree) {
		BUG_ON((unsigned long)zaddr_intree != zaddr);
		zbud_unmap(pool, zaddr);
		zbud_free(pool, zaddr);
		atomic_dec(&zcache_stored_pages);
		zcache_pool_pages = zbud_get_pool_size(pool);
		zcache_evict_zpages++;
	}
	return 0;
}

static struct zbud_ops zcache_zbud_ops = {
	.evict = zcache_evict_entry
	.evict = zcache_evict_zpage
};

/* Return pool id */
@@ -832,6 +879,14 @@ static int __init zcache_debugfs_init(void)
			&zcache_pool_pages);
	debugfs_create_atomic_t("stored_pages", S_IRUGO, zcache_debugfs_root,
			&zcache_stored_pages);
	debugfs_create_u64("evicted_zpages", S_IRUGO, zcache_debugfs_root,
			&zcache_evict_zpages);
	debugfs_create_u64("evicted_filepages", S_IRUGO, zcache_debugfs_root,
			&zcache_evict_filepages);
	debugfs_create_u64("reclaim_fail", S_IRUGO, zcache_debugfs_root,
			&zcache_reclaim_fail);
	debugfs_create_u64("inactive_pages_refused", S_IRUGO,
			zcache_debugfs_root, &zcache_inactive_pages_refused);
	return 0;
}