Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2cbb7a3a authored by Bob Liu's avatar Bob Liu Committed by Gerrit - the friendly Code Review server
Browse files

mm: zcache: add support for evicting zpages



Implemented zbud_ops->evict so that compressed zpages can be evicted from
the zbud memory pool when the compressed pool is full.

zbud already manages the compressed pool on an LRU basis. Eviction is
implemented simply by dropping the compressed file-page data directly; if
that data is needed again, the disk read can no longer be avoided.

Signed-off-by: default avatarBob Liu <bob.liu@oracle.com>
Patch-mainline: linux-mm @ 2013-08-06 11:36:16
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: default avatarVinayak Menon <vinmenon@codeaurora.org>
Change-Id: Ia66652475e490f0233547511e80abf7587054e65
parent 2edfdf13
Loading
Loading
Loading
Loading
+46 −6
Original line number Diff line number Diff line
@@ -65,6 +65,9 @@ static u64 zcache_pool_limit_hit;
static u64 zcache_dup_entry;
static u64 zcache_zbud_alloc_fail;
static u64 zcache_pool_pages;
static u64 zcache_evict_zpages;
static u64 zcache_evict_filepages;
static u64 zcache_reclaim_fail;
static atomic_t zcache_stored_pages = ATOMIC_INIT(0);

/*
@@ -129,6 +132,7 @@ struct zcache_ra_handle {
	int rb_index;			/* Redblack tree index */
	int ra_index;			/* Radix tree index */
	int zlen;			/* Compressed page size */
	struct zcache_pool *zpool;	/* Finding zcache_pool during evict */
};

static struct kmem_cache *zcache_rbnode_cache;
@@ -494,8 +498,16 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,

	if (zcache_is_full()) {
		zcache_pool_limit_hit++;
		if (zbud_reclaim_page(zpool->pool, 8)) {
			zcache_reclaim_fail++;
			return;
		}
		/*
		 * Continue if reclaimed a page frame succ.
		 */
		zcache_evict_filepages++;
		zcache_pool_pages = zbud_get_pool_size(zpool->pool);
	}

	/* compress */
	dst = get_cpu_var(zcache_dstmem);
@@ -522,6 +534,8 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,
	zhandle->ra_index = index;
	zhandle->rb_index = key.u.ino;
	zhandle->zlen = zlen;
	zhandle->zpool = zpool;

	/* Compressed page data stored at the end of zcache_ra_handle */
	zpage = (u8 *)(zhandle + 1);
	memcpy(zpage, dst, zlen);
@@ -692,16 +706,36 @@ static void zcache_flush_fs(int pool_id)
}

/*
 * Evict pages from zcache pool on an LRU basis after the compressed pool is
 * full.
 * Evict compressed pages from zcache pool on an LRU basis after the compressed
 * pool is full.
 */
static int zcache_evict_entry(struct zbud_pool *pool, unsigned long zaddr)
static int zcache_evict_zpage(struct zbud_pool *pool, unsigned long zaddr)
{
	return -EINVAL;
	struct zcache_pool *zpool;
	struct zcache_ra_handle *zhandle;
	void *zaddr_intree;

	zhandle = (struct zcache_ra_handle *)zbud_map(pool, zaddr);

	zpool = zhandle->zpool;
	BUG_ON(!zpool);
	BUG_ON(pool != zpool->pool);

	zaddr_intree = zcache_load_delete_zaddr(zpool, zhandle->rb_index,
			zhandle->ra_index);
	if (zaddr_intree) {
		BUG_ON((unsigned long)zaddr_intree != zaddr);
		zbud_unmap(pool, zaddr);
		zbud_free(pool, zaddr);
		atomic_dec(&zcache_stored_pages);
		zcache_pool_pages = zbud_get_pool_size(pool);
		zcache_evict_zpages++;
	}
	return 0;
}

static struct zbud_ops zcache_zbud_ops = {
	.evict = zcache_evict_entry
	.evict = zcache_evict_zpage
};

/* Return pool id */
@@ -832,6 +866,12 @@ static int __init zcache_debugfs_init(void)
			&zcache_pool_pages);
	debugfs_create_atomic_t("stored_pages", S_IRUGO, zcache_debugfs_root,
			&zcache_stored_pages);
	debugfs_create_u64("evicted_zpages", S_IRUGO, zcache_debugfs_root,
			&zcache_evict_zpages);
	debugfs_create_u64("evicted_filepages", S_IRUGO, zcache_debugfs_root,
			&zcache_evict_filepages);
	debugfs_create_u64("reclaim_fail", S_IRUGO, zcache_debugfs_root,
			&zcache_reclaim_fail);
	return 0;
}