Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c97542e1 authored by qctecmdr's avatar qctecmdr Committed by Gerrit - the friendly Code Review server
Browse files

Merge "zram: implement deduplication in zram"

parents f7f4e508 affb4215
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -223,6 +223,8 @@ line of text and contains the following stats separated by whitespace:
                  No memory is allocated for such pages.
 pages_compacted  the number of pages freed during compaction
 huge_pages	  the number of incompressible pages
 dup_data_size	  deduplicated data size
 meta_data_size	  the amount of metadata allocated for deduplication feature

File /sys/block/zram<id>/bd_stat

+1 −1
Original line number Diff line number Diff line
zram-y	:=	zcomp.o zram_drv.o
zram-y	:=	zcomp.o zram_drv.o zram_dedup.o

obj-$(CONFIG_ZRAM)	+=	zram.o
+204 −0
Original line number Diff line number Diff line
/*
 * Copyright (C) 2017 Joonsoo Kim.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/highmem.h>

#include "zram_drv.h"

/*
 * Hash table sizing: roughly one bucket per 128 pages of disksize
 * (ZRAM_HASH_SHIFT), clamped to [ZRAM_HASH_SIZE_MIN, ZRAM_HASH_SIZE_MAX].
 *
 * The bounds use 1UL: "1 << 31" would be a signed-int left shift into the
 * sign bit, which is undefined behavior in C.
 */
#define ZRAM_HASH_SHIFT		7
#define ZRAM_HASH_SIZE_MIN	(1UL << 10)
#define ZRAM_HASH_SIZE_MAX	(1UL << 31)

u64 zram_dedup_dup_size(struct zram *zram)
{
	return (u64)atomic64_read(&zram->stats.dup_data_size);
}

u64 zram_dedup_meta_size(struct zram *zram)
{
	return (u64)atomic64_read(&zram->stats.meta_data_size);
}

/* Checksum one whole page of data; used as the dedup hash/rb-tree key. */
static u32 zram_dedup_checksum(unsigned char *mem)
{
	u32 csum;

	/* Fixed seed 0 so identical pages always hash identically. */
	csum = jhash(mem, PAGE_SIZE, 0);

	return csum;
}

/*
 * zram_dedup_insert - publish @new into the dedup store keyed by @checksum.
 *
 * Records the checksum in the entry and links it into the rb-tree of the
 * hash bucket selected by (checksum % hash_size), under that bucket's
 * spinlock.  Duplicate checksums are allowed: equal keys descend left, so
 * colliding entries coexist in the tree.
 */
void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
				u32 checksum)
{
	struct zram_hash *hash;
	struct rb_root *rb_root;
	struct rb_node **rb_node, *parent = NULL;
	struct zram_entry *entry;

	new->checksum = checksum;
	hash = &zram->hash[checksum % zram->hash_size];
	rb_root = &hash->rb_root;

	spin_lock(&hash->lock);
	rb_node = &rb_root->rb_node;
	while (*rb_node) {
		parent = *rb_node;
		entry = rb_entry(parent, struct zram_entry, rb_node);
		if (checksum < entry->checksum)
			rb_node = &parent->rb_left;
		else if (checksum > entry->checksum)
			rb_node = &parent->rb_right;
		else
			/* equal checksum: deliberately bias left to keep duplicates */
			rb_node = &parent->rb_left;
	}

	rb_link_node(&new->rb_node, parent, rb_node);
	rb_insert_color(&new->rb_node, rb_root);
	spin_unlock(&hash->lock);
}

/*
 * zram_dedup_match - check whether @entry's stored data equals page @mem.
 *
 * The stored object is mapped read-only from zsmalloc.  An entry whose
 * length equals PAGE_SIZE was stored uncompressed and is compared
 * directly; otherwise it is decompressed into a per-CPU stream buffer
 * first.  A decompression failure counts as a mismatch (match stays
 * false).
 */
static bool zram_dedup_match(struct zram *zram, struct zram_entry *entry,
				unsigned char *mem)
{
	bool match = false;
	unsigned char *cmem;
	struct zcomp_strm *zstrm;

	cmem = zs_map_object(zram->mem_pool, entry->handle, ZS_MM_RO);
	if (entry->len == PAGE_SIZE) {
		/* incompressible page stored as-is: raw byte compare */
		match = !memcmp(mem, cmem, PAGE_SIZE);
	} else {
		zstrm = zcomp_stream_get(zram->comp);
		if (!zcomp_decompress(zstrm, cmem, entry->len, zstrm->buffer))
			match = !memcmp(mem, zstrm->buffer, PAGE_SIZE);
		zcomp_stream_put(zram->comp);
	}
	zs_unmap_object(zram->mem_pool, entry->handle);

	return match;
}

/*
 * zram_dedup_put - drop one reference on @entry under its bucket lock.
 *
 * Returns the remaining reference count.  When it returns 0 the entry has
 * already been unlinked from the rb-tree and the caller owns it (and must
 * free it); otherwise only the dedup-savings accounting is rolled back.
 *
 * Fix vs. original: the refcount is snapshotted while the lock is held.
 * Reading entry->refcount after spin_unlock() raced with other holders --
 * once the lock is dropped with refcount > 0, a concurrent put can take
 * the count to zero and free @entry, making the post-unlock read a
 * use-after-free.
 */
static unsigned long zram_dedup_put(struct zram *zram,
				struct zram_entry *entry)
{
	struct zram_hash *hash;
	unsigned long refcount;
	u32 checksum;

	checksum = entry->checksum;
	hash = &zram->hash[checksum % zram->hash_size];

	spin_lock(&hash->lock);

	entry->refcount--;
	refcount = entry->refcount;
	if (!refcount)
		rb_erase(&entry->rb_node, &hash->rb_root);
	else
		atomic64_sub(entry->len, &zram->stats.dup_data_size);

	spin_unlock(&hash->lock);

	return refcount;
}

/*
 * zram_dedup_get - find an existing entry whose data matches page @mem.
 *
 * Walks the rb-tree of the bucket selected by @checksum.  On a checksum
 * hit the entry's refcount is taken (and dup_data_size credited) BEFORE
 * dropping the bucket lock, so the entry cannot be freed while its data
 * is compared outside the lock.  If the byte compare then fails, the
 * just-taken reference is released via zram_entry_free() and NULL is
 * returned.
 *
 * NOTE(review): only the first node with a matching checksum is content-
 * compared; duplicate-checksum siblings (inserted left-biased) are not
 * probed on mismatch, so a hash collision simply forfeits dedup for that
 * write -- presumably an accepted trade-off, verify against the design.
 */
static struct zram_entry *zram_dedup_get(struct zram *zram,
				unsigned char *mem, u32 checksum)
{
	struct zram_hash *hash;
	struct zram_entry *entry;
	struct rb_node *rb_node;

	hash = &zram->hash[checksum % zram->hash_size];

	spin_lock(&hash->lock);
	rb_node = hash->rb_root.rb_node;
	while (rb_node) {
		entry = rb_entry(rb_node, struct zram_entry, rb_node);
		if (checksum == entry->checksum) {
			/* pin the entry before unlocking for the slow compare */
			entry->refcount++;
			atomic64_add(entry->len, &zram->stats.dup_data_size);
			spin_unlock(&hash->lock);

			if (zram_dedup_match(zram, entry, mem))
				return entry;

			/* checksum collision: release the reference we took */
			zram_entry_free(zram, entry);

			return NULL;
		}

		if (checksum < entry->checksum)
			rb_node = rb_node->rb_left;
		else
			rb_node = rb_node->rb_right;
	}
	spin_unlock(&hash->lock);

	return NULL;
}

/*
 * zram_dedup_find - look up @page's content in the dedup store.
 * @checksum: out-parameter; receives the page checksum so the caller can
 *            later zram_dedup_insert() a fresh entry under the same key.
 *
 * Returns a referenced matching entry, or NULL if no duplicate exists.
 */
struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
				u32 *checksum)
{
	struct zram_entry *entry;
	unsigned char *src;

	src = kmap_atomic(page);
	*checksum = zram_dedup_checksum(src);
	entry = zram_dedup_get(zram, src, *checksum);
	kunmap_atomic(src);

	return entry;
}

/* Initialize dedup bookkeeping for a freshly allocated zram entry. */
void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
				unsigned long handle, unsigned int len)
{
	entry->len = len;
	entry->handle = handle;
	entry->refcount = 1;	/* allocating slot holds the first reference */
}

/*
 * zram_dedup_put_entry - release one reference on @entry.
 *
 * Returns true when the last reference was dropped, i.e. the caller must
 * now free the entry's backing storage; false while other users remain.
 */
bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry)
{
	return zram_dedup_put(zram, entry) == 0;
}

/*
 * zram_dedup_init - allocate and initialize the per-device dedup hash table.
 * @num_pages: device size in pages; one bucket serves ~2^ZRAM_HASH_SHIFT pages.
 *
 * Returns 0 on success, -ENOMEM if the bucket array cannot be allocated.
 *
 * Fix vs. original: the loop index is size_t, not int.  hash_size is
 * clamped to ZRAM_HASH_SIZE_MAX (1UL << 31), which exceeds INT_MAX, so an
 * int counter could overflow (undefined behavior) before terminating.
 */
int zram_dedup_init(struct zram *zram, size_t num_pages)
{
	size_t i;
	struct zram_hash *hash;

	zram->hash_size = num_pages >> ZRAM_HASH_SHIFT;
	zram->hash_size = min_t(size_t, ZRAM_HASH_SIZE_MAX, zram->hash_size);
	zram->hash_size = max_t(size_t, ZRAM_HASH_SIZE_MIN, zram->hash_size);
	zram->hash = vzalloc(zram->hash_size * sizeof(struct zram_hash));
	if (!zram->hash) {
		pr_err("Error allocating zram entry hash\n");
		return -ENOMEM;
	}

	for (i = 0; i < zram->hash_size; i++) {
		hash = &zram->hash[i];
		spin_lock_init(&hash->lock);
		hash->rb_root = RB_ROOT;
	}

	return 0;
}

/* Tear down the dedup hash table and reset the device's dedup state. */
void zram_dedup_fini(struct zram *zram)
{
	struct zram_hash *table = zram->hash;

	zram->hash = NULL;
	zram->hash_size = 0;
	vfree(table);	/* vfree(NULL) is a no-op, so a NULL table is fine */
}
+22 −0
Original line number Diff line number Diff line
#ifndef _ZRAM_DEDUP_H_
#define _ZRAM_DEDUP_H_

/* Public interface of the zram deduplication engine (zram_dedup.c). */

struct zram;
struct zram_entry;

/* Stats reported through the mm_stat sysfs file. */
u64 zram_dedup_dup_size(struct zram *zram);
u64 zram_dedup_meta_size(struct zram *zram);

/* Publish a newly stored entry under @checksum. */
void zram_dedup_insert(struct zram *zram, struct zram_entry *new,
				u32 checksum);
/*
 * Look up a duplicate of @page; returns a referenced entry or NULL.
 * @checksum receives the page checksum for a later zram_dedup_insert().
 */
struct zram_entry *zram_dedup_find(struct zram *zram, struct page *page,
				u32 *checksum);

/* Set up refcount/handle/len on a freshly allocated entry. */
void zram_dedup_init_entry(struct zram *zram, struct zram_entry *entry,
				unsigned long handle, unsigned int len);
/* Drop a reference; returns true when the caller must free the entry. */
bool zram_dedup_put_entry(struct zram *zram, struct zram_entry *entry);

/* Per-device hash-table lifecycle. */
int zram_dedup_init(struct zram *zram, size_t num_pages);
void zram_dedup_fini(struct zram *zram);

#endif /* _ZRAM_DEDUP_H_ */
+32 −6
Original line number Diff line number Diff line
@@ -1086,7 +1086,7 @@ static ssize_t mm_stat_show(struct device *dev,
	max_used = atomic_long_read(&zram->stats.max_used_pages);

	ret = scnprintf(buf, PAGE_SIZE,
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu\n",
			"%8llu %8llu %8llu %8lu %8ld %8llu %8lu %8llu %8llu %8llu\n",
			orig_size << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.compr_data_size),
			mem_used << PAGE_SHIFT,
@@ -1094,7 +1094,9 @@ static ssize_t mm_stat_show(struct device *dev,
			max_used << PAGE_SHIFT,
			(u64)atomic64_read(&zram->stats.same_pages),
			pool_stats.pages_compacted,
			(u64)atomic64_read(&zram->stats.huge_pages));
			(u64)atomic64_read(&zram->stats.huge_pages),
			zram_dedup_dup_size(zram),
			zram_dedup_meta_size(zram));
	up_read(&zram->init_lock);

	return ret;
@@ -1149,26 +1151,34 @@ static struct zram_entry *zram_entry_alloc(struct zram *zram,
					   unsigned int len, gfp_t flags)
{
	struct zram_entry *entry;
	unsigned long handle;

	entry = kzalloc(sizeof(*entry),
			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE|__GFP_CMA));
	if (!entry)
		return NULL;

	entry->handle = zs_malloc(zram->mem_pool, len, flags);
	if (!entry->handle) {
	handle = zs_malloc(zram->mem_pool, len, flags);
	if (!handle) {
		kfree(entry);
		return NULL;
	}

	zram_dedup_init_entry(zram, entry, handle, len);
	atomic64_add(sizeof(*entry), &zram->stats.meta_data_size);

	return entry;
}

static inline void zram_entry_free(struct zram *zram,
				   struct zram_entry *entry)
void zram_entry_free(struct zram *zram, struct zram_entry *entry)
{
	if (!zram_dedup_put_entry(zram, entry))
		return;

	zs_free(zram->mem_pool, entry->handle);
	kfree(entry);

	atomic64_sub(sizeof(*entry), &zram->stats.meta_data_size);
}

static void zram_meta_free(struct zram *zram, u64 disksize)
@@ -1181,6 +1191,7 @@ static void zram_meta_free(struct zram *zram, u64 disksize)
		zram_free_page(zram, index);

	zs_destroy_pool(zram->mem_pool);
	zram_dedup_fini(zram);
	vfree(zram->table);
}

@@ -1201,6 +1212,13 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)

	if (!huge_class_size)
		huge_class_size = zs_huge_class_size(zram->mem_pool);

	if (zram_dedup_init(zram, num_pages)) {
		vfree(zram->table);
		zs_destroy_pool(zram->mem_pool);
		return false;
	}

	return true;
}

@@ -1360,6 +1378,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
	void *src, *dst, *mem;
	struct zcomp_strm *zstrm;
	struct page *page = bvec->bv_page;
	u32 checksum;
	unsigned long element = 0;
	enum zram_pageflags flags = 0;

@@ -1373,6 +1392,12 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
	}
	kunmap_atomic(mem);

	entry = zram_dedup_find(zram, page, &checksum);
	if (entry) {
		comp_len = entry->len;
		goto out;
	}

compress_again:
	zstrm = zcomp_stream_get(zram->comp);
	src = kmap_atomic(page);
@@ -1441,6 +1466,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
	zcomp_stream_put(zram->comp);
	zs_unmap_object(zram->mem_pool, entry->handle);
	atomic64_add(comp_len, &zram->stats.compr_data_size);
	zram_dedup_insert(zram, entry, checksum);
out:
	/*
	 * Free memory associated with this sector
Loading