Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 53cbdaaf authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Gerrit - the friendly Code Review server
Browse files

zram: compare all the entries with same checksum for deduplication



Until now, we compared just one entry with the same checksum when
checking for duplication, since that is the simplest way to implement it.
However, for completeness, checking all the entries is better,
so this patch implements comparing all the entries with the same checksum.
Since this event is rare, there should be no performance loss.

Change-Id: Ie7d61c14d127a28f5a06d85b0ca66b9fada20cbb
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: https://lore.kernel.org/patchwork/patch/787163/


Patch-mainline: linux-kernel@ Thu, 11 May 2017 22:30:29
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
parent 52cf034a
Loading
Loading
Loading
Loading
+47 −12
Original line number Original line Diff line number Diff line
@@ -109,32 +109,67 @@ static unsigned long zram_dedup_put(struct zram *zram,
	return entry->refcount;
	return entry->refcount;
}
}


static struct zram_entry *zram_dedup_get(struct zram *zram,
static struct zram_entry *__zram_dedup_get(struct zram *zram,
				unsigned char *mem, u32 checksum)
				struct zram_hash *hash, unsigned char *mem,
				struct zram_entry *entry)
{
{
	struct zram_hash *hash;
	struct zram_entry *tmp, *prev = NULL;
	struct zram_entry *entry;
	struct rb_node *rb_node;
	struct rb_node *rb_node;


	hash = &zram->hash[checksum % zram->hash_size];
	/* find left-most entry with same checksum */
	while ((rb_node = rb_prev(&entry->rb_node))) {
		tmp = rb_entry(rb_node, struct zram_entry, rb_node);
		if (tmp->checksum != entry->checksum)
			break;


	spin_lock(&hash->lock);
		entry = tmp;
	rb_node = hash->rb_root.rb_node;
	}
	while (rb_node) {

		entry = rb_entry(rb_node, struct zram_entry, rb_node);
again:
		if (checksum == entry->checksum) {
	entry->refcount++;
	entry->refcount++;
	atomic64_add(entry->len, &zram->stats.dup_data_size);
	atomic64_add(entry->len, &zram->stats.dup_data_size);
	spin_unlock(&hash->lock);
	spin_unlock(&hash->lock);


	if (prev)
		zram_entry_free(zram, prev);

	if (zram_dedup_match(zram, entry, mem))
	if (zram_dedup_match(zram, entry, mem))
		return entry;
		return entry;


	spin_lock(&hash->lock);
	tmp = NULL;
	rb_node = rb_next(&entry->rb_node);
	if (rb_node)
		tmp = rb_entry(rb_node, struct zram_entry, rb_node);

	if (tmp && (tmp->checksum == entry->checksum)) {
		prev = entry;
		entry = tmp;
		goto again;
	}

	spin_unlock(&hash->lock);
	zram_entry_free(zram, entry);
	zram_entry_free(zram, entry);


	return NULL;
	return NULL;
}
}


static struct zram_entry *zram_dedup_get(struct zram *zram,
				unsigned char *mem, u32 checksum)
{
	struct zram_hash *hash;
	struct zram_entry *entry;
	struct rb_node *rb_node;

	hash = &zram->hash[checksum % zram->hash_size];

	spin_lock(&hash->lock);
	rb_node = hash->rb_root.rb_node;
	while (rb_node) {
		entry = rb_entry(rb_node, struct zram_entry, rb_node);
		if (checksum == entry->checksum)
			return __zram_dedup_get(zram, hash, mem, entry);

		if (checksum < entry->checksum)
		if (checksum < entry->checksum)
			rb_node = rb_node->rb_left;
			rb_node = rb_node->rb_left;
		else
		else