
Commit 8c921b2b authored by Jerome Marchand, committed by Greg Kroah-Hartman

Staging: zram: Refactor zram_read/write() functions

This patch refactors the code of the zram_read/write() functions. It
does not remove much duplicate code by itself, but mostly serves as a
helper for the third patch of this series (Staging: zram: allow
partial page operations).

Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
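
In essence, the patch turns the two per-bio loops into per-bvec helpers
(zram_bvec_read/zram_bvec_write) that return 0 or an error, a small
direction dispatcher (zram_bvec_rw), and a single loop
(__zram_make_request) that walks the bio and completes it. The
standalone C sketch below models only that control-flow shape; the
segment/request types and the seg_* names are simplified stand-ins
invented for illustration, not the kernel's bio/bio_vec API:

#include <stdio.h>

struct segment { int data; };
struct request { int rw; int nr_segs; struct segment segs[4]; };

enum { SKETCH_READ, SKETCH_WRITE };

/* Per-segment handlers: like zram_bvec_read()/zram_bvec_write(), they
 * report 0 or an error for one segment instead of completing the
 * whole request themselves. */
static int seg_read(struct segment *seg, unsigned int index)
{
	printf("read  segment %u (data=%d)\n", index, seg->data);
	return 0;
}

static int seg_write(struct segment *seg, unsigned int index)
{
	printf("write segment %u (data=%d)\n", index, seg->data);
	return 0;
}

/* Direction dispatch for one segment, as zram_bvec_rw() does. */
static int seg_rw(struct segment *seg, unsigned int index, int rw)
{
	if (rw == SKETCH_READ)
		return seg_read(seg, index);

	return seg_write(seg, index);
}

/* The single request-walking loop, as __zram_make_request(): iterate
 * over the segments and stop at the first per-segment error. */
static int make_request(struct request *req)
{
	unsigned int index = 0;
	int i;

	for (i = 0; i < req->nr_segs; i++) {
		if (seg_rw(&req->segs[i], index, req->rw) < 0)
			return -1;	/* kernel code calls bio_io_error() */
		index++;
	}

	return 0;	/* kernel code sets BIO_UPTODATE, calls bio_endio() */
}

int main(void)
{
	struct request req = { SKETCH_WRITE, 2, { { 10 }, { 20 } } };

	return make_request(&req) ? 1 : 0;
}

Keeping the per-segment work behind a function that takes an explicit
index is presumably what lets the later partial-page patch change how
one segment is handled without touching the request-walking loop.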
parent 6a587e83
drivers/staging/zram/zram_drv.c +155 −160
@@ -203,17 +203,9 @@ static void handle_uncompressed_page(struct zram *zram,
 	flush_dcache_page(page);
 }
 
-static void zram_read(struct zram *zram, struct bio *bio)
+static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
+			  u32 index, struct bio *bio)
 {
-
-	int i;
-	u32 index;
-	struct bio_vec *bvec;
-
-	zram_stat64_inc(zram, &zram->stats.num_reads);
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
-	bio_for_each_segment(bvec, bio, i) {
 	int ret;
 	size_t clen;
 	struct page *page;
@@ -224,8 +216,7 @@ static void zram_read(struct zram *zram, struct bio *bio)
 
 	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
 		handle_zero_page(page);
-			index++;
-			continue;
+		return 0;
 	}
 
 	/* Requested page is not present in compressed area */
@@ -233,15 +224,13 @@ static void zram_read(struct zram *zram, struct bio *bio)
 		pr_debug("Read before write: sector=%lu, size=%u",
 			 (ulong)(bio->bi_sector), bio->bi_size);
 		handle_zero_page(page);
-			index++;
-			continue;
+		return 0;
 	}
 
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		handle_uncompressed_page(zram, page, index);
-			index++;
-			continue;
+		return 0;
 	}
 
 	user_mem = kmap_atomic(page, KM_USER0);
@@ -250,8 +239,7 @@ static void zram_read(struct zram *zram, struct bio *bio)
 	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
 		zram->table[index].offset;
 
-		ret = lzo1x_decompress_safe(
-			cmem + sizeof(*zheader),
+	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
 				    xv_get_object_size(cmem) - sizeof(*zheader),
 				    user_mem, &clen);
 
@@ -260,34 +248,18 @@ static void zram_read(struct zram *zram, struct bio *bio)
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
-			pr_err("Decompression failed! err=%d, page=%u\n",
-				ret, index);
+		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		zram_stat64_inc(zram, &zram->stats.failed_reads);
-			goto out;
+		return ret;
 	}
 
 	flush_dcache_page(page);
-		index++;
-	}
-
-	set_bit(BIO_UPTODATE, &bio->bi_flags);
-	bio_endio(bio, 0);
-	return;
-
-out:
-	bio_io_error(bio);
+
+	return 0;
 }
 
-static void zram_write(struct zram *zram, struct bio *bio)
+static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
 {
-	int i;
-	u32 index;
-	struct bio_vec *bvec;
-
-	zram_stat64_inc(zram, &zram->stats.num_writes);
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-
-	bio_for_each_segment(bvec, bio, i) {
 	int ret;
 	u32 offset;
 	size_t clen;
@@ -314,8 +286,7 @@ static void zram_write(struct zram *zram, struct bio *bio)
 		mutex_unlock(&zram->lock);
 		zram_stat_inc(&zram->stats.pages_zero);
 		zram_set_flag(zram, index, ZRAM_ZERO);
-			index++;
-			continue;
+		return 0;
 	}
 
 	ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
@@ -327,7 +298,7 @@ static void zram_write(struct zram *zram, struct bio *bio)
 		mutex_unlock(&zram->lock);
 		pr_err("Compression failed! err=%d\n", ret);
 		zram_stat64_inc(zram, &zram->stats.failed_writes);
-			goto out;
+		return ret;
 	}
 
 	/*
@@ -342,9 +313,8 @@ static void zram_write(struct zram *zram, struct bio *bio)
 			mutex_unlock(&zram->lock);
 			pr_info("Error allocating memory for "
 				"incompressible page: %u\n", index);
-				zram_stat64_inc(zram,
-					&zram->stats.failed_writes);
-				goto out;
+			zram_stat64_inc(zram, &zram->stats.failed_writes);
+			return -ENOMEM;
 			}
 
 		offset = 0;
@@ -362,7 +332,7 @@ static void zram_write(struct zram *zram, struct bio *bio)
 		pr_info("Error allocating memory for compressed "
 			"page: %u, size=%zu\n", index, clen);
 		zram_stat64_inc(zram, &zram->stats.failed_writes);
-			goto out;
+		return -ENOMEM;
 	}
 
 memstore:
@@ -393,6 +363,39 @@ static void zram_write(struct zram *zram, struct bio *bio)
 		zram_stat_inc(&zram->stats.good_compress);
 
 	mutex_unlock(&zram->lock);
+
+	return 0;
+}
+
+static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
+			struct bio *bio, int rw)
+{
+	if (rw == READ)
+		return zram_bvec_read(zram, bvec, index, bio);
+
+	return zram_bvec_write(zram, bvec, index);
+}
+
+static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+{
+	int i;
+	u32 index;
+	struct bio_vec *bvec;
+
+	switch (rw) {
+	case READ:
+		zram_stat64_inc(zram, &zram->stats.num_reads);
+		break;
+	case WRITE:
+		zram_stat64_inc(zram, &zram->stats.num_writes);
+		break;
+	}
+
+	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+
+	bio_for_each_segment(bvec, bio, i) {
+		if (zram_bvec_rw(zram, bvec, index, bio, rw) < 0)
+			goto out;
 		index++;
 	}
 
@@ -439,15 +442,7 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
 		return 0;
 	}
 
-	switch (bio_data_dir(bio)) {
-	case READ:
-		zram_read(zram, bio);
-		break;
-
-	case WRITE:
-		zram_write(zram, bio);
-		break;
-	}
+	__zram_make_request(zram, bio, bio_data_dir(bio));
 
 	return 0;
 }