Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c11f0c0b authored by Jens Axboe
Browse files

block/mm: make bdev_ops->rw_page() take a bool for read/write



Commit abf54548 changed it from an 'rw' flags type to the
newer ops based interface, but now we're effectively leaking
some bdev internals to the rest of the kernel. Since we only
care about whether it's a read or a write at that level, just
pass in a bool 'is_write' parameter instead.

Then we can also move op_is_write() and friends back under
CONFIG_BLOCK protection.

Reviewed-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 52ddb7e9
Loading
Loading
Loading
Loading
+8 −8
Original line number Diff line number Diff line
@@ -300,20 +300,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int op,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (op_is_write(op)) {
	if (is_write) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page);
	if (!op_is_write(op)) {
	if (!is_write) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
@@ -350,8 +350,8 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
		unsigned int len = bvec.bv_len;
		int err;

		err = brd_do_bvec(brd, bvec.bv_page, len,
					bvec.bv_offset, bio_op(bio), sector);
		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
					op_is_write(bio_op(bio)), sector);
		if (err)
			goto io_error;
		sector += len >> SECTOR_SHIFT;
@@ -366,11 +366,11 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
}

static int brd_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int op)
		       struct page *page, bool is_write)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op, err);
	int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, err);
	return err;
}

+12 −11
Original line number Diff line number Diff line
@@ -843,15 +843,16 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, int op)
			int offset, bool is_write)
{
	unsigned long start_time = jiffies;
	int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
	int ret;

	generic_start_io_acct(op, bvec->bv_len >> SECTOR_SHIFT,
	generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
			&zram->disk->part0);

	if (!op_is_write(op)) {
	if (!is_write) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset);
	} else {
@@ -859,10 +860,10 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	generic_end_io_acct(op, &zram->disk->part0, start_time);
	generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);

	if (unlikely(ret)) {
		if (!op_is_write(op))
		if (!is_write)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
@@ -903,17 +904,17 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset,
					 bio_op(bio)) < 0)
					 op_is_write(bio_op(bio))) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0,
					 bio_op(bio)) < 0)
					 op_is_write(bio_op(bio))) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset,
					 bio_op(bio)) < 0)
					 op_is_write(bio_op(bio))) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
@@ -970,7 +971,7 @@ static void zram_slot_free_notify(struct block_device *bdev,
}

static int zram_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int op)
		       struct page *page, bool is_write)
{
	int offset, err = -EIO;
	u32 index;
@@ -994,7 +995,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
	bv.bv_len = PAGE_SIZE;
	bv.bv_offset = 0;

	err = zram_bvec_rw(zram, &bv, index, offset, op);
	err = zram_bvec_rw(zram, &bv, index, offset, is_write);
put_zram:
	zram_meta_put(zram);
out:
@@ -1007,7 +1008,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
	 * (e.g., SetPageError, set_page_dirty and extra works).
	 */
	if (err == 0)
		page_endio(page, op, 0);
		page_endio(page, is_write, 0);
	return err;
}

+6 −6
Original line number Diff line number Diff line
@@ -1133,11 +1133,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			int op, sector_t sector)
			bool is_write, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
@@ -1180,7 +1180,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
		BUG_ON(len % btt->sector_size);

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				  bio_op(bio), iter.bi_sector);
				  op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_info(&btt->nd_btt->dev,
					"io error in %s sector %lld, len %d,\n",
@@ -1200,12 +1200,12 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, int op)
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;

	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, op, sector);
	page_endio(page, op, 0);
	btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, is_write, sector);
	page_endio(page, is_write, 0);
	return 0;
}

+6 −6
Original line number Diff line number Diff line
@@ -67,7 +67,7 @@ static void pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
}

static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, int op,
			unsigned int len, unsigned int off, bool is_write,
			sector_t sector)
{
	int rc = 0;
@@ -79,7 +79,7 @@ static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!op_is_write(op)) {
	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
@@ -134,7 +134,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_op(bio),
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
@@ -152,12 +152,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, int op)
		       struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, op, sector);
	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
@@ -166,7 +166,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op, 0);
		page_endio(page, is_write, 0);

	return rc;
}
+2 −4
Original line number Diff line number Diff line
@@ -416,8 +416,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
	result = blk_queue_enter(bdev->bd_queue, false);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
	blk_queue_exit(bdev->bd_queue);
	return result;
}
@@ -455,8 +454,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
	if (result)
		end_page_writeback(page);
	else
Loading