
Commit e6047149 authored by Mike Christie, committed by Jens Axboe

dm: use bio op accessors



Separate the op from the rq_flag_bits and have dm
set/get the bio using bio_set_op_attrs/bio_op.
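For a dm_io user the change looks like this — a minimal before/after sketch distilled from the dm-bufio hunk below (elided members unchanged):

	/* before: operation and flags mixed into one field */
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		/* ... */
	};

	/* after: the REQ_OP_* operation lives apart from the rq_flag_bits */
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = WRITE_FLUSH,
		/* ... */
	};

Bios get the same treatment: bio_set_op_attrs(bio, op, op_flags) stores both halves, and bio_op(bio) reads the operation back, so discard tests become bio_op(bio) == REQ_OP_DISCARD rather than masking bi_rw with REQ_DISCARD.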

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 528ec5ab
drivers/md/dm-bufio.c +5 −3
@@ -574,7 +574,8 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
{
	int r;
	struct dm_io_request io_req = {
-		.bi_rw = rw,
+		.bi_op = rw,
+		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
@@ -634,7 +635,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
	 * the dm_buffer's inline bio is local to bufio.
	 */
	b->bio.bi_private = end_io;
-	b->bio.bi_rw = rw;
+	bio_set_op_attrs(&b->bio, rw, 0);

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
@@ -1327,7 +1328,8 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
-		.bi_rw = WRITE_FLUSH,
+		.bi_op = REQ_OP_WRITE,
+		.bi_op_flags = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
drivers/md/dm-cache-target.c +6 −4
@@ -788,7 +788,8 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio &&
-	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH)) &&
+	    bio_op(bio) != REQ_OP_DISCARD) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
@@ -851,7 +852,7 @@ static void inc_ds(struct cache *cache, struct bio *bio,
static bool accountable_bio(struct cache *cache, struct bio *bio)
{
	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-		!(bio->bi_rw & REQ_DISCARD));
+		bio_op(bio) != REQ_OP_DISCARD);
}

static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -1067,7 +1068,8 @@ static void dec_io_migrations(struct cache *cache)

static bool discard_or_flush(struct bio *bio)
{
-	return bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD);
+	return bio_op(bio) == REQ_OP_DISCARD ||
+	       bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
@@ -1980,7 +1982,7 @@ static void process_deferred_bios(struct cache *cache)

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
-		else if (bio->bi_rw & REQ_DISCARD)
+		else if (bio_op(bio) == REQ_OP_DISCARD)
			process_discard_bio(cache, &structs, bio);
		else
			process_bio(cache, &structs, bio);
drivers/md/dm-crypt.c +4 −4
@@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
	clone->bi_private = io;
	clone->bi_end_io  = crypt_endio;
	clone->bi_bdev    = cc->dev->bdev;
-	clone->bi_rw      = io->base_bio->bi_rw;
+	bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_rw);
}

static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -1911,11 +1911,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
	struct crypt_config *cc = ti->private;

	/*
-	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
+	 * If bio is REQ_FLUSH or REQ_OP_DISCARD, just bypass crypt queues.
 	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
-	 * - for REQ_DISCARD caller must use flush if IO ordering matters
+	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
-	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
+	if (unlikely(bio->bi_rw & REQ_FLUSH || bio_op(bio) == REQ_OP_DISCARD)) {
		bio->bi_bdev = cc->dev->bdev;
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
drivers/md/dm-io.c +30 −26
@@ -278,8 +278,9 @@ static void km_dp_init(struct dpages *dp, void *data)
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
-static void do_region(int rw, unsigned region, struct dm_io_region *where,
-		      struct dpages *dp, struct io *io)
+static void do_region(int op, int op_flags, unsigned region,
+		      struct dm_io_region *where, struct dpages *dp,
+		      struct io *io)
{
	struct bio *bio;
	struct page *page;
@@ -295,24 +296,25 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
	/*
	 * Reject unsupported discard and write same requests.
	 */
-	if (rw & REQ_DISCARD)
+	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
-	else if (rw & REQ_WRITE_SAME)
+	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
-	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
+	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
+	    special_cmd_max_sectors == 0) {
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
-	 * where->count may be zero if rw holds a flush and we need to
+	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized-bio.
		 */
-		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
+		if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, BIO_MAX_PAGES,
@@ -322,14 +324,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
-		bio->bi_rw = rw;
+		bio_set_op_attrs(bio, op, op_flags);
		store_io_and_region_in_bio(bio, io, region);

-		if (rw & REQ_DISCARD) {
+		if (op == REQ_OP_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
-		} else if (rw & REQ_WRITE_SAME) {
+		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
@@ -360,7 +362,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
	} while (remaining);
}

-static void dispatch_io(int rw, unsigned int num_regions,
+static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
@@ -370,7 +372,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
-		rw |= REQ_SYNC;
+		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
@@ -378,8 +380,8 @@ static void dispatch_io(int rw, unsigned int num_regions,
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
-		if (where[i].count || (rw & REQ_FLUSH))
-			do_region(rw, i, where + i, dp, io);
+		if (where[i].count || (op_flags & REQ_FLUSH))
+			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
@@ -403,13 +405,13 @@ static void sync_io_complete(unsigned long error, void *context)
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
-		   struct dm_io_region *where, int rw, struct dpages *dp,
-		   unsigned long *error_bits)
+		   struct dm_io_region *where, int op, int op_flags,
+		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

-	if (num_regions > 1 && !op_is_write(rw)) {
+	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}
@@ -426,7 +428,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

-	dispatch_io(rw, num_regions, where, dp, io, 1);
+	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

@@ -437,12 +439,12 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
-		    struct dm_io_region *where, int rw, struct dpages *dp,
-		    io_notify_fn fn, void *context)
+		    struct dm_io_region *where, int op, int op_flags,
+		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

-	if (num_regions > 1 && !op_is_write(rw)) {
+	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
@@ -458,7 +460,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

-	dispatch_io(rw, num_regions, where, dp, io, 0);
+	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}

@@ -481,7 +483,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
-		if ((io_req->bi_rw & RW_MASK) == READ) {
+		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
@@ -519,10 +521,12 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
-			       io_req->bi_rw, &dp, sync_error_bits);
+			       io_req->bi_op, io_req->bi_op_flags, &dp,
+			       sync_error_bits);

-	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
-			&dp, io_req->notify.fn, io_req->notify.context);
+	return async_io(io_req->client, num_regions, where, io_req->bi_op,
+			io_req->bi_op_flags, &dp, io_req->notify.fn,
+			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

drivers/md/dm-kcopyd.c +3 −2
@@ -496,7 +496,8 @@ static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
-		.bi_rw = job->rw,
+		.bi_op = job->rw,
+		.bi_op_flags = 0,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = 0,
@@ -734,7 +735,7 @@ int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		/*
		 * Use WRITE SAME to optimize zeroing if all dests support it.
		 */
-		job->rw = WRITE | REQ_WRITE_SAME;
+		job->rw = REQ_OP_WRITE_SAME;
		for (i = 0; i < job->num_dests; i++)
			if (!bdev_write_same(job->dests[i].bdev)) {
				job->rw = WRITE;
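
Why the split matters: an operation is an exclusive value (a bio is exactly one of read, write, discard, write-same, ...), while the rq_flag_bits remain OR-able modifiers (REQ_SYNC, REQ_FUA, REQ_FLUSH, ...). That is why the hunks above turn bi_rw & REQ_DISCARD masks into bio_op(bio) == REQ_OP_DISCARD equality checks but leave the flag masks alone. A toy user-space model of the accessor pair — the shift and the constants are invented for illustration, not the kernel's actual encoding:

#include <assert.h>

/* hypothetical layout: op value in the top byte, flag bits below it */
#define OP_SHIFT   24
#define OP_WRITE   1u
#define OP_DISCARD 3u
#define FLAG_SYNC  (1u << 0)
#define FLAG_FUA   (1u << 1)

struct toy_bio { unsigned int rw; };

/* models bio_set_op_attrs(): pack one op value plus OR-able flags */
static void toy_set_op_attrs(struct toy_bio *bio, unsigned int op,
			     unsigned int flags)
{
	bio->rw = (op << OP_SHIFT) | flags;
}

/* models bio_op(): recover the exclusive op value */
static unsigned int toy_op(const struct toy_bio *bio)
{
	return bio->rw >> OP_SHIFT;
}

int main(void)
{
	struct toy_bio bio;

	toy_set_op_attrs(&bio, OP_DISCARD, FLAG_SYNC);
	assert(toy_op(&bio) == OP_DISCARD); /* equality test, not a mask */
	assert(bio.rw & FLAG_SYNC);         /* flags still test as bits */
	assert(!(bio.rw & FLAG_FUA));
	return 0;
}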