
Commit dece1635 authored by Jens Axboe

block: change ->make_request_fn() and users to return a queue cookie

No functional changes in this patch, but it prepares us for returning
a more useful cookie related to the IO that was queued up.

Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
parent 8e483ed1
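
The conversion is mechanical: each ->make_request_fn() implementation changes its return type from void to blk_qc_t, and every exit path (success and error alike) returns BLK_QC_T_NONE for now. A minimal sketch of the pattern for a bio-based driver, where mydrv_dev and mydrv_do_io are hypothetical stand-ins and not part of this patch:

#include <linux/blkdev.h>

static blk_qc_t mydrv_make_request(struct request_queue *q, struct bio *bio)
{
	struct mydrv_dev *dev = q->queuedata;	/* hypothetical per-device data */

	if (mydrv_do_io(dev, bio) < 0) {	/* hypothetical transfer helper */
		bio_io_error(bio);
		return BLK_QC_T_NONE;		/* was: bare "return;" */
	}

	bio_endio(bio);
	return BLK_QC_T_NONE;			/* no useful cookie yet */
}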
arch/m68k/emu/nfblock.c  +2 −1

@@ -59,7 +59,7 @@ struct nfhd_device {
 	struct gendisk *disk;
 };
 
-static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
 	struct bio_vec bvec;
@@ -77,6 +77,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 		sec += len;
 	}
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
arch/powerpc/sysdev/axonram.c  +3 −2

@@ -103,7 +103,7 @@ axon_ram_irq_handler(int irq, void *dev)
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static void
+static blk_qc_t
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -120,7 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	bio_for_each_segment(vec, bio, iter) {
 		if (unlikely(phys_mem + vec.bv_len > phys_end)) {
 			bio_io_error(bio);
-			return;
+			return BLK_QC_T_NONE;
 		}
 
 		user_mem = page_address(vec.bv_page) + vec.bv_offset;
@@ -133,6 +133,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 		transfered += vec.bv_len;
 	}
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 /**
arch/xtensa/platforms/iss/simdisk.c  +2 −1

@@ -101,7 +101,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
 	spin_unlock(&dev->lock);
 }
 
-static void simdisk_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct simdisk *dev = q->queuedata;
 	struct bio_vec bvec;
@@ -119,6 +119,7 @@ static void simdisk_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	bio_endio(bio);
+	return BLK_QC_T_NONE;
 }
 
 static int simdisk_open(struct block_device *bdev, fmode_t mode)
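
For context, the blk_qc_t cookie these drivers now return is introduced in include/linux/blk_types.h (that hunk is not shown in this excerpt). Reconstructed from the upstream commit, it is roughly a plain integer handle, with room to pack a hardware queue number and tag once the cookie becomes meaningful:

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

/* a "useful" cookie later packs the hw queue number and request tag */
static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}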
block/blk-core.c  +16 −10

@@ -809,7 +809,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
-static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
 
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
@@ -1678,7 +1678,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-static void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1698,7 +1698,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio->bi_error = -EIO;
 		bio_endio(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
@@ -1713,7 +1713,7 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	if (!blk_queue_nomerges(q)) {
 		if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
-			return;
+			return BLK_QC_T_NONE;
 	} else
 		request_count = blk_plug_queued_count(q);
 
@@ -1791,6 +1791,8 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
+
+	return BLK_QC_T_NONE;
 }
 
 /*
@@ -1996,12 +1998,13 @@ generic_make_request_checks(struct bio *bio)
  * a lower device by calling into generic_make_request recursively, which
 * means the bio should NOT be touched after the call to ->make_request_fn.
  */
-void generic_make_request(struct bio *bio)
+blk_qc_t generic_make_request(struct bio *bio)
 {
 	struct bio_list bio_list_on_stack;
+	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (!generic_make_request_checks(bio))
-		return;
+		goto out;
 
 	/*
 	 * We only want one ->make_request_fn to be active at a time, else
@@ -2015,7 +2018,7 @@ void generic_make_request(struct bio *bio)
 	 */
 	if (current->bio_list) {
 		bio_list_add(current->bio_list, bio);
-		return;
+		goto out;
 	}
 
 	/* following loop may be a bit non-obvious, and so deserves some
@@ -2040,7 +2043,7 @@ void generic_make_request(struct bio *bio)
 
 		if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
 
-			q->make_request_fn(q, bio);
+			ret = q->make_request_fn(q, bio);
 
 			blk_queue_exit(q);
 
@@ -2053,6 +2056,9 @@ void generic_make_request(struct bio *bio)
 		}
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
+
+out:
+	return ret;
 }
 EXPORT_SYMBOL(generic_make_request);
 
@@ -2066,7 +2072,7 @@ EXPORT_SYMBOL(generic_make_request);
  * interfaces; @bio must be presetup and ready for I/O.
  *
 */
-void submit_bio(int rw, struct bio *bio)
+blk_qc_t submit_bio(int rw, struct bio *bio)
 {
 	bio->bi_rw |= rw;
 
@@ -2100,7 +2106,7 @@ void submit_bio(int rw, struct bio *bio)
 		}
 	}
 
-	generic_make_request(bio);
+	return generic_make_request(bio);
 }
 EXPORT_SYMBOL(submit_bio);
 
block/blk-mq.c  +14 −12

@@ -1235,7 +1235,7 @@ static int blk_mq_direct_issue_request(struct request *rq)
  * but will attempt to bypass the hctx queueing if we can go straight to
  * hardware for SYNC IO.
  */
-static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1249,7 +1249,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
@@ -1257,13 +1257,13 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	if (!is_flush_fua && !blk_queue_nomerges(q)) {
 		if (blk_attempt_plug_merge(q, bio, &request_count,
 					   &same_queue_rq))
-			return;
+			return BLK_QC_T_NONE;
 	} else
 		request_count = blk_plug_queued_count(q);
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1302,11 +1302,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			old_rq = rq;
 		blk_mq_put_ctx(data.ctx);
 		if (!old_rq)
-			return;
+			return BLK_QC_T_NONE;
 		if (!blk_mq_direct_issue_request(old_rq))
-			return;
+			return BLK_QC_T_NONE;
 		blk_mq_insert_request(old_rq, false, true, true);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1320,13 +1320,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
+	return BLK_QC_T_NONE;
 }
 
 /*
  * Single hardware queue variant. This will attempt to use any per-process
  * plug for merging and IO deferral.
 */
-static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
+static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = rw_is_sync(bio->bi_rw);
 	const int is_flush_fua = bio->bi_rw & (REQ_FLUSH | REQ_FUA);
@@ -1339,18 +1340,18 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio_io_error(bio);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	blk_queue_split(q, &bio, q->bio_split);
 
 	if (!is_flush_fua && !blk_queue_nomerges(q) &&
 	    blk_attempt_plug_merge(q, bio, &request_count, NULL))
-		return;
+		return BLK_QC_T_NONE;
 
 	rq = blk_mq_map_request(q, bio, &data);
 	if (unlikely(!rq))
-		return;
+		return BLK_QC_T_NONE;
 
 	if (unlikely(is_flush_fua)) {
 		blk_mq_bio_to_request(rq, bio);
@@ -1374,7 +1375,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		}
 		list_add_tail(&rq->queuelist, &plug->mq_list);
 		blk_mq_put_ctx(data.ctx);
-		return;
+		return BLK_QC_T_NONE;
 	}
 
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1389,6 +1390,7 @@ static void blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	}
 
 	blk_mq_put_ctx(data.ctx);
+	return BLK_QC_T_NONE;
 }
 
 /*
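
On the caller side, submit_bio() and generic_make_request() now hand the cookie back to the submitter, though nothing consumes it in this patch. The intended pattern, assuming the polling interface added by follow-up work (blk_poll() and the wait_done flag below are not part of this commit), looks roughly like:

	blk_qc_t cookie = submit_bio(READ | REQ_SYNC, bio);

	/* wait_done would be set from the bio's ->bi_end_io; illustrative only */
	while (!atomic_read(&wait_done)) {
		if (!blk_poll(bdev_get_queue(bdev), cookie))
			io_schedule();	/* queue cannot poll; fall back to sleeping */
	}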