Commit f73f44eb authored by Christoph Hellwig, committed by Jens Axboe

block: add a op_is_flush helper



This centralizes the checks for bios that need to go into the flush
state machine.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent c13660a0
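
For context: this commit also adds the helper itself to include/linux/blk_types.h (that hunk is not shown in the extract below). A minimal sketch of its shape, assuming it mirrors the neighbouring op_is_sync() helper:

static inline bool op_is_flush(unsigned int op)
{
	/* true for any bio/request that carries preflush or FUA semantics */
	return op & (REQ_FUA | REQ_PREFLUSH);
}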
block/blk-core.c  +4 −4
@@ -1035,7 +1035,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
	 * Flush requests do not use the elevator so skip initialization.
	 * This allows a request to share the flush and elevator data.
	 */
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(bio->bi_opf))
		return false;

	return true;
@@ -1641,7 +1641,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
		return BLK_QC_T_NONE;
	}

-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
+	if (op_is_flush(bio->bi_opf)) {
		spin_lock_irq(q->queue_lock);
		where = ELEVATOR_INSERT_FLUSH;
		goto get_rq;
@@ -2145,7 +2145,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
	 */
	BUG_ON(blk_queued_rq(rq));

-	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(rq->cmd_flags))
		where = ELEVATOR_INSERT_FLUSH;

	add_acct_request(q, rq, where);
@@ -3256,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
		/*
		 * rq is already accounted, so use raw insert
		 */
-		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+		if (op_is_flush(rq->cmd_flags))
			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
		else
			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
block/blk-mq-sched.c  +2 −3
@@ -111,7 +111,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	struct request *rq;
-	const bool is_flush = op & (REQ_PREFLUSH | REQ_FUA);

	blk_queue_enter_live(q);
	ctx = blk_mq_get_ctx(q);
@@ -126,7 +125,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
		 * Flush requests are special and go directly to the
		 * dispatch list.
		 */
-		if (!is_flush && e->type->ops.mq.get_request) {
+		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
			rq = e->type->ops.mq.get_request(q, op, data);
			if (rq)
				rq->rq_flags |= RQF_QUEUED;
@@ -139,7 +138,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
	}

	if (rq) {
-		if (!is_flush) {
+		if (!op_is_flush(op)) {
			rq->elv.icq = NULL;
			if (e && e->type->icq_cache)
				blk_mq_sched_assign_ioc(q, rq, bio);
block/blk-mq.c  +2 −2
@@ -1406,7 +1406,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_mq_alloc_data data = { .flags = 0 };
	struct request *rq;
	unsigned int request_count = 0, srcu_idx;
@@ -1527,7 +1527,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
{
	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	const int is_flush_fua = op_is_flush(bio->bi_opf);
	struct blk_plug *plug;
	unsigned int request_count = 0;
	struct blk_mq_alloc_data data = { .flags = 0 };
drivers/md/bcache/request.c  +1 −1
@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
-	s->iop.flush_journal	= (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
drivers/md/dm-cache-target.c  +3 −10
@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
-	if (cache->need_tick_bio &&
-	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
	    bio_op(bio) != REQ_OP_DISCARD) {
		pb->tick = true;
		cache->need_tick_bio = false;
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
	return to_oblock(block_nr);
}

-static int bio_triggers_commit(struct cache *cache, struct bio *bio)
-{
-	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-}
-
/*
 * You must increment the deferred set whilst the prison cell is held.  To
 * encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

-	if (!bio_triggers_commit(cache, bio)) {
+	if (!op_is_flush(bio->bi_opf)) {
		accounted_request(cache, bio);
		return;
	}
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)

static bool discard_or_flush(struct bio *bio)
{
-	return bio_op(bio) == REQ_OP_DISCARD ||
-	       bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
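
Every hunk above replaces an open-coded mask test with the helper, so the refactor must be truth-preserving: op_is_flush() returns bool, which normalizes to 0/1 exactly like the explicit "!= 0" the bcache hunk drops, and like the truthiness tests in the remaining call sites. A standalone userspace sketch that checks this equivalence (not kernel code; the REQ_* bit positions here are made-up values for illustration):

#include <assert.h>
#include <stdbool.h>

/* assumed flag layout, for illustration only */
#define REQ_FUA      (1u << 17)
#define REQ_PREFLUSH (1u << 18)

static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

int main(void)
{
	/* the helper agrees with the open-coded test for every flag pattern */
	for (unsigned int op = 0; op < (1u << 20); op++)
		assert(op_is_flush(op) == !!(op & (REQ_PREFLUSH | REQ_FUA)));
	return 0;
}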