
Commit 055f6e18 authored by Ming Lei, committed by Jens Axboe

block: Make q_usage_counter also track legacy requests



This patch makes it possible to pause request allocation for
the legacy block layer by calling blk_mq_freeze_queue() and
blk_mq_unfreeze_queue().
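
As a hedged illustration of what this enables (the surrounding driver function is hypothetical; blk_mq_freeze_queue() and blk_mq_unfreeze_queue() are the interfaces named above):

	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* Hypothetical example: quiesce a queue, legacy or mq, around a state change. */
	static void example_reconfigure(struct request_queue *q)
	{
		/*
		 * Kills the percpu ref so new blk_queue_enter() callers block
		 * (or fail, if they cannot sleep), then waits for
		 * q_usage_counter to drain. After this patch that counter also
		 * covers legacy request allocation, so both paths are paused.
		 */
		blk_mq_freeze_queue(q);

		/* ... update queue state with no requests in flight ... */

		blk_mq_unfreeze_queue(q);	/* resume request allocation */
	}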

Signed-off-by: Ming Lei <ming.lei@redhat.com>
[ bvanassche: Combined two patches into one, edited a comment and made sure
  REQ_NOWAIT is handled properly in blk_old_get_request() ]
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Martin Steigerwald <martin@lichtvoll.de>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent eb619fdb
block/blk-core.c +12 −0
@@ -612,6 +612,9 @@ void blk_set_queue_dying(struct request_queue *q)
 		}
 		spin_unlock_irq(q->queue_lock);
 	}
+
+	/* Make blk_queue_enter() reexamine the DYING flag. */
+	wake_up_all(&q->mq_freeze_wq);
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
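
For context (a paraphrase of blk_queue_enter() from this era, not part of this diff): callers sleep on mq_freeze_wq whenever the tryget fails, so marking a queue DYING must wake the waitqueue or those sleepers would never recheck the flag:

	int blk_queue_enter(struct request_queue *q, bool nowait)
	{
		while (true) {
			/* Fast path: queue is live and not frozen. */
			if (percpu_ref_tryget_live(&q->q_usage_counter))
				return 0;

			if (nowait)
				return -EBUSY;

			/* Sleep until unfrozen or marked dying, then recheck. */
			wait_event(q->mq_freeze_wq,
				   !atomic_read(&q->mq_freeze_depth) ||
				   blk_queue_dying(q));
			if (blk_queue_dying(q))
				return -ENODEV;
		}
	}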

@@ -1398,16 +1401,22 @@ static struct request *blk_old_get_request(struct request_queue *q,
 					   unsigned int op, gfp_t gfp_mask)
 {
 	struct request *rq;
+	int ret = 0;
 
 	WARN_ON_ONCE(q->mq_ops);
 
 	/* create ioc upfront */
 	create_io_context(gfp_mask, q->node);
 
+	ret = blk_queue_enter(q, !(gfp_mask & __GFP_DIRECT_RECLAIM) ||
+			      (op & REQ_NOWAIT));
+	if (ret)
+		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
 	rq = get_request(q, op, NULL, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
+		blk_queue_exit(q);
 		return rq;
 	}

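The second argument to blk_queue_enter() decides whether allocation may block on a frozen queue: a gfp_mask without __GFP_DIRECT_RECLAIM, or an op carrying REQ_NOWAIT, must fail fast instead of sleeping. A sketch of the two behaviours as seen by a hypothetical caller (blk_get_request() is the public wrapper that reaches this function on legacy queues):

	#include <linux/blkdev.h>

	/* Hypothetical illustration of the two allocation modes. */
	static int example_alloc(struct request_queue *q)
	{
		struct request *rq;

		/* GFP_KERNEL includes __GFP_DIRECT_RECLAIM: may sleep in
		 * blk_queue_enter() until a frozen queue is unfrozen. */
		rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		blk_put_request(rq);

		/* GFP_NOWAIT does not: a frozen queue yields ERR_PTR(-EBUSY)
		 * immediately rather than blocking. */
		rq = blk_get_request(q, REQ_OP_DRV_IN, GFP_NOWAIT);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
		blk_put_request(rq);
		return 0;
	}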
@@ -1579,6 +1588,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		blk_free_request(rl, req);
 		freed_request(rl, sync, rq_flags);
 		blk_put_rl(rl);
+		blk_queue_exit(q);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
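
The blk_queue_exit() added here drops the q_usage_counter reference taken at allocation time, so every legacy request now holds the counter from allocation to free. At this point in history blk_queue_exit() is, approximately, a one-line wrapper (paraphrased for context, not part of this diff):

	void blk_queue_exit(struct request_queue *q)
	{
		percpu_ref_put(&q->q_usage_counter);
	}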
@@ -1860,8 +1870,10 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
+	blk_queue_enter_live(q);
 	req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
+		blk_queue_exit(q);
 		__wbt_done(q->rq_wb, wb_acct);
 		if (PTR_ERR(req) == -ENOMEM)
 			bio->bi_status = BLK_STS_RESOURCE;
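
blk_queue_enter_live() takes the reference unconditionally because generic_make_request() has already entered the queue for this bio, so the queue cannot be frozen or torn down underneath us. Approximately (paraphrased from block/blk.h of this era):

	static inline void blk_queue_enter_live(struct request_queue *q)
	{
		/*
		 * The caller guarantees the queue is alive, so grab a
		 * reference without the tryget/dying checks that
		 * blk_queue_enter() performs.
		 */
		percpu_ref_get(&q->q_usage_counter);
	}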
block/blk-mq.c +2 −8
@@ -126,6 +126,7 @@ void blk_freeze_queue_start(struct request_queue *q)
 	freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
 	if (freeze_depth == 1) {
 		percpu_ref_kill(&q->q_usage_counter);
-		blk_mq_run_hw_queues(q, false);
+		if (q->mq_ops)
+			blk_mq_run_hw_queues(q, false);
 	}
 }
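
The new q->mq_ops check is needed because legacy queues now reach this function too, and blk_mq_run_hw_queues() only makes sense for mq queues. Once percpu_ref_kill() switches the counter to atomic mode, the freeze completes when existing references drain; the wait side is roughly (a paraphrase, not part of this diff):

	void blk_mq_freeze_queue_wait(struct request_queue *q)
	{
		/* Woken from the percpu_ref release callback once the last
		 * blk_queue_exit() drops q_usage_counter to zero. */
		wait_event(q->mq_freeze_wq,
			   percpu_ref_is_zero(&q->q_usage_counter));
	}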
@@ -256,13 +257,6 @@ void blk_mq_wake_waiters(struct request_queue *q)
 	queue_for_each_hw_ctx(q, hctx, i)
 		if (blk_mq_hw_queue_mapped(hctx))
 			blk_mq_tag_wakeup_all(hctx->tags, true);
-
-	/*
-	 * If we are called because the queue has now been marked as
-	 * dying, we need to ensure that processes currently waiting on
-	 * the queue are notified as well.
-	 */
-	wake_up_all(&q->mq_freeze_wq);
 }
 
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)