Commit 429fba10 authored by Linus Torvalds

Merge tag 'for-linus-20190407' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Fixups for the pf/pcd queue handling (YueHaibing)

 - Revert of the three direct issue changes as they have been proven to
   cause an issue with dm-mpath (Bart)

 - Plug rq_count reset fix (Dongli)

 - io_uring double free in fileset registration error handling (me)

 - Make null_blk handle bad numa node passed in (John)

 - BFQ ifdef fix (Konstantin)

 - Flush queue leak fix (Shenghui)

 - Plug trace fix (Yufen)

* tag 'for-linus-20190407' of git://git.kernel.dk/linux-block:
  xsysace: Fix error handling in ace_setup
  null_blk: prevent crash from bad home_node value
  block: Revert v5.0 blk_mq_request_issue_directly() changes
  paride/pcd: Fix potential NULL pointer dereference and mem leak
  blk-mq: do not reset plug->rq_count before the list is sorted
  paride/pf: Fix potential NULL pointer dereference
  io_uring: fix double free in case of fileset registration failure
  blk-mq: add trace block plug and unplug for multiple queues
  block: use blk_free_flush_queue() to free hctx->fq in blk_mq_init_hctx
  block/bfq: fix ifdef for CONFIG_BFQ_GROUP_IOSCHED=y
parents 3b046891 47b16820
block/bfq-iosched.c (+1 −1)
@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
 	 * at least two nodes.
 	 */
 	return !(varied_queue_weights || multiple_classes_busy
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	       || bfqd->num_groups_with_pending_reqs > 0
 #endif
 		);
block/bfq-wf2q.c (+1 −1)
@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
 		entity->on_st = true;
 	}
 
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
 	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
 		struct bfq_group *bfqg =
 			container_of(entity, struct bfq_group, entity);
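
Both BFQ hunks fix the same one-character class of bug: the build system only ever defines CONFIG_-prefixed macros for Kconfig options, so a guard spelled BFQ_GROUP_IOSCHED_ENABLED is never defined and the code inside it is silently compiled out even with CONFIG_BFQ_GROUP_IOSCHED=y. A minimal userspace sketch of the failure mode (demo.c is a hypothetical file name):

/* demo.c - build once plain, once with the macro the build system
 * would emit; only the second build prints anything:
 *   cc -o demo demo.c && ./demo
 *   cc -DCONFIG_BFQ_GROUP_IOSCHED -o demo demo.c && ./demo
 */
#include <stdio.h>

int main(void)
{
#ifdef BFQ_GROUP_IOSCHED_ENABLED	/* wrong spelling: never defined */
	puts("group check active (old guard)");
#endif
#ifdef CONFIG_BFQ_GROUP_IOSCHED		/* what Kconfig actually defines */
	puts("group check active (CONFIG_ guard)");
#endif
	return 0;
}
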
block/blk-core.c (+1 −3)
@@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-	blk_qc_t unused;
-
 	if (blk_cloned_rq_check_limits(q, rq))
 		return BLK_STS_IOERR;
 
@@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 	 * bypass a potential scheduler on the bottom device for
 	 * insert.
 	 */
-	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+	return blk_mq_request_issue_directly(rq, true);
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
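Why the return value matters here: a request-based stacked driver in the dm-rq mold submits the clone to the bottom device and relies on the returned status to decide between completion and requeue. A userspace sketch of that caller-side contract (illustrative names only, not the dm-rq source):

#include <stdbool.h>
#include <stdio.h>

enum blk_status { BLK_STS_OK, BLK_STS_IOERR, BLK_STS_RESOURCE };

/* stands in for blk_insert_cloned_request() on the bottom device */
static enum blk_status insert_cloned_request(bool bottom_busy)
{
	return bottom_busy ? BLK_STS_RESOURCE : BLK_STS_OK;
}

int main(void)
{
	switch (insert_cloned_request(true)) {
	case BLK_STS_RESOURCE:
		/* the status the revert makes visible again */
		puts("requeue on the top device and retry later");
		break;
	case BLK_STS_OK:
		puts("clone dispatched to the bottom device");
		break;
	default:
		puts("fail the request");
	}
	return 0;
}
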
block/blk-mq-sched.c (+5 −3)
@@ -423,9 +423,11 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
 		 * busy in case of 'none' scheduler, and this way may save
 		 * us one extra enqueue & dequeue to sw queue.
 		 */
-		if (!hctx->dispatch_busy && !e && !run_queue_async)
+		if (!hctx->dispatch_busy && !e && !run_queue_async) {
 			blk_mq_try_issue_list_directly(hctx, list);
-		else
-			blk_mq_insert_requests(hctx, ctx, list);
+			if (list_empty(list))
+				return;
+		}
+		blk_mq_insert_requests(hctx, ctx, list);
 	}
 
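The restored shape of this function is "issue directly whatever the driver will take now, then insert the leftovers", replacing the previous all-or-nothing if/else. A small sketch of that pattern (illustrative names, not kernel code):

#include <stdio.h>

#define NR_RQS 4
#define DRIVER_BUDGET 2	/* the driver accepts only this many right now */

/* try to issue requests directly; return how many were consumed */
static int try_issue_list_directly(const int *rqs, int n)
{
	int i;

	for (i = 0; i < n && i < DRIVER_BUDGET; i++)
		printf("issued rq %d directly\n", rqs[i]);
	return i;
}

int main(void)
{
	int rqs[NR_RQS] = { 1, 2, 3, 4 };
	int done = try_issue_list_directly(rqs, NR_RQS);

	/* anything the driver would not take is inserted, not dropped */
	for (; done < NR_RQS; done++)
		printf("inserted rq %d for later dispatch\n", rqs[done]);
	return 0;
}
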
block/blk-mq.c (+68 −61)
@@ -1711,11 +1711,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	unsigned int depth;
 
 	list_splice_init(&plug->mq_list, &list);
-	plug->rq_count = 0;
 
 	if (plug->rq_count > 2 && plug->multiple_queues)
 		list_sort(NULL, &list, plug_rq_cmp);
 
+	plug->rq_count = 0;
+
 	this_q = NULL;
 	this_hctx = NULL;
 	this_ctx = NULL;
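
The bug here is pure ordering: plug->rq_count was zeroed before the very test that reads it, so the list_sort() branch was dead code. A compilable toy model (simplified types, not kernel code):

#include <stdio.h>

struct plug { int rq_count; int multiple_queues; };

static void flush_buggy(struct plug *plug)
{
	plug->rq_count = 0;			/* reset too early... */
	if (plug->rq_count > 2 && plug->multiple_queues)
		puts("sorting");		/* ...so this never runs */
	else
		puts("not sorting");
}

static void flush_fixed(struct plug *plug)
{
	if (plug->rq_count > 2 && plug->multiple_queues)
		puts("sorting");
	plug->rq_count = 0;			/* reset after the test */
}

int main(void)
{
	struct plug p = { .rq_count = 4, .multiple_queues = 1 };

	flush_buggy(&p);	/* prints "not sorting" despite 4 requests */
	p.rq_count = 4;
	flush_fixed(&p);	/* prints "sorting" */
	return 0;
}
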
@@ -1800,95 +1801,99 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
 	return ret;
 }
 
-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 						struct request *rq,
 						blk_qc_t *cookie,
-						bool bypass, bool last)
+						bool bypass_insert, bool last)
 {
 	struct request_queue *q = rq->q;
 	bool run_queue = true;
-	blk_status_t ret = BLK_STS_RESOURCE;
-	int srcu_idx;
-	bool force = false;
 
-	hctx_lock(hctx, &srcu_idx);
 	/*
-	 * hctx_lock is needed before checking quiesced flag.
+	 * RCU or SRCU read lock is needed before checking quiesced flag.
 	 *
-	 * When queue is stopped or quiesced, ignore 'bypass', insert
-	 * and return BLK_STS_OK to caller, and avoid driver to try to
-	 * dispatch again.
+	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
+	 * and avoid driver to try to dispatch again.
 	 */
-	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
 		run_queue = false;
-		bypass = false;
-		goto out_unlock;
+		bypass_insert = false;
+		goto insert;
 	}
 
-	if (unlikely(q->elevator && !bypass))
-		goto out_unlock;
+	if (q->elevator && !bypass_insert)
+		goto insert;
 
 	if (!blk_mq_get_dispatch_budget(hctx))
-		goto out_unlock;
+		goto insert;
 
 	if (!blk_mq_get_driver_tag(rq)) {
 		blk_mq_put_dispatch_budget(hctx);
-		goto out_unlock;
+		goto insert;
 	}
 
-	/*
-	 * Always add a request that has been through
-	 *.queue_rq() to the hardware dispatch list.
-	 */
-	force = true;
-	ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
-	hctx_unlock(hctx, srcu_idx);
-	switch (ret) {
-	case BLK_STS_OK:
-		break;
-	case BLK_STS_DEV_RESOURCE:
-	case BLK_STS_RESOURCE:
-		if (force) {
-			blk_mq_request_bypass_insert(rq, run_queue);
-			/*
-			 * We have to return BLK_STS_OK for the DM
-			 * to avoid livelock. Otherwise, we return
-			 * the real result to indicate whether the
-			 * request is direct-issued successfully.
-			 */
-			ret = bypass ? BLK_STS_OK : ret;
-		} else if (!bypass) {
-			blk_mq_sched_insert_request(rq, false,
-						    run_queue, false);
-		}
-		break;
-	default:
-		if (!bypass)
-			blk_mq_end_request(rq, ret);
-		break;
+	return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+	if (bypass_insert)
+		return BLK_STS_RESOURCE;
+
+	blk_mq_request_bypass_insert(rq, run_queue);
+	return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, blk_qc_t *cookie)
+{
+	blk_status_t ret;
+	int srcu_idx;
+
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+	hctx_lock(hctx, &srcu_idx);
+
+	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+		blk_mq_request_bypass_insert(rq, true);
+	else if (ret != BLK_STS_OK)
+		blk_mq_end_request(rq, ret);
+
+	hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+	blk_status_t ret;
+	int srcu_idx;
+	blk_qc_t unused_cookie;
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+	hctx_lock(hctx, &srcu_idx);
+	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
+	hctx_unlock(hctx, srcu_idx);
 
 	return ret;
 }
 
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list)
 {
-	blk_qc_t unused;
-	blk_status_t ret = BLK_STS_OK;
-
 	while (!list_empty(list)) {
+		blk_status_t ret;
 		struct request *rq = list_first_entry(list, struct request,
 				queuelist);
 
 		list_del_init(&rq->queuelist);
-		if (ret == BLK_STS_OK)
-			ret = blk_mq_try_issue_directly(hctx, rq, &unused,
-							false,
-							list_empty(list));
-		else
-			blk_mq_sched_insert_request(rq, false, true, false);
+		ret = blk_mq_request_issue_directly(rq, list_empty(list));
+		if (ret != BLK_STS_OK) {
+			if (ret == BLK_STS_RESOURCE ||
+					ret == BLK_STS_DEV_RESOURCE) {
+				blk_mq_request_bypass_insert(rq,
+							list_empty(list));
+				break;
+			}
+			blk_mq_end_request(rq, ret);
+		}
 	}
 
 	/*
@@ -1896,7 +1901,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 	 * the driver there was more coming, but that turned out to
 	 * be a lie.
 	 */
-	if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
 		hctx->queue->mq_ops->commit_rqs(hctx);
 }
 
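The core of the revert is the bypass_insert contract restored above: when the caller passes bypass_insert == true (the blk_mq_request_issue_directly() path used by stacked drivers), an out-of-resource condition is reported back as BLK_STS_RESOURCE instead of being absorbed by an internal insert, which is what lets dm-mpath requeue and avoid the livelock. A heavily simplified userspace model of the two calling modes (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

enum blk_status { BLK_STS_OK, BLK_STS_RESOURCE };

static bool budget_available;	/* stands in for tag/budget checks */

static enum blk_status try_issue_directly(const char *rq, bool bypass_insert)
{
	if (!budget_available) {
		if (bypass_insert)
			return BLK_STS_RESOURCE;	/* caller requeues */
		printf("inserted %s for later dispatch\n", rq);
		return BLK_STS_OK;
	}
	printf("issued %s to the driver\n", rq);
	return BLK_STS_OK;
}

int main(void)
{
	budget_available = false;

	/* dm-style caller: wants the real status so it can requeue */
	if (try_issue_directly("cloned-rq", true) == BLK_STS_RESOURCE)
		puts("caller requeues cloned-rq (no livelock)");

	/* fast-path caller: failure is absorbed by an internal insert */
	try_issue_directly("bio-rq", false);
	return 0;
}
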
@@ -2003,19 +2008,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 			plug->rq_count--;
 		}
 		blk_add_rq_to_plug(plug, rq);
+		trace_block_plug(q);
 
 		blk_mq_put_ctx(data.ctx);
 
 		if (same_queue_rq) {
 			data.hctx = same_queue_rq->mq_hctx;
+			trace_block_unplug(q, 1, true);
 			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-					&cookie, false, true);
+					&cookie);
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
 			!data.hctx->dispatch_busy)) {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
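
The two tracepoints added in this hunk keep the plug/unplug event stream balanced when a request is pulled back off the plug list for direct issue; trace consumers pair each plug with a later unplug of some depth. A toy model of that pairing (stand-in functions, not the kernel tracepoint API):

#include <stdio.h>

/* stand-ins for the block-layer tracepoints */
static void trace_block_plug(const char *q)
{
	printf("%s: plug\n", q);
}

static void trace_block_unplug(const char *q, int depth, int explicit)
{
	printf("%s: unplug depth=%d explicit=%d\n", q, depth, explicit);
}

int main(void)
{
	/* request queued onto the per-task plug list */
	trace_block_plug("q0");

	/* the same request is taken back off the plug list and issued
	 * directly; emitting unplug here keeps the stream balanced */
	trace_block_unplug("q0", 1, 1);
	return 0;
}
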
@@ -2332,7 +2339,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
 	return 0;
 
  free_fq:
-	kfree(hctx->fq);
+	blk_free_flush_queue(hctx->fq);
  exit_hctx:
 	if (set->ops->exit_hctx)
 		set->ops->exit_hctx(hctx, hctx_idx);
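
The leak fixed above is the classic allocator/deallocator mismatch: hctx->fq owns further allocations of its own, so only its dedicated destructor releases everything, while a bare kfree() of the outer struct leaks the members. A self-contained sketch of the pattern (hypothetical names, not the kernel implementation):

#include <stdlib.h>

struct flush_queue {
	void *flush_rq;			/* separately allocated member */
};

static struct flush_queue *flush_queue_alloc(void)
{
	struct flush_queue *fq = calloc(1, sizeof(*fq));

	if (!fq)
		return NULL;
	fq->flush_rq = malloc(128);
	if (!fq->flush_rq) {
		free(fq);
		return NULL;
	}
	return fq;
}

static void flush_queue_free(struct flush_queue *fq)
{
	if (!fq)
		return;
	free(fq->flush_rq);		/* a bare free(fq) would leak this */
	free(fq);
}

int main(void)
{
	struct flush_queue *fq = flush_queue_alloc();

	/* error path: release with the matching destructor, mirroring
	 * blk_free_flush_queue() rather than kfree() */
	flush_queue_free(fq);
	return 0;
}
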