Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fc17b653 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

blk-mq: switch ->queue_rq return value to blk_status_t



Use the same values for request completion errors as the return
value from ->queue_rq.  BLK_STS_RESOURCE is special-cased to cause
a requeue, and all the others are completed as-is.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 2a842aca
Loading
Loading
Loading
Loading
+17 −20
Original line number Original line Diff line number Diff line
@@ -924,7 +924,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
{
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_hw_ctx *hctx;
	struct request *rq;
	struct request *rq;
	int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
	int errors, queued;


	if (list_empty(list))
	if (list_empty(list))
		return false;
		return false;
@@ -935,6 +935,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
	errors = queued = 0;
	errors = queued = 0;
	do {
	do {
		struct blk_mq_queue_data bd;
		struct blk_mq_queue_data bd;
		blk_status_t ret;


		rq = list_first_entry(list, struct request, queuelist);
		rq = list_first_entry(list, struct request, queuelist);
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -975,25 +976,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
		}
		}


		ret = q->mq_ops->queue_rq(hctx, &bd);
		ret = q->mq_ops->queue_rq(hctx, &bd);
		switch (ret) {
		if (ret == BLK_STS_RESOURCE) {
		case BLK_MQ_RQ_QUEUE_OK:
			queued++;
			break;
		case BLK_MQ_RQ_QUEUE_BUSY:
			blk_mq_put_driver_tag_hctx(hctx, rq);
			blk_mq_put_driver_tag_hctx(hctx, rq);
			list_add(&rq->queuelist, list);
			list_add(&rq->queuelist, list);
			__blk_mq_requeue_request(rq);
			__blk_mq_requeue_request(rq);
			break;
			break;
		default:
		}
			pr_err("blk-mq: bad return on queue: %d\n", ret);

		case BLK_MQ_RQ_QUEUE_ERROR:
		if (unlikely(ret != BLK_STS_OK)) {
			errors++;
			errors++;
			blk_mq_end_request(rq, BLK_STS_IOERR);
			blk_mq_end_request(rq, BLK_STS_IOERR);
			break;
			continue;
		}
		}


		if (ret == BLK_MQ_RQ_QUEUE_BUSY)
		queued++;
			break;
	} while (!list_empty(list));
	} while (!list_empty(list));


	hctx->dispatched[queued_to_index(queued)]++;
	hctx->dispatched[queued_to_index(queued)]++;
@@ -1031,7 +1027,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 * - blk_mq_run_hw_queue() checks whether or not a queue has
		 *   been stopped before rerunning a queue.
		 *   been stopped before rerunning a queue.
		 * - Some but not all block drivers stop a queue before
		 * - Some but not all block drivers stop a queue before
		 *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
		 *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
		 *   and dm-rq.
		 *   and dm-rq.
		 */
		 */
		if (!blk_mq_sched_needs_restart(hctx) &&
		if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1410,7 +1406,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
	};
	};
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_hw_ctx *hctx;
	blk_qc_t new_cookie;
	blk_qc_t new_cookie;
	int ret;
	blk_status_t ret;


	if (q->elevator)
	if (q->elevator)
		goto insert;
		goto insert;
@@ -1426,18 +1422,19 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
	 * would have done
	 * would have done
	 */
	 */
	ret = q->mq_ops->queue_rq(hctx, &bd);
	ret = q->mq_ops->queue_rq(hctx, &bd);
	if (ret == BLK_MQ_RQ_QUEUE_OK) {
	switch (ret) {
	case BLK_STS_OK:
		*cookie = new_cookie;
		*cookie = new_cookie;
		return;
		return;
	}
	case BLK_STS_RESOURCE:

		__blk_mq_requeue_request(rq);
	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
		goto insert;
	default:
		*cookie = BLK_QC_T_NONE;
		*cookie = BLK_QC_T_NONE;
		blk_mq_end_request(rq, BLK_STS_IOERR);
		blk_mq_end_request(rq, ret);
		return;
		return;
	}
	}


	__blk_mq_requeue_request(rq);
insert:
insert:
	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
}
}
+3 −3
Original line number Original line Diff line number Diff line
@@ -1674,7 +1674,7 @@ int loop_unregister_transfer(int number)
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);


static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
		const struct blk_mq_queue_data *bd)
{
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -1683,7 +1683,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
	blk_mq_start_request(bd->rq);
	blk_mq_start_request(bd->rq);


	if (lo->lo_state != Lo_bound)
	if (lo->lo_state != Lo_bound)
		return BLK_MQ_RQ_QUEUE_ERROR;
		return BLK_STS_IOERR;


	switch (req_op(cmd->rq)) {
	switch (req_op(cmd->rq)) {
	case REQ_OP_FLUSH:
	case REQ_OP_FLUSH:
@@ -1698,7 +1698,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,


	kthread_queue_work(&lo->worker, &cmd->work);
	kthread_queue_work(&lo->worker, &cmd->work);


	return BLK_MQ_RQ_QUEUE_OK;
	return BLK_STS_OK;
}
}


static void loop_handle_cmd(struct loop_cmd *cmd)
static void loop_handle_cmd(struct loop_cmd *cmd)
+8 −9
Original line number Original line Diff line number Diff line
@@ -3633,7 +3633,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
	return false;
	return false;
}
}


static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
		struct request *rq)
{
{
	struct driver_data *dd = hctx->queue->queuedata;
	struct driver_data *dd = hctx->queue->queuedata;
@@ -3642,7 +3642,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
	struct mtip_cmd_sg *command_sg;
	struct mtip_cmd_sg *command_sg;


	if (mtip_commands_active(dd->port))
	if (mtip_commands_active(dd->port))
		return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_STS_RESOURCE;


	/* Populate the SG list */
	/* Populate the SG list */
	cmd->command_header->opts =
	cmd->command_header->opts =
@@ -3666,10 +3666,10 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,


	blk_mq_start_request(rq);
	blk_mq_start_request(rq);
	mtip_issue_non_ncq_command(dd->port, rq->tag);
	mtip_issue_non_ncq_command(dd->port, rq->tag);
	return BLK_MQ_RQ_QUEUE_OK;
	return 0;
}
}


static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
			 const struct blk_mq_queue_data *bd)
{
{
	struct request *rq = bd->rq;
	struct request *rq = bd->rq;
@@ -3681,15 +3681,14 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
		return mtip_issue_reserved_cmd(hctx, rq);
		return mtip_issue_reserved_cmd(hctx, rq);


	if (unlikely(mtip_check_unal_depth(hctx, rq)))
	if (unlikely(mtip_check_unal_depth(hctx, rq)))
		return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_STS_RESOURCE;


	blk_mq_start_request(rq);
	blk_mq_start_request(rq);


	ret = mtip_submit_request(hctx, rq);
	ret = mtip_submit_request(hctx, rq);
	if (likely(!ret))
	if (likely(!ret))
		return BLK_MQ_RQ_QUEUE_OK;
		return BLK_STS_OK;

	return BLK_STS_IOERR;
	return BLK_MQ_RQ_QUEUE_ERROR;
}
}


static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
+4 −8
Original line number Original line Diff line number Diff line
@@ -469,7 +469,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
				nsock->pending = req;
				nsock->pending = req;
				nsock->sent = sent;
				nsock->sent = sent;
			}
			}
			return BLK_MQ_RQ_QUEUE_BUSY;
			return BLK_STS_RESOURCE;
		}
		}
		dev_err_ratelimited(disk_to_dev(nbd->disk),
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
			"Send control failed (result %d)\n", result);
@@ -510,7 +510,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
					 */
					 */
					nsock->pending = req;
					nsock->pending = req;
					nsock->sent = sent;
					nsock->sent = sent;
					return BLK_MQ_RQ_QUEUE_BUSY;
					return BLK_STS_RESOURCE;
				}
				}
				dev_err(disk_to_dev(nbd->disk),
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					"Send data failed (result %d)\n",
@@ -798,7 +798,7 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
	return ret;
	return ret;
}
}


static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
			const struct blk_mq_queue_data *bd)
{
{
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -822,13 +822,9 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
	 * appropriate.
	 * appropriate.
	 */
	 */
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	ret = nbd_handle_cmd(cmd, hctx->queue_num);
	if (ret < 0)
		ret = BLK_MQ_RQ_QUEUE_ERROR;
	if (!ret)
		ret = BLK_MQ_RQ_QUEUE_OK;
	complete(&cmd->send_complete);
	complete(&cmd->send_complete);


	return ret;
	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
}
}


static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
+2 −2
Original line number Original line Diff line number Diff line
@@ -356,7 +356,7 @@ static void null_request_fn(struct request_queue *q)
	}
	}
}
}


static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
			 const struct blk_mq_queue_data *bd)
{
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -373,7 +373,7 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
	blk_mq_start_request(bd->rq);
	blk_mq_start_request(bd->rq);


	null_handle_cmd(cmd);
	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
	return BLK_STS_OK;
}
}


static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
Loading