Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit caf7df12 authored by Christoph Hellwig; committed by Jens Axboe
Browse files

block: remove the errors field from struct request

parent cee4b7ce
Loading
Loading
Loading
Loading
+1 −13
Original line number Diff line number Diff line
@@ -1635,7 +1635,6 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->errors = 0;
	req->__sector = bio->bi_iter.bi_sector;
	if (ioprio_valid(bio_prio(bio)))
		req->ioprio = bio_prio(bio);
@@ -2573,22 +2572,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
{
	int total_bytes;

	trace_block_rq_complete(req->q, req, nr_bytes);
	trace_block_rq_complete(req, error, nr_bytes);

	if (!req->bio)
		return false;

	/*
	 * For fs requests, rq is just carrier of independent bio's
	 * and each partial completion should be handled separately.
	 * Reset per-request error on each partial completion.
	 *
	 * TODO: tj: This is too subtle.  It would be better to let
	 * low level drivers do what they see fit.
	 */
	if (!blk_rq_is_passthrough(req))
		req->errors = 0;

	if (error && !blk_rq_is_passthrough(req) &&
	    !(req->rq_flags & RQF_QUIET)) {
		char *error_type;
+1 −2
Original line number Diff line number Diff line
@@ -69,8 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,

	if (unlikely(blk_queue_dying(q))) {
		rq->rq_flags |= RQF_QUIET;
		rq->errors = -ENXIO;
		__blk_end_request_all(rq, rq->errors);
		__blk_end_request_all(rq, -ENXIO);
		spin_unlock_irq(q->queue_lock);
		return;
	}
+3 −7
Original line number Diff line number Diff line
@@ -213,7 +213,6 @@ void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
#endif
	rq->special = NULL;
	/* tag was already set */
	rq->errors = 0;
	rq->extra_len = 0;

	INIT_LIST_HEAD(&rq->timeout_list);
@@ -624,8 +623,7 @@ void blk_mq_abort_requeue_list(struct request_queue *q)

		rq = list_first_entry(&rq_list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
		blk_mq_end_request(rq, -EIO);
	}
}
EXPORT_SYMBOL(blk_mq_abort_requeue_list);
@@ -1032,8 +1030,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
			pr_err("blk-mq: bad return on queue: %d\n", ret);
		case BLK_MQ_RQ_QUEUE_ERROR:
			errors++;
			rq->errors = -EIO;
			blk_mq_end_request(rq, rq->errors);
			blk_mq_end_request(rq, -EIO);
			break;
		}

@@ -1484,8 +1481,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,

	if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
		*cookie = BLK_QC_T_NONE;
		rq->errors = -EIO;
		blk_mq_end_request(rq, rq->errors);
		blk_mq_end_request(rq, -EIO);
		return;
	}

+0 −1
Original line number Diff line number Diff line
@@ -89,7 +89,6 @@ static void blk_rq_timed_out(struct request *req)
		ret = q->rq_timed_out_fn(req);
	switch (ret) {
	case BLK_EH_HANDLED:
		/* Can we use req->errors here? */
		__blk_complete_request(req);
		break;
	case BLK_EH_RESET_TIMER:
+0 −2
Original line number Diff line number Diff line
@@ -220,8 +220,6 @@ struct request {

	void *special;		/* opaque pointer available for LLD use */

	int errors;

	unsigned int extra_len;	/* length of alignment and padding */

	unsigned long deadline;
Loading