Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 95a77b5a authored by Sayali Lokhande's avatar Sayali Lokhande Committed by Gerrit - the friendly Code Review server
Browse files

Revert "block: fix the DISCARD request merge"



This reverts commit 10807b37.
This is needed to fix a regression observed in the reboot path,
where discard requests are merged and, for an as-yet-undiagnosed reason,
the merged request is not properly freed or cleaned up, resulting in a
panic in __blk_put_request().

[ 338.244938] Call trace:
[ 338.244940] __blk_put_request+0x310/0x318
[ 338.244943] blk_queue_bio+0x170/0x698
[ 338.244946] generic_make_request+0x1f8/0x420
[ 338.244949] submit_bio+0x140/0x1d8
[ 338.244953] __submit_discard_cmd+0x3c4/0x4e8
[ 338.244956] __issue_discard_cmd+0x1c4/0x480
[ 338.244960] f2fs_issue_discard_timeout+0x64/0x128
[ 338.244962] f2fs_put_super+0x88/0x2c8
[ 338.244966] generic_shutdown_super+0x70/0xf8
[ 338.244968] kill_block_super+0x2c/0x60
[ 338.244972] kill_f2fs_super+0xb4/0xe0

Change-Id: I585d54ca6be67eb31062f2de44472b27792320d1
Signed-off-by: Sayali Lokhande <sayalil@codeaurora.org>
parent 4ef926c8
Loading
Loading
Loading
Loading
+10 −36
Original line number Diff line number Diff line
@@ -673,31 +673,6 @@ static void blk_account_io_merge(struct request *req)
		part_stat_unlock();
	}
}
/*
 * DISCARD requests merge in one of two ways:
 * when max_discard_segments > 1 the driver treats every bio as an
 * independent range and sends them to the controller together, so the
 * ranges need not be contiguous; otherwise DISCARDs are handled like
 * any other request and must be contiguous to merge.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	return req_op(req) == REQ_OP_DISCARD &&
	       queue_max_discard_segments(req->q) > 1;
}

/*
 * Classify how @req could absorb @next: multi-range-capable DISCARDs
 * merge regardless of sector position, positionally contiguous requests
 * back-merge, and everything else does not merge at all.
 */
enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;

	if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
{
@@ -720,6 +695,12 @@ static struct request *attempt_merge(struct request_queue *q,
	if (req_op(req) != req_op(next))
		return NULL;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
@@ -746,19 +727,11 @@ static struct request *attempt_merge(struct request_queue *q,
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
	if (req_op(req) == REQ_OP_DISCARD) {
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
	} else if (!ll_merge_requests_fn(q, req, next))
		return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
@@ -882,7 +855,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
	if (req_op(rq) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(rq->q) > 1)
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) ==
						bio->bi_iter.bi_sector) {