Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fbb81a52 authored by Tatyana Brokhman's avatar Tatyana Brokhman Committed by Stephen Boyd
Browse files

Revert "block: Add URGENT request notification support to CFQ scheduler"



This reverts commit b410a82118cdaa1dc92759e7995c20dcce0d1f1a.
The reverted commit was identified as the cause of the FS error
mentioned in the CR below.
It is reverted pending further analysis of the root cause of the FS error.

Change-Id: Ia75216de8012a2491b87f33e8c21f75592d87c80
CRs-fixed: 531257
Signed-off-by: Tatyana Brokhman <tlinder@codeaurora.org>
parent f7151d45
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -2196,8 +2196,10 @@ struct request *blk_peek_request(struct request_queue *q)
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			if (rq->cmd_flags & REQ_URGENT)
			if (rq->cmd_flags & REQ_URGENT) {
				WARN_ON(q->dispatched_urgent);
				q->dispatched_urgent = true;
			}
			trace_block_rq_issue(q, rq);
		}

+0 −120
Original line number Diff line number Diff line
@@ -320,9 +320,6 @@ struct cfq_data {
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	unsigned int nr_urgent_pending;
	unsigned int nr_urgent_in_flight;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
@@ -2786,14 +2783,6 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	if (rq->cmd_flags & REQ_URGENT) {
		if (!cfqd->nr_urgent_pending)
			WARN_ON(1);
		else
			cfqd->nr_urgent_pending--;
		cfqd->nr_urgent_in_flight++;
	}

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
@@ -3923,68 +3912,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
	}
}

/*
 * Called when a request (rq) is reinserted (to cfqq), i.e. moved back
 * from the dispatch list into the scheduler. Undo the per-queue and
 * per-group dispatch accounting done at dispatch time, refresh the I/O
 * statistics for the issuing context, and decide whether the active
 * queue should keep idling or be preempted.
 *
 * NOTE(review): this mirrors cfq_rq_enqueued(); the two should be kept
 * in sync if either changes.
 */
static void
cfq_rq_requeued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	/* The request counts as queued again. */
	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	/* Reverse the dispatch-time increments on the queue and its group. */
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;

	/* One fewer sync/async request in flight for this queue type. */
	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	/* Refresh think-time, seek and idle-window stats for this context. */
	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * If the active queue was idling for a request, stop idling
		 * when this request is big enough or other queues are busy;
		 * otherwise mark the queue so it gets dispatched promptly.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
			} else {
				cfqg_stats_update_idle_time(cfqq->cfqg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/* A different queue owns the disk; preempt it if warranted. */
		cfq_preempt_queue(cfqd, cfqq);
	}
}

static int cfq_reinsert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (!cfqq || cfqq->cfqd != cfqd)
		return -EIO;

	cfq_log_cfqq(cfqd, cfqq, "re-insert_request");
	list_add(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);

	cfq_rq_requeued(cfqd, cfqq, rq);
	if (rq->cmd_flags & REQ_URGENT) {
			if (cfqd->nr_urgent_in_flight)
				cfqd->nr_urgent_in_flight--;
			cfqd->nr_urgent_pending++;
	}
	return 0;
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
@@ -3999,43 +3926,6 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
				 rq->cmd_flags);
	cfq_rq_enqueued(cfqd, cfqq, rq);

	if (rq->cmd_flags & REQ_URGENT) {
		WARN_ON(1);
		blk_dump_rq_flags(rq, "");
		rq->cmd_flags &= ~REQ_URGENT;
	}

	/* Request is considered URGENT if:
	 * 1. The queue being served is of a lower IO priority then the new
	 *    request
	 * OR:
	 * 2. The workload being performed is ASYNC
	 * Only READ requests may be considered as URGENT
	 */
	if ((cfqd->active_queue &&
		 cfqq->ioprio_class < cfqd->active_queue->ioprio_class) ||
		(cfqd->serving_wl_type == ASYNC_WORKLOAD &&
		 rq_data_dir(rq) == READ)) {
		rq->cmd_flags |= REQ_URGENT;
		cfqd->nr_urgent_pending++;
	}
}


/**
 * cfq_urgent_pending() - check whether an urgent request is waiting
 * @q:	request queue owning this scheduler instance
 *
 * Return: true only when at least one urgent request is pending and
 * no urgent request is currently in flight; false otherwise.
 */
static bool cfq_urgent_pending(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return cfqd->nr_urgent_pending != 0 && cfqd->nr_urgent_in_flight == 0;
}

/*
@@ -4119,14 +4009,6 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
	const int sync = rq_is_sync(rq);
	unsigned long now;

	if (rq->cmd_flags & REQ_URGENT) {
		if (!cfqd->nr_urgent_in_flight)
			WARN_ON(1);
		else
			cfqd->nr_urgent_in_flight--;
		rq->cmd_flags &= ~REQ_URGENT;
	}

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));
@@ -4660,8 +4542,6 @@ static struct elevator_type iosched_cfq = {
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_reinsert_req_fn	= cfq_reinsert_request,
		.elevator_is_urgent_fn		= cfq_urgent_pending,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
+1 −0
Original line number Diff line number Diff line
@@ -790,6 +790,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)

	if (rq->cmd_flags & REQ_URGENT) {
		q->notified_urgent = false;
		WARN_ON(!q->dispatched_urgent);
		q->dispatched_urgent = false;
	}
	/*