Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 47db9b9a authored by Linus Torvalds
Browse files

Merge tag 'for-linus-2019-09-27' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes/changes to round off this merge window. This contains:

   - Small series making some functional tweaks to blk-iocost (Tejun)

   - Elevator switch locking fix (Ming)

   - Kill redundant call in blk-wbt (Yufen)

   - Fix flush timeout handling (Yufen)"

* tag 'for-linus-2019-09-27' of git://git.kernel.dk/linux-block:
  block: fix null pointer dereference in blk_mq_rq_timed_out()
  rq-qos: get rid of redundant wbt_update_limits()
  iocost: bump up default latency targets for hard disks
  iocost: improve nr_lagging handling
  iocost: better trace vrate changes
  block: don't release queue's sysfs lock during switching elevator
  blk-mq: move lockdep_assert_held() into elevator_exit
parents d0e00bc5 8d699663
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -214,6 +214,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)

	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);

	if (!refcount_dec_and_test(&flush_rq->ref)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
	}

	if (fq->rq_status != BLK_STS_OK)
		error = fq->rq_status;

	hctx = flush_rq->mq_hctx;
	if (!q->elevator) {
		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+19 −11
Original line number Diff line number Diff line
@@ -529,8 +529,8 @@ struct iocg_wake_ctx {
static const struct ioc_params autop[] = {
	[AUTOP_HDD] = {
		.qos				= {
			[QOS_RLAT]		=         50000, /* 50ms */
			[QOS_WLAT]		=         50000,
			[QOS_RLAT]		=        250000, /* 250ms */
			[QOS_WLAT]		=        250000,
			[QOS_MIN]		= VRATE_MIN_PPM,
			[QOS_MAX]		= VRATE_MAX_PPM,
		},
@@ -1343,7 +1343,7 @@ static void ioc_timer_fn(struct timer_list *timer)
	u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
	u32 missed_ppm[2], rq_wait_pct;
	u64 period_vtime;
	int i;
	int prev_busy_level, i;

	/* how were the latencies during the period? */
	ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
@@ -1407,7 +1407,8 @@ static void ioc_timer_fn(struct timer_list *timer)
		 * comparing vdone against period start.  If lagging behind
		 * IOs from past periods, don't increase vrate.
		 */
		if (!atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
		    time_after64(vtime, vdone) &&
		    time_after64(vtime, now.vnow -
				 MAX_LAGGING_PERIODS * period_vtime) &&
@@ -1531,26 +1532,29 @@ static void ioc_timer_fn(struct timer_list *timer)
	 * and experiencing shortages but not surpluses, we're too stingy
	 * and should increase vtime rate.
	 */
	prev_busy_level = ioc->busy_level;
	if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
	    missed_ppm[READ] > ppm_rthr ||
	    missed_ppm[WRITE] > ppm_wthr) {
		ioc->busy_level = max(ioc->busy_level, 0);
		ioc->busy_level++;
	} else if (nr_lagging) {
		ioc->busy_level = max(ioc->busy_level, 0);
	} else if (nr_shortages && !nr_surpluses &&
		   rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
		/* take action iff there is contention */
		if (nr_shortages && !nr_lagging) {
			ioc->busy_level = min(ioc->busy_level, 0);
			/* redistribute surpluses first */
			if (!nr_surpluses)
				ioc->busy_level--;
		}
	} else {
		ioc->busy_level = 0;
	}

	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);

	if (ioc->busy_level) {
	if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
		u64 vrate = atomic64_read(&ioc->vtime_rate);
		u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;

@@ -1592,6 +1596,10 @@ static void ioc_timer_fn(struct timer_list *timer)
		atomic64_set(&ioc->vtime_rate, vrate);
		ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
			ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
	} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
		trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
					   &missed_ppm, rq_wait_pct, nr_lagging,
					   nr_shortages, nr_surpluses);
	}

	ioc_refresh_params(ioc, false);
+0 −2
Original line number Diff line number Diff line
@@ -555,8 +555,6 @@ void blk_mq_sched_free_requests(struct request_queue *q)
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (hctx->sched_tags)
			blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+4 −1
Original line number Diff line number Diff line
@@ -918,7 +918,10 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
	 */
	if (blk_mq_req_expired(rq, next))
		blk_mq_rq_timed_out(rq, reserved);
	if (refcount_dec_and_test(&rq->ref))

	if (is_flush_rq(rq, hctx))
		rq->end_io(rq, 0);
	else if (refcount_dec_and_test(&rq->ref))
		__blk_mq_free_request(rq);

	return true;
+4 −10
Original line number Diff line number Diff line
@@ -482,7 +482,6 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);
	wbt_update_limits(q);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
@@ -989,13 +988,11 @@ int blk_register_queue(struct gendisk *disk)
		blk_mq_debugfs_register(q);
	}

	/*
	 * The flag of QUEUE_FLAG_REGISTERED isn't set yet, so elevator
	 * switch won't happen at all.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
@@ -1005,7 +1002,6 @@ int blk_register_queue(struct gendisk *disk)
		has_elevator = true;
	}

	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);
@@ -1062,12 +1058,10 @@ void blk_unregister_queue(struct gendisk *disk)
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	/*
	 * q->kobj has been removed, so it is safe to check if elevator
	 * exists without holding q->sysfs_lock.
	 */
	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
Loading