
Commit 8fba70b0 authored by Linus Torvalds

Merge tag 'for-linus-20180425' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "I ended up sitting on this about a week longer than I wanted to, since
  we were hashing out details with a timeout change. I've now killed
  that patch, so we can flush the existing queue in due time.

  This contains:

   - Fix for an old regression, where entering the queue can be
     disturbed by a signal to the process. This can cause spurious EIO.
     Fix from Alan Jenkins.

   - cdrom information leak fix from Dan.

   - Trivial helper for testing queue FUA from Dave Chinner, part of his
     O_DIRECT FUA series.

   - Series of swim fixes from Finn that actually makes it work again.

   - Loop O_DIRECT corruption fix, which caused data corruption in
     production for us. From me.

   - BFQ crash fix from me.

   - bcache maintainer update. Michael no longer has the time to do it,
     so Coly has stepped up to serve as the new maintainer.

   - blkcg locking fixes from Jiang Biao.

   - Revert of a change from this merge window from Ming that causes an
     issue on some hardware.

   - Minor clarification doc addition from Linus Walleij"

* tag 'for-linus-20180425' of git://git.kernel.dk/linux-block: (22 commits)
  Revert "blk-mq: remove code for dealing with remapping queue"
  block: mq: Add some minor doc for core structs
  bcache: mark Coly Li as bcache maintainer
  MAINTAINERS: Remove me as maintainer of bcache
  blkcg: init root blkcg_gq under lock
  blkcg: small fix on comment in blkcg_init_queue
  blkcg: don't hold blkcg lock when deactivating policy
  block: add blk_queue_fua() helper function
  cdrom: information leak in cdrom_ioctl_media_changed()
  bfq-iosched: ensure to clear bic/bfqq pointers when preparing request
  blk-mq: start request gstate with gen 1
  block/swim: Select appropriate drive on device open
  block/swim: Fix IO error at end of medium
  block/swim: Check drive type
  block/swim: Rename macros to avoid inconsistent inverted logic
  block/swim: Don't log an error message for an invalid ioctl
  block/swim: Remove extra put_disk() call from error path
  block/swim: Fix array bounds check
  m68k/mac: Don't remap SWIM MMIO region
  loop: handle short DIO reads
  ...
parents c6dc3e71 4412efec
MAINTAINERS  +1 −1

@@ -2617,7 +2617,7 @@ S:	Maintained
 F:	drivers/net/hamradio/baycom*
 
 BCACHE (BLOCK LAYER CACHE)
-M:	Michael Lyle <mlyle@lyle.org>
+M:	Coly Li <colyli@suse.de>
 M:	Kent Overstreet <kent.overstreet@gmail.com>
 L:	linux-bcache@vger.kernel.org
 W:	http://bcache.evilpiepirate.org
block/bfq-iosched.c  +9 −1

@@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
 	bool new_queue = false;
 	bool bfqq_already_existing = false, split = false;
 
-	if (!rq->elv.icq)
+	/*
+	 * Even if we don't have an icq attached, we should still clear
+	 * the scheduler pointers, as they might point to previously
+	 * allocated bic/bfqq structs.
+	 */
+	if (!rq->elv.icq) {
+		rq->elv.priv[0] = rq->elv.priv[1] = NULL;
 		return;
+	}
+
 	bic = icq_to_bic(rq->elv.icq);
 
 	spin_lock_irq(&bfqd->lock);
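What the hunk above guards against, in miniature: requests are recycled objects, and if bfq_prepare_request() bails out early for a request with no icq attached, rq->elv.priv[] can still hold bic/bfqq pointers from the request's previous life, which later tear-down code may free or dereference again. Below is a minimal userspace sketch of that recycle-without-clearing pattern; struct request, prepare(), finish() and priv are invented stand-ins for illustration, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a recycled request; "priv" plays the role of rq->elv.priv[]. */
struct request {
	void *priv;
};

static void prepare(struct request *rq, int has_icq)
{
	if (!has_icq) {
		/* The buggy version returned without this clear, so a
		 * recycled request kept a stale pointer from its last
		 * use, and finish() below would free it a second time. */
		rq->priv = NULL;
		return;
	}
	rq->priv = malloc(16);	/* fresh per-request scheduler state */
}

static void finish(struct request *rq)
{
	free(rq->priv);		/* double free if priv went stale */
}

int main(void)
{
	struct request rq = { .priv = NULL };

	prepare(&rq, 1);	/* normal cycle: state allocated */
	finish(&rq);		/* rq.priv now dangles, as after real I/O */
	prepare(&rq, 0);	/* recycled request, no icq: must clear */
	finish(&rq);		/* free(NULL) is a harmless no-op */
	puts("ok");
	return 0;
}

Clearing the pointers on the early-return path, exactly as the hunk does with rq->elv.priv[0] = rq->elv.priv[1] = NULL, is what keeps the second finish() from touching freed memory.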
block/blk-cgroup.c  +12 −16

@@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
 
 	preloaded = !radix_tree_preload(GFP_KERNEL);
 
-	/*
-	 * Make sure the root blkg exists and count the existing blkgs.  As
-	 * @q is bypassing at this point, blkg_lookup_create() can't be
-	 * used.  Open code insertion.
-	 */
+	/* Make sure the root blkg exists. */
 	rcu_read_lock();
 	spin_lock_irq(q->queue_lock);
 	blkg = blkg_create(&blkcg_root, q, new_blkg);
+	if (IS_ERR(blkg))
+		goto err_unlock;
+	q->root_blkg = blkg;
+	q->root_rl.blkg = blkg;
 	spin_unlock_irq(q->queue_lock);
 	rcu_read_unlock();
 
 	if (preloaded)
		radix_tree_preload_end();
 
-	if (IS_ERR(blkg))
-		return PTR_ERR(blkg);
-
-	q->root_blkg = blkg;
-	q->root_rl.blkg = blkg;
-
 	ret = blk_throtl_init(q);
 	if (ret) {
 		spin_lock_irq(q->queue_lock);
@@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
 		spin_unlock_irq(q->queue_lock);
 	}
 	return ret;
+
+err_unlock:
+	spin_unlock_irq(q->queue_lock);
+	rcu_read_unlock();
+	if (preloaded)
+		radix_tree_preload_end();
+	return PTR_ERR(blkg);
 }
 
 /**
@@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	__clear_bit(pol->plid, q->blkcg_pols);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
-		/* grab blkcg lock too while removing @pd from @blkg */
-		spin_lock(&blkg->blkcg->lock);
-
 		if (blkg->pd[pol->plid]) {
 			if (!blkg->pd[pol->plid]->offline &&
 			    pol->pd_offline_fn) {
@@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
 			pol->pd_free_fn(blkg->pd[pol->plid]);
 			blkg->pd[pol->plid] = NULL;
 		}
-
-		spin_unlock(&blkg->blkcg->lock);
 	}
 
 	spin_unlock_irq(q->queue_lock);
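The shape the blkcg_init_queue() rework adopts is worth spelling out: the root blkg is now created and published to q->root_blkg while q->queue_lock is still held, and every failure funnels through a single err_unlock label that releases resources in reverse acquisition order. Here is a minimal userspace sketch of that lock-then-publish-or-unwind shape, with a pthread mutex standing in for queue_lock and malloc() for blkg_create(); all names are illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static void *root_blkg;		/* shared pointer, protected by queue_lock */

static int init_queue(void)
{
	void *blkg;

	pthread_mutex_lock(&queue_lock);
	blkg = malloc(64);	/* stands in for blkg_create() */
	if (!blkg)
		goto err_unlock;
	root_blkg = blkg;	/* published while the lock is still held */
	pthread_mutex_unlock(&queue_lock);
	return 0;

err_unlock:			/* single unwind point, reverse order */
	pthread_mutex_unlock(&queue_lock);
	return -ENOMEM;
}

int main(void)
{
	printf("init_queue() = %d\n", init_queue());
	return 0;
}

Publishing under the lock closes the window in which another thread could observe a root pointer that the error path is about to throw away, which appears to be the race the "init root blkcg_gq under lock" patch addresses.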
block/blk-core.c  +8 −7

@@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->part = NULL;
 	seqcount_init(&rq->gstate_seq);
 	u64_stats_init(&rq->aborted_gstate_sync);
+	/*
+	 * See comment of blk_mq_init_request
+	 */
+	WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
 }
 EXPORT_SYMBOL(blk_rq_init);
 
@@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
 	while (true) {
 		bool success = false;
-		int ret;
 
 		rcu_read_lock();
 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		 */
 		smp_rmb();
 
-		ret = wait_event_interruptible(q->mq_freeze_wq,
-				(atomic_read(&q->mq_freeze_depth) == 0 &&
-				 (preempt || !blk_queue_preempt_only(q))) ||
-				blk_queue_dying(q));
+		wait_event(q->mq_freeze_wq,
+			   (atomic_read(&q->mq_freeze_depth) == 0 &&
+			    (preempt || !blk_queue_preempt_only(q))) ||
+			   blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
-		if (ret)
-			return ret;
 	}
 }
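As the pull message notes, the regression fixed here was the queue-enter path being disturbed by signals: wait_event_interruptible() returns early when the task catches a signal, and that early return was propagated up and could surface as a spurious EIO for an I/O that was merely queued behind a frozen queue. Switching to plain wait_event() makes the sleep uninterruptible. The userspace analogue is retrying a syscall on EINTR instead of failing the operation; the sketch below uses read() on a pipe, with all names invented for illustration.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Retry the wait when a signal interrupts it instead of failing. */
static ssize_t wait_for_queue(int fd, char *buf, size_t len)
{
	for (;;) {
		ssize_t ret = read(fd, buf, len);

		if (ret >= 0)
			return ret;	/* got the resource */
		if (errno != EINTR)
			return -1;	/* a real error: report it */
		/* EINTR: a signal woke us.  The buggy pattern returned
		 * the error here; the fixed pattern simply waits again. */
	}
}

int main(void)
{
	int pipefd[2];
	char buf[8];

	if (pipe(pipefd) != 0)
		return 1;
	(void)write(pipefd[1], "hi", 2);
	printf("read %zd bytes\n", wait_for_queue(pipefd[0], buf, sizeof(buf)));
	return 0;
}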

block/blk-mq.c  +38 −3

@@ -2042,6 +2042,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
 
 	seqcount_init(&rq->gstate_seq);
 	u64_stats_init(&rq->aborted_gstate_sync);
+	/*
+	 * start gstate with gen 1 instead of 0, otherwise it will be equal
+	 * to aborted_gstate, and be identified timed out by
+	 * blk_mq_terminate_expired.
+	 */
+	WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
+
 	return 0;
 }
 
@@ -2329,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-	unsigned int i;
+	unsigned int i, hctx_idx;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	/*
 	 * Map software to hardware queues.
+	 *
+	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
+		hctx_idx = q->mq_map[i];
+		/* unmapped hw queue can be remapped after CPU topo changed */
+		if (!set->tags[hctx_idx] &&
+		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
+			/*
+			 * If tags initialization fail for some hctx,
+			 * that hctx won't be brought online.  In this
+			 * case, remap the current ctx to hctx[0] which
+			 * is guaranteed to always have tags allocated
+			 */
+			q->mq_map[i] = 0;
+		}
+
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue(q, i);
 
@@ -2359,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		/* every hctx should get mapped by at least one CPU */
-		WARN_ON(!hctx->nr_ctx);
+		/*
+		 * If no software queues are mapped to this hardware queue,
+		 * disable it and free the request entries.
+		 */
+		if (!hctx->nr_ctx) {
+			/* Never unmap queue 0.  We need it as a
+			 * fallback in case of a new remap fails
+			 * allocation
+			 */
+			if (i && set->tags[i])
+				blk_mq_free_map_and_requests(set, i);
+
+			hctx->tags = NULL;
+			continue;
+		}
 
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);
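The gstate hunks encode a generation-counter trick: the timeout code samples a request's generation when a timer fires (aborted_gstate) and later declares the request expired only if its live generation (gstate) still matches. A freshly initialized request whose gstate sits at generation 0 already equals the zero-initialized aborted_gstate, so blk_mq_terminate_expired could falsely time it out; starting at generation 1 via MQ_RQ_GEN_INC keeps the two distinct until a real timeout is armed. Below is a simplified, single-threaded model of just that comparison; GEN_INC and struct req are illustrative, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define GEN_INC 4u	/* illustrative: low bits hold state, upper bits the generation */

struct req {
	uint32_t gstate;		/* live generation + state */
	uint32_t aborted_gstate;	/* generation sampled at timeout */
};

/* A request counts as timed out only when the generations match. */
static int looks_expired(const struct req *rq)
{
	return rq->gstate == rq->aborted_gstate;
}

int main(void)
{
	struct req gen0 = { .gstate = 0 };		/* buggy init: matches 0 */
	struct req gen1 = { .gstate = GEN_INC };	/* fixed init: gen 1 */

	printf("gen-0 request falsely expired: %d\n", looks_expired(&gen0));
	printf("gen-1 request falsely expired: %d\n", looks_expired(&gen1));
	return 0;
}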