
Commit 265c5596 authored by Linus Torvalds

Merge tag 'for-linus-20180616' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into -rc1. This contains:

   - bsg_open vs bsg_unregister race fix (Anatoliy)

   - NVMe pull request from Christoph, with fixes for regressions in
     this window, FC connect/reconnect path code unification, and a
     trace point addition.

   - timeout fix (Christoph)

   - remove a few unused functions (Christoph)

   - blk-mq tag_set reinit fix (Roman)"

* tag 'for-linus-20180616' of git://git.kernel.dk/linux-block:
  bsg: fix race of bsg_open and bsg_unregister
  block: remove blk_queue_invalidate_tags
  nvme-fabrics: fix and refine state checks in __nvmf_check_ready
  nvme-fabrics: handle the admin-only case properly in nvmf_check_ready
  nvme-fabrics: refactor queue ready check
  blk-mq: remove blk_mq_tagset_iter
  nvme: remove nvme_reinit_tagset
  nvme-fc: fix nulling of queue data on reconnect
  nvme-fc: remove reinit_request routine
  blk-mq: don't time out requests again that are in the timeout handler
  nvme-fc: change controllers first connect to use reconnect path
  nvme: don't rely on the changed namespace list log
  nvmet: free smart-log buffer after use
  nvme-rdma: fix error flow during mapping request data
  nvme: add bio remapping tracepoint
  nvme: fix NULL pointer dereference in nvme_init_subsystem
  blk-mq: reinit q->tag_set_list entry only after grace period
parents 5e7b9212 d6c73964
+1 −14
@@ -752,18 +752,6 @@ completion of the request to the block layer. This means ending tag
 operations before calling end_that_request_last()! For an example of a user
 of these helpers, see the IDE tagged command queueing support.
 
-Certain hardware conditions may dictate a need to invalidate the block tag
-queue. For instance, on IDE any tagged request error needs to clear both
-the hardware and software block queue and enable the driver to sanely restart
-all the outstanding requests. There's a third helper to do that:
-
-	blk_queue_invalidate_tags(struct request_queue *q)
-
-	Clear the internal block tag queue and re-add all the pending requests
-	to the request queue. The driver will receive them again on the
-	next request_fn run, just like it did the first time it encountered
-	them.
-
 3.2.5.2 Tag info
 
 Some block functions exist to query current tag status or to go from a
@@ -805,8 +793,7 @@ Internally, block manages tags in the blk_queue_tag structure:
 Most of the above is simple and straight forward, however busy_list may need
 a bit of explaining. Normally we don't care too much about request ordering,
 but in the event of any barrier requests in the tag queue we need to ensure
-that requests are restarted in the order they were queue. This may happen
-if the driver needs to use blk_queue_invalidate_tags().
+that requests are restarted in the order they were queue.
 
 3.3 I/O Submission
 
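For context, the text removed above sits in the legacy tagged command queueing section of the block-layer guide; the helpers it keeps documenting are blk_queue_start_tag() and blk_queue_end_tag(). Purely as orientation (not part of this commit; driver names are hypothetical), a legacy request_fn using those helpers would look roughly like this:

/* Hypothetical legacy driver: assign a hardware tag to each request and
 * issue it; if no tag is free, stop and retry on the next request_fn run.
 * blk_queue_start_tag() also starts (dequeues) the request on success. */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;			/* tag space exhausted for now */
		my_hw_issue(q->queuedata, rq);	/* rq->tag identifies the command */
	}
}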
+0 −29
@@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 }
 EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
 
-int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
-			 int (fn)(void *, struct request *))
-{
-	int i, j, ret = 0;
-
-	if (WARN_ON_ONCE(!fn))
-		goto out;
-
-	for (i = 0; i < set->nr_hw_queues; i++) {
-		struct blk_mq_tags *tags = set->tags[i];
-
-		if (!tags)
-			continue;
-
-		for (j = 0; j < tags->nr_tags; j++) {
-			if (!tags->static_rqs[j])
-				continue;
-
-			ret = fn(data, tags->static_rqs[j]);
-			if (ret)
-				goto out;
-		}
-	}
-
-out:
-	return ret;
-}
-EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
-
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
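The helper removed here walked every statically allocated request in a tag set and ran a caller-supplied callback on it. Per the pull message it is one of the "unused functions" being dropped; its apparent last caller, nvme_reinit_tagset, goes away in the same series. Purely as an illustration of the signature shown above (driver names hypothetical, not from this commit), a caller would have looked something like:

/* Hypothetical reset-path use of the removed iterator: re-initialise the
 * per-request driver data (PDU) of every request in the tag set. */
static int my_reinit_request(void *data, struct request *rq)
{
	struct my_ctrl *ctrl = data;		/* hypothetical driver context */

	return my_setup_cmd(ctrl, blk_mq_rq_to_pdu(rq));
}

	/* ... somewhere in the driver's controller-reset path ... */
	error = blk_mq_tagset_iter(&ctrl->tag_set, ctrl, my_reinit_request);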
+6 −2
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 
 	if (blk_mq_request_started(rq)) {
 		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
+		rq->rq_flags &= ~RQF_TIMED_OUT;
 		if (q->dma_drain_size && blk_rq_bytes(rq))
 			rq->nr_phys_segments--;
 	}
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);
 
 static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 {
+	req->rq_flags |= RQF_TIMED_OUT;
 	if (req->q->mq_ops->timeout) {
 		enum blk_eh_timer_return ret;
 
@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
 		WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
 	}
 
+	req->rq_flags &= ~RQF_TIMED_OUT;
 	blk_add_timer(req);
 }
 
@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 	if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
 		return false;
+	if (rq->rq_flags & RQF_TIMED_OUT)
+		return false;
 
 	deadline = blk_rq_deadline(rq);
 	if (time_after_eq(jiffies, deadline))
@@ -2349,7 +2354,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 
 	mutex_lock(&set->tag_list_lock);
 	list_del_rcu(&q->tag_set_list);
-	INIT_LIST_HEAD(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2357,8 +2361,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 		blk_mq_update_tag_set_depth(set, false);
 	}
 	mutex_unlock(&set->tag_list_lock);
-
 	synchronize_rcu();
+	INIT_LIST_HEAD(&q->tag_set_list);
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
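Two independent fixes land in this file. The RQF_TIMED_OUT hunks flag a request while its timeout handler runs, and blk_mq_req_expired() skips flagged requests, so a request cannot be timed out a second time while the first expiry is still being handled; the flag is cleared once the timer is re-armed or the request is requeued. The tag_set_list hunks defer re-initialising the queue's list entry until after synchronize_rcu(): list_del_rcu() leaves the removed entry's links intact so concurrent RCU readers walking set->tag_list can still step over it, and resetting the entry before the grace period ends could send such a reader into a self-referencing node. A kernel-style sketch of that ordering rule (types and function hypothetical, for illustration only):

/* Detach 'q' from a list that is traversed under RCU: the entry may only
 * be reset or reused once a grace period guarantees no reader still holds
 * a pointer into it. */
static void my_detach_queue(struct my_set *set, struct my_queue *q)
{
	mutex_lock(&set->lock);
	list_del_rcu(&q->node);		/* readers may still traverse through q */
	mutex_unlock(&set->lock);

	synchronize_rcu();		/* wait out every such reader */
	INIT_LIST_HEAD(&q->node);	/* now safe to reset/reuse the entry */
}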
+0 −22
@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	 */
 	q->queue_tags = tags;
 	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blk_start_request(rq);
-	list_add(&rq->queuelist, &q->tag_busy_list);
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
-
-/**
- * blk_queue_invalidate_tags - invalidate all pending tags
- * @q:  the request queue for the device
- *
- *  Description:
- *   Hardware conditions may dictate a need to stop all pending requests.
- *   In this case, we will safely clear the block side of the tag queue and
- *   readd all requests to the request queue in the right order.
- **/
-void blk_queue_invalidate_tags(struct request_queue *q)
-{
-	struct list_head *tmp, *n;
-
-	lockdep_assert_held(q->queue_lock);
-
-	list_for_each_safe(tmp, n, &q->tag_busy_list)
-		blk_requeue_request(q, list_entry_rq(tmp));
-}
-EXPORT_SYMBOL(blk_queue_invalidate_tags);
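The function removed above had no remaining in-tree callers (the pull message's "remove a few unused functions"), and the matching documentation is dropped in the first hunk of this merge. For reference, the usage pattern that documentation described, sketched with a hypothetical driver function (the helper expected queue_lock to be held, as its lockdep assertion shows), was roughly:

/* Hypothetical legacy error path: on a fatal tagged-command error, clear
 * the block-layer tag state and requeue everything so the next
 * request_fn run re-issues the outstanding requests. */
static void my_handle_tagged_error(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);	/* removed by this series */
	spin_unlock_irqrestore(q->queue_lock, flags);
}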
+11 −11
@@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 	struct bsg_device *bd;
 	unsigned char buf[32];
 
+	lockdep_assert_held(&bsg_mutex);
+
 	if (!blk_get_queue(rq))
 		return ERR_PTR(-ENXIO);
 
@@ -707,14 +709,12 @@
 	bsg_set_block(bd, file);
 
 	atomic_set(&bd->ref_count, 1);
-	mutex_lock(&bsg_mutex);
 	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
 	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
 	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
 		format_dev_t(buf, inode->i_rdev), bd->max_queue);
 
-	mutex_unlock(&bsg_mutex);
 	return bd;
 }
 
@@ -722,7 +722,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
 	struct bsg_device *bd;
 
-	mutex_lock(&bsg_mutex);
+	lockdep_assert_held(&bsg_mutex);
 
 	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
 		if (bd->queue == q) {
@@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 	}
 	bd = NULL;
 found:
-	mutex_unlock(&bsg_mutex);
 	return bd;
 }
 
@@ -746,17 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
 	 */
 	mutex_lock(&bsg_mutex);
 	bcd = idr_find(&bsg_minor_idr, iminor(inode));
-	mutex_unlock(&bsg_mutex);
 
-	if (!bcd)
-		return ERR_PTR(-ENODEV);
+	if (!bcd) {
+		bd = ERR_PTR(-ENODEV);
+		goto out_unlock;
+	}
 
 	bd = __bsg_get_device(iminor(inode), bcd->queue);
-	if (bd)
-		return bd;
+	if (!bd)
+		bd = bsg_add_device(inode, bcd->queue, file);
 
-	bd = bsg_add_device(inode, bcd->queue, file);
-
+out_unlock:
+	mutex_unlock(&bsg_mutex);
 	return bd;
 }
 
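The bsg change closes the race between bsg_open() and bsg_unregister_queue() noted in the pull message: previously bsg_get_device() dropped bsg_mutex between the idr lookup, the per-minor hash lookup and bsg_add_device(), so the underlying device could be unregistered in that window; now the mutex is held across the whole open path, and the helpers document the new expectation with lockdep_assert_held(). A minimal userspace analogue of the same "lookup-or-create under one lock" rule (self-contained sketch, not from this commit):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry { int minor; int refs; struct entry *next; };
static struct entry *table;		/* toy stand-in for the bsg minor hash */

static struct entry *lookup(int minor)
{
	struct entry *e;

	for (e = table; e; e = e->next)
		if (e->minor == minor)
			return e;
	return NULL;
}

/* Open path: the lookup, the create and the refcount bump all happen under
 * the same lock the unregister path would take, so the entry cannot
 * disappear between "not found" and "add". */
struct entry *open_entry(int minor)
{
	struct entry *e;

	pthread_mutex_lock(&table_lock);
	e = lookup(minor);
	if (!e) {
		e = calloc(1, sizeof(*e));
		if (e) {
			e->minor = minor;
			e->next = table;
			table = e;
		}
	}
	if (e)
		e->refs++;
	pthread_mutex_unlock(&table_lock);
	return e;
}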