
Commit 27728bf0 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Another week, another round of fixes.

  These have been brewing for a bit and in various iterations, but I
  feel pretty comfortable about the quality of them.  They fix real
  issues.  The pull request is mostly blk-mq related, and the only one
  not fixing a real bug is the tag iterator abstraction from Christoph.
  But it's pretty trivial, and we'll need it for another fix soon.

  Apart from the blk-mq fixes, there's an NVMe affinity fix from Keith,
  and a single fix for xen-blkback from Roger fixing failure to free
  requests on disconnect"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: factor out a helper to iterate all tags for a request_queue
  blk-mq: fix racy updates of rq->errors
  blk-mq: fix deadlock when reading cpu_list
  blk-mq: avoid inserting requests before establishing new mapping
  blk-mq: fix q->mq_usage_counter access race
  blk-mq: Fix use after of free q->mq_map
  blk-mq: fix sysfs registration/unregistration race
  blk-mq: avoid setting hctx->tags->cpumask before allocation
  NVMe: Set affinity after allocating request queues
  xen/blkback: free requests on disconnection
parents 36f8dafe 0bf6cd5b
+5 −4
@@ -31,7 +31,8 @@ static int get_first_sibling(unsigned int cpu)
 	return cpu;
 }
 
-int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
+int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
+			    const struct cpumask *online_mask)
 {
 	unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
 	cpumask_var_t cpus;
@@ -41,7 +42,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
 	cpumask_clear(cpus);
 	nr_cpus = nr_uniq_cpus = 0;
-	for_each_online_cpu(i) {
+	for_each_cpu(i, online_mask) {
 		nr_cpus++;
 		first_sibling = get_first_sibling(i);
 		if (!cpumask_test_cpu(first_sibling, cpus))
@@ -51,7 +52,7 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 
 	queue = 0;
 	for_each_possible_cpu(i) {
-		if (!cpu_online(i)) {
+		if (!cpumask_test_cpu(i, online_mask)) {
 			map[i] = 0;
 			continue;
 		}
@@ -95,7 +96,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
 	if (!map)
 		return NULL;
 
-	if (!blk_mq_update_queue_map(map, set->nr_hw_queues))
+	if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
 		return map;
 
 	kfree(map);
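
The hunk above makes the CPU-to-queue map a function of a caller-supplied mask instead of sampling cpu_online_mask internally, so a hotplug path can build the mapping for a CPU set that is about to become true (e.g. during CPU_UP_PREPARE). A minimal sketch of that calling pattern follows; the wrapper name is hypothetical, while blk_mq_update_queue_map() and its new online_mask argument are the ones introduced here:

/*
 * Sketch only: build a queue map for a CPU that is coming up but is not yet
 * in cpu_online_mask.  blk_sketch_map_for_new_cpu() is a hypothetical wrapper.
 */
static int blk_sketch_map_for_new_cpu(unsigned int *map, unsigned int nr_queues,
				      unsigned int cpu)
{
	/* static, mirroring the hotplug notifier later in this pull: hotplug
	 * callbacks run one at a time and an allocation here could fail */
	static struct cpumask online_new;

	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_set_cpu(cpu, &online_new);	/* not online yet, but about to be */

	return blk_mq_update_queue_map(map, nr_queues, &online_new);
}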
+22 −12
@@ -229,8 +229,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
 	unsigned int i, first = 1;
 	ssize_t ret = 0;
 
-	blk_mq_disable_hotplug();
-
 	for_each_cpu(i, hctx->cpumask) {
 		if (first)
 			ret += sprintf(ret + page, "%u", i);
@@ -240,8 +238,6 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
 		first = 0;
 	}
 
-	blk_mq_enable_hotplug();
-
 	ret += sprintf(ret + page, "\n");
 	return ret;
 }
@@ -343,7 +339,7 @@ static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
 	struct blk_mq_ctx *ctx;
 	int i;
 
-	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+	if (!hctx->nr_ctx)
 		return;
 
 	hctx_for_each_ctx(hctx, ctx, i)
@@ -358,7 +354,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
 	struct blk_mq_ctx *ctx;
 	int i, ret;
 
-	if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
+	if (!hctx->nr_ctx)
 		return 0;
 
 	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
@@ -381,6 +377,8 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	struct blk_mq_ctx *ctx;
 	int i, j;
 
+	blk_mq_disable_hotplug();
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		blk_mq_unregister_hctx(hctx);
 
@@ -395,6 +393,9 @@ void blk_mq_unregister_disk(struct gendisk *disk)
 	kobject_put(&q->mq_kobj);
 
 	kobject_put(&disk_to_dev(disk)->kobj);
+
+	q->mq_sysfs_init_done = false;
+	blk_mq_enable_hotplug();
 }
 
 static void blk_mq_sysfs_init(struct request_queue *q)
@@ -425,27 +426,30 @@ int blk_mq_register_disk(struct gendisk *disk)
 	struct blk_mq_hw_ctx *hctx;
 	int ret, i;
 
+	blk_mq_disable_hotplug();
+
 	blk_mq_sysfs_init(q);
 
 	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	kobject_uevent(&q->mq_kobj, KOBJ_ADD);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		hctx->flags |= BLK_MQ_F_SYSFS_UP;
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
 			break;
 	}
 
-	if (ret) {
+	if (ret)
 		blk_mq_unregister_disk(disk);
-		return ret;
-	}
+	else
+		q->mq_sysfs_init_done = true;
+out:
+	blk_mq_enable_hotplug();
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_mq_register_disk);
 
@@ -454,6 +458,9 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	int i;
 
+	if (!q->mq_sysfs_init_done)
+		return;
+
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
 }
@@ -463,6 +470,9 @@ int blk_mq_sysfs_register(struct request_queue *q)
 	struct blk_mq_hw_ctx *hctx;
 	int i, ret = 0;
 
+	if (!q->mq_sysfs_init_done)
+		return ret;
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
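
Taken together, these hunks serialize sysfs setup against CPU-hotplug-driven re-registration: blk_mq_register_disk()/blk_mq_unregister_disk() now run with hotplug disabled, and the q->mq_sysfs_init_done flag turns blk_mq_sysfs_register()/blk_mq_sysfs_unregister() into no-ops until the initial registration has completed. A condensed sketch of the scheme (bodies abridged; register_mq_kobjects() and register_hctx_kobjects() are hypothetical stand-ins for the kobject work shown above):

int blk_mq_register_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	blk_mq_disable_hotplug();		/* keep blk_mq_queue_reinit() out */
	ret = register_mq_kobjects(q);		/* hypothetical: kobject_add() + per-hctx registration */
	if (!ret)
		q->mq_sysfs_init_done = true;	/* only now may the reinit path touch the kobjects */
	blk_mq_enable_hotplug();

	return ret;
}

/* called from blk_mq_queue_reinit() on CPU hotplug */
int blk_mq_sysfs_register(struct request_queue *q)
{
	if (!q->mq_sysfs_init_done)
		return 0;
	return register_hctx_kobjects(q);	/* hypothetical: re-add the per-hctx kobjects */
}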
+20 −7
@@ -471,17 +471,30 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
 }
 EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
 
-void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv)
 {
-	struct blk_mq_tags *tags = hctx->tags;
+	struct blk_mq_hw_ctx *hctx;
+	int i;
+
 
-	if (tags->nr_reserved_tags)
-		bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
-	bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
-		      false);
+	queue_for_each_hw_ctx(q, hctx, i) {
+		struct blk_mq_tags *tags = hctx->tags;
+
+		/*
+		 * If not software queues are currently mapped to this
+		 * hardware queue, there's nothing to check
+		 */
+		if (!blk_mq_hw_queue_mapped(hctx))
+			continue;
+
+		if (tags->nr_reserved_tags)
+			bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
+		bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
+		      false);
+	}
+
 }
-EXPORT_SYMBOL(blk_mq_tag_busy_iter);
 
 static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
 {
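
The new helper takes the request_queue rather than a single hardware context, walks every hctx, and skips those with no software queues mapped, so callers no longer open-code that loop (the timeout path in blk-mq.c below is converted to it). A small usage sketch; count_inflight(), queue_inflight() and struct inflight_count are hypothetical, while blk_mq_queue_tag_busy_iter(), the busy_iter_fn callback shape used here and blk_mq_request_started() come from this series:

struct inflight_count {
	unsigned int started;
};

/* busy_iter_fn: invoked for every request with an allocated tag on every mapped hctx */
static void count_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
			   void *priv, bool reserved)
{
	struct inflight_count *count = priv;

	if (blk_mq_request_started(rq))
		count->started++;
}

static unsigned int queue_inflight(struct request_queue *q)
{
	struct inflight_count data = { .started = 0 };

	blk_mq_queue_tag_busy_iter(q, count_inflight, &data);
	return data.started;
}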
+2 −0
@@ -58,6 +58,8 @@ extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
 extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
 extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+		void *priv);
 
 enum {
 	BLK_MQ_TAG_CACHE_MIN	= 1,
+77 −41
@@ -393,15 +393,17 @@ void __blk_mq_complete_request(struct request *rq)
  *	Ends all I/O on a request. It does not handle partial completions.
  *	The actual completion happens out-of-order, through a IPI handler.
  **/
-void blk_mq_complete_request(struct request *rq)
+void blk_mq_complete_request(struct request *rq, int error)
 {
 	struct request_queue *q = rq->q;
 
 	if (unlikely(blk_should_fake_timeout(q)))
 		return;
-	if (!blk_mark_rq_complete(rq))
+	if (!blk_mark_rq_complete(rq)) {
+		rq->errors = error;
 		__blk_mq_complete_request(rq);
+	}
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
 int blk_mq_request_started(struct request *rq)
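
Passing the error code into blk_mq_complete_request() means the path that actually wins blk_mark_rq_complete() also owns the write to rq->errors, instead of callers storing the error first and racing with a timeout/abort completion of the same request (the blk_mq_check_expired() hunk below is exactly that conversion). For a driver the change looks roughly like the following; mydrv_complete_irq() and status_to_errno() are hypothetical, only blk_mq_complete_request() is real:

static void mydrv_complete_irq(struct request *rq, int hw_status)
{
	/*
	 * Old, racy pattern: the write to rq->errors was not covered by the
	 * request's "complete" flag, so a concurrent timeout-side completion
	 * could clobber or observe a half-updated error value:
	 *
	 *	rq->errors = status_to_errno(hw_status);
	 *	blk_mq_complete_request(rq);
	 */
	blk_mq_complete_request(rq, status_to_errno(hw_status));
}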
@@ -616,10 +618,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		 * If a request wasn't started before the queue was
 		 * marked dying, kill it here or it'll go unnoticed.
 		 */
-		if (unlikely(blk_queue_dying(rq->q))) {
-			rq->errors = -EIO;
-			blk_mq_complete_request(rq);
-		}
+		if (unlikely(blk_queue_dying(rq->q)))
+			blk_mq_complete_request(rq, -EIO);
 		return;
 	}
 	if (rq->cmd_flags & REQ_NO_TIMEOUT)
@@ -641,24 +641,16 @@ static void blk_mq_rq_timer(unsigned long priv)
 		.next		= 0,
 		.next_set	= 0,
 	};
-	struct blk_mq_hw_ctx *hctx;
 	int i;
 
-	queue_for_each_hw_ctx(q, hctx, i) {
-		/*
-		 * If not software queues are currently mapped to this
-		 * hardware queue, there's nothing to check
-		 */
-		if (!blk_mq_hw_queue_mapped(hctx))
-			continue;
-
-		blk_mq_tag_busy_iter(hctx, blk_mq_check_expired, &data);
-	}
+	blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &data);
 
 	if (data.next_set) {
 		data.next = blk_rq_timeout(round_jiffies_up(data.next));
 		mod_timer(&q->timeout, data.next);
 	} else {
+		struct blk_mq_hw_ctx *hctx;
+
 		queue_for_each_hw_ctx(q, hctx, i) {
 			/* the hctx may be unmapped, so check it here */
 			if (blk_mq_hw_queue_mapped(hctx))
@@ -1789,13 +1781,19 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
 	}
 }
 
-static void blk_mq_map_swqueue(struct request_queue *q)
+static void blk_mq_map_swqueue(struct request_queue *q,
+			       const struct cpumask *online_mask)
 {
 	unsigned int i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
 
+	/*
+	 * Avoid others reading imcomplete hctx->cpumask through sysfs
+	 */
+	mutex_lock(&q->sysfs_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		cpumask_clear(hctx->cpumask);
 		hctx->nr_ctx = 0;
@@ -1806,16 +1804,17 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	 */
 	queue_for_each_ctx(q, ctx, i) {
 		/* If the cpu isn't online, the cpu is mapped to first hctx */
-		if (!cpu_online(i))
+		if (!cpumask_test_cpu(i, online_mask))
 			continue;
 
 		hctx = q->mq_ops->map_queue(q, i);
 		cpumask_set_cpu(i, hctx->cpumask);
-		cpumask_set_cpu(i, hctx->tags->cpumask);
 		ctx->index_hw = hctx->nr_ctx;
 		hctx->ctxs[hctx->nr_ctx++] = ctx;
 	}
 
+	mutex_unlock(&q->sysfs_lock);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_ctxmap *map = &hctx->ctx_map;
 
@@ -1851,6 +1850,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		hctx->next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
+
+	queue_for_each_ctx(q, ctx, i) {
+		if (!cpumask_test_cpu(i, online_mask))
+			continue;
+
+		hctx = q->mq_ops->map_queue(q, i);
+		cpumask_set_cpu(i, hctx->tags->cpumask);
+	}
 }
 
 static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
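
The ordering in blk_mq_map_swqueue() is the point of these hunks: hctx->cpumask is rebuilt under q->sysfs_lock so sysfs readers never see a half-built mask, and hctx->tags->cpumask is now written only in a final pass, after the per-hctx pass (only partially visible above) has allocated or freed each hardware queue's tags, so the mapping loop can no longer dereference a still-unallocated tags pointer. An abridged sketch of that ordering; the three helper names are hypothetical:

static void map_swqueue_sketch(struct request_queue *q,
			       const struct cpumask *online_mask)
{
	mutex_lock(&q->sysfs_lock);	/* hide the intermediate cpumask state from sysfs */

	/* 1) map each online software ctx to its hctx; hctx->tags may still be
	 *    unallocated here, so hctx->tags->cpumask must not be touched yet */
	map_ctx_to_hctx(q, online_mask);

	mutex_unlock(&q->sysfs_lock);

	/* 2) set up tags for hctxs that gained ctxs; unmapped hctxs lose theirs */
	setup_hctx_tags(q);

	/* 3) only now publish the per-tag-set CPU mask */
	set_tags_cpumask(q, online_mask);
}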
@@ -1918,6 +1925,9 @@ void blk_mq_release(struct request_queue *q)
 		kfree(hctx);
 	}
 
+	kfree(q->mq_map);
+	q->mq_map = NULL;
+
 	kfree(q->queue_hw_ctx);
 
 	/* ctx kobj stays in queue_ctx */
@@ -2027,13 +2037,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	if (blk_mq_init_hw_queues(q, set))
 		goto err_hctxs;
 
+	get_online_cpus();
 	mutex_lock(&all_q_mutex);
-	list_add_tail(&q->all_q_node, &all_q_list);
-	mutex_unlock(&all_q_mutex);
 
+	list_add_tail(&q->all_q_node, &all_q_list);
 	blk_mq_add_queue_tag_set(set, q);
+	blk_mq_map_swqueue(q, cpu_online_mask);
 
-	blk_mq_map_swqueue(q);
+	mutex_unlock(&all_q_mutex);
+	put_online_cpus();
 
 	return q;
 
@@ -2057,30 +2069,27 @@ void blk_mq_free_queue(struct request_queue *q)
 {
 	struct blk_mq_tag_set	*set = q->tag_set;
 
+	mutex_lock(&all_q_mutex);
+	list_del_init(&q->all_q_node);
+	mutex_unlock(&all_q_mutex);
+
 	blk_mq_del_queue_tag_set(q);
 
 	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
 	blk_mq_free_hw_queues(q, set);
 
 	percpu_ref_exit(&q->mq_usage_counter);
-
-	kfree(q->mq_map);
-
-	q->mq_map = NULL;
-
-	mutex_lock(&all_q_mutex);
-	list_del_init(&q->all_q_node);
-	mutex_unlock(&all_q_mutex);
 }
 
 /* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
+static void blk_mq_queue_reinit(struct request_queue *q,
+				const struct cpumask *online_mask)
 {
 	WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
 
 	blk_mq_sysfs_unregister(q);
 
-	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues);
+	blk_mq_update_queue_map(q->mq_map, q->nr_hw_queues, online_mask);
 
 	/*
 	 * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
@@ -2088,7 +2097,7 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 	 * involves free and re-allocate memory, worthy doing?)
 	 */
 
-	blk_mq_map_swqueue(q);
+	blk_mq_map_swqueue(q, online_mask);
 
 	blk_mq_sysfs_register(q);
 }
@@ -2097,16 +2106,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 				      unsigned long action, void *hcpu)
 {
 	struct request_queue *q;
+	int cpu = (unsigned long)hcpu;
+	/*
+	 * New online cpumask which is going to be set in this hotplug event.
+	 * Declare this cpumasks as global as cpu-hotplug operation is invoked
+	 * one-by-one and dynamically allocating this could result in a failure.
+	 */
+	static struct cpumask online_new;
 
 	/*
-	 * Before new mappings are established, hotadded cpu might already
-	 * start handling requests. This doesn't break anything as we map
-	 * offline CPUs to first hardware queue. We will re-init the queue
-	 * below to get optimal settings.
+	 * Before hotadded cpu starts handling requests, new mappings must
+	 * be established.  Otherwise, these requests in hw queue might
+	 * never be dispatched.
+	 *
+	 * For example, there is a single hw queue (hctx) and two CPU queues
+	 * (ctx0 for CPU0, and ctx1 for CPU1).
+	 *
+	 * Now CPU1 is just onlined and a request is inserted into
+	 * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
+	 * still zero.
+	 *
+	 * And then while running hw queue, flush_busy_ctxs() finds bit0 is
+	 * set in pending bitmap and tries to retrieve requests in
+	 * hctx->ctxs[0]->rq_list.  But htx->ctxs[0] is a pointer to ctx0,
+	 * so the request in ctx1->rq_list is ignored.
 	 */
-	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN &&
-	    action != CPU_ONLINE && action != CPU_ONLINE_FROZEN)
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_DEAD:
+	case CPU_UP_CANCELED:
+		cpumask_copy(&online_new, cpu_online_mask);
+		break;
+	case CPU_UP_PREPARE:
+		cpumask_copy(&online_new, cpu_online_mask);
+		cpumask_set_cpu(cpu, &online_new);
+		break;
+	default:
 		return NOTIFY_OK;
+	}
 
 	mutex_lock(&all_q_mutex);
 
@@ -2130,7 +2166,7 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	}
 
 	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q);
+		blk_mq_queue_reinit(q, &online_new);
 
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_unfreeze_queue(q);