
Commit 65d5291e authored by Sebastian Andrzej Siewior, committed by Jens Axboe

blk-mq: Convert to new hotplug state machine



Install the callbacks via the state machine so we can phase out the cpu
hotplug notifiers mess.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-block@vger.kernel.org
Cc: rt@linutronix.de
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20160919212601.180033814@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
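For readers coming from the old API: instead of one notifier that switches on CPU_* action codes, a subsystem now registers a single state with a startup ("prepare") callback and a teardown ("dead") callback, and the cpuhp core invokes them as each CPU passes that state going up or down. Below is a minimal sketch of the registration pattern, assuming a built-in kernel context; the my_*() names are illustrative placeholders, while cpuhp_setup_state_nocalls(), CPUHP_BLK_MQ_PREPARE and "block/mq:prepare" are the identifiers actually used in the diff.

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Illustrative callbacks; blk-mq's real ones appear in the diff below. */
static int my_prepare(unsigned int cpu)
{
	/* Runs on a control CPU before @cpu starts handling work. */
	return 0;
}

static int my_dead(unsigned int cpu)
{
	/* Runs on a control CPU after @cpu has gone offline. */
	return 0;
}

static int __init my_init(void)
{
	/*
	 * The _nocalls variant installs the callbacks without invoking
	 * them for CPUs that are already online at registration time;
	 * it returns 0 on success or a negative errno.
	 */
	return cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE,
					 "block/mq:prepare",
					 my_prepare, my_dead);
}

A driver that can unload would pair this with cpuhp_remove_state_nocalls(); blk-mq registers from an initcall and never tears the state down.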
parent 9467f859
block/blk-mq.c: +43 −44
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2116,50 +2116,18 @@ static void blk_mq_queue_reinit(struct request_queue *q,
 	blk_mq_sysfs_register(q);
 }
 
-static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
-				      unsigned long action, void *hcpu)
-{
-	struct request_queue *q;
-	int cpu = (unsigned long)hcpu;
-	/*
-	 * New online cpumask which is going to be set in this hotplug event.
-	 * Declare this cpumasks as global as cpu-hotplug operation is invoked
-	 * one-by-one and dynamically allocating this could result in a failure.
-	 */
-	static struct cpumask online_new;
+/*
+ * New online cpumask which is going to be set in this hotplug event.
+ * Declare this cpumasks as global as cpu-hotplug operation is invoked
+ * one-by-one and dynamically allocating this could result in a failure.
+ */
+static struct cpumask cpuhp_online_new;
 
-	/*
-	 * Before hotadded cpu starts handling requests, new mappings must
-	 * be established.  Otherwise, these requests in hw queue might
-	 * never be dispatched.
-	 *
-	 * For example, there is a single hw queue (hctx) and two CPU queues
-	 * (ctx0 for CPU0, and ctx1 for CPU1).
-	 *
-	 * Now CPU1 is just onlined and a request is inserted into
-	 * ctx1->rq_list and set bit0 in pending bitmap as ctx1->index_hw is
-	 * still zero.
-	 *
-	 * And then while running hw queue, flush_busy_ctxs() finds bit0 is
-	 * set in pending bitmap and tries to retrieve requests in
-	 * hctx->ctxs[0]->rq_list.  But htx->ctxs[0] is a pointer to ctx0,
-	 * so the request in ctx1->rq_list is ignored.
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DEAD:
-	case CPU_UP_CANCELED:
-		cpumask_copy(&online_new, cpu_online_mask);
-		break;
-	case CPU_UP_PREPARE:
-		cpumask_copy(&online_new, cpu_online_mask);
-		cpumask_set_cpu(cpu, &online_new);
-		break;
-	default:
-		return NOTIFY_OK;
-	}
+static void blk_mq_queue_reinit_work(void)
+{
+	struct request_queue *q;
 
 	mutex_lock(&all_q_mutex);
-
 	/*
 	 * We need to freeze and reinit all existing queues.  Freezing
 	 * involves synchronous wait for an RCU grace period and doing it
@@ -2180,13 +2148,43 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 	}
 
 	list_for_each_entry(q, &all_q_list, all_q_node)
-		blk_mq_queue_reinit(q, &online_new);
+		blk_mq_queue_reinit(q, &cpuhp_online_new);
 
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_unfreeze_queue(q);
 
 	mutex_unlock(&all_q_mutex);
-	return NOTIFY_OK;
+}
+
+static int blk_mq_queue_reinit_dead(unsigned int cpu)
+{
+	cpumask_clear_cpu(cpu, &cpuhp_online_new);
+	blk_mq_queue_reinit_work();
+	return 0;
+}
+
+/*
+ * Before hotadded cpu starts handling requests, new mappings must be
+ * established.  Otherwise, these requests in hw queue might never be
+ * dispatched.
+ *
+ * For example, there is a single hw queue (hctx) and two CPU queues (ctx0
+ * for CPU0, and ctx1 for CPU1).
+ *
+ * Now CPU1 is just onlined and a request is inserted into ctx1->rq_list
+ * and set bit0 in pending bitmap as ctx1->index_hw is still zero.
+ *
+ * And then while running hw queue, flush_busy_ctxs() finds bit0 is set in
+ * pending bitmap and tries to retrieve requests in hctx->ctxs[0]->rq_list.
+ * But htx->ctxs[0] is a pointer to ctx0, so the request in ctx1->rq_list
+ * is ignored.
+ */
+static int blk_mq_queue_reinit_prepare(unsigned int cpu)
+{
+	cpumask_copy(&cpuhp_online_new, cpu_online_mask);
+	cpumask_set_cpu(cpu, &cpuhp_online_new);
+	blk_mq_queue_reinit_work();
+	return 0;
 }
 
 static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
@@ -2391,8 +2389,9 @@ static int __init blk_mq_init(void)
 	cpuhp_setup_state_multi(CPUHP_BLK_MQ_DEAD, "block/mq:dead", NULL,
 				blk_mq_hctx_notify_dead);
 
-	hotcpu_notifier(blk_mq_queue_reinit_notify, 0);
-
+	cpuhp_setup_state_nocalls(CPUHP_BLK_MQ_PREPARE, "block/mq:prepare",
+				  blk_mq_queue_reinit_prepare,
+				  blk_mq_queue_reinit_dead);
 	return 0;
 }
 subsys_initcall(blk_mq_init);
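One behavioral note on the conversion: the old notifier's CPU_UP_CANCELED case has no explicit counterpart in the new code. If a CPU fails partway through bring-up, the hotplug state machine rolls back by running the teardown callbacks of every state the CPU had already passed, so blk_mq_queue_reinit_dead() is invoked and the aborted CPU drops out of cpuhp_online_new without any dedicated cancel handling.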