Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9386c0b7 authored by Paul E. McKenney
Browse files

rcu: Rationalize kthread spawning



Currently, RCU spawns kthreads from several different early_initcall()
functions.  Although this has served RCU well for quite some time,
as more kthreads are added a more deterministic approach is required.
This commit therefore causes all of RCU's early-boot kthreads to be
spawned from a single early_initcall() function.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Tested-by: Paul Gortmaker <paul.gortmaker@windriver.com>
parent f4aa84ba
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -3489,7 +3489,7 @@ static int rcu_pm_notify(struct notifier_block *self,
}

/*
 * Spawn the kthread that handles this RCU flavor's grace periods.
 * Spawn the kthreads that handle each RCU flavor's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
@@ -3498,6 +3498,7 @@ static int __init rcu_spawn_gp_kthread(void)
	struct rcu_state *rsp;
	struct task_struct *t;

	rcu_scheduler_fully_active = 1;
	for_each_rcu_flavor(rsp) {
		t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
		BUG_ON(IS_ERR(t));
@@ -3507,6 +3508,7 @@ static int __init rcu_spawn_gp_kthread(void)
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		rcu_spawn_nocb_kthreads(rsp);
	}
	rcu_spawn_boost_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);
+1 −0
Original line number Diff line number Diff line
@@ -572,6 +572,7 @@ static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(int cpu);
static void rcu_prepare_for_idle(int cpu);
+3 −9
Original line number Diff line number Diff line
@@ -1435,14 +1435,13 @@ static struct smp_hotplug_thread rcu_cpu_thread_spec = {
};

/*
 * Spawn all kthreads -- called as soon as the scheduler is running.
 * Spawn boost kthreads -- called as soon as the scheduler is running.
 */
static int __init rcu_spawn_kthreads(void)
static void __init rcu_spawn_boost_kthreads(void)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_scheduler_fully_active = 1;
	for_each_possible_cpu(cpu)
		per_cpu(rcu_cpu_has_work, cpu) = 0;
	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
@@ -1452,9 +1451,7 @@ static int __init rcu_spawn_kthreads(void)
		rcu_for_each_leaf_node(rcu_state_p, rnp)
			(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
	}
	return 0;
}
early_initcall(rcu_spawn_kthreads);

static void rcu_prepare_kthreads(int cpu)
{
@@ -1492,12 +1489,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
{
}

static int __init rcu_scheduler_really_started(void)
static void __init rcu_spawn_boost_kthreads(void)
{
	rcu_scheduler_fully_active = 1;
	return 0;
}
early_initcall(rcu_scheduler_really_started);

static void rcu_prepare_kthreads(int cpu)
{