Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 900b1028 authored by Paul E. McKenney
Browse files

srcu: Allow SRCU to access rcu_scheduler_active



This is primarily a code-movement commit in preparation for allowing
SRCU to handle early-boot SRCU grace periods.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 15fecf89
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -217,14 +217,14 @@ static inline void exit_rcu(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
/*
 * No-op stub: used when the surrounding #if guard (lockdep and/or SRCU,
 * per the adjacent diff lines) is not satisfied, so no scheduler-start
 * bookkeeping is needed.
 */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

+5 −4
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@ static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	RCU_TRACE(.name = "rcu_bh")
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
#include <linux/kernel_stat.h>

int rcu_scheduler_active __read_mostly;
@@ -65,15 +65,16 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
 * The reason for this is that Tiny RCU does not need kthreads, so does
 * not have to care about the fact that the scheduler is half-initialized
 * at a certain phase of the boot process.
 * at a certain phase of the boot process.  Unless SRCU is in the mix.
 */
/* Record that the scheduler is up, for Tiny RCU's benefit. */
void __init rcu_scheduler_starting(void)
{
	/* This must run before the first context switch ever happens. */
	WARN_ON(nr_context_switches() > 0);
	/*
	 * NOTE(review): the two consecutive assignments below appear to be
	 * interleaved before/after lines of the same diff hunk; only the
	 * second assignment takes effect.  Confirm against the upstream
	 * commit before treating this as a single coherent function.
	 */
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	/*
	 * With SRCU built in, stop at the INIT stage here; the transition
	 * to RCU_SCHEDULER_RUNNING is deferred to a later core_initcall()
	 * (rcu_set_runtime_mode()).
	 */
	rcu_scheduler_active = IS_ENABLED(CONFIG_SRCU)
		? RCU_SCHEDULER_INIT : RCU_SCHEDULER_RUNNING;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */

#ifdef CONFIG_RCU_TRACE

+1 −1
Original line number Diff line number Diff line
@@ -3974,7 +3974,7 @@ early_initcall(rcu_spawn_gp_kthread);
 * task is booting the system, and such primitives are no-ops).  After this
 * function is called, any synchronous grace-period primitives are run as
 * expedited, with the requesting task driving the grace period forward.
 * A later core_initcall() rcu_exp_runtime_mode() will switch to full
 * A later core_initcall() rcu_set_runtime_mode() will switch to full
 * runtime RCU functionality.
 */
void rcu_scheduler_starting(void)
+0 −12
Original line number Diff line number Diff line
@@ -737,15 +737,3 @@ void synchronize_rcu_expedited(void)
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Switch to run-time mode once Tree RCU has fully initialized.
 * (This is the function being removed by this commit; its replacement,
 * rcu_set_runtime_mode(), lives in common code.)
 */
static int __init rcu_exp_runtime_mode(void)
{
	/* Exercise the synchronous grace-period APIs before the switch... */
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	/* ...and again after, to catch mode-transition breakage. */
	rcu_test_sync_prims();
	return 0;
}
core_initcall(rcu_exp_runtime_mode);
+34 −18
Original line number Diff line number Diff line
@@ -124,7 +124,7 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_exp_runtime_mode()
 * when the first task is spawned until the rcu_set_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
 */
bool rcu_gp_is_normal(void)
@@ -190,6 +190,39 @@ void rcu_end_inkernel_boot(void)

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
	/* These smoke tests are only worthwhile under CONFIG_PROVE_RCU. */
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	/* Normal (non-expedited) variants. */
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	/* Expedited variants. */
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

/* Needed by full Tree RCU, and by Tiny RCU when SRCU is configured. */
#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
	/* Exercise the synchronous grace-period APIs before the switch... */
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	/* ...and again after, to catch mode-transition breakage. */
	rcu_test_sync_prims();
	return 0;
}
/* Run after core kernel init, before device initcalls. */
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_PREEMPT_RCU

/*
@@ -817,23 +850,6 @@ static void rcu_spawn_tasks_kthread(void)

#endif /* #ifdef CONFIG_TASKS_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 * (This is the copy being removed by this commit as the function
 * moves earlier in the file.)
 */
void rcu_test_sync_prims(void)
{
	/* These smoke tests are only worthwhile under CONFIG_PROVE_RCU. */
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
		return;
	/* Normal (non-expedited) variants. */
	synchronize_rcu();
	synchronize_rcu_bh();
	synchronize_sched();
	/* Expedited variants. */
	synchronize_rcu_expedited();
	synchronize_rcu_bh_expedited();
	synchronize_sched_expedited();
}

#ifdef CONFIG_PROVE_RCU

/*