
Commit 7b27d547 authored by Lai Jiangshan, committed by Paul E. McKenney

rcu,cleanup: move synchronize_sched_expedited() out of sched.c



The first version of synchronize_sched_expedited() used the migration
code in the scheduler, and was therefore implemented in kernel/sched.c.
However, the more recent version of this code no longer uses the
migration code, so this commit moves it to the main RCU source files.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent deb7a418
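
To illustrate the API being moved: callers use synchronize_sched_expedited() exactly as they would synchronize_sched(), trading CPU time on all CPUs for lower grace-period latency. A hypothetical usage sketch follows (struct foo and foo_remove are illustrative, not part of this commit):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	int data;
};

/* Unlink an element and free it once all RCU-sched readers are done. */
static void foo_remove(struct foo *p)
{
	list_del_rcu(&p->list);		/* readers may still hold references... */
	synchronize_sched_expedited();	/* ...until this grace period ends... */
	kfree(p);			/* ...after which freeing is safe. */
}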
include/linux/rcupdate.h +0 −1
@@ -66,7 +66,6 @@ extern void call_rcu_sched(struct rcu_head *head,
 extern void synchronize_sched(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
-extern void synchronize_sched_expedited(void);
 extern int sched_expedited_torture_stats(char *page);
 
 static inline void __rcu_read_lock_bh(void)
include/linux/rcutiny.h +5 −0
@@ -60,6 +60,11 @@ static inline void synchronize_rcu_bh_expedited(void)
 	synchronize_sched();
 }
 
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_sched();
+}
+
 #ifdef CONFIG_TINY_RCU
 
 static inline void rcu_preempt_note_context_switch(void)
include/linux/rcutree.h +1 −0
@@ -48,6 +48,7 @@ static inline void exit_rcu(void)
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 extern void synchronize_rcu_bh(void);
+extern void synchronize_sched_expedited(void);
 extern void synchronize_rcu_expedited(void);
 
 static inline void synchronize_rcu_bh_expedited(void)
kernel/rcutree_plugin.h +71 −0
@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/stop_machine.h>
 
 /*
  * Check the RCU kernel configuration parameters and print informative
@@ -1014,6 +1015,76 @@ static void __init __rcu_init_preempt(void)
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
+#ifndef CONFIG_SMP
+
+void synchronize_sched_expedited(void)
+{
+	cond_resched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+	/*
+	 * There must be a full memory barrier on each affected CPU
+	 * between the time that try_stop_cpus() is called and the
+	 * time that it returns.
+	 *
+	 * In the current initial implementation of cpu_stop, the
+	 * above condition is already met when the control reaches
+	 * this point and the following smp_mb() is not strictly
+	 * necessary.  Do smp_mb() anyway for documentation and
+	 * robustness against future implementation changes.
+	 */
+	smp_mb(); /* See above comment block. */
+	return 0;
+}
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+	int snap, trycount = 0;
+
+	smp_mb();  /* ensure prior mod happens before capturing snap. */
+	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
+	get_online_cpus();
+	while (try_stop_cpus(cpu_online_mask,
+			     synchronize_sched_expedited_cpu_stop,
+			     NULL) == -EAGAIN) {
+		put_online_cpus();
+		if (trycount++ < 10)
+			udelay(trycount * num_online_cpus());
+		else {
+			synchronize_sched();
+			return;
+		}
+		if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
+			smp_mb(); /* ensure test happens before caller kfree */
+			return;
+		}
+		get_online_cpus();
+	}
+	atomic_inc(&synchronize_sched_expedited_count);
+	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
+
 #if !defined(CONFIG_RCU_FAST_NO_HZ)
 
 /*
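
The "count - snap > 0" early exit above is a generation-counter idiom: the caller snapshots the counter before retrying, and if other CPUs advance the counter past that snapshot, a complete expedited pass finished after the caller started, so that pass covers the caller as well. A minimal standalone sketch of the idiom in plain C11 (pass_count, try_heavy_pass, and expedited_wait are illustrative stand-ins, not kernel APIs):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int pass_count;	/* analog of synchronize_sched_expedited_count */

/* Stand-in for try_stop_cpus(): a heavy operation that may fail. */
static bool try_heavy_pass(void)
{
	return true;	/* stub: always succeeds in this sketch */
}

void expedited_wait(void)
{
	int snap = atomic_load(&pass_count) + 1;	/* first pass that could cover us */

	while (!try_heavy_pass()) {
		/*
		 * Signed difference tolerates counter wrap.  Advancing
		 * past the snapshot means at least one full pass ran
		 * to completion after our snapshot was taken.
		 */
		if (atomic_load(&pass_count) - snap > 0)
			return;
	}
	atomic_fetch_add(&pass_count, 1);	/* publish our completed pass */
}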
kernel/sched.c +0 −69
@@ -9131,72 +9131,3 @@ struct cgroup_subsys cpuacct_subsys = {
 };
 #endif	/* CONFIG_CGROUP_CPUACCT */
-
-#ifndef CONFIG_SMP
-
-void synchronize_sched_expedited(void)
-{
-	barrier();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#else /* #ifndef CONFIG_SMP */
-
-static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
-
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-	/*
-	 * There must be a full memory barrier on each affected CPU
-	 * between the time that try_stop_cpus() is called and the
-	 * time that it returns.
-	 *
-	 * In the current initial implementation of cpu_stop, the
-	 * above condition is already met when the control reaches
-	 * this point and the following smp_mb() is not strictly
-	 * necessary.  Do smp_mb() anyway for documentation and
-	 * robustness against future implementation changes.
-	 */
-	smp_mb(); /* See above comment block. */
-	return 0;
-}
-
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly.  This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier.  Failing to
- * observe this restriction will result in deadlock.
- */
-void synchronize_sched_expedited(void)
-{
-	int snap, trycount = 0;
-
-	smp_mb();  /* ensure prior mod happens before capturing snap. */
-	snap = atomic_read(&synchronize_sched_expedited_count) + 1;
-	get_online_cpus();
-	while (try_stop_cpus(cpu_online_mask,
-			     synchronize_sched_expedited_cpu_stop,
-			     NULL) == -EAGAIN) {
-		put_online_cpus();
-		if (trycount++ < 10)
-			udelay(trycount * num_online_cpus());
-		else {
-			synchronize_sched();
-			return;
-		}
-		if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
-			smp_mb(); /* ensure test happens before caller kfree */
-			return;
-		}
-		get_online_cpus();
-	}
-	atomic_inc(&synchronize_sched_expedited_count);
-	smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
-	put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
-#endif /* #else #ifndef CONFIG_SMP */