Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 38200cf2 authored by Paul E. McKenney
Browse files

rcu: Remove "cpu" argument to rcu_note_context_switch()



The "cpu" argument to rcu_note_context_switch() is always the current
CPU, so drop it.  This in turn allows the "cpu" argument to
rcu_preempt_note_context_switch() to be removed, which allows the sole
use of "cpu" in both functions to be replaced with a this_cpu_ptr().
Again, the anticipated cross-CPU uses of these functions have been
replaced by NO_HZ_FULL.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
parent 86aea0e6
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -78,7 +78,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
	call_rcu(head, func);
}

/*
 * NOTE(review): this is a rendered diff hunk (include/linux/rcutiny.h) —
 * the first signature line is the OLD declaration being removed and the
 * second is the NEW one being added; only one exists in the real file.
 * Tiny-RCU context-switch hook: the "cpu" argument is dropped because the
 * caller is always on the current CPU. The body simply reports an
 * RCU-sched quiescent state via rcu_sched_qs().
 */
static inline void rcu_note_context_switch(int cpu)
static inline void rcu_note_context_switch(void)
{
	rcu_sched_qs();
}
+2 −2
Original line number Diff line number Diff line
@@ -30,7 +30,7 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_note_context_switch(int cpu);
void rcu_note_context_switch(void);
#ifndef CONFIG_RCU_NOCB_CPU_ALL
int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
@@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void);
 */
/*
 * NOTE(review): rendered diff hunk (include/linux/rcutree.h) — the two
 * call lines below are the OLD call (with "cpu") being removed and the
 * NEW argument-less call being added; only one exists in the real file.
 * Wrapper used by virtualization code to note a guest context switch;
 * its own "cpu" parameter is retained (now unused) so callers need not
 * change, and it forwards to rcu_note_context_switch().
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
	rcu_note_context_switch(cpu);
	rcu_note_context_switch();
}

void synchronize_rcu_bh(void);
+2 −2
Original line number Diff line number Diff line
@@ -286,11 +286,11 @@ static void rcu_momentary_dyntick_idle(void)
 * and requires special handling for preemptible RCU.
 * The caller must have disabled preemption.
 */
void rcu_note_context_switch(int cpu)
void rcu_note_context_switch(void)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch(cpu);
	rcu_preempt_note_context_switch();
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	trace_rcu_utilization(TPS("End context switch"));
+1 −1
Original line number Diff line number Diff line
@@ -547,7 +547,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
+3 −3
Original line number Diff line number Diff line
@@ -156,7 +156,7 @@ static void rcu_preempt_qs(void)
 *
 * Caller must disable preemption.
 */
static void rcu_preempt_note_context_switch(int cpu)
static void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;
@@ -167,7 +167,7 @@ static void rcu_preempt_note_context_switch(int cpu)
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
		rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
		rdp = this_cpu_ptr(rcu_preempt_state.rda);
		rnp = rdp->mynode;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
@@ -945,7 +945,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
 * Because preemptible RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
/*
 * NOTE(review): rendered diff hunk (kernel/rcu/tree_plugin.h) — old
 * signature (with "cpu") above, new argument-less signature below; only
 * one exists in the real file.
 * !CONFIG_PREEMPT_RCU stub: as the surrounding hunk's comment states,
 * preemptible RCU does not exist in this configuration, so there are no
 * preempted-reader quiescent states to check and the body is empty.
 */
static void rcu_preempt_note_context_switch(int cpu)
static void rcu_preempt_note_context_switch(void)
{
}

Loading