Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit abb06b99 authored by Paul E. McKenney
Browse files

rcu: Pull rcu_sched_qs_mask into rcu_dynticks structure



The rcu_sched_qs_mask variable is yet another isolated per-CPU variable,
so this commit pulls it into the pre-existing rcu_dynticks per-CPU
structure.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 88a4976d
Loading
Loading
Loading
Loading
+8 −1
Original line number Diff line number Diff line
@@ -1104,6 +1104,7 @@ Its fields are as follows:
  1   int dynticks_nesting;
  2   int dynticks_nmi_nesting;
  3   atomic_t dynticks;
  4   int rcu_sched_qs_mask;
</pre>

<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
@@ -1117,11 +1118,17 @@ NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
field, except that NMIs that interrupt non-dyntick-idle execution
are not counted.

</p><p>Finally, the <tt>-&gt;dynticks</tt> field counts the corresponding
</p><p>The <tt>-&gt;dynticks</tt> field counts the corresponding
CPU's transitions to and from dyntick-idle mode, so that this counter
has an even value when the CPU is in dyntick-idle mode and an odd
value otherwise.

</p><p>Finally, the <tt>-&gt;rcu_sched_qs_mask</tt> field is used
to record the fact that the RCU core code would really like to
see a quiescent state from the corresponding CPU.
This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
code, which provide a momentary idle sojourn in response.

<table>
<tr><th>&nbsp;</th></tr>
<tr><th align="left">Quick Quiz:</th></tr>
+5 −7
Original line number Diff line number Diff line
@@ -272,8 +272,6 @@ void rcu_bh_qs(void)
	}
}

static DEFINE_PER_CPU(int, rcu_sched_qs_mask);

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control.  Initially this is for TLB flushing.
@@ -464,8 +462,8 @@ static void rcu_momentary_dyntick_idle(void)
	 * Yes, we can lose flag-setting operations.  This is OK, because
	 * the flag will be set again after some delay.
	 */
	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
	raw_cpu_write(rcu_sched_qs_mask, 0);
	resched_mask = raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask);
	raw_cpu_write(rcu_dynticks.rcu_sched_qs_mask, 0);

	/* Find the flavor that needs a quiescent state. */
	for_each_rcu_flavor(rsp) {
@@ -499,7 +497,7 @@ void rcu_note_context_switch(void)
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch();
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -524,7 +522,7 @@ void rcu_all_qs(void)
	unsigned long flags;

	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_sched_qs_mask))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
@@ -1351,7 +1349,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
	 * is set too high, we override with half of the RCU CPU stall
	 * warning delay.
	 */
	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
	rcrmp = &per_cpu(rcu_dynticks.rcu_sched_qs_mask, rdp->cpu);
	if (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
	    time_after(jiffies, rdp->rsp->jiffies_resched)) {
		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+1 −0
Original line number Diff line number Diff line
@@ -113,6 +113,7 @@ struct rcu_dynticks {
				    /* Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;   /* Track NMI nesting level. */
	atomic_t dynticks;	    /* Even value for idle, else odd. */
	int rcu_sched_qs_mask;      /* GP old, need quiescent state. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
				    /* irq/process nesting level from idle. */