
Commit 0742ac3e authored by Paul E. McKenney

rcu: Make expedited grace periods recheck dyntick idle state

Expedited grace periods check dyntick-idle state, and avoid sending
IPIs to idle CPUs, including those running guest OSes and, on NOHZ_FULL
kernels, nohz_full CPUs.  However, the kernel has been observed checking
a CPU while it was non-idle, but sending the IPI after it had gone
idle.  This commit therefore rechecks idle state immediately before
sending the IPI, refraining from IPIing CPUs that have since gone idle.

Reported-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent d0af39e8
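
To make the race and the fix easier to follow than from the raw diffs
below, here is a minimal standalone sketch of the snapshot-then-recheck
pattern in C11.  The names here (cpu_dynticks, select_cpu_needs_ipi,
try_ipi) are hypothetical illustrations, not the kernel's API, and plain
atomic loads stand in for the kernel's full-barrier
atomic_add_return(0, ...).  The counter convention matches the commit:
the per-CPU dynticks counter is incremented on every idle entry and
exit, so an odd value means the CPU is currently non-idle.

/*
 * Minimal sketch only: a single "CPU" and a single counter stand in
 * for the kernel's per-CPU rcu_dynticks state.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int cpu_dynticks;		/* odd: non-idle, even: idle */
static int exp_dynticks_snap;		/* snapshot taken at selection time */

/* Selection pass: snapshot the counter and decide whether an IPI is needed. */
static bool select_cpu_needs_ipi(void)
{
	exp_dynticks_snap = atomic_load(&cpu_dynticks);
	return exp_dynticks_snap & 0x1;	/* non-idle CPUs are IPI candidates */
}

/* IPI pass: recheck immediately before sending the IPI. */
static bool try_ipi(void)
{
	if (atomic_load(&cpu_dynticks) != exp_dynticks_snap) {
		/* Counter moved: the CPU passed through idle, skip the IPI. */
		return false;
	}
	printf("IPI sent\n");
	return true;
}

int main(void)
{
	atomic_store(&cpu_dynticks, 1);		/* CPU starts non-idle (odd) */
	if (select_cpu_needs_ipi()) {
		/* Simulate the race: the CPU enters idle after the snapshot. */
		atomic_fetch_add(&cpu_dynticks, 1);
		if (!try_ipi())
			printf("skipped IPI: CPU went idle after snapshot\n");
	}
	return 0;
}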
kernel/rcu/tree.h +1 −0

@@ -404,6 +404,7 @@ struct rcu_data {
 	atomic_long_t exp_workdone1;	/* # done by others #1. */
 	atomic_long_t exp_workdone2;	/* # done by others #2. */
 	atomic_long_t exp_workdone3;	/* # done by others #3. */
+	int exp_dynticks_snap;		/* Double-check need for IPI. */
 
 	/* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
kernel/rcu/tree_exp.h +11 −1

@@ -358,8 +358,10 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 
+			rdp->exp_dynticks_snap =
+				atomic_add_return(0, &rdtp->dynticks);
 			if (raw_smp_processor_id() == cpu ||
-			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+			    !(rdp->exp_dynticks_snap & 0x1) ||
 			    !(rnp->qsmaskinitnext & rdp->grpmask))
 				mask_ofl_test |= rdp->grpmask;
 		}
@@ -377,9 +379,17 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
 		/* IPI the remaining CPUs for expedited quiescent state. */
 		for_each_leaf_node_possible_cpu(rnp, cpu) {
 			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
 			if (!(mask_ofl_ipi & mask))
 				continue;
 retry_ipi:
+			if (atomic_add_return(0, &rdtp->dynticks) !=
+			    rdp->exp_dynticks_snap) {
+				mask_ofl_test |= mask;
+				continue;
+			}
 			ret = smp_call_function_single(cpu, func, rsp, 0);
 			if (!ret) {
 				mask_ofl_ipi &= ~mask;
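
A note on the design of the recheck in the second hunk: equal dynticks
values before and after imply that the CPU has not entered or left
dyntick idle since the snapshot, while a changed value means the CPU has
passed through an idle state, which is itself a quiescent state.  In
that case the loop adds the CPU to mask_ofl_test, the set of CPUs whose
quiescent state can be reported without an IPI, rather than issuing the
IPI.  The atomic_add_return(0, ...) idiom is used instead of a plain
read because, in the kernel's memory model, value-returning atomic
operations imply full memory ordering.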