
Commit dcdb8807 authored by Paul E. McKenney

rcu: Consolidate expedited CPU selection



Now that sync_sched_exp_select_cpus() and sync_rcu_exp_select_cpus()
are identical aside from the argument to smp_call_function_single(),
this commit consolidates them with a functional argument.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 66fe6cbe
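
The refactoring is plain C function-pointer dispatch: two routines that differed only in the handler they handed to smp_call_function_single() become one routine that takes the handler as a parameter. A minimal userspace sketch of the pattern (toy names and a fake CPU loop, not kernel code) might look like this:

#include <stdio.h>

/* Toy analogue of the kernel's smp_call_func_t handler type. */
typedef void (*call_func_t)(void *info);

static void sched_handler(void *info)
{
	printf("sched-flavor handler, info=%s\n", (char *)info);
}

static void preempt_handler(void *info)
{
	printf("preempt-flavor handler, info=%s\n", (char *)info);
}

/*
 * One selection loop instead of two near-identical copies: the
 * handler that used to be hard-coded is now an argument, just as
 * the consolidated sync_rcu_exp_select_cpus() receives func.
 */
static void select_cpus(call_func_t func, void *info)
{
	for (int cpu = 0; cpu < 2; cpu++) {	/* pretend CPU walk */
		printf("IPI cpu %d: ", cpu);
		func(info);
	}
}

int main(void)
{
	/* Each caller supplies its own handler, as in the diff below. */
	select_cpus(sched_handler, "rsp");
	select_cpus(preempt_handler, "rsp");
	return 0;
}

In the actual diff, synchronize_sched_expedited() passes synchronize_sched_expedited_cpu_stop while synchronize_rcu_expedited() passes sync_rcu_exp_handler, so the selection loop is written exactly once.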
kernel/rcu/tree.c  +4 −3
@@ -3654,7 +3654,8 @@ static void synchronize_sched_expedited_cpu_stop(void *data)
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
-static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
+static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+				     smp_call_func_t func)
{
	int cpu;
	unsigned long flags;
@@ -3696,7 +3697,7 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
			if (!(mask_ofl_ipi & mask))
				continue;
-			ret = smp_call_function_single(cpu, synchronize_sched_expedited_cpu_stop, NULL, 0);
+			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret)
				mask_ofl_ipi &= ~mask;
		}
@@ -3788,7 +3789,7 @@ void synchronize_sched_expedited(void)
		return;  /* Someone else did our work for us. */

	rcu_exp_gp_seq_start(rsp);
-	sync_sched_exp_select_cpus(rsp);
+	sync_rcu_exp_select_cpus(rsp, synchronize_sched_expedited_cpu_stop);
	synchronize_sched_expedited_wait(rsp);

	rcu_exp_gp_seq_end(rsp);
kernel/rcu/tree_plugin.h  +1 −60
@@ -708,65 +708,6 @@ static void sync_rcu_exp_handler(void *info)
	rcu_report_exp_rdp(rsp, rdp, true);
}

-/*
- * Select the nodes that the upcoming expedited grace period needs
- * to wait for.
- */
-static void sync_rcu_exp_select_cpus(struct rcu_state *rsp)
-{
-	int cpu;
-	unsigned long flags;
-	unsigned long mask;
-	unsigned long mask_ofl_test;
-	unsigned long mask_ofl_ipi;
-	int ret;
-	struct rcu_node *rnp;
-
-	sync_exp_reset_tree(rsp);
-	rcu_for_each_leaf_node(rsp, rnp) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		smp_mb__after_unlock_lock();
-
-		/* Each pass checks a CPU for identity, offline, and idle. */
-		mask_ofl_test = 0;
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
-			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
-			if (raw_smp_processor_id() == cpu ||
-			    cpu_is_offline(cpu) ||
-			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
-				mask_ofl_test |= rdp->grpmask;
-		}
-		mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
-
-		/*
-		 * Need to wait for any blocked tasks as well.  Note that
-		 * additional blocking tasks will also block the expedited
-		 * GP until such time as the ->expmask bits are cleared.
-		 */
-		if (rcu_preempt_has_tasks(rnp))
-			rnp->exp_tasks = rnp->blkd_tasks.next;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-
-		/* IPI the remaining CPUs for expedited quiescent state. */
-		mask = 1;
-		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
-			if (!(mask_ofl_ipi & mask))
-				continue;
-			ret = smp_call_function_single(cpu,
-						       sync_rcu_exp_handler,
-						       rsp, 0);
-			if (!ret)
-				mask_ofl_ipi &= ~mask;
-		}
-		/* Report quiescent states for those that went offline. */
-		mask_ofl_test |= mask_ofl_ipi;
-		if (mask_ofl_test)
-			rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
-	}
-}
-
/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
@@ -795,7 +736,7 @@ void synchronize_rcu_expedited(void)
	rcu_exp_gp_seq_start(rsp);

	/* Initialize the rcu_node tree in preparation for the wait. */
-	sync_rcu_exp_select_cpus(rsp);
+	sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);

	/* Wait for snapshotted ->blkd_tasks lists to drain. */
	rnp = rcu_get_root(rsp);