Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bd9f0686 authored by Paul E. McKenney, committed by Paul E. McKenney
Browse files

rcu: Repurpose no-CBs event tracing to future-GP events



Dyntick-idle CPUs need to be able to pre-announce their need for grace
periods.  This can be done using something similar to the mechanism used
by no-CB CPUs to announce their need for grace periods.  This commit
moves in this direction by renaming the no-CBs grace-period event tracing
to suit the new future-grace-period needs.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent b92db6cb
Loading
Loading
Loading
Loading
+8 −8
Original line number Diff line number Diff line
@@ -72,10 +72,10 @@ TRACE_EVENT(rcu_grace_period,
);

/*
 * Tracepoint for no-callbacks grace-period events.  The caller should
 * pull the data from the rcu_node structure, other than rcuname, which
 * comes from the rcu_state structure, and event, which is one of the
 * following:
 * Tracepoint for future grace-period events, including those for no-callbacks
 * CPUs.  The caller should pull the data from the rcu_node structure,
 * other than rcuname, which comes from the rcu_state structure, and event,
 * which is one of the following:
 *
 * "Startleaf": Request a nocb grace period based on leaf-node data.
 * "Startedleaf": Leaf-node start proved sufficient.
@@ -87,7 +87,7 @@ TRACE_EVENT(rcu_grace_period,
 * "Cleanup": Clean up rcu_node structure after previous GP.
 * "CleanupMore": Clean up, and another no-CB GP is needed.
 */
TRACE_EVENT(rcu_nocb_grace_period,
TRACE_EVENT(rcu_future_grace_period,

	TP_PROTO(char *rcuname, unsigned long gpnum, unsigned long completed,
		 unsigned long c, u8 level, int grplo, int grphi,
@@ -653,7 +653,7 @@ TRACE_EVENT(rcu_barrier,
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
				    qsmask) do { } while (0)
#define trace_rcu_nocb_grace_period(rcuname, gpnum, completed, c, \
#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
				      level, grplo, grphi, event) \
				      do { } while (0)
#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
+32 −30
Original line number Diff line number Diff line
@@ -2034,7 +2034,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
	wake_up_all(&rnp->nocb_gp_wq[c & 0x1]);
	rnp->n_nocb_gp_requests[c & 0x1] = 0;
	needmore = rnp->n_nocb_gp_requests[(c + 1) & 0x1];
	trace_rcu_nocb_grace_period(rsp->name, rnp->gpnum, rnp->completed,
	trace_rcu_future_grace_period(rsp->name, rnp->gpnum, rnp->completed,
				      c, rnp->level, rnp->grplo, rnp->grphi,
				      needmore ? "CleanupMore" : "Cleanup");
	return needmore;
@@ -2183,9 +2183,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)

	/* Count our request for a grace period. */
	rnp->n_nocb_gp_requests[c & 0x1]++;
	trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
				    c, rnp->level, rnp->grplo, rnp->grphi,
				    "Startleaf");
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, "Startleaf");

	if (rnp->gpnum != rnp->completed) {

@@ -2194,9 +2194,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
		 * is in progress, so we are done.  When this grace
		 * period ends, our request will be acted upon.
		 */
		trace_rcu_nocb_grace_period(rdp->rsp->name,
					    rnp->gpnum, rnp->completed, c,
					    rnp->level, rnp->grplo, rnp->grphi,
		trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
					      rnp->completed, c, rnp->level,
					      rnp->grplo, rnp->grphi,
					      "Startedleaf");
		raw_spin_unlock_irqrestore(&rnp->lock, flags);

@@ -2209,8 +2209,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
		if (rnp != rnp_root)
			raw_spin_lock(&rnp_root->lock); /* irqs disabled. */
		if (rnp_root->gpnum != rnp_root->completed) {
			trace_rcu_nocb_grace_period(rdp->rsp->name,
						    rnp->gpnum, rnp->completed,
			trace_rcu_future_grace_period(rdp->rsp->name,
						      rnp->gpnum,
						      rnp->completed,
						      c, rnp->level,
						      rnp->grplo, rnp->grphi,
						      "Startedleafroot");
@@ -2229,8 +2230,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
			c = rnp_root->completed + 1;
			rnp->n_nocb_gp_requests[c & 0x1]++;
			rnp_root->n_nocb_gp_requests[c & 0x1]++;
			trace_rcu_nocb_grace_period(rdp->rsp->name,
						    rnp->gpnum, rnp->completed,
			trace_rcu_future_grace_period(rdp->rsp->name,
						      rnp->gpnum,
						      rnp->completed,
						      c, rnp->level,
						      rnp->grplo, rnp->grphi,
						      "Startedroot");
@@ -2249,9 +2251,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
	 * Wait for the grace period.  Do so interruptibly to avoid messing
	 * up the load average.
	 */
	trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
				    c, rnp->level, rnp->grplo, rnp->grphi,
				    "StartWait");
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, "StartWait");
	for (;;) {
		wait_event_interruptible(
			rnp->nocb_gp_wq[c & 0x1],
@@ -2259,14 +2261,14 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
		if (likely(d))
			break;
		flush_signals(current);
		trace_rcu_nocb_grace_period(rdp->rsp->name,
		trace_rcu_future_grace_period(rdp->rsp->name,
					      rnp->gpnum, rnp->completed, c,
					    rnp->level, rnp->grplo, rnp->grphi,
					    "ResumeWait");
					      rnp->level, rnp->grplo,
					      rnp->grphi, "ResumeWait");
	}
	trace_rcu_nocb_grace_period(rdp->rsp->name, rnp->gpnum, rnp->completed,
				    c, rnp->level, rnp->grplo, rnp->grphi,
				    "EndWait");
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, "EndWait");
	smp_mb(); /* Ensure that CB invocation happens after GP end. */
}