Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 5b4c11d5 authored by Paul E. McKenney
Browse files

rcu: Add leaf-node macros



This commit adds rcu_first_leaf_node() that returns a pointer to
the first leaf rcu_node structure in the specified RCU flavor and an
rcu_is_leaf_node() that returns true iff the specified rcu_node structure
is a leaf.  This commit also uses these macros where appropriate.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
parent 4317228a
Loading
Loading
Loading
Loading
+8 −3
Original line number Original line Diff line number Diff line
@@ -270,6 +270,12 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
	}
	}
}
}


/*
 * Return a pointer to the first leaf rcu_node structure of the specified
 * RCU flavor (rsp).  The leaves form the last level of the rcu_node tree,
 * so the first leaf is the start of level rcu_num_lvls - 1.
 */
#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])

/*
 * Is the specified rcu_node structure a leaf?  True iff its ->level is
 * the last (rcu_num_lvls - 1) level of the rcu_node tree.
 */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/*
/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 * specified rcu_state structure.
@@ -284,8 +290,7 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rnp); (rnp)++)
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)


/*
/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -294,7 +299,7 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 * It is still a leaf node, even if it is also the root node.
 * It is still a leaf node, even if it is also the root node.
 */
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	for ((rnp) = rcu_first_leaf_node(rsp); \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)


/*
/*
+2 −2
Original line number Original line Diff line number Diff line
@@ -2398,7 +2398,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
			return;
			return;
		}
		}
		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
			     rcu_preempt_blocked_readers_cgp(rnp));
			     rcu_preempt_blocked_readers_cgp(rnp));
		rnp->qsmask &= ~mask;
		rnp->qsmask &= ~mask;
		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
@@ -4056,7 +4056,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)


	init_swait_queue_head(&rsp->gp_wq);
	init_swait_queue_head(&rsp->gp_wq);
	init_swait_queue_head(&rsp->expedited_wq);
	init_swait_queue_head(&rsp->expedited_wq);
	rnp = rsp->level[rcu_num_lvls - 1];
	rnp = rcu_first_leaf_node(rsp);
	for_each_possible_cpu(i) {
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
		while (i > rnp->grphi)
			rnp++;
			rnp++;
+2 −2
Original line number Original line Diff line number Diff line
@@ -182,7 +182,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)


	raw_lockdep_assert_held_rcu_node(rnp);
	raw_lockdep_assert_held_rcu_node(rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(rdp->mynode != rnp);
	WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
	WARN_ON_ONCE(!rcu_is_leaf_node(rnp));


	/*
	/*
	 * Decide where to queue the newly blocked task.  In theory,
	 * Decide where to queue the newly blocked task.  In theory,
@@ -533,7 +533,7 @@ void rcu_read_unlock_special(struct task_struct *t)
		rnp = t->rcu_blocked_node;
		rnp = t->rcu_blocked_node;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
		WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
		empty_exp = sync_rcu_preempt_exp_done(rnp);
		empty_exp = sync_rcu_preempt_exp_done(rnp);
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */