Commit 28ecd580 authored by Paul E. McKenney, committed by Ingo Molnar

rcu: Add WARN_ON_ONCE() consistency checks covering state transitions



o Verify that qsmask bits stay clear through GP
  initialization.

o Verify that cpu_quiet_msk_finish() is never invoked unless
  there actually is an RCU grace period in progress.

o Verify that all internal-node rcu_node structures have empty
  blocked_tasks[] lists.

o Verify that child rcu_node structure's bits remain clear after
  acquiring parent's lock.
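
Each of the checks above is expressed as a WARN_ON_ONCE(): the condition is evaluated on every pass, but the splat is emitted only the first time it fails, so a violated invariant on a hot path cannot flood the console. As a rough illustration only, here is a minimal userspace sketch of that warn-once pattern; the macro body is hypothetical (the real kernel macro additionally dumps a stack trace and taints the kernel) and relies on GCC statement expressions:

#include <stdio.h>

/*
 * Userspace approximation of WARN_ON_ONCE(): check the condition every
 * time, but warn only on the first failure.  The per-call-site static
 * flag mirrors what the kernel macro does.
 */
#define WARN_ON_ONCE(cond)						\
	({								\
		static int warned;					\
		int c = !!(cond);					\
		if (c && !warned) {					\
			warned = 1;					\
			fprintf(stderr, "WARNING: %s:%d: %s\n",		\
				__FILE__, __LINE__, #cond);		\
		}							\
		c;							\
	})

int main(void)
{
	unsigned long qsmask = 0x5;		/* bits wrongly left set */

	for (int i = 0; i < 3; i++)
		WARN_ON_ONCE(qsmask != 0);	/* warns once, not three times */
	return 0;
}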

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12532926191947-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 16e30811
+9 −4
@@ -628,8 +628,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
 	/* Special-case the common single-level case. */
 	if (NUM_RCU_NODES == 1) {
-		rnp->qsmask = rnp->qsmaskinit;
 		rcu_preempt_check_blocked_tasks(rnp);
+		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -662,8 +662,8 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	rnp_end = &rsp->node[NUM_RCU_NODES];
 	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
 		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
 		rcu_preempt_check_blocked_tasks(rnp);
+		rnp_cur->qsmask = rnp_cur->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
 		spin_unlock(&rnp_cur->lock);	/* irqs already disabled. */
 	}
@@ -708,6 +708,7 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 	__releases(rnp->lock)
 {
+	WARN_ON_ONCE(rsp->completed == rsp->gpnum);
 	rsp->completed = rsp->gpnum;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
@@ -725,6 +726,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 	      unsigned long flags)
 	__releases(rnp->lock)
 {
+	struct rcu_node *rnp_c;
+
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
 		if (!(rnp->qsmask & mask)) {
@@ -748,8 +751,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		spin_unlock_irqrestore(&rnp->lock, flags);
+		rnp_c = rnp;
 		rnp = rnp->parent;
 		spin_lock_irqsave(&rnp->lock, flags);
+		WARN_ON_ONCE(rnp_c->qsmask);
 	}
 
 	/*
@@ -858,7 +863,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	spin_lock_irqsave(&rsp->onofflock, flags);
 
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-	rnp = rdp->mynode;
+	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
@@ -867,7 +872,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp);
+		rcu_preempt_offline_tasks(rsp, rnp, rdp);
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
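
The cpu_quiet_msk() hunks above implement the "child bits remain clear after acquiring parent's lock" check: the rootward walk drops the child's lock before taking the parent's, and the new rnp_c bookkeeping lets the code assert, once the parent's lock is held, that the child's qsmask did not become non-zero in the unlocked window. Below is a minimal userspace sketch of that hand-over-hand walk, with hypothetical names (struct node, quiet_msk) and a plain assert() standing in for WARN_ON_ONCE():

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

/* Illustrative stand-in for the rcu_node hierarchy; field names follow
 * the diff, but this is a sketch, not kernel code. */
struct node {
	pthread_mutex_t lock;
	unsigned long qsmask;	/* children still owing a quiescent state */
	unsigned long grpmask;	/* this node's bit in its parent's qsmask */
	struct node *parent;
};

/* Clear `mask` at this level; when a level empties, propagate rootward.
 * Locking is hand-over-hand upward: the child's lock is released before
 * the parent's is taken, so after acquiring the parent we assert that
 * the child's mask stayed clear in the unlocked window -- the invariant
 * the new WARN_ON_ONCE(rnp_c->qsmask) enforces. */
static void quiet_msk(struct node *rnp, unsigned long mask)
{
	struct node *rnp_c;

	pthread_mutex_lock(&rnp->lock);
	for (;;) {
		rnp->qsmask &= ~mask;
		if (rnp->qsmask != 0 || rnp->parent == NULL) {
			pthread_mutex_unlock(&rnp->lock);
			return;	/* siblings still pending, or at root */
		}
		mask = rnp->grpmask;
		pthread_mutex_unlock(&rnp->lock);
		rnp_c = rnp;
		rnp = rnp->parent;
		pthread_mutex_lock(&rnp->lock);
		assert(rnp_c->qsmask == 0);	/* must not reappear */
	}
}

int main(void)
{
	struct node root = { PTHREAD_MUTEX_INITIALIZER, 0x1, 0x0, NULL };
	struct node leaf = { PTHREAD_MUTEX_INITIALIZER, 0x1, 0x1, &root };

	quiet_msk(&leaf, 0x1);	/* leaf empties; report reaches the root */
	return root.qsmask == 0 ? 0 : 1;
}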
+14 −6
@@ -206,7 +206,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
 		 */
 		if (!empty && rnp->qsmask == 0 &&
 		    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
-			t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+			struct rcu_node *rnp_p;
+
 			if (rnp->parent == NULL) {
 				/* Only one rcu_node in the tree. */
 				cpu_quiet_msk_finish(&rcu_preempt_state, flags);
@@ -215,9 +216,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			/* Report up the rest of the hierarchy. */
 			mask = rnp->grpmask;
 			spin_unlock_irqrestore(&rnp->lock, flags);
-			rnp = rnp->parent;
-			spin_lock_irqsave(&rnp->lock, flags);
-			cpu_quiet_msk(mask, &rcu_preempt_state, rnp, flags);
+			rnp_p = rnp->parent;
+			spin_lock_irqsave(&rnp_p->lock, flags);
+			WARN_ON_ONCE(rnp->qsmask);
+			cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags);
 			return;
 		}
 		spin_unlock(&rnp->lock);
@@ -278,6 +280,7 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
 	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+	WARN_ON_ONCE(rnp->qsmask);
 }
 
 /*
@@ -302,7 +305,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * The caller must hold rnp->lock with irqs disabled.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp)
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
@@ -314,6 +318,9 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
 		return;  /* Shouldn't happen: at least one CPU online. */
 	}
+	WARN_ON_ONCE(rnp != rdp->mynode &&
+		     (!list_empty(&rnp->blocked_tasks[0]) ||
+		      !list_empty(&rnp->blocked_tasks[1])));
 
 	/*
 	 * Move tasks up to root rcu_node.  Rely on the fact that the
@@ -489,7 +496,8 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * tasks that were blocked within RCU read-side critical sections.
  */
 static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
-				      struct rcu_node *rnp)
+				      struct rcu_node *rnp,
+				      struct rcu_data *rdp)
 {
 }
 
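
The signature change above is what makes the "internal nodes have empty blocked_tasks[] lists" check possible: passing rdp lets rcu_preempt_offline_tasks() compare each node against the outgoing CPU's own leaf (rdp->mynode), the only node that may legitimately still hold blocked readers; the second, empty definition is the non-preemptible stub, which must change in signature lockstep so the single call site in __rcu_offline_cpu() builds in both configurations. A minimal sketch of the invariant itself, with hypothetical names (struct rnode, check_offline_node) and booleans standing in for the two lists:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Minimal stand-ins; the invariant, not the data structure, is the point. */
struct rnode {
	bool blocked[2];	/* stand-in for the two blocked_tasks[] lists */
	struct rnode *parent;
};

/* Only the outgoing CPU's leaf may hold blocked readers; every node
 * above it must already be empty -- the condition the new
 * WARN_ON_ONCE() in rcu_preempt_offline_tasks() checks. */
static void check_offline_node(const struct rnode *rnp,
			       const struct rnode *mynode)
{
	assert(rnp == mynode || (!rnp->blocked[0] && !rnp->blocked[1]));
}

int main(void)
{
	struct rnode root = { { false, false }, NULL };
	struct rnode leaf = { { true, false }, &root };

	/* Walk leaf-to-root as __rcu_offline_cpu() does. */
	for (const struct rnode *rnp = &leaf; rnp; rnp = rnp->parent)
		check_offline_node(rnp, &leaf);
	return 0;
}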