
Commit b8c7f1dc authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: Fix whitespace inconsistencies
  rcu: Fix thinko, actually initialize full tree
  rcu: Apply results of code inspection of kernel/rcutree_plugin.h
  rcu: Add WARN_ON_ONCE() consistency checks covering state transitions
  rcu: Fix synchronize_rcu() for TREE_PREEMPT_RCU
  rcu: Simplify rcu_read_unlock_special() quiescent-state accounting
  rcu: Add debug checks to TREE_PREEMPT_RCU for premature grace periods
  rcu: Kconfig help needs to say that TREE_PREEMPT_RCU scales down
  rcutorture: Occasionally delay readers enough to make RCU force_quiescent_state
  rcu: Initialize multi-level RCU grace periods holding locks
  rcu: Need to update rnp->gpnum if preemptable RCU is to be reliable
parents f4eccb6d a71fca58
+8 −21
@@ -52,8 +52,13 @@ struct rcu_head {
 };
 
 /* Exported common interfaces */
+#ifdef CONFIG_TREE_PREEMPT_RCU
 extern void synchronize_rcu(void);
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+#define synchronize_rcu synchronize_sched
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 extern void synchronize_rcu_bh(void);
+extern void synchronize_sched(void);
 extern void rcu_barrier(void);
 extern void rcu_barrier_bh(void);
 extern void rcu_barrier_sched(void);
@@ -261,24 +266,6 @@ struct rcu_synchronize {
 
 extern void wakeme_after_rcu(struct rcu_head  *head);
 
-/**
- * synchronize_sched - block until all CPUs have exited any non-preemptive
- * kernel code sequences.
- *
- * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns.  However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
- *
- * This primitive provides the guarantees made by the (now removed)
- * synchronize_kernel() API.  In contrast, synchronize_rcu() only
- * guarantees that rcu_read_lock() sections will have completed.
- * In "classic RCU", these two guarantees happen to be one and
- * the same, but can differ in realtime RCU implementations.
- */
-#define synchronize_sched() __synchronize_sched()
-
 /**
  * call_rcu - Queue an RCU callback for invocation after a grace period.
  * @head: structure to be used for queueing the RCU updates.
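
The practical effect of the new #ifdef block above is that update-side code keeps calling synchronize_rcu() unconditionally; only its mapping changes with the configuration. A minimal, hypothetical updater sketch under that assumption (struct gp_data, global_p and update_gp() are illustrative names, not part of this commit):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct gp_data {
	int value;
};

static struct gp_data *global_p;

/*
 * Hypothetical updater: publish a new copy, then wait for a grace
 * period before freeing the old one.  With CONFIG_TREE_PREEMPT_RCU
 * this calls the real synchronize_rcu(); otherwise the #define in
 * the hunk above turns it into synchronize_sched().
 */
static void update_gp(int new_value)
{
	struct gp_data *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return;
	newp->value = new_value;

	oldp = global_p;		/* updaters assumed serialized by the caller */
	rcu_assign_pointer(global_p, newp);
	synchronize_rcu();		/* wait for pre-existing readers */
	kfree(oldp);
}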
+3 −3
@@ -53,6 +53,8 @@ static inline void __rcu_read_unlock(void)
 	preempt_enable();
 }
 
+#define __synchronize_sched() synchronize_rcu()
+
 static inline void exit_rcu(void)
 {
 }
@@ -68,8 +70,6 @@ static inline void __rcu_read_unlock_bh(void)
 	local_bh_enable();
 }
 
-#define __synchronize_sched() synchronize_rcu()
-
 extern void call_rcu_sched(struct rcu_head *head,
 			   void (*func)(struct rcu_head *rcu));
 
+0 −1
@@ -1755,7 +1755,6 @@ extern cputime_t task_gtime(struct task_struct *p);
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
 #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
-#define RCU_READ_UNLOCK_GOT_QS  (1 << 2) /* CPU has responded to RCU core. */
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
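
These definitions are per-task bit flags consumed by the preemptible-RCU read-unlock path; the merge drops the GOT_QS bit as part of simplifying the quiescent-state accounting. A purely illustrative sketch of how such flags are tested and cleared on the current task (not the actual rcu_read_unlock_special() logic, and report_qs_if_requested() is a hypothetical helper):

#include <linux/sched.h>

/*
 * Hypothetical helper: if the RCU core has asked this task for a
 * quiescent state, acknowledge it and clear the request bit.
 * Illustrates the flag usage only.
 */
static void report_qs_if_requested(void)
{
	if (current->rcu_read_unlock_special & RCU_READ_UNLOCK_NEED_QS)
		current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}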
+2 −1
@@ -331,7 +331,8 @@ config TREE_PREEMPT_RCU
 	  This option selects the RCU implementation that is
 	  designed for very large SMP systems with hundreds or
 	  thousands of CPUs, but for which real-time response
-	  is also required.
+	  is also required.  It also scales down nicely to
+	  smaller systems.
 
 endchoice
 
+45 −3
@@ -74,6 +74,8 @@ void wakeme_after_rcu(struct rcu_head *head)
 	complete(&rcu->completion);
 }
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -87,7 +89,7 @@ void synchronize_rcu(void)
 {
 	struct rcu_synchronize rcu;
 
-	if (rcu_blocking_is_gp())
+	if (!rcu_scheduler_active)
 		return;
 
 	init_completion(&rcu.completion);
@@ -98,6 +100,46 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+/**
+ * synchronize_sched - wait until an rcu-sched grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full rcu-sched
+ * grace period has elapsed, in other words after all currently executing
+ * rcu-sched read-side critical sections have completed.   These read-side
+ * critical sections are delimited by rcu_read_lock_sched() and
+ * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
+ * local_irq_disable(), and so on may be used in place of
+ * rcu_read_lock_sched().
+ *
+ * This means that all preempt_disable code sequences, including NMI and
+ * hardware-interrupt handlers, in progress on entry will have completed
+ * before this primitive returns.  However, this does not guarantee that
+ * softirq handlers will have completed, since in some kernels, these
+ * handlers can run in process context, and can block.
+ *
+ * This primitive provides the guarantees made by the (now removed)
+ * synchronize_kernel() API.  In contrast, synchronize_rcu() only
+ * guarantees that rcu_read_lock() sections will have completed.
+ * In "classic RCU", these two guarantees happen to be one and
+ * the same, but can differ in realtime RCU implementations.
+ */
+void synchronize_sched(void)
+{
+	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu_sched(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched);
+
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
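
As the new kerneldoc spells out, synchronize_sched() pairs with preempt-disabled regions (rcu_read_lock_sched(), preempt_disable(), irq/NMI handlers) rather than with rcu_read_lock() sections. A minimal, hypothetical reader/updater sketch along those lines (example_data, reader() and updater() are illustrative names, and updaters are assumed to be serialized externally):

#include <linux/rcupdate.h>
#include <linux/slab.h>

static int *example_data;

/*
 * rcu-sched reader: any preempt-disabled region is a read-side
 * critical section; rcu_read_lock_sched() makes that explicit.
 */
static int reader(void)
{
	int val = 0;
	int *p;

	rcu_read_lock_sched();		/* disables preemption */
	p = rcu_dereference(example_data);
	if (p)
		val = *p;
	rcu_read_unlock_sched();
	return val;
}

/*
 * rcu-sched updater: publish a new value, wait for all preempt-disabled
 * (and irq/NMI) regions that were in flight, then free the old copy.
 */
static void updater(int new_val)
{
	int *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	int *oldp;

	if (!newp)
		return;
	*newp = new_val;
	oldp = example_data;		/* updaters assumed serialized */
	rcu_assign_pointer(example_data, newp);
	synchronize_sched();		/* waits for rcu-sched readers */
	kfree(oldp);
}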