Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f5d2a045 authored by Paul E. McKenney's avatar Paul E. McKenney
Browse files

Merge branches 'doc.2014.04.29a', 'fixes.2014.04.29a' and 'torture.2014.05.14a' into HEAD

doc.2014.04.29a:  Documentation updates.
fixes.2014.04.29a:  Miscellaneous fixes.
torture.2014.05.14a:  RCU/Lock torture tests.
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -639,7 +639,7 @@ do { \
#  define raw_cpu_add_return_8(pcp, val)	raw_cpu_generic_add_return(pcp, val)
# endif
# define raw_cpu_add_return(pcp, val)	\
	__pcpu_size_call_return2(raw_add_return_, pcp, val)
	__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
#endif

#define raw_cpu_sub_return(pcp, val)	raw_cpu_add_return(pcp, -(typeof(pcp))(val))
+59 −1
Original line number Diff line number Diff line
@@ -44,6 +44,7 @@
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/percpu.h>
#include <asm/barrier.h>

extern int rcu_expedited; /* for sysctl */
@@ -51,7 +52,17 @@ extern int rcu_expedited; /* for sysctl */
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

/*
 * Identifies which RCU flavor a torture test is exercising; passed as the
 * test_type argument to rcutorture_get_gp_data().
 */
enum rcutorture_type {
	RCU_FLAVOR,		/* Vanilla RCU. */
	RCU_BH_FLAVOR,		/* rcu_bh flavor. */
	RCU_SCHED_FLAVOR,	/* rcu_sched flavor. */
	SRCU_FLAVOR,		/* Sleepable RCU. */
	INVALID_RCU_FLAVOR	/* Sentinel: no valid flavor selected. */
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -60,6 +71,15 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
			       unsigned long c_old,
			       unsigned long c);
#else
/*
 * Stub for kernels built without tree RCU: there is no grace-period state
 * to report, so zero all three output parameters.
 */
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
/* No-op stub for kernels built without tree RCU. */
static inline void rcutorture_record_test_transition(void)
{
}
@@ -267,6 +287,41 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
bool __rcu_is_watching(void);
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */

/*
 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
 */

#define RCU_COND_RESCHED_LIM 256	/* ms vs. 100s of ms. */
DECLARE_PER_CPU(int, rcu_cond_resched_count);
void rcu_resched(void);

/*
 * Is it time to report RCU quiescent states?
 *
 * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
 * increment some random CPU's count, and possibly also load the result from
 * yet another CPU's count.  We might even clobber some other CPU's attempt
 * to zero its counter.  This is all OK because the goal is not precision,
 * but rather reasonable amortization of rcu_note_context_switch() overhead
 * and extremely high probability of avoiding RCU CPU stall warnings.
 * Note that this function would have to be preempted in just the wrong
 * place, many thousands of times in a row, for anything bad to happen.
 */
static inline bool rcu_should_resched(void)
{
	/* Racy per-CPU increment; see the precision caveats above. */
	return raw_cpu_inc_return(rcu_cond_resched_count) >=
	       RCU_COND_RESCHED_LIM;
}

/*
 * Report quiescent states to RCU if it is time to do so.
 */
static inline void rcu_cond_resched(void)
{
	/* Reaching the threshold is the rare case, hence unlikely(). */
	if (unlikely(rcu_should_resched()))
		rcu_resched();
}

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -328,7 +383,7 @@ extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;
extern int debug_lockdep_rcu_enabled(void);
int debug_lockdep_rcu_enabled(void);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
@@ -949,6 +1004,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
+4 −0
Original line number Diff line number Diff line
@@ -119,6 +119,10 @@ static inline void rcu_sched_force_quiescent_state(void)
{
}

/*
 * No-op stub: nothing to show in this configuration (presumably the
 * !TREE_RCU build, which has no grace-period kthreads — confirm against
 * the enclosing #ifdef context).
 */
static inline void show_rcu_gp_kthreads(void)
{
}

/* No-op stub: no RCU CPU stall-warning state to reset in this configuration. */
static inline void rcu_cpu_stall_reset(void)
{
}
+1 −0
Original line number Diff line number Diff line
@@ -84,6 +84,7 @@ extern unsigned long rcutorture_vernum;
long rcu_batches_completed(void);
long rcu_batches_completed_bh(void);
long rcu_batches_completed_sched(void);
void show_rcu_gp_kthreads(void);

void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
+1 −7
Original line number Diff line number Diff line
@@ -49,12 +49,6 @@
#define VERBOSE_TOROUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)

/* Definitions for a non-string torture-test module parameter. */
#define torture_parm(type, name, init, msg) \
	static type name = init; \
	module_param(name, type, 0444); \
	MODULE_PARM_DESC(name, msg);

/* Definitions for online/offline exerciser. */
int torture_onoff_init(long ooholdoff, long oointerval);
char *torture_onoff_stats(char *page);
@@ -81,7 +75,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);

/* Initialization and cleanup. */
void torture_init_begin(char *ttype, bool v, int *runnable);
bool torture_init_begin(char *ttype, bool v, int *runnable);
void torture_init_end(void);
bool torture_cleanup(void);
bool torture_must_stop(void);
Loading