Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4e3eaddd authored by Linus Torvalds
Browse files

Merge branch 'core-fixes-for-linus' of...

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  locking: Make sparse work with inline spinlocks and rwlocks
  x86/mce: Fix RCU lockdep splats
  rcu: Increase RCU CPU stall timeouts if PROVE_RCU
  ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
  rcu: Suppress RCU lockdep warnings during early boot
  rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare()
  rcu: Suppress __mpol_dup() false positive from RCU lockdep
  rcu: Make rcu_read_lock_sched_held() handle !PREEMPT
  rcu: Add control variables to lockdep_rcu_dereference() diagnostics
  rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU
  rcu: Use wrapper function instead of exporting tasklist_lock
  sched, rcu: Fix rcu_dereference() for RCU-lockdep
  rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use
  rcu: Fix holdoff for accelerated GPs for last non-dynticked CPU
  x86/gart: Unexport gart_iommu_aperture

Fix trivial conflicts in kernel/trace/ftrace.c
parents 8655e7e3 b97c4bc1
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -31,7 +31,6 @@
#include <asm/x86_init.h>

int gart_iommu_aperture;
-EXPORT_SYMBOL_GPL(gart_iommu_aperture);
int gart_iommu_aperture_disabled __initdata;
int gart_iommu_aperture_allowed __initdata;

+8 −3
Original line number Diff line number Diff line
@@ -46,6 +46,11 @@

#include "mce-internal.h"

+#define rcu_dereference_check_mce(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&mce_read_mutex))

#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>

@@ -158,7 +163,7 @@ void mce_log(struct mce *mce)
	mce->finished = 0;
	wmb();
	for (;;) {
-		entry = rcu_dereference(mcelog.next);
+		entry = rcu_dereference_check_mce(mcelog.next);
		for (;;) {
			/*
			 * When the buffer fills up discard new entries.
@@ -1500,7 +1505,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
		return -ENOMEM;

	mutex_lock(&mce_read_mutex);
-	next = rcu_dereference(mcelog.next);
+	next = rcu_dereference_check_mce(mcelog.next);

	/* Only supports full reads right now */
	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1570,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
static unsigned int mce_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &mce_wait, wait);
-	if (rcu_dereference(mcelog.next))
+	if (rcu_dereference_check_mce(mcelog.next))
		return POLLIN | POLLRDNORM;
	return 0;
}
+1 −1
Original line number Diff line number Diff line
@@ -280,7 +280,7 @@ static inline void put_cred(const struct cred *_cred)
 * task or by holding tasklist_lock to prevent it from being unlinked.
 */
#define __task_cred(task) \
-	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_is_held(&tasklist_lock))))
+	((const struct cred *)(rcu_dereference_check((task)->real_cred, rcu_read_lock_held() || lockdep_tasklist_lock_is_held())))

/**
 * get_task_cred - Get another task's objective credentials
+36 −9
Original line number Diff line number Diff line
@@ -101,6 +101,11 @@ extern struct lockdep_map rcu_sched_lock_map;
# define rcu_read_release_sched() \
		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)

+static inline int debug_lockdep_rcu_enabled(void)
+{
+	return likely(rcu_scheduler_active && debug_locks);
+}

/**
 * rcu_read_lock_held - might we be in RCU read-side critical section?
 *
@@ -108,12 +113,14 @@ extern struct lockdep_map rcu_sched_lock_map;
 * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
 */
static inline int rcu_read_lock_held(void)
{
-	if (debug_locks)
-		return lock_is_held(&rcu_lock_map);
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_lock_map);
}

/**
@@ -123,12 +130,14 @@ static inline int rcu_read_lock_held(void)
 * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
 * this assumes we are in an RCU-bh read-side critical section unless it can
 * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
 */
static inline int rcu_read_lock_bh_held(void)
{
-	if (debug_locks)
-		return lock_is_held(&rcu_bh_lock_map);
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_bh_lock_map);
}

/**
@@ -139,15 +148,26 @@ static inline int rcu_read_lock_bh_held(void)
 * this assumes we are in an RCU-sched read-side critical section unless it
 * can prove otherwise.  Note that disabling of preemption (including
 * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
 */
+#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

+	if (!debug_lockdep_rcu_enabled())
+		return 1;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+	return lockdep_opinion || preempt_count() != 0;
}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+	return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

@@ -168,10 +188,17 @@ static inline int rcu_read_lock_bh_held(void)
	return 1;
}

+#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
-	return preempt_count() != 0 || !rcu_scheduler_active;
+	return !rcu_scheduler_active || preempt_count() != 0;
}
+#else /* #ifdef CONFIG_PREEMPT */
+static inline int rcu_read_lock_sched_held(void)
+{
+	return 1;
+}
+#endif /* #else #ifdef CONFIG_PREEMPT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

@@ -188,7 +215,7 @@ static inline int rcu_read_lock_sched_held(void)
 */
#define rcu_dereference_check(p, c) \
	({ \
-		if (debug_locks && !(c)) \
+		if (debug_lockdep_rcu_enabled() && !(c)) \
			lockdep_rcu_dereference(__FILE__, __LINE__); \
		rcu_dereference_raw(p); \
	})
+10 −10
Original line number Diff line number Diff line
@@ -29,25 +29,25 @@ do { \
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
-extern void do_raw_read_lock(rwlock_t *lock);
+extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
 extern int do_raw_read_trylock(rwlock_t *lock);
-extern void do_raw_read_unlock(rwlock_t *lock);
-extern void do_raw_write_lock(rwlock_t *lock);
+extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
+extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
 extern int do_raw_write_trylock(rwlock_t *lock);
-extern void do_raw_write_unlock(rwlock_t *lock);
+extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
-# define do_raw_read_lock(rwlock)	arch_read_lock(&(rwlock)->raw_lock)
+# define do_raw_read_lock(rwlock)	do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_lock_flags(lock, flags) \
-		arch_read_lock_flags(&(lock)->raw_lock, *(flags))
+		do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock)	arch_read_trylock(&(rwlock)->raw_lock)
-# define do_raw_read_unlock(rwlock)	arch_read_unlock(&(rwlock)->raw_lock)
-# define do_raw_write_lock(rwlock)	arch_write_lock(&(rwlock)->raw_lock)
+# define do_raw_read_unlock(rwlock)	do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_lock(rwlock)	do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_lock_flags(lock, flags) \
-		arch_write_lock_flags(&(lock)->raw_lock, *(flags))
+		do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock)	arch_write_trylock(&(rwlock)->raw_lock)
-# define do_raw_write_unlock(rwlock)	arch_write_unlock(&(rwlock)->raw_lock)
+# define do_raw_write_unlock(rwlock)	do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif

#define read_can_lock(rwlock)		arch_read_can_lock(&(rwlock)->raw_lock)
Loading