
Commit 9a11b49a authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] lockdep: better lock debugging



Generic lock debugging:

 - generalized lock debugging framework. For example, a bug detected in one
   lock subsystem turns off debugging in all lock subsystems, so a single
   failure does not cascade into follow-up false positives.

 - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from
   the mutex/rtmutex debugging code: it caused way too much prototype
   hackery, and lockdep will give the same information anyway.

 - ability to do silent tests

 - check lock freeing in vfree too.

 - more fine-grained debugging options, to allow distributions to
   turn off more expensive debugging features.

There's no separate 'held mutexes' list anymore - but there's a 'held locks'
stack within lockdep, which unifies deadlock detection across all lock
classes.  (this is independent of the lockdep validation stuff - lockdep first
checks whether we are holding a lock already)
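As a rough illustration (not part of the original commit text), the unified framework hinges on the single global debug_locks flag: a subsystem that detects corruption calls debug_locks_off(), after which the first offender warns and all further lock debugging goes quiet. A minimal sketch of that pattern, using the DEBUG_LOCKS_WARN_ON() macro from the new <linux/debug_locks.h> shown further below; struct foo_lock and check_magic() are hypothetical names, not from this patch:

/*
 * Hedged sketch: how a lock subsystem is expected to use the shared
 * debug_locks machinery.  struct foo_lock and check_magic() are made up;
 * DEBUG_LOCKS_WARN_ON() and debug_locks_off() come from the new
 * <linux/debug_locks.h> added by this patch.
 */
#include <linux/debug_locks.h>

struct foo_lock {
	unsigned long magic;	/* set to (unsigned long)&lock on init */
};

static void check_magic(struct foo_lock *lock)
{
	/*
	 * If the magic is wrong, DEBUG_LOCKS_WARN_ON() calls
	 * debug_locks_off(): the first failure emits a WARN_ON(), and all
	 * lock-debugging checks are disabled from then on, so one corrupted
	 * lock does not produce a cascade of follow-up warnings.
	 */
	DEBUG_LOCKS_WARN_ON(lock->magic != (unsigned long)lock);
}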

Here are the current debugging options:

CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y

which do:

 config DEBUG_MUTEXES
         bool "Mutex debugging, basic checks"

 config DEBUG_LOCK_ALLOC
         bool "Detect incorrect freeing of live mutexes"

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent fb7e4241
+1 −1
@@ -151,7 +151,7 @@ static struct sysrq_key_op sysrq_mountro_op = {
 static void sysrq_handle_showlocks(int key, struct pt_regs *pt_regs,
 				struct tty_struct *tty)
 {
-	mutex_debug_show_all_locks();
+	debug_show_all_locks();
 }
 static struct sysrq_key_op sysrq_showlocks_op = {
 	.handler	= sysrq_handle_showlocks,
+5 −10
@@ -10,14 +10,9 @@
 #ifndef _ASM_GENERIC_MUTEX_NULL_H
 #define _ASM_GENERIC_MUTEX_NULL_H

-/* extra parameter only needed for mutex debugging: */
-#ifndef __IP__
-# define __IP__
-#endif
-
-#define __mutex_fastpath_lock(count, fail_fn)	      fail_fn(count __RET_IP__)
-#define __mutex_fastpath_lock_retval(count, fail_fn)  fail_fn(count __RET_IP__)
-#define __mutex_fastpath_unlock(count, fail_fn)       fail_fn(count __RET_IP__)
+#define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
+#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
+#define __mutex_fastpath_unlock(count, fail_fn)		fail_fn(count)
 #define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 #define __mutex_slowpath_needs_to_unlock()		1

+69 −0
#ifndef __LINUX_DEBUG_LOCKING_H
#define __LINUX_DEBUG_LOCKING_H

extern int debug_locks;
extern int debug_locks_silent;

/*
 * Generic 'turn off all lock debugging' function:
 */
extern int debug_locks_off(void);

/*
 * In the debug case we carry the caller's instruction pointer into
 * other functions, but we dont want the function argument overhead
 * in the nondebug case - hence these macros:
 */
#define _RET_IP_		(unsigned long)__builtin_return_address(0)
#define _THIS_IP_  ({ __label__ __here; __here: (unsigned long)&&__here; })

#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = 0;							\
									\
	if (unlikely(c)) {						\
		if (debug_locks_off())					\
			WARN_ON(1);					\
		__ret = 1;						\
	}								\
	__ret;								\
})

#ifdef CONFIG_SMP
# define SMP_DEBUG_LOCKS_WARN_ON(c)			DEBUG_LOCKS_WARN_ON(c)
#else
# define SMP_DEBUG_LOCKS_WARN_ON(c)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCKING_API_SELFTESTS
  extern void locking_selftest(void);
#else
# define locking_selftest()	do { } while (0)
#endif

#ifdef CONFIG_LOCKDEP
extern void debug_show_all_locks(void);
extern void debug_show_held_locks(struct task_struct *task);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
extern void debug_check_no_locks_held(struct task_struct *task);
#else
static inline void debug_show_all_locks(void)
{
}

static inline void debug_show_held_locks(struct task_struct *task)
{
}

static inline void
debug_check_no_locks_freed(const void *from, unsigned long len)
{
}

static inline void
debug_check_no_locks_held(struct task_struct *task)
{
}
#endif

#endif
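The _RET_IP_/_THIS_IP_ helpers in the new header replace the old __IP__/__IP_DECL__ parameter passing: instead of threading the caller's address through every debug prototype, a subsystem can record it where needed. A hedged usage sketch; struct foo_lock, the acquire_ip field and foo_lock_acquire() are hypothetical, not part of this patch:

/*
 * Hedged sketch: recording the acquisition site with _RET_IP_ instead of
 * passing an extra __IP__ argument through every prototype.
 */
#include <linux/debug_locks.h>

struct foo_lock {
	unsigned long acquire_ip;	/* where the lock was last taken */
};

static void foo_lock_acquire(struct foo_lock *lock)
{
	/* _RET_IP_ expands to __builtin_return_address(0) of this call. */
	lock->acquire_ip = _RET_IP_;
}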
+0 −1
@@ -124,7 +124,6 @@ extern struct group_info init_groups;
	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
	.fs_excl	= ATOMIC_INIT(0),				\
	.pi_lock	= SPIN_LOCK_UNLOCKED,				\
	INIT_RT_MUTEXES(tsk)						\
}


+1 −7
@@ -14,6 +14,7 @@
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
 #include <linux/mutex.h>
+#include <linux/debug_locks.h>

 struct mempolicy;
 struct anon_vma;
@@ -1034,13 +1035,6 @@ static inline void vm_stat_account(struct mm_struct *mm,
 }
 #endif /* CONFIG_PROC_FS */

-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len)
-{
-	mutex_debug_check_no_locks_freed(from, len);
-	rt_mutex_debug_check_no_locks_freed(from, len);
-}
-
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)