
Commit 9b4d0bab authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  lockdep: fix debug_lock_alloc
  lockdep: increase MAX_LOCKDEP_KEYS
  generic-ipi: fix stack and rcu interaction bug in smp_call_function_mask()
  lockdep: fix overflow in the hlock shrinkage code
  lockdep: rename map_[acquire|release]() => lock_map_[acquire|release]()
  lockdep: handle chains involving classes defined in modules
  mm: fix mm_take_all_locks() locking order
  lockdep: annotate mm_take_all_locks()
  lockdep: spin_lock_nest_lock()
  lockdep: lock protection locks
  lockdep: map_acquire
  lockdep: shrink held_lock structure
  lockdep: re-annotate scheduler runqueues
  lockdep: lock_set_subclass - reset a held lock's subclass
  lockdep: change scheduler annotation
  debug_locks: set oops_in_progress if we will log messages.
  lockdep: fix combinatorial explosion in lock subgraph traversal
parents 7019b1b5 23a0ee90
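
Most of the entries above feed extra nesting information to lockdep. For the "lock_set_subclass - reset a held lock's subclass" and scheduler re-annotation items, the pattern is roughly the following; this is a reconstructed sketch for illustration only (the function name and locks are invented, not code from this merge):

/*
 * Sketch: two locks of the same class were taken, the second with a
 * nested subclass.  After the second lock is dropped, reset the
 * remaining held lock's subclass to 0 so that later dependencies are
 * recorded against the base class again.
 */
static void example_unlock_second(spinlock_t *kept, spinlock_t *dropped)
{
	spin_unlock(dropped);
	lock_set_subclass(&kept->dep_map, 0, _RET_IP_);
}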
+2 −2
@@ -291,7 +291,7 @@ handle_t *journal_start(journal_t *journal, int nblocks)
		goto out;
	}

-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);

out:
	return handle;
@@ -1448,7 +1448,7 @@ int journal_stop(handle_t *handle)
		spin_unlock(&journal->j_state_lock);
	}

-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);

	jbd_free_handle(handle);
	return err;
+2 −2
@@ -301,7 +301,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
		goto out;
	}

-	lock_acquire(&handle->h_lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_map_acquire(&handle->h_lockdep_map);
out:
	return handle;
}
@@ -1279,7 +1279,7 @@ int jbd2_journal_stop(handle_t *handle)
		spin_unlock(&journal->j_state_lock);
	}

-	lock_release(&handle->h_lockdep_map, 1, _THIS_IP_);
+	lock_map_release(&handle->h_lockdep_map);

	jbd2_free_handle(handle);
	return err;
+49 −21
@@ -89,6 +89,7 @@ struct lock_class {

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
+	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
@@ -189,6 +190,14 @@ struct lock_chain {
	u64				chain_key;
};

+#define MAX_LOCKDEP_KEYS_BITS		13
+/*
+ * Subtract one because we offset hlock->class_idx by 1 in order
+ * to make 0 mean no class. This avoids overflowing the class_idx
+ * bitfield and hitting the BUG in hlock_class().
+ */
+#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
+
struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
@@ -205,14 +214,14 @@ struct held_lock {
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
-	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
-
+	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64 				waittime_stamp;
	u64				holdtime_stamp;
#endif
+	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest ontop of process context chains, but we 'separate'
@@ -226,11 +235,11 @@ struct held_lock {
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
-	int				irq_context;
-	int				trylock;
-	int				read;
-	int				check;
-	int				hardirqs_off;
+	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
+	unsigned int trylock:1;
+	unsigned int read:2;        /* see lock_acquire() comment */
+	unsigned int check:2;       /* see lock_acquire() comment */
+	unsigned int hardirqs_off:1;
};

/*
@@ -294,11 +303,15 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-			 int trylock, int read, int check, unsigned long ip);
+			 int trylock, int read, int check,
+			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

+extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
+			      unsigned long ip);
+
# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
@@ -313,8 +326,9 @@ static inline void lockdep_on(void)
{
}

-# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
+# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
@@ -400,9 +414,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
-#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
@@ -412,11 +428,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
-#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
@@ -427,9 +443,9 @@ static inline void print_irqtrace_events(struct task_struct *curr)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
-#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
@@ -439,11 +455,11 @@ static inline void print_irqtrace_events(struct task_struct *curr)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
-#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
-#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
@@ -452,4 +468,16 @@ static inline void print_irqtrace_events(struct task_struct *curr)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# ifdef CONFIG_PROVE_LOCKING
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+# else
+#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+# endif
+# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
+#else
+# define lock_map_acquire(l)			do { } while (0)
+# define lock_map_release(l)			do { } while (0)
+#endif
+
#endif /* __LINUX_LOCKDEP_H */
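
The lock_map_acquire()/lock_map_release() helpers defined at the end of this header are what the jbd/jbd2 hunks above switch to. As a hedged illustration (the struct and function names below are invented, not part of this merge), a subsystem can give a non-lock object a lockdep identity and annotate its lifetime as a pseudo-lock:

#include <linux/lockdep.h>

struct my_handle {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* lockdep identity of the handle */
#endif
	/* ... real handle state ... */
};

static struct lock_class_key my_handle_key;

static void my_handle_init(struct my_handle *h)
{
	lockdep_init_map(&h->dep_map, "my_handle", &my_handle_key, 0);
}

static void my_handle_start(struct my_handle *h)
{
	/* previously open-coded as lock_acquire(..., 0, 0, 0, 2, _THIS_IP_) */
	lock_map_acquire(&h->dep_map);
}

static void my_handle_stop(struct my_handle *h)
{
	lock_map_release(&h->dep_map);
}

Under CONFIG_PROVE_LOCKING the helper expands to a full-validation lock_acquire() with the new NULL nest_lock argument, matching the hunk above.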
+1 −1
@@ -117,7 +117,7 @@ extern int rcu_needs_cpu(int cpu);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
+6 −0
@@ -183,8 +183,14 @@ do { \

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock)				\
+	 do {								\
+		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+		 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+	 } while (0)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
#endif

#define write_lock(lock)		_write_lock(lock)
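
The spin_lock_nest_lock() wrapper added here is the "lock protection lock" annotation from the shortlog (the mm_take_all_locks() entries are the in-tree user it was written for). A hedged sketch of the intended pattern follows; the parent/child structures are invented for illustration:

#include <linux/spinlock.h>
#include <linux/list.h>

struct child {
	spinlock_t		lock;
	struct list_head	node;
};

struct parent {
	spinlock_t		lock;		/* serializes taking every child->lock */
	struct list_head	children;
};

/*
 * Take every child lock while holding the parent lock.  The nest_lock
 * argument tells lockdep that p->lock protects these acquisitions, so
 * repeatedly locking the same class here is not flagged as a deadlock.
 * (A matching teardown would unlock the children and then p->lock.)
 */
static void take_all_child_locks(struct parent *p)
{
	struct child *c;

	spin_lock(&p->lock);
	list_for_each_entry(c, &p->children, node)
		spin_lock_nest_lock(&c->lock, &p->lock);
}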