
Commit 5e7481a2 authored by Linus Torvalds

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "The main changes relate to making lock_is_held() et al (and external
  wrappers of them) work on const data types - this requires const
  propagation through the depths of lockdep.

  This removes a number of ugly type hacks the external helpers used"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Convert some users to const
  lockdep: Make lockdep checking constant
  lockdep: Assign lock keys on registration
parents b8dbf730 05b93801
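
To illustrate the "ugly type hacks" the message refers to: before this series, a const-correct wrapper had to cast const away before calling into lockdep. A minimal sketch (hypothetical struct obj and helpers, modeled on the net/sock.h change below; not code from this commit):

	/* Before: lock_is_held() took a non-const pointer, forcing the hack. */
	static inline bool obj_is_locked_old(const struct obj *cobj)
	{
		struct obj *obj = (struct obj *)cobj;	/* cast away const */

		return lockdep_is_held(&obj->lock);
	}

	/* After: const propagates through lockdep, so no cast is needed. */
	static inline bool obj_is_locked(const struct obj *obj)
	{
		return lockdep_is_held(&obj->lock);
	}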
Loading
Loading
Loading
Loading
include/linux/backing-dev.h +1 −1
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode)
  * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the
  * associated wb's list_lock.
  */
-static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
+static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
 {
 #ifdef CONFIG_LOCKDEP
 	WARN_ON_ONCE(debug_locks &&
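
With the const-qualified parameter, read-only inode helpers no longer have to shed const to map an inode to its writeback context. A hedged sketch (hypothetical helper, kernel context assumed):

	/* Hypothetical: check an inode's wb association without mutating it. */
	static bool inode_uses_wb(const struct inode *inode,
				  const struct bdi_writeback *wb)
	{
		return inode_to_wb(inode) == wb;
	}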
include/linux/lockdep.h +2 −2
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested,
 /*
  * Same "read" as for lock_acquire(), except -1 means any.
  */
-extern int lock_is_held_type(struct lockdep_map *lock, int read);
+extern int lock_is_held_type(const struct lockdep_map *lock, int read);
 
-static inline int lock_is_held(struct lockdep_map *lock)
+static inline int lock_is_held(const struct lockdep_map *lock)
 {
 	return lock_is_held_type(lock, -1);
 }
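
For reference, read follows lock_acquire(): 0 matches a write/exclusive holder, 1 a read holder, and -1 (what lock_is_held() passes) matches either. A sketch under assumed names (struct my_config and its rwsem are hypothetical; CONFIG_LOCKDEP assumed):

	/* Assert the config is at least read-locked before exposing a field. */
	static int cfg_get_limit(const struct my_config *cfg)
	{
		/* lockdep_is_held_type() wraps lock_is_held_type(); read == 1
		 * means "held for read", -1 would accept any holder. */
		WARN_ON_ONCE(debug_locks &&
			     !lockdep_is_held_type(&cfg->rwsem, 1));
		return cfg->limit;
	}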
include/linux/srcu.h +2 −2
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp);
  * relies on normal RCU, it can be called from the CPU which
  * is in the idle loop from an RCU point of view or offline.
  */
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	if (!debug_lockdep_rcu_enabled())
 		return 1;
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp)
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-static inline int srcu_read_lock_held(struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *sp)
 {
 	return 1;
 }
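
srcu_read_lock_held() is the lockdep-side check behind srcu_dereference(); with the const parameter, a read-side assertion helper can take the srcu_struct as const. A minimal sketch (hypothetical function name):

	/* Complain (under lockdep) if the caller isn't in an SRCU read section. */
	static void assert_foo_srcu_held(const struct srcu_struct *sp)
	{
		RCU_LOCKDEP_WARN(!srcu_read_lock_held(sp),
				 "expected srcu_read_lock() to be held");
	}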
include/net/sock.h +1 −3
@@ -1445,10 +1445,8 @@ do { \
 } while (0)
 
 #ifdef CONFIG_LOCKDEP
-static inline bool lockdep_sock_is_held(const struct sock *csk)
+static inline bool lockdep_sock_is_held(const struct sock *sk)
 {
-	struct sock *sk = (struct sock *)csk;
-
 	return lockdep_is_held(&sk->sk_lock) ||
 	       lockdep_is_held(&sk->sk_lock.slock);
 }
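
This hunk is one of the type hacks the merge message mentions: the wrapper took a const sock only to immediately cast the const away for lockdep_is_held(). With the shadow variable gone, const-qualified callers still work unchanged, e.g. (hypothetical accessor, CONFIG_LOCKDEP assumed):

	/* Read a field that is protected by the socket lock. */
	static inline u32 sk_mark_locked(const struct sock *sk)
	{
		WARN_ON_ONCE(debug_locks && !lockdep_sock_is_held(sk));
		return sk->sk_mark;
	}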
kernel/locking/lockdep.c +47 −42
@@ -648,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
-	bool is_static = false;
 
 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -672,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	}
 
 	/*
-	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself. If the lock is in the per cpu area,
-	 * the canonical address of the lock (per cpu offset removed) is
-	 * used.
+	 * If it is not initialised then it has never been locked,
+	 * so it won't be present in the hash table.
 	 */
-	if (unlikely(!lock->key)) {
-		unsigned long can_addr, addr = (unsigned long)lock;
-
-		if (__is_kernel_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (__is_module_percpu_address(addr, &can_addr))
-			lock->key = (void *)can_addr;
-		else if (static_obj(lock))
-			lock->key = (void *)lock;
-		else
-			return ERR_PTR(-EINVAL);
-		is_static = true;
-	}
+	if (unlikely(!lock->key))
+		return NULL;
 
 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -721,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}
 
-	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+	return NULL;
 }
 
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+	unsigned long can_addr, addr = (unsigned long)lock;
+
+	if (__is_kernel_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (__is_module_percpu_address(addr, &can_addr))
+		lock->key = (void *)can_addr;
+	else if (static_obj(lock))
+		lock->key = (void *)lock;
+	else {
+		/* Debug-check: all keys must be persistent! */
+		debug_locks_off();
+		pr_err("INFO: trying to register non-static key.\n");
+		pr_err("the code is fine but needs lockdep annotation.\n");
+		pr_err("turning off the locking correctness validator.\n");
+		dump_stack();
+		return false;
+	}
+
+	return true;
+}
+
 /*
@@ -739,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
 	class = look_up_lock_class(lock, subclass);
-	if (likely(!IS_ERR_OR_NULL(class)))
+	if (likely(class))
 		goto out_set_class_cache;
 
-	/*
-	 * Debug-check: all keys must be persistent!
-	 */
-	if (IS_ERR(class)) {
-		debug_locks_off();
-		printk("INFO: trying to register non-static key.\n");
-		printk("the code is fine but needs lockdep annotation.\n");
-		printk("turning off the locking correctness validator.\n");
-		dump_stack();
-		return NULL;
+	if (!lock->key) {
+		if (!assign_lock_key(lock))
+			return NULL;
+	} else if (!static_obj(lock->key)) {
+		return NULL;
 	}
 
@@ -3273,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	return 0;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
 
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3482,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+					const struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
 		return 1;
 
 	if (hlock->references) {
-		struct lock_class *class = lock->class_cache[0];
+		const struct lock_class *class = lock->class_cache[0];
 
 		if (!class)
 			class = look_up_lock_class(lock, 0);
@@ -3499,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 		 * Clearly if the lock hasn't been acquired _ever_, we're not
 		 * holding it either, so report failure.
 		 */
-		if (IS_ERR_OR_NULL(class))
+		if (!class)
 			return 0;
 
 		/*
@@ -3724,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	return 1;
 }
 
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
 {
 	struct task_struct *curr = current;
 	int i;
@@ -3938,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
 	unsigned long flags;
 	int ret = 0;
@@ -4295,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		 * If the class exists we look it up and zap it:
 		 */
 		class = look_up_lock_class(lock, j);
-		if (!IS_ERR_OR_NULL(class))
+		if (class)
 			zap_class(class);
 	}
 	/*
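
Finally, a note on what the new assign_lock_key() guards: lock keys must live at persistent addresses, so a dynamically allocated lock cannot use its own address as a key and must be handed a static key by its init helper. A sketch of the well-behaved pattern (hypothetical struct foo, kernel context assumed):

	struct foo {
		spinlock_t lock;
	};

	static struct foo *foo_alloc(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			/* spin_lock_init() expands a static struct lock_class_key
			 * for lockdep; without it, the first acquisition would hit
			 * the "trying to register non-static key" path above. */
			spin_lock_init(&f->lock);
		return f;
	}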