
Commit 0fa7271a authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fix from Ingo Molnar:
 "A module unload lockdep race fix"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Fix the module unload key range freeing logic
parents 713d25db 35a9393c
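
The race being fixed: lockdep walked its class hash, chain hash and dependency lists completely lock-free on the assumption that entries are only ever added, but module unload removes entries via lockdep_free_key_range(), so a concurrent walker could follow a pointer into memory that was about to be freed and reused. The patch turns every such walk into list_for_each_entry_rcu() executed with interrupts disabled (an RCU-sched read side) and makes the unload path wait with synchronize_sched() before the backing memory may go away. Below is a minimal kernel-style sketch of that pattern, with hypothetical names (my_class, my_lookup, my_zap) standing in for the real lockdep structures; it is not the patched code itself:

/* Sketch only -- hypothetical names; illustrates the RCU-sched pattern. */
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>

struct my_class {
	void			*key;
	struct list_head	hash_entry;
};

static LIST_HEAD(my_classes);
static DEFINE_SPINLOCK(my_lock);	/* writers only; readers rely on RCU-sched */

/*
 * Reader: may run concurrently with removal, so it must be an RCU walk.
 * Disabling interrupts is what makes this an RCU-sched read-side section.
 * As in lockdep, entries live in static storage that is leaked rather than
 * freed, so returning the pointer after the walk is still safe.
 */
static struct my_class *my_lookup(void *key)
{
	struct my_class *class, *ret = NULL;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each_entry_rcu(class, &my_classes, hash_entry) {
		if (class->key == key) {
			ret = class;
			break;
		}
	}
	local_irq_restore(flags);
	return ret;
}

/*
 * Writer: unlink under the lock, then wait for all IRQs-disabled walkers
 * to finish before the caller may free or reuse the key range.
 */
static void my_zap(void *start, unsigned long size)
{
	struct my_class *class, *next;

	spin_lock(&my_lock);
	list_for_each_entry_safe(class, next, &my_classes, hash_entry) {
		if ((unsigned long)class->key - (unsigned long)start < size)
			list_del_rcu(&class->hash_entry);
	}
	spin_unlock(&my_lock);

	synchronize_sched();	/* every pre-existing walker has completed */
	/* only now is it safe for the caller to free/reuse the range */
}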
kernel/locking/lockdep.c +55 −26
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
	if (!new_class->name)
		return 0;

-	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
	hash_head = classhashentry(key);

	/*
-	 * We can walk the hash lockfree, because the hash only
-	 * grows, and we are careful when adding entries to the end:
+	 * We do an RCU walk of the hash, see lockdep_free_key_range().
	 */
-	list_for_each_entry(class, hash_head, hash_entry) {
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return NULL;
+
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
-	unsigned long flags;
+
+	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

-	raw_local_irq_save(flags);
	if (!graph_lock()) {
-		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
-	list_for_each_entry(class, hash_head, hash_entry)
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
+	}

	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
-			raw_local_irq_restore(flags);
			return NULL;
		}
-		raw_local_irq_restore(flags);

		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
		dump_stack();
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)

	if (verbose(class)) {
		graph_unlock();
-		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
		printk("\n");
		dump_stack();

-		raw_local_irq_save(flags);
		if (!graph_lock()) {
-			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
-	raw_local_irq_restore(flags);

out_set_class_cache:
	if (!subclass || force)
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
	entry->distance = distance;
	entry->trace = *trace;
	/*
-	 * Since we never remove from the dependency list, the list can
-	 * be walked lockless by other CPUs, it's only allocation
-	 * that must be protected by the spinlock. But this also means
-	 * we must make new entries visible only once writes to the
-	 * entry become visible - hence the RCU op:
+	 * Both allocation and removal are done under the graph lock; but
+	 * iteration is under RCU-sched; see look_up_lock_class() and
+	 * lockdep_free_key_range().
	 */
	list_add_tail_rcu(&entry->entry, head);

@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
		else
			head = &lock->class->locks_before;

-		list_for_each_entry(entry, head, entry) {
+		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+		list_for_each_entry_rcu(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	list_for_each_entry_rcu(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(chain_lookup_hits);
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
	if (unlikely(!debug_locks))
		return;

-	if (subclass)
+	if (subclass) {
+		unsigned long flags;
+
+		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+			return;
+
+		raw_local_irq_save(flags);
+		current->lockdep_recursion = 1;
 		register_lock_class(lock, subclass, 1);
+		current->lockdep_recursion = 0;
+		raw_local_irq_restore(flags);
+	}
}
EXPORT_SYMBOL_GPL(lockdep_init_map);

@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
	return addr >= start && addr < start + size;
}

+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
void lockdep_free_key_range(void *start, unsigned long size)
{
-	struct lock_class *class, *next;
+	struct lock_class *class;
	struct list_head *head;
	unsigned long flags;
	int i;
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
		head = classhash_table + i;
		if (list_empty(head))
			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
			if (within(class->key, start, size))
				zap_class(class);
			else if (within(class->name, start, size))
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
	if (locked)
		graph_unlock();
	raw_local_irq_restore(flags);

+	/*
+	 * Wait for any possible iterators from look_up_lock_class() to pass
+	 * before continuing to free the memory they refer to.
+	 *
+	 * sync_sched() is sufficient because the read-side is IRQ disable.
+	 */
+	synchronize_sched();
+
+	/*
+	 * XXX at this point we could return the resources to the pool;
+	 * instead we leak them. We would need to change to bitmap allocators
+	 * instead of the linear allocators we have now.
+	 */
}

void lockdep_reset_lock(struct lockdep_map *lock)
{
-	struct lock_class *class, *next;
+	struct lock_class *class;
	struct list_head *head;
	unsigned long flags;
	int i, j;
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
		head = classhash_table + i;
		if (list_empty(head))
			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
			int match = 0;

			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
kernel/module.c +4 −4
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
	kfree(mod->args);
	percpu_modfree(mod);

-	/* Free lock-classes: */
+	/* Free lock-classes; relies on the preceding sync_rcu(). */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
	module_bug_cleanup(mod);
	mutex_unlock(&module_mutex);

-	/* Free lock-classes: */
-	lockdep_free_key_range(mod->module_core, mod->core_size);
-
	/* we can't deallocate the module until we clear memory protection */
	unset_module_init_ro_nx(mod);
	unset_module_core_ro_nx(mod);
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
	synchronize_rcu();
	mutex_unlock(&module_mutex);
 free_module:
+	/* Free lock-classes; relies on the preceding sync_rcu() */
+	lockdep_free_key_range(mod->module_core, mod->core_size);
+
	module_deallocate(mod, info);
 free_copy:
	free_copy(info);
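
The kernel/module.c half of the patch only reorders things so that lockdep_free_key_range() always runs after a synchronize_rcu() that the unload and error paths already perform, which is what the new "relies on the preceding sync_rcu()" comments record. In rough outline, the required ordering looks like the hedged sketch below; module_teardown(), unlink_module() and release_module_memory() are hypothetical helpers, not the real free_module()/load_module() code:

/* Sketch only -- hypothetical helpers; shows the ordering, not the real code. */
static void module_teardown(struct module *mod)
{
	/* 1. make the module unreachable (lists, sysfs, exported symbols) */
	unlink_module(mod);			/* hypothetical helper */

	/* 2. wait out anyone still executing module code or walking
	 *    lockdep's lists with interrupts disabled */
	synchronize_rcu();

	/* 3. only now drop the lock classes whose keys live in module
	 *    memory; lockdep_free_key_range() adds its own
	 *    synchronize_sched() for its RCU walkers */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* 4. finally the memory may be freed and reused */
	release_module_memory(mod);		/* hypothetical helper */
}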