
Commit a63f38cc authored by Andrew Morton, committed by Ingo Molnar

locking/lockdep: Convert hash tables to hlists



Mike said:

: CONFIG_UBSAN_ALIGNMENT breaks the x86-64 kernel with lockdep enabled, i.e. a
: kernel with CONFIG_UBSAN_ALIGNMENT=y fails to load without even an error
: message.
:
: The problem is that ubsan callbacks use spinlocks and might be called
: before lockdep is initialized.  In particular, this line in the
: reserve_ebda_region function causes the problem:
:
: lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
:
: If I put lockdep_init() before the reserve_ebda_region call in
: x86_64_start_reservations, the kernel loads well.

Fix this ordering issue permanently: change lockdep so that it uses hlists
for the hash tables.  Unlike a list_head, an hlist_head is in its
initialized state when it is all-zeroes, so lockdep is ready for operation
immediately upon boot - lockdep_init() need not have run.

The patch will also save some memory: an hlist_head is a single pointer where a
list_head is two, so the classhash_table[] and chainhash_table[] arrays shrink
to half their size.
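
For illustration only (not part of the patch), here is a minimal userspace
sketch of the property the fix relies on. The struct layouts below are
simplified copies of the kernel's list types; nothing in this program comes
from the patch itself:

  #include <stdio.h>
  #include <string.h>

  /* Simplified copies of the kernel's list heads (layout only). */
  struct list_head  { struct list_head *next, *prev; };    /* two pointers */
  struct hlist_node { struct hlist_node *next, **pprev; };
  struct hlist_head { struct hlist_node *first; };         /* one pointer  */

  int main(void)
  {
  	struct list_head  lh;
  	struct hlist_head hh;

  	memset(&lh, 0, sizeof(lh));
  	memset(&hh, 0, sizeof(hh));

  	/* An all-zero hlist_head already denotes an empty list (first == NULL),
  	 * so a zeroed (.bss) hash table is usable before any init call runs.
  	 * An all-zero list_head is not a valid empty list: it must first be
  	 * initialized so that next and prev point back at the head itself. */
  	printf("zeroed hlist_head empty? %s\n", hh.first == NULL ? "yes" : "no");
  	printf("zeroed list_head  empty? %s\n", lh.next == &lh ? "yes" : "no");

  	/* The memory saving: each hash bucket shrinks from two pointers to one. */
  	printf("sizeof(struct list_head) = %zu, sizeof(struct hlist_head) = %zu\n",
  	       sizeof(struct list_head), sizeof(struct hlist_head));
  	return 0;
  }

Since the kernel's hash-table arrays live in .bss and are therefore all-zeroes
at boot, switching the buckets to hlist_head makes them valid empty lists
before lockdep_init() has had a chance to run.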

Probably lockdep_init() and lockdep_initialized can be done away with now.

Suggested-by: Mike Krinkin <krinkin.m.u@gmail.com>
Reported-by: Mike Krinkin <krinkin.m.u@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: mm-commits@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3aa6b46c
include/linux/lockdep.h  +2 −2
@@ -66,7 +66,7 @@ struct lock_class {
	/*
	 * class-hash:
	 */
-	struct list_head		hash_entry;
+	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
-	struct list_head		entry;
+	struct hlist_node		entry;
	u64				chain_key;
};

kernel/locking/lockdep.c  +19 −23
@@ -292,7 +292,7 @@ LIST_HEAD(all_lock_classes);
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@ static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return NULL;

-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key) {
			/*
			 * Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@ static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
-	struct list_head *hash_head;
+	struct hlist_head *hash_head;
	struct lock_class *class;

	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
-	list_for_each_entry_rcu(class, hash_head, hash_entry) {
+	hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
		if (class->key == key)
			goto out_unlock_set;
	}
@@ -805,7 +805,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
-	list_add_tail_rcu(&class->hash_entry, hash_head);
+	hlist_add_head_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
@@ -2021,7 +2021,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
				     u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
-	struct list_head *hash_head = chainhashentry(chain_key);
+	struct hlist_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	struct held_lock *hlock_curr;
	int i, j;
@@ -2037,7 +2037,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
-	list_for_each_entry_rcu(chain, hash_head, entry) {
+	hlist_for_each_entry_rcu(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(chain_lookup_hits);
@@ -2061,7 +2061,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	hlist_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			graph_unlock();
			goto cache_hit;
@@ -2095,7 +2095,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
		}
		chain_hlocks[chain->base + j] = class - lock_classes;
	}
-	list_add_tail_rcu(&chain->entry, hash_head);
+	hlist_add_head_rcu(&chain->entry, hash_head);
	debug_atomic_inc(chain_lookup_misses);
	inc_chains();

@@ -3879,7 +3879,7 @@ void lockdep_reset(void)
	nr_process_chains = 0;
	debug_locks = 1;
	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);
	raw_local_irq_restore(flags);
}

@@ -3898,7 +3898,7 @@ static void zap_class(struct lock_class *class)
	/*
	 * Unhash the class and remove it from the all_lock_classes list:
	 */
-	list_del_rcu(&class->hash_entry);
+	hlist_del_rcu(&class->hash_entry);
	list_del_rcu(&class->lock_entry);

	RCU_INIT_POINTER(class->key, NULL);
@@ -3921,7 +3921,7 @@ static inline int within(const void *addr, void *start, unsigned long size)
void lockdep_free_key_range(void *start, unsigned long size)
{
	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
	unsigned long flags;
	int i;
	int locked;
@@ -3934,9 +3934,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
	 */
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
			if (within(class->key, start, size))
				zap_class(class);
			else if (within(class->name, start, size))
@@ -3966,7 +3964,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
void lockdep_reset_lock(struct lockdep_map *lock)
{
	struct lock_class *class;
-	struct list_head *head;
+	struct hlist_head *head;
	unsigned long flags;
	int i, j;
	int locked;
@@ -3991,9 +3989,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
	locked = graph_lock();
	for (i = 0; i < CLASSHASH_SIZE; i++) {
		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_rcu(class, head, hash_entry) {
+		hlist_for_each_entry_rcu(class, head, hash_entry) {
			int match = 0;

			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4031,10 +4027,10 @@ void lockdep_init(void)
		return;

	for (i = 0; i < CLASSHASH_SIZE; i++)
-		INIT_LIST_HEAD(classhash_table + i);
+		INIT_HLIST_HEAD(classhash_table + i);

	for (i = 0; i < CHAINHASH_SIZE; i++)
-		INIT_LIST_HEAD(chainhash_table + i);
+		INIT_HLIST_HEAD(chainhash_table + i);

	lockdep_initialized = 1;
}