
Commit d6d897ce authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] lockdep: core, reduce per-lock class-cache size



lockdep_map is embedded into every lock, which blows up data structure
sizes all around the kernel.  Reduce the class-cache to cover the default
class only - that is what is used in 99.9% of the cases, and even if we
don't have a class cached, the lookup in the class-hash is lockless.

This change reduces the per-lock dep_map overhead by 56 bytes on 64-bit
platforms and by 28 bytes on 32-bit platforms.
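
The 56/28 byte figures follow directly from the layout change: assuming
MAX_LOCKDEP_SUBCLASSES == 8 as in this kernel, the old per-lock array holds
8 class pointers where the new layout holds one, so 7 pointers are saved per
lock - 56 bytes with 8-byte pointers, 28 bytes with 4-byte pointers.  The
stand-alone sketch below is illustration only, not part of the patch; the
lockdep_map_old/lockdep_map_new names are made up to mirror the two layouts:

/*
 * Illustration only: simplified stand-ins for the lockdep_map layout
 * before and after this patch.  Assumes MAX_LOCKDEP_SUBCLASSES == 8.
 */
#include <stdio.h>

#define MAX_LOCKDEP_SUBCLASSES 8

struct lock_class_key;			/* opaque, used only as a pointer */
struct lock_class;			/* opaque, used only as a pointer */

struct lockdep_map_old {		/* before: one cache slot per subclass */
	struct lock_class_key	*key;
	struct lock_class	*class[MAX_LOCKDEP_SUBCLASSES];
	const char		*name;
};

struct lockdep_map_new {		/* after: cache the default class only */
	struct lock_class_key	*key;
	struct lock_class	*class_cache;
	const char		*name;
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes, saved: %zu bytes per lock\n",
	       sizeof(struct lockdep_map_old),
	       sizeof(struct lockdep_map_new),
	       sizeof(struct lockdep_map_old) - sizeof(struct lockdep_map_new));
	return 0;
}

On a 64-bit build this prints a 56-byte saving (80 vs 24 bytes), on a 32-bit
build 28 bytes (40 vs 12); locks acquired with a non-zero subclass simply skip
the cache and take the lockless class-hash lookup instead.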

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 55794a41
include/linux/lockdep.h (+1 −1)
@@ -120,7 +120,7 @@ struct lock_class {
  */
 struct lockdep_map {
 	struct lock_class_key		*key;
-	struct lock_class		*class[MAX_LOCKDEP_SUBCLASSES];
+	struct lock_class		*class_cache;
 	const char			*name;
 };
 
kernel/lockdep.c (+54 −33)
@@ -1104,7 +1104,7 @@ extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1148,7 +1148,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 */
 	list_for_each_entry(class, hash_head, hash_entry)
 		if (class->key == key)
-			goto out_set;
+			return class;
+
+	return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+{
+	struct lockdep_subclass_key *key;
+	struct list_head *hash_head;
+	struct lock_class *class;
+
+	class = look_up_lock_class(lock, subclass);
+	if (likely(class))
+		return class;
 
 	/*
 	 * Debug-check: all keys must be persistent!
@@ -1163,6 +1182,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		return NULL;
 	}
 
+	key = lock->key->subkeys + subclass;
+	hash_head = classhashentry(key);
+
 	__raw_spin_lock(&hash_lock);
 	/*
 	 * We have to do the hash-walk again, to avoid races
@@ -1209,8 +1231,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-out_set:
-	lock->class[subclass] = class;
+	if (!subclass)
+		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
 
@@ -1914,7 +1936,7 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	}
 	lock->name = name;
 	lock->key = key;
-	memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
+	lock->class_cache = NULL;
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1928,8 +1950,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  unsigned long ip)
 {
 	struct task_struct *curr = current;
+	struct lock_class *class = NULL;
 	struct held_lock *hlock;
-	struct lock_class *class;
 	unsigned int depth, id;
 	int chain_head = 0;
 	u64 chain_key;
@@ -1947,8 +1969,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		return 0;
 	}
 
-	class = lock->class[subclass];
-	/* not cached yet? */
+	if (!subclass)
+		class = lock->class_cache;
+	/*
+	 * Not cached yet or subclass?
+	 */
 	if (unlikely(!class)) {
 		class = register_lock_class(lock, subclass);
 		if (!class)
@@ -2449,48 +2474,44 @@ void lockdep_free_key_range(void *start, unsigned long size)
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-	struct lock_class *class, *next, *entry;
+	struct lock_class *class, *next;
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;
 
 	raw_local_irq_save(flags);
-	__raw_spin_lock(&hash_lock);
 
 	/*
-	 * Remove all classes this lock has:
+	 * Remove all classes this lock might have:
 	 */
-	for (i = 0; i < CLASSHASH_SIZE; i++) {
-		head = classhash_table + i;
-		if (list_empty(head))
-			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
-			for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-				entry = lock->class[j];
-				if (class == entry) {
-					zap_class(class);
-					lock->class[j] = NULL;
-					break;
-				}
-			}
-		}
-	}
-
+	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
+		/*
+		 * If the class exists we look it up and zap it:
+		 */
+		class = look_up_lock_class(lock, j);
+		if (class)
+			zap_class(class);
+	}
 	/*
 	 * Debug check: in the end all mapped classes should
 	 * be gone.
 	 */
-	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-		entry = lock->class[j];
-		if (!entry)
+	__raw_spin_lock(&hash_lock);
+	for (i = 0; i < CLASSHASH_SIZE; i++) {
+		head = classhash_table + i;
+		if (list_empty(head))
 			continue;
-		__raw_spin_unlock(&hash_lock);
-		DEBUG_LOCKS_WARN_ON(1);
-		raw_local_irq_restore(flags);
-		return;
+		list_for_each_entry_safe(class, next, head, hash_entry) {
+			if (unlikely(class == lock->class_cache)) {
+				__raw_spin_unlock(&hash_lock);
+				DEBUG_LOCKS_WARN_ON(1);
+				goto out_restore;
+			}
+		}
 	}
-
 	__raw_spin_unlock(&hash_lock);
+
+out_restore:
 	raw_local_irq_restore(flags);
 }
 