locking/lockdep: Reuse list entries that are no longer in use

Instead of abandoning elements of list_entries[] that are no longer in
use, make alloc_list_entry() reuse array elements that have been freed.
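
As an illustration only, here is a minimal userspace model of that
scheme: a fixed array whose occupied slots are tracked in a bitmap, so
freed elements become allocatable again. Every name below
(first_zero_bit(), alloc_entry(), free_entry()) is a stand-in for the
kernel's bitmap API, not kernel code:

#include <assert.h>
#include <limits.h>
#include <stddef.h>

#define NENTRIES 16
#define WORDBITS (sizeof(unsigned long) * CHAR_BIT)

struct entry { int payload; };

static struct entry entries[NENTRIES];
static unsigned long in_use[(NENTRIES + WORDBITS - 1) / WORDBITS];

/* Simplified stand-in for the kernel's find_first_zero_bit(). */
static size_t first_zero_bit(const unsigned long *map, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (!(map[i / WORDBITS] & (1UL << (i % WORDBITS))))
			return i;
	return size;	/* every bit set: no free slot */
}

/* Hand out the lowest free slot, or NULL when the pool is exhausted. */
static struct entry *alloc_entry(void)
{
	size_t idx = first_zero_bit(in_use, NENTRIES);

	if (idx >= NENTRIES)
		return NULL;
	in_use[idx / WORDBITS] |= 1UL << (idx % WORDBITS);
	return entries + idx;
}

/* Clearing the bit makes the slot immediately reusable. */
static void free_entry(const struct entry *e)
{
	size_t idx = (size_t)(e - entries);

	in_use[idx / WORDBITS] &= ~(1UL << (idx % WORDBITS));
}

int main(void)
{
	struct entry *a = alloc_entry();	/* takes slot 0 */
	struct entry *b = alloc_entry();	/* takes slot 1 */

	(void)b;
	free_entry(a);				/* slot 0 is free again */
	assert(alloc_entry() == a);		/* ...and gets reused */
	return 0;
}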

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-13-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

@@ -45,6 +45,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
+#include <linux/bitmap.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/random.h>
@@ -132,6 +133,7 @@ static inline int debug_locks_off_graph_unlock(void)
 
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -907,7 +909,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
  */
 static struct lock_list *alloc_list_entry(void)
 {
-	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+	int idx = find_first_zero_bit(list_entries_in_use,
+				      ARRAY_SIZE(list_entries));
+
+	if (idx >= ARRAY_SIZE(list_entries)) {
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
@@ -915,7 +920,9 @@ static struct lock_list *alloc_list_entry(void)
 		dump_stack();
 		return NULL;
 	}
-	return list_entries + nr_list_entries++;
+	nr_list_entries++;
+	__set_bit(idx, list_entries_in_use);
+	return list_entries + idx;
 }
 
 /*
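
find_first_zero_bit() returns its size argument when no zero bit
exists, so the "idx >= ARRAY_SIZE(list_entries)" test above doubles as
the table-full check. A standalone sketch of that convention, with a
simplified helper standing in for the real kernel implementation:

#include <assert.h>
#include <stddef.h>

#define NBITS 8

/* Same convention as the kernel helper: return `size` if no bit is 0. */
static size_t first_zero(unsigned long map, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (!(map >> i & 1UL))
			return i;
	return size;
}

int main(void)
{
	unsigned long map = (1UL << NBITS) - 1;	/* all NBITS slots taken */

	assert(first_zero(map, NBITS) == NBITS);	/* full: idx == size */
	map &= ~(1UL << 3);				/* release slot 3 */
	assert(first_zero(map, NBITS) == 3);		/* lowest free slot wins */
	return 0;
}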
@@ -1019,7 +1026,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	lock->parent = parent;
 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -1029,7 +1036,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
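
The WARN_ON() bound changes because nr_list_entries is now only a
population count, not a high-water mark: once slots can be freed and
reused, a live entry may sit at any index, so only the array size is a
safe bound. A toy demonstration, with purely hypothetical values:

#include <assert.h>

#define POOL_SIZE 2	/* stands in for ARRAY_SIZE(list_entries) */

int main(void)
{
	unsigned long nr_in_use = 1;	/* slot 0 was freed...          */
	unsigned long live_idx = 1;	/* ...but slot 1 is still live  */

	assert(live_idx >= nr_in_use);	/* the old bound would mis-fire */
	assert(live_idx < POOL_SIZE);	/* the array size is still safe */
	return 0;
}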
@@ -4276,13 +4283,13 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 	 * Remove all dependencies this lock is
 	 * involved in:
 	 */
-	for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) {
+	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+		entry = list_entries + i;
 		if (entry->class != class && entry->links_to != class)
 			continue;
+		__clear_bit(i, list_entries_in_use);
+		nr_list_entries--;
 		list_del_rcu(&entry->entry);
-		/* Clear .class and .links_to to avoid double removal. */
-		WRITE_ONCE(entry->class, NULL);
-		WRITE_ONCE(entry->links_to, NULL);
 	}
 	if (list_empty(&class->locks_after) &&
 	    list_empty(&class->locks_before)) {
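
On the teardown side, zap_class() now walks only the occupied slots and
clears their bits, so every entry released here is immediately reusable
by alloc_list_entry(). A userspace model of that loop, with hand-rolled
stand-ins for for_each_set_bit() and __clear_bit():

#include <stddef.h>

#define NENTRIES 8

struct dep { int class_id; };

static struct dep deps[NENTRIES];
static unsigned long deps_in_use;	/* one word suffices for NENTRIES <= 64 */

/* Release every dependency entry that refers to the given class. */
static void zap_deps_for(int class_id)
{
	size_t i;

	for (i = 0; i < NENTRIES; i++) {
		if (!(deps_in_use >> i & 1UL))	/* models for_each_set_bit() */
			continue;
		if (deps[i].class_id != class_id)
			continue;
		deps_in_use &= ~(1UL << i);	/* models __clear_bit() */
	}
}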
@@ -4596,6 +4603,7 @@ void __init lockdep_init(void)
 		(sizeof(lock_classes) +
 		sizeof(classhash_table) +
 		sizeof(list_entries) +
+		sizeof(list_entries_in_use) +
 		sizeof(chainhash_table) +
 		sizeof(delayed_free)
 #ifdef CONFIG_PROVE_LOCKING