From ace35a7ac493d4284a57ad807579011bebba891c Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 14 Feb 2019 15:00:47 -0800
Subject: [PATCH] locking/lockdep: Reuse list entries that are no longer in use

Instead of abandoning elements of list_entries[] that are no longer in
use, make alloc_list_entry() reuse array elements that have been freed.

Signed-off-by: Bart Van Assche
Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Johannes Berg
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Waiman Long
Cc: Will Deacon
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-13-bvanassche@acm.org
Signed-off-by: Ingo Molnar
---
 kernel/locking/lockdep.c | 24 ++++++++++++++++--------
 1 file changed, 16 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 8ecf355dd163..2c6d0b67e7b6 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -45,6 +45,7 @@
 #include
 #include
 #include
+#include <linux/bitmap.h>
 #include
 #include
 #include
@@ -132,6 +133,7 @@ static inline int debug_locks_off_graph_unlock(void)
 
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -907,7 +909,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
  */
 static struct lock_list *alloc_list_entry(void)
 {
-	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+	int idx = find_first_zero_bit(list_entries_in_use,
+				      ARRAY_SIZE(list_entries));
+
+	if (idx >= ARRAY_SIZE(list_entries)) {
 		if (!debug_locks_off_graph_unlock())
 			return NULL;
 
@@ -915,7 +920,9 @@ static struct lock_list *alloc_list_entry(void)
 		dump_stack();
 		return NULL;
 	}
-	return list_entries + nr_list_entries++;
+	nr_list_entries++;
+	__set_bit(idx, list_entries_in_use);
+	return list_entries + idx;
 }
 
 /*
@@ -1019,7 +1026,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	lock->parent = parent;
 	lock->class->dep_gen_id = lockdep_dependency_gen_id;
 }
@@ -1029,7 +1036,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
 	unsigned long nr;
 
 	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+	WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
 	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
 }
 
@@ -4276,13 +4283,13 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 	 * Remove all dependencies this lock is
	 * involved in:
	 */
-	for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) {
+	for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+		entry = list_entries + i;
 		if (entry->class != class && entry->links_to != class)
 			continue;
+		__clear_bit(i, list_entries_in_use);
+		nr_list_entries--;
 		list_del_rcu(&entry->entry);
-		/* Clear .class and .links_to to avoid double removal. */
-		WRITE_ONCE(entry->class, NULL);
-		WRITE_ONCE(entry->links_to, NULL);
 	}
 	if (list_empty(&class->locks_after) &&
 	    list_empty(&class->locks_before)) {
@@ -4596,6 +4603,7 @@ void __init lockdep_init(void)
 		(sizeof(lock_classes) +
 		sizeof(classhash_table) +
 		sizeof(list_entries) +
+		sizeof(list_entries_in_use) +
 		sizeof(chainhash_table) +
 		sizeof(delayed_free)
 #ifdef CONFIG_PROVE_LOCKING
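
For readers following along outside the kernel tree, below is a minimal,
self-contained user-space sketch of the pattern this patch introduces: a
fixed-size pool paired with an in-use bitmap, where allocation scans for the
first clear bit and freeing simply clears it, so zapped slots can be handed
out again. The pool/pool_alloc/pool_free names and the open-coded bit helpers
are illustrative stand-ins for list_entries[], find_first_zero_bit(),
__set_bit() and __clear_bit(); none of this code is part of the patch itself.

/*
 * User-space sketch of the bitmap-backed pool used by the patch above.
 * All names here are illustrative, not kernel APIs.
 */
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define POOL_SIZE	64
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_WORDS	((POOL_SIZE + BITS_PER_WORD - 1) / BITS_PER_WORD)

struct entry { int data; };

static struct entry pool[POOL_SIZE];
static unsigned long pool_in_use[BITMAP_WORDS];

/* Scan for the first clear bit; stand-in for find_first_zero_bit(). */
static size_t first_zero_bit(const unsigned long *map, size_t bits)
{
	for (size_t i = 0; i < bits; i++)
		if (!(map[i / BITS_PER_WORD] & (1UL << (i % BITS_PER_WORD))))
			return i;
	return bits;	/* no free slot: return "past the end", as the patch does */
}

/* Allocate: reuse any slot whose in-use bit is clear. */
static struct entry *pool_alloc(void)
{
	size_t idx = first_zero_bit(pool_in_use, POOL_SIZE);

	if (idx >= POOL_SIZE)
		return NULL;	/* pool exhausted */
	pool_in_use[idx / BITS_PER_WORD] |= 1UL << (idx % BITS_PER_WORD);
	return pool + idx;
}

/* Free: clearing the bit makes the slot reusable by the next pool_alloc(). */
static void pool_free(struct entry *e)
{
	size_t idx = e - pool;

	pool_in_use[idx / BITS_PER_WORD] &= ~(1UL << (idx % BITS_PER_WORD));
}

int main(void)
{
	struct entry *a = pool_alloc();		/* slot 0 */
	struct entry *b = pool_alloc();		/* slot 1 */

	pool_free(a);				/* slot 0 becomes free again */
	printf("reused slot 0: %d\n", pool_alloc() == a);	/* prints 1 */
	printf("b is slot 1: %d\n", b == pool + 1);		/* prints 1 */
	return 0;
}

The trade-off mirrors the one in the patch: allocation goes from a pointer
bump to an O(n) bitmap scan, in exchange for the ability to return entries to
the pool when zap_class() unlinks them.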