locking/lockdep: Avoid that add_chain_cache() adds an invalid chain to the cache

Make sure that add_chain_cache() returns 0 and does not modify the
chain hash if nr_chain_hlocks == MAX_LOCKDEP_CHAIN_HLOCKS before this
function is called.
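
For illustration, a minimal, self-contained sketch of the invariant this change enforces: a chain is only recorded, and nr_chain_hlocks only advanced, when the whole chain fits in chain_hlocks[]; otherwise the function bails out without touching the table or the chain hash. This is not the kernel code itself: the tiny array size, the add_chain_cache_sketch() name and the lock_ids parameter are assumptions made to keep the example standalone.

/*
 * Simplified model of the chain_hlocks[] bookkeeping (illustrative only).
 */
#include <stdio.h>

#define MAX_LOCKDEP_CHAIN_HLOCKS 16	/* tiny on purpose; the real value is much larger */

static unsigned short chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
static unsigned int nr_chain_hlocks;

struct lock_chain {
	unsigned int base;
	unsigned int depth;
};

/* Record a chain only if the whole thing fits; never leave a partial entry. */
static int add_chain_cache_sketch(struct lock_chain *chain,
				  const unsigned short *lock_ids)
{
	unsigned int j;

	if (nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
		chain->base = nr_chain_hlocks;
		for (j = 0; j < chain->depth; j++)
			chain_hlocks[chain->base + j] = lock_ids[j];
		nr_chain_hlocks += chain->depth;
	} else {
		/* Table full: fail without recording anything. */
		return 0;
	}

	/* Only here would the chain be added to the chain hash in the real code. */
	return 1;
}

int main(void)
{
	unsigned short ids[4] = { 1, 2, 3, 4 };
	struct lock_chain c = { .base = 0, .depth = 4 };

	while (add_chain_cache_sketch(&c, ids))
		printf("stored chain at base %u\n", c.base);
	printf("table full, nr_chain_hlocks stays at %u\n", nr_chain_hlocks);
	return 0;
}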

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: johannes.berg@intel.com
Cc: tj@kernel.org
Link: https://lkml.kernel.org/r/20190214230058.196511-5-bvanassche@acm.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 523b113bac (parent 15ea86b58c)
Author:    Bart Van Assche <bvanassche@acm.org>, 2019-02-14 15:00:39 -08:00
Committer: Ingo Molnar <mingo@kernel.org>

kernel/locking/lockdep.c

@@ -2195,16 +2195,8 @@ static inline int add_chain_cache(struct task_struct *curr,
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
-	}
-
-	if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
 		nr_chain_hlocks += chain->depth;
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-	/*
-	 * Important for check_no_collision().
-	 */
-	if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+	} else {
 		if (!debug_locks_off_graph_unlock())
 			return 0;
 
@@ -2212,7 +2204,6 @@ static inline int add_chain_cache(struct task_struct *curr,
 		dump_stack();
 		return 0;
 	}
-#endif
 
 	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);