locking/lockdep: Rework lockdep_lock
A few sites want to assert we own the graph_lock/lockdep_lock; provide a
more conventional lock interface for it with a number of trivial debug
checks.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200313102107.GX12561@hirez.programming.kicks-ass.net
This commit is contained in:
parent 10476e6304
commit 248efb2158
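In short: before this patch, every site that took the graph lock open-coded the raw spinlock together with the current->lockdep_recursion bookkeeping; the rework folds that bookkeeping into lockdep_lock()/lockdep_unlock() and records an __owner so ownership can be asserted. Condensed from the lockdep_count_forward_deps() hunk below:

	/* Before: each call site open-codes the lock plus the recursion counter. */
	raw_local_irq_save(flags);
	current->lockdep_recursion++;
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	current->lockdep_recursion--;
	raw_local_irq_restore(flags);

	/* After: the bookkeeping lives inside the lock primitives. */
	raw_local_irq_save(flags);
	lockdep_lock();
	ret = __lockdep_count_forward_deps(&this);
	lockdep_unlock();
	raw_local_irq_restore(flags);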
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -84,12 +84,39 @@ module_param(lock_stat, int, 0644);
  * to use a raw spinlock - we really dont want the spinlock
  * code to recurse back into the lockdep code...
  */
-static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+static struct task_struct *__owner;
+
+static inline void lockdep_lock(void)
+{
+	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+	arch_spin_lock(&__lock);
+	__owner = current;
+	current->lockdep_recursion++;
+}
+
+static inline void lockdep_unlock(void)
+{
+	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
+		return;
+
+	current->lockdep_recursion--;
+	__owner = NULL;
+	arch_spin_unlock(&__lock);
+}
+
+static inline bool lockdep_assert_locked(void)
+{
+	return DEBUG_LOCKS_WARN_ON(__owner != current);
+}
+
 static struct task_struct *lockdep_selftest_task_struct;
 
 static int graph_lock(void)
 {
-	arch_spin_lock(&lockdep_lock);
+	lockdep_lock();
 	/*
 	 * Make sure that if another CPU detected a bug while
 	 * walking the graph we dont change it (while the other
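Tracking __owner is what makes lockdep_assert_locked() stronger than the old arch_spin_is_locked() test in graph_unlock(): it distinguishes "this task holds the lock" from "some other CPU holds it". A minimal sketch of the intended use, with a hypothetical helper name that is not part of this patch:

	/* Hypothetical example: any graph-walking helper can now assert
	 * that the *current* task holds the graph lock. */
	static void check_graph_invariants(void)
	{
		lockdep_assert_locked();	/* warns unless __owner == current */
		/* ... safe to traverse/modify the dependency graph here ... */
	}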
@@ -97,27 +124,15 @@ static int graph_lock(void)
 	 * dropped already)
 	 */
 	if (!debug_locks) {
-		arch_spin_unlock(&lockdep_lock);
+		lockdep_unlock();
 		return 0;
 	}
-	/* prevent any recursions within lockdep from causing deadlocks */
-	current->lockdep_recursion++;
 	return 1;
 }
 
-static inline int graph_unlock(void)
+static inline void graph_unlock(void)
 {
-	if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
-		/*
-		 * The lockdep graph lock isn't locked while we expect it to
-		 * be, we're confused now, bye!
-		 */
-		return DEBUG_LOCKS_WARN_ON(1);
-	}
-
-	current->lockdep_recursion--;
-	arch_spin_unlock(&lockdep_lock);
-	return 0;
+	lockdep_unlock();
 }
 
 /*
@@ -128,7 +143,7 @@ static inline int debug_locks_off_graph_unlock(void)
 {
 	int ret = debug_locks_off();
 
-	arch_spin_unlock(&lockdep_lock);
+	lockdep_unlock();
 
 	return ret;
 }
@@ -1479,6 +1494,8 @@ static int __bfs(struct lock_list *source_entry,
 	struct circular_queue *cq = &lock_cq;
 	int ret = 1;
 
+	lockdep_assert_locked();
+
 	if (match(source_entry, data)) {
 		*target_entry = source_entry;
 		ret = 0;
@@ -1501,8 +1518,6 @@ static int __bfs(struct lock_list *source_entry,
 
 		head = get_dep_list(lock, offset);
 
-		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
-
 		list_for_each_entry_rcu(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				unsigned int cq_depth;
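This removal pairs with the lockdep_assert_locked() added at the top of __bfs() in the previous hunk: lockdep_lock() already warns when interrupts are enabled at acquire time, so a single ownership assertion at function entry subsumes the per-iteration irqs_disabled() check. Schematically (a sketch of the resulting shape, not the full function):

	static int __bfs(struct lock_list *source_entry /* , ... */)
	{
		/* One check at entry: holding the graph lock implies it was
		 * taken via lockdep_lock(), which asserted irqs_disabled(). */
		lockdep_assert_locked();
		/* ... BFS over the dependency graph, no per-loop IRQ check ... */
	}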
@@ -1729,11 +1744,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.class = class;
 
 	raw_local_irq_save(flags);
-	current->lockdep_recursion++;
-	arch_spin_lock(&lockdep_lock);
+	lockdep_lock();
 	ret = __lockdep_count_forward_deps(&this);
-	arch_spin_unlock(&lockdep_lock);
-	current->lockdep_recursion--;
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
 	return ret;
@@ -1758,11 +1771,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.class = class;
 
 	raw_local_irq_save(flags);
-	current->lockdep_recursion++;
-	arch_spin_lock(&lockdep_lock);
+	lockdep_lock();
 	ret = __lockdep_count_backward_deps(&this);
-	arch_spin_unlock(&lockdep_lock);
-	current->lockdep_recursion--;
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
 	return ret;
@@ -3046,7 +3057,7 @@ static inline int add_chain_cache(struct task_struct *curr,
 	 * disabled to make this an IRQ-safe lock.. for recursion reasons
 	 * lockdep won't complain about its own locking errors.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+	if (lockdep_assert_locked())
 		return 0;
 
 	chain = alloc_lock_chain();
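Note the polarity: DEBUG_LOCKS_WARN_ON() returns nonzero when the warned-on condition holds, so lockdep_assert_locked() returns true exactly when the current task does not own the graph lock. That is what lets the assertion double as a guard here:

	/* Polarity of the check, given the definition in the first hunk:
	 *   nonzero -> __owner != current, the warning fired: bail out
	 *   zero    -> we own the graph lock, proceed
	 */
	if (lockdep_assert_locked())
		return 0;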
@@ -5181,8 +5192,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 		return;
 
 	raw_local_irq_save(flags);
-	arch_spin_lock(&lockdep_lock);
-	current->lockdep_recursion++;
+	lockdep_lock();
 
 	/* closed head */
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -5194,8 +5204,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	 */
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-	current->lockdep_recursion--;
-	arch_spin_unlock(&lockdep_lock);
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 }
 
@@ -5240,13 +5249,11 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	arch_spin_lock(&lockdep_lock);
-	current->lockdep_recursion++;
+	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
-	current->lockdep_recursion--;
-	arch_spin_unlock(&lockdep_lock);
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 
 	/*
@@ -5268,10 +5275,10 @@ static void lockdep_free_key_range_imm(void *start, unsigned long size)
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	arch_spin_lock(&lockdep_lock);
+	lockdep_lock();
 	__lockdep_free_key_range(pf, start, size);
 	__free_zapped_classes(pf);
-	arch_spin_unlock(&lockdep_lock);
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 }
 
@@ -5367,10 +5374,10 @@ static void lockdep_reset_lock_imm(struct lockdep_map *lock)
 	unsigned long flags;
 
 	raw_local_irq_save(flags);
-	arch_spin_lock(&lockdep_lock);
+	lockdep_lock();
 	__lockdep_reset_lock(pf, lock);
 	__free_zapped_classes(pf);
-	arch_spin_unlock(&lockdep_lock);
+	lockdep_unlock();
 	raw_local_irq_restore(flags);
 }