lockdep: Simplify lock_release()
lock_release() takes this nested argument that's mostly pointless
these days; remove the implementation but leave the argument as a
rudiment for now.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124743.840411606@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 5e16bbc2fb
commit e0f56fd706
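In short: the old code special-cased the perfectly nested unlock (pop the top of the held-lock stack) and only fell back to a stack search for out-of-order releases; the new __lock_release() always searches the stack from the top, so a nested unlock is simply the first hit of the search and the nested flag no longer changes behaviour. A minimal userspace sketch of that unified search, for illustration only (held_stack, depth and release_held are invented names here, not kernel identifiers):

/*
 * Model of the unified release path: one routine that scans the
 * held-lock stack from the top subsumes the old nested fast path.
 */
#include <stdio.h>

#define MAX_DEPTH 48

static void *held_stack[MAX_DEPTH];
static int depth;

static int release_held(void *lock)
{
        int i, j;

        for (i = depth - 1; i >= 0; i--) {
                if (held_stack[i] != lock)
                        continue;
                /*
                 * Found it: close the gap, analogous to re-adding the
                 * locks held above it in __lock_release().
                 */
                for (j = i; j < depth - 1; j++)
                        held_stack[j] = held_stack[j + 1];
                depth--;
                return 1;
        }
        return 0;       /* unlock imbalance */
}

int main(void)
{
        int a, b;

        held_stack[depth++] = &a;
        held_stack[depth++] = &b;

        printf("release b (nested case): %d\n", release_held(&b));
        printf("release a: %d\n", release_held(&a));
        printf("release a again (imbalance): %d\n", release_held(&a));
        return 0;
}

A perfectly nested unlock is just the i == depth-1 case, where the inner shift loop does no work -- which is why a separate nested fast path buys little.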
@@ -3260,26 +3260,6 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	return 0;
 }
 
-/*
- * Common debugging checks for both nested and non-nested unlock:
- */
-static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
-			unsigned long ip)
-{
-	if (unlikely(!debug_locks))
-		return 0;
-	/*
-	 * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return 0;
-
-	if (curr->lockdep_depth <= 0)
-		return print_unlock_imbalance_bug(curr, lock, ip);
-
-	return 1;
-}
-
 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 {
 	if (hlock->instance == lock)
@@ -3376,31 +3356,35 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 }
 
 /*
- * Remove the lock to the list of currently held locks in a
- * potentially non-nested (out of order) manner. This is a
- * relatively rare operation, as all the unlock APIs default
- * to nested mode (which uses lock_release()):
+ * Remove the lock to the list of currently held locks - this gets
+ * called on mutex_unlock()/spin_unlock*() (or on a failed
+ * mutex_lock_interruptible()).
+ *
+ * @nested is an hysterical artifact, needs a tree wide cleanup.
  */
 static int
-lock_release_non_nested(struct task_struct *curr,
-			struct lockdep_map *lock, unsigned long ip)
+__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
+	struct task_struct *curr = current;
 	struct held_lock *hlock, *prev_hlock;
 	unsigned int depth;
 	int i;
 
-	/*
-	 * Check whether the lock exists in the current stack
-	 * of held locks:
-	 */
+	if (unlikely(!debug_locks))
+		return 0;
+
 	depth = curr->lockdep_depth;
 	/*
 	 * So we're all set to release this lock.. wait what lock? We don't
 	 * own any locks, you've been drinking again?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth))
-		return 0;
+	if (DEBUG_LOCKS_WARN_ON(depth <= 0))
+		return print_unlock_imbalance_bug(curr, lock, ip);
 
+	/*
+	 * Check whether the lock exists in the current stack
+	 * of held locks:
+	 */
 	prev_hlock = NULL;
 	for (i = depth-1; i >= 0; i--) {
 		hlock = curr->held_locks + i;
@@ -3456,78 +3440,10 @@ lock_release_non_nested(struct task_struct *curr,
 	 */
 	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
 		return 0;
 
 	return 1;
 }
 
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static int lock_release_nested(struct task_struct *curr,
-			       struct lockdep_map *lock, unsigned long ip)
-{
-	struct held_lock *hlock;
-	unsigned int depth;
-
-	/*
-	 * Pop off the top of the lock stack:
-	 */
-	depth = curr->lockdep_depth - 1;
-	hlock = curr->held_locks + depth;
-
-	/*
-	 * Is the unlock non-nested:
-	 */
-	if (hlock->instance != lock || hlock->references)
-		return lock_release_non_nested(curr, lock, ip);
-	curr->lockdep_depth--;
-
-	/*
-	 * No more locks, but somehow we've got hash left over, who left it?
-	 */
-	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
-		return 0;
-
-	curr->curr_chain_key = hlock->prev_chain_key;
-
-	lock_release_holdtime(hlock);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-	hlock->prev_chain_key = 0;
-	hlock->class_idx = 0;
-	hlock->acquire_ip = 0;
-	hlock->irq_context = 0;
-#endif
-	return 1;
-}
-
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static void
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
-{
-	struct task_struct *curr = current;
-
-	if (!check_unlock(curr, lock, ip))
-		return;
-
-	if (nested) {
-		if (!lock_release_nested(curr, lock, ip))
-			return;
-	} else {
-		if (!lock_release_non_nested(curr, lock, ip))
-			return;
-	}
-
-	check_chain_key(curr);
-}
-
 static int __lock_is_held(struct lockdep_map *lock)
 {
 	struct task_struct *curr = current;
@@ -3639,7 +3555,8 @@ void lock_release(struct lockdep_map *lock, int nested,
 	check_flags(flags);
 	current->lockdep_recursion = 1;
 	trace_lock_release(lock, ip);
-	__lock_release(lock, nested, ip);
+	if (__lock_release(lock, nested, ip))
+		check_chain_key(current);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
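One detail worth noting in the last hunk: __lock_release() now returns an int, and lock_release() runs check_chain_key() only when the release succeeded. The net behaviour is unchanged -- the old __lock_release() also returned before its check_chain_key(curr) call whenever either helper failed -- so moving the check out to the caller loses nothing while letting both helper functions be deleted.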