locking/mutexes: Unlock the mutex without the wait_lock
When running workloads that have high contention in mutexes on an 8-socket machine, mutex spinners would often spin for a long time with no lock owner.

The main reason why this is occurring is in __mutex_unlock_common_slowpath(): if __mutex_slowpath_needs_to_unlock(), then the owner needs to acquire the mutex->wait_lock before releasing the mutex (setting lock->count to 1). When the wait_lock is contended, this delays the mutex from being released. We should be able to release the mutex without holding the wait_lock.

Signed-off-by: Jason Low <jason.low2@hp.com>
Cc: chegu_vinod@hp.com
Cc: paulmck@linux.vnet.ibm.com
Cc: Waiman.Long@hp.com
Cc: torvalds@linux-foundation.org
Cc: tglx@linutronix.de
Cc: riel@redhat.com
Cc: akpm@linux-foundation.org
Cc: davidlohr@hp.com
Cc: hpa@zytor.com
Cc: andi@firstfloor.org
Cc: aswin@hp.com
Cc: scott.norton@hp.com
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1390936396-3962-4-git-send-email-jason.low2@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 47667fa150
commit 1d8fe7dc80
@@ -671,10 +671,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	spin_lock_mutex(&lock->wait_lock, flags);
-	mutex_release(&lock->dep_map, nested, _RET_IP_);
-	debug_mutex_unlock(lock);
-
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
 	 * case, others need to leave it locked. In the later case we have to
@@ -683,6 +679,10 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
+	spin_lock_mutex(&lock->wait_lock, flags);
+	mutex_release(&lock->dep_map, nested, _RET_IP_);
+	debug_mutex_unlock(lock);
+
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
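To make the reordering easier to see outside the kernel, below is a minimal userspace sketch of the same idea: the lock word is released before the wait-queue lock is taken, so spinners that observe no owner can acquire the mutex immediately instead of stalling behind a contended wait_lock. The names toy_mutex, toy_unlock_slowpath, and have_waiters are hypothetical stand-ins for illustration only, using C11 atomics and a pthread mutex in place of the kernel's atomic_t and spin_lock_mutex(); this is not the kernel implementation.

/*
 * Sketch of the unlock-slowpath ordering after this patch.
 * All names here are hypothetical, not kernel APIs.
 */
#include <pthread.h>
#include <stdatomic.h>

struct toy_mutex {
	atomic_int count;           /* 1: unlocked, 0: locked, <0: waiters */
	pthread_mutex_t wait_lock;  /* protects the waiter bookkeeping */
	int have_waiters;           /* stand-in for the kernel's wait_list */
};

static void toy_unlock_slowpath(struct toy_mutex *lock)
{
	/*
	 * Release the mutex first: after this store, spinners can
	 * take the lock without waiting for wait_lock to be free.
	 */
	atomic_store(&lock->count, 1);

	/* Only the waiter-wakeup path needs the wait_lock. */
	pthread_mutex_lock(&lock->wait_lock);
	if (lock->have_waiters) {
		/* wake the first waiter here */
	}
	pthread_mutex_unlock(&lock->wait_lock);
}

Before the patch, the atomic_store() equivalent happened only after wait_lock was acquired, so a contended wait_lock kept the mutex held even though the owner was done with it.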