locking/rtmutex: Use acquire/release semantics
As of commit 654672d4ba ("locking/atomics: Add _{acquire|release|relaxed}()
variants of some atomic operations") and commit 6d79ef2d30 ("locking,
asm-generic: Add _{relaxed|acquire|release}() variants for 'atomic_long_t'),
weakly ordered archs can benefit from a more relaxed use of barriers when
locking and unlocking, instead of regular full-barrier semantics. While
currently only arm64 supports such optimizations, updating the corresponding
locking primitives lets other archs benefit immediately as well, once the
necessary machinery is implemented.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1443643395-17016-4-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 81a43adae3
commit 700318d1d7
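The split the patch introduces: the lock fastpath takes the lock with an
acquire-cmpxchg, the unlock path drops it with a release-cmpxchg, and cmpxchg
calls already serialized by ->wait_lock are fully relaxed. A minimal
standalone sketch of that split using C11 <stdatomic.h> -- struct toy_lock,
toy_trylock() and toy_unlock() are invented for illustration and are not the
kernel's implementation:

#include <stdatomic.h>
#include <stdbool.h>

struct toy_lock {
	_Atomic(unsigned long) owner;	/* 0 == unlocked, else owner id */
};

/* Lock fastpath: the winner must observe all writes made before the
 * previous unlock, so acquire ordering on success suffices -- no full
 * barrier needed. */
static bool toy_trylock(struct toy_lock *l, unsigned long me)
{
	unsigned long expected = 0;

	return atomic_compare_exchange_strong_explicit(&l->owner, &expected,
			me, memory_order_acquire, memory_order_relaxed);
}

/* Unlock fastpath: all critical-section writes must be visible before
 * the lock is seen as free, so release ordering suffices. */
static bool toy_unlock(struct toy_lock *l, unsigned long me)
{
	unsigned long expected = me;

	return atomic_compare_exchange_strong_explicit(&l->owner, &expected,
			0, memory_order_release, memory_order_relaxed);
}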
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -74,14 +74,23 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
  * set up.
  */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
+# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)
+
+/*
+ * Callers must hold the ->wait_lock -- which is the whole purpose as we force
+ * all future threads that attempt to [Rmw] the lock to the slowpath. As such
+ * relaxed semantics suffice.
+ */
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 
 	do {
 		owner = *p;
-	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+	} while (cmpxchg_relaxed(p, owner,
+				 owner | RT_MUTEX_HAS_WAITERS) != owner);
 }
 
 /*
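The comment added in the hunk above captures a pattern worth isolating: a
read-modify-write that is already serialized by an external lock needs
atomicity but no ordering of its own. A standalone C11 sketch of that idea,
assuming a made-up 'flags' word and set_waiters_bit_locked() helper (not
kernel code):

#include <stdatomic.h>

#define HAS_WAITERS	1UL

static _Atomic(unsigned long) flags;

/* Callers hold an external lock that serializes every writer of 'flags'
 * (the analogue of ->wait_lock), so the RMW loop only needs atomicity,
 * not ordering: memory_order_relaxed mirrors cmpxchg_relaxed() here. */
static void set_waiters_bit_locked(void)
{
	unsigned long old = atomic_load_explicit(&flags, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(&flags, &old,
			old | HAS_WAITERS,
			memory_order_relaxed, memory_order_relaxed))
		;	/* on failure 'old' is refreshed; retry */
}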
@@ -121,11 +130,14 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
  *					lock(wait_lock);
  *					acquire(lock);
  */
-	return rt_mutex_cmpxchg(lock, owner, NULL);
+	return rt_mutex_cmpxchg_release(lock, owner, NULL);
 }
 
 #else
-# define rt_mutex_cmpxchg(l,c,n)	(0)
+# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
+# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
+# define rt_mutex_cmpxchg_release(l,c,n)	(0)
+
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 {
 	lock->owner = (struct task_struct *)
@@ -1321,7 +1333,7 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		   struct hrtimer_sleeper *timeout,
 		   enum rtmutex_chainwalk chwalk))
 {
-	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
@@ -1337,7 +1349,7 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 		      enum rtmutex_chainwalk chwalk))
 {
 	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
-	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
@@ -1348,7 +1360,7 @@ static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
 		     int (*slowfn)(struct rt_mutex *lock))
 {
-	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 1;
 	}
@@ -1362,7 +1374,7 @@ rt_mutex_fastunlock(struct rt_mutex *lock,
 {
 	WAKE_Q(wake_q);
 
-	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
 		rt_mutex_deadlock_account_unlock(current);
 
 	} else {
@@ -1484,7 +1496,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
 				   struct wake_q_head *wqh)
 {
-	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
 		rt_mutex_deadlock_account_unlock(current);
 		return false;
 	}
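Taken together, the conversions keep the usual lock-ordering guarantee:
everything written before the release-cmpxchg in an unlock path is visible
to whichever thread next succeeds with the acquire-cmpxchg in a lock path.
A small self-contained C11/pthreads check of that pairing, with an invented
worker() spinning on a toy owner word (again illustrative, not the kernel's
code):

#include <stdatomic.h>
#include <stdint.h>
#include <pthread.h>
#include <assert.h>
#include <stdio.h>

static _Atomic(unsigned long) owner;	/* 0 == unlocked */
static int shared_data;			/* protected by 'owner' */

static void *worker(void *arg)
{
	unsigned long me = (unsigned long)(uintptr_t)arg;
	unsigned long expected;

	for (;;) {	/* fastpath loop: acquire-cmpxchg, as in rt_mutex_fastlock() */
		expected = 0;
		if (atomic_compare_exchange_weak_explicit(&owner, &expected, me,
				memory_order_acquire, memory_order_relaxed))
			break;
	}
	shared_data++;	/* cannot be reordered before the acquire above */
	atomic_store_explicit(&owner, 0, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)1);
	pthread_create(&b, NULL, worker, (void *)2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	assert(shared_data == 2);	/* holds due to the release->acquire pairing */
	printf("shared_data = %d\n", shared_data);
	return 0;
}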