locking/osq: Relax atomic semantics
... by using acquire/release for ops around the lock->tail. As such,
weakly ordered archs can benefit from more relaxed use of barriers
when issuing atomics.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <Waiman.Long@hpe.com>
Link: http://lkml.kernel.org/r/1442216244-4409-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6e1e519697
commit c55a6ffa62
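For illustration, here is a minimal userspace C11 analogue of the pattern this
patch applies to lock->tail (a hypothetical sketch; toy_lock and the function
names are invented, and this is not the kernel's osq code). The lock-side
exchange takes ACQUIRE ordering and the unlock-side compare-and-exchange takes
RELEASE ordering, so the pair delimits the critical section without full
barriers on either side:

	#include <stdatomic.h>
	#include <stdbool.h>

	struct toy_lock {
		atomic_int tail;	/* 0 == unlocked, like OSQ_UNLOCKED_VAL */
	};

	/* Lock fastpath: ACQUIRE pairs with the RELEASE in toy_unlock_fast(). */
	static bool toy_lock_fast(struct toy_lock *l, int curr)
	{
		return atomic_exchange_explicit(&l->tail, curr,
						memory_order_acquire) == 0;
	}

	/* Unlock fastpath: RELEASE publishes all critical-section writes. */
	static bool toy_unlock_fast(struct toy_lock *l, int curr)
	{
		int expected = curr;

		return atomic_compare_exchange_strong_explicit(&l->tail, &expected, 0,
							       memory_order_release,
							       memory_order_relaxed);
	}

On strongly ordered machines such as x86, where atomic read-modify-write ops
are fully ordered anyway, this should generate the same code as the fully
ordered variants; the win is on weakly ordered archs with cheaper native
acquire/release forms.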
@@ -50,7 +50,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
 
 	for (;;) {
 		if (atomic_read(&lock->tail) == curr &&
-		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
+		    atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
 			/*
 			 * We were the last queued, we moved @lock back. @prev
 			 * will now observe @lock and will complete its
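Note that this only helps where the architecture provides native
acquire/release atomics; elsewhere the generic headers fall back to building
the _acquire/_release variant from a relaxed op plus a barrier, or simply
alias it to the fully ordered op, so behaviour there is unchanged. A userspace
C11 sketch of that fallback idea (an illustrative analogue using fence-based
ordering; it is not the kernel's actual macro text):

	#include <stdatomic.h>

	/* "Acquire" built from a relaxed op followed by an acquire fence. */
	static int xchg_acquire_fallback(atomic_int *v, int new)
	{
		int old = atomic_exchange_explicit(v, new, memory_order_relaxed);

		atomic_thread_fence(memory_order_acquire);	/* trailing barrier */
		return old;
	}

	/* "Release" built from a release fence followed by a relaxed op. */
	static int xchg_release_fallback(atomic_int *v, int new)
	{
		atomic_thread_fence(memory_order_release);	/* leading barrier */
		return atomic_exchange_explicit(v, new, memory_order_relaxed);
	}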
@@ -92,7 +92,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	node->next = NULL;
 	node->cpu = curr;
 
-	old = atomic_xchg(&lock->tail, curr);
+	/*
+	 * ACQUIRE semantics, pairs with corresponding RELEASE
+	 * in unlock() uncontended, or fastpath.
+	 */
+	old = atomic_xchg_acquire(&lock->tail, curr);
 	if (old == OSQ_UNLOCKED_VAL)
 		return true;
 
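The comment added above spells out the contract: the ACQUIRE in the lock
fastpath pairs with the RELEASE in the unlock fastpath, which the next hunk
changes. A message-passing sketch of what that pairing guarantees
(hypothetical userspace C11 code; payload, tail and the function names are
invented):

	#include <assert.h>
	#include <stdatomic.h>

	static int payload;				/* data the lock protects */
	static atomic_int tail = ATOMIC_VAR_INIT(1);	/* starts held by unlocker */

	/* Unlocking CPU: the critical-section write may not sink below RELEASE. */
	static void unlocker(void)
	{
		payload = 42;
		atomic_store_explicit(&tail, 0, memory_order_release);
	}

	/* Locking CPU: observing the release store makes payload visible too. */
	static void locker(int curr)
	{
		if (atomic_exchange_explicit(&tail, curr,
					     memory_order_acquire) == 0)
			assert(payload == 42);	/* guaranteed by acquire/release */
	}

Nothing here relies on full barriers: the acquire exchange synchronizes with
the release store, which is exactly the ordering the osq fast paths need.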
@@ -184,7 +188,8 @@ void osq_unlock(struct optimistic_spin_queue *lock)
 	/*
 	 * Fast path for the uncontended case.
 	 */
-	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
+	if (likely(atomic_cmpxchg_release(&lock->tail, curr,
+					  OSQ_UNLOCKED_VAL) == curr))
 		return;
 
 	/*