locking/mutex: Introduce ww_mutex_set_context_slowpath()
... which is equivalent to the fastpath counterpart. This mainly allows
getting some WW specific code out of generic mutex paths.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1420573509-24774-4-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e42f678a02
commit 4bd19084fa
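For context on the "wake up any waiters so they can recheck" behaviour the new helper shares with its fastpath counterpart, here is a minimal caller-side sketch of the ww_mutex acquire/back-off pattern. lock_pair and example_ww_class are hypothetical names invented for illustration; the ww_mutex_*/ww_acquire_* calls are the public API this file implements.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);

/* Take two ww_mutexes in either order, backing off on -EDEADLK. */
static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *held = a;	/* lock we grab first */
	struct ww_mutex *want = b;	/* lock we still need */
	int err;

	ww_acquire_init(&ctx, &example_ww_class);

	err = ww_mutex_lock(held, &ctx);
	WARN_ON(err);	/* nothing held yet: no back-off expected */

	while (ww_mutex_lock(want, &ctx) == -EDEADLK) {
		/*
		 * We lost the stamp comparison against the current
		 * holder of 'want': drop what we hold, sleep until
		 * 'want' is released, then retry the remaining lock.
		 */
		ww_mutex_unlock(held);
		ww_mutex_lock_slow(want, &ctx);
		swap(held, want);
	}
	ww_acquire_done(&ctx);

	/* ... both a and b are held here ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
	ww_acquire_fini(&ctx);
}

The wakeup in the slowpath helper below is what lets a sleeping ww_mutex_lock caller notice the newly published lock->ctx and return -EDEADLK so this back-off can run.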
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 }
 
 /*
- * after acquiring lock with fastpath or when we lost out in contested
+ * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  *
  * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,6 +191,30 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 	spin_unlock_mutex(&lock->base.wait_lock, flags);
 }
 
+/*
+ * After acquiring lock in the slowpath set ctx and wake up any
+ * waiters so they can recheck.
+ *
+ * Callers must hold the mutex wait_lock.
+ */
+static __always_inline void
+ww_mutex_set_context_slowpath(struct ww_mutex *lock,
+			      struct ww_acquire_ctx *ctx)
+{
+	struct mutex_waiter *cur;
+
+	ww_mutex_lock_acquired(lock, ctx);
+	lock->ctx = ctx;
+
+	/*
+	 * Give any possible sleeping processes the chance to wake up,
+	 * so they can recheck if they have to back off.
+	 */
+	list_for_each_entry(cur, &lock->base.wait_list, list) {
+		debug_mutex_wake_waiter(&lock->base, cur);
+		wake_up_process(cur->task);
+	}
+}
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -576,23 +600,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-		struct mutex_waiter *cur;
-
-		/*
-		 * This branch gets optimized out for the common case,
-		 * and is only important for ww_mutex_lock.
-		 */
-		ww_mutex_lock_acquired(ww, ww_ctx);
-		ww->ctx = ww_ctx;
-
-		/*
-		 * Give any possible sleeping processes the chance to wake up,
-		 * so they can recheck if they have to back off.
-		 */
-		list_for_each_entry(cur, &lock->wait_list, list) {
-			debug_mutex_wake_waiter(lock, cur);
-			wake_up_process(cur->task);
-		}
+		ww_mutex_set_context_slowpath(ww, ww_ctx);
 	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
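As for what a woken waiter actually rechecks: it compares acquire-context stamps against the lock->ctx just published by the helper above. A condensed, illustrative version of that decision is sketched here; example_check_stamp is a hypothetical name, and the in-tree check additionally records debug state.

#include <linux/ww_mutex.h>

/*
 * Illustrative sketch of the recheck a woken waiter performs, not
 * the kernel's exact code. ww_mutex implements the wait-die scheme:
 * the younger context (larger stamp) backs off with -EDEADLK, the
 * older one keeps waiting.
 */
static int example_check_stamp(struct ww_mutex *lock,
			       struct ww_acquire_ctx *ctx)
{
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(lock->ctx);

	if (!hold_ctx)
		return 0;		/* holder has no ctx: keep waiting */
	if (ctx == hold_ctx)
		return -EALREADY;	/* we already hold this lock */
	if ((long)(ctx->stamp - hold_ctx->stamp) > 0)
		return -EDEADLK;	/* we are younger: back off */
	return 0;			/* we are older: keep waiting */
}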