locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
[ Upstream commit 5de2055d31ea88fd9ae9709ac95c372a505a60fa ]

The use_ww_ctx flag is passed to mutex_optimistic_spin(), but the
function doesn't use it. The frequent use of the (use_ww_ctx && ww_ctx)
combination is repetitive.

In fact, ww_ctx should not be used at all if !use_ww_ctx. Simplify the
ww_mutex code by dropping use_ww_ctx from mutex_optimistic_spin() and
clearing ww_ctx if !use_ww_ctx. In this way, we can replace
(use_ww_ctx && ww_ctx) by just (ww_ctx).

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Link: https://lore.kernel.org/r/20210316153119.13802-2-longman@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
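The idea can be seen outside the kernel as well: normalize the optional
context pointer once at function entry, and every later site tests only
the pointer. The following is a minimal standalone sketch of that
pattern, not kernel code; lock_common() and struct ctx are hypothetical
stand-ins for __mutex_lock_common() and struct ww_acquire_ctx.

/* Sketch: clear the pointer once so "flag && ptr" checks become "ptr". */
#include <stdbool.h>
#include <stdio.h>

struct ctx { int id; };

static int lock_common(struct ctx *ctx, bool use_ctx)
{
	/* Normalize once: a NULL pointer now encodes "no context". */
	if (!use_ctx)
		ctx = NULL;

	/* Before the change this would have read: if (use_ctx && ctx) */
	if (ctx)
		printf("using context %d\n", ctx->id);
	else
		printf("no context\n");

	return 0;
}

int main(void)
{
	struct ctx c = { .id = 1 };

	lock_common(&c, true);    /* "using context 1" */
	lock_common(&c, false);   /* "no context": pointer was cleared */
	lock_common(NULL, true);  /* "no context": caller passed none  */
	return 0;
}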
commit 905ef030bd (parent 1e2a75c24a)
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -636,7 +636,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	if (!waiter) {
 		/*
@@ -712,7 +712,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 #else
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	return false;
 }
@@ -932,6 +932,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;
 
+	if (!use_ww_ctx)
+		ww_ctx = NULL;
+
 	might_sleep();
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -939,7 +942,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 #endif
 
 	ww = container_of(lock, struct ww_mutex, base);
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 
@@ -956,10 +959,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
 	if (__mutex_trylock(lock) ||
-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -970,7 +973,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * After waiting to acquire the wait_lock, try again.
 	 */
 	if (__mutex_trylock(lock)) {
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			__ww_mutex_check_waiters(lock, ww_ctx);
 
 		goto skip_wait;
@@ -1023,7 +1026,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}
 
-		if (use_ww_ctx && ww_ctx) {
+		if (ww_ctx) {
 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
 			if (ret)
 				goto err;
@@ -1036,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * ww_mutex needs to always recheck its position since its waiter
 		 * list is not FIFO ordered.
 		 */
-		if ((use_ww_ctx && ww_ctx) || !first) {
+		if (ww_ctx || !first) {
 			first = __mutex_waiter_is_first(lock, &waiter);
 			if (first)
 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1049,7 +1052,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * or we must see its unlock and acquire.
 		 */
 		if (__mutex_trylock(lock) ||
-		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;
 
 		spin_lock(&lock->wait_lock);
@@ -1058,7 +1061,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
 	__set_current_state(TASK_RUNNING);
 
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		/*
 		 * Wound-Wait; we stole the lock (!first_waiter), check the
 		 * waiters as anyone might want to wound us.
@@ -1078,7 +1081,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx && ww_ctx)
+	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);
 
 	spin_unlock(&lock->wait_lock);