forked from luck/tmp_suning_uos_patched
locking/rwsem-xadd: Add killable versions of rwsem_down_read_failed()
Rename rwsem_down_read_failed() to __rwsem_down_read_failed_common() and teach it to abort waiting when there are pending signals and a killable state argument was passed. Note that we shouldn't wake anybody up in the EINTR path, as: We check for (waiter.task) under the spinlock before we go to the out_nolock path. The current task wasn't able to be woken up, so there is either a writer owning the sem, or a writer that is the first waiter. In both cases we shouldn't wake anybody. If there is a writer owning the sem, and we were the only waiter, remove RWSEM_WAITING_BIAS, as there are no waiters anymore. Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: arnd@arndb.de Cc: avagin@virtuozzo.com Cc: davem@davemloft.net Cc: fenghua.yu@intel.com Cc: gorcunov@virtuozzo.com Cc: heiko.carstens@de.ibm.com Cc: hpa@zytor.com Cc: ink@jurassic.park.msu.ru Cc: mattst88@gmail.com Cc: rth@twiddle.net Cc: schwidefsky@de.ibm.com Cc: tony.luck@intel.com Link: http://lkml.kernel.org/r/149789534632.9059.2901382369609922565.stgit@localhost.localdomain Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
0aa1125fa8
commit
83ced169d9
|
@ -44,6 +44,7 @@ struct rw_semaphore {
|
||||||
};
|
};
|
||||||
|
|
||||||
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
|
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
|
||||||
|
extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
|
||||||
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
|
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
|
||||||
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
|
extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
|
||||||
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
|
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
|
||||||
|
|
|
@ -221,8 +221,8 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
|
||||||
/*
|
/*
|
||||||
* Wait for the read lock to be granted
|
* Wait for the read lock to be granted
|
||||||
*/
|
*/
|
||||||
__visible
|
static inline struct rw_semaphore __sched *
|
||||||
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
|
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
|
||||||
{
|
{
|
||||||
long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
|
long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
|
||||||
struct rwsem_waiter waiter;
|
struct rwsem_waiter waiter;
|
||||||
|
@ -255,17 +255,44 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
|
||||||
|
|
||||||
/* wait to be given the lock */
|
/* wait to be given the lock */
|
||||||
while (true) {
|
while (true) {
|
||||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
set_current_state(state);
|
||||||
if (!waiter.task)
|
if (!waiter.task)
|
||||||
break;
|
break;
|
||||||
|
if (signal_pending_state(state, current)) {
|
||||||
|
raw_spin_lock_irq(&sem->wait_lock);
|
||||||
|
if (waiter.task)
|
||||||
|
goto out_nolock;
|
||||||
|
raw_spin_unlock_irq(&sem->wait_lock);
|
||||||
|
break;
|
||||||
|
}
|
||||||
schedule();
|
schedule();
|
||||||
}
|
}
|
||||||
|
|
||||||
__set_current_state(TASK_RUNNING);
|
__set_current_state(TASK_RUNNING);
|
||||||
return sem;
|
return sem;
|
||||||
|
out_nolock:
|
||||||
|
list_del(&waiter.list);
|
||||||
|
if (list_empty(&sem->wait_list))
|
||||||
|
atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
|
||||||
|
raw_spin_unlock_irq(&sem->wait_lock);
|
||||||
|
__set_current_state(TASK_RUNNING);
|
||||||
|
return ERR_PTR(-EINTR);
|
||||||
|
}
|
||||||
|
|
||||||
|
__visible struct rw_semaphore * __sched
|
||||||
|
rwsem_down_read_failed(struct rw_semaphore *sem)
|
||||||
|
{
|
||||||
|
return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(rwsem_down_read_failed);
|
EXPORT_SYMBOL(rwsem_down_read_failed);
|
||||||
|
|
||||||
|
__visible struct rw_semaphore * __sched
|
||||||
|
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
|
||||||
|
{
|
||||||
|
return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(rwsem_down_read_failed_killable);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This function must be called with the sem->wait_lock held to prevent
|
* This function must be called with the sem->wait_lock held to prevent
|
||||||
* race conditions between checking the rwsem wait list and setting the
|
* race conditions between checking the rwsem wait list and setting the
|
||||||
|
|
Loading…
Reference in New Issue
Block a user