sched/wake_q: Reduce reference counting for special users
Some users, specifically futexes and rwsems, required fixes that allowed
the callers to be safe when wakeups occur before they are expected by
wake_up_q(). Such scenarios also play reference-counting games and, until
now, were pivoting on wake_q doing the counting for them. With the
wake_q_add() call being moved down, this can no longer be the case. As
such we end up with a double task-refcounting overhead, and these callers
care enough about this (being rather core-ish).

This patch introduces a wake_q_add_safe() call that serves callers that
have already done the refcounting and therefore the task is 'safe' from
wake_q's point of view (in that it holds a reference throughout the
entire queue/wakeup cycle). In the one case wake_q does internal
reference counting, in the other case it consumes the caller's reference.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Xie Yongji <xieyongji@baidu.com>
Cc: Yongji Xie <elohimes@gmail.com>
Cc: andrea.parri@amarulasolutions.com
Cc: lilin24@baidu.com
Cc: liuqi16@baidu.com
Cc: nixun@baidu.com
Cc: yuanlinsi01@baidu.com
Cc: zhangyu31@baidu.com
Link: https://lkml.kernel.org/r/20181218195352.7orq3upiwfdbrdne@linux-r8p5
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent 513e1073d5
commit 07879c6a37
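In caller terms, the change replaces a queue-then-drop sequence with a
single call that hands the caller's reference to the wake_q. A minimal
before/after sketch of that pattern (the surrounding caller context is
illustrative, not part of this patch):

        /* Before: wake_q_add() takes its own reference internally,
         * making the caller's reference redundant. */
        wake_q_add(wake_q, p);
        put_task_struct(p);        /* drop the now-redundant caller ref */

        /* After: the caller's reference is consumed by the wake_q,
         * either kept for wake_up_q() or dropped if already queued. */
        wake_q_add_safe(wake_q, p);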
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -51,8 +51,8 @@ static inline void wake_q_init(struct wake_q_head *head)
         head->lastp = &head->first;
 }
 
-extern void wake_q_add(struct wake_q_head *head,
-                       struct task_struct *task);
+extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
+extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
 
 #endif /* _LINUX_SCHED_WAKE_Q_H */
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1463,8 +1463,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
          * Queue the task for later wakeup for after we've released
          * the hb->lock. wake_q_add() grabs reference to p.
          */
-        wake_q_add(wake_q, p);
-        put_task_struct(p);
+        wake_q_add_safe(wake_q, p);
 }
 
 /*
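For context, the reference that wake_q_add_safe() consumes here is the
one mark_wake_futex() takes when it snapshots the waiter task; a sketch
abbreviated from the surrounding function as it looks after this patch:

static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
{
        struct task_struct *p = get_task_struct(q->task);

        /* ... unqueueing and lock_ptr release elided ... */

        wake_q_add_safe(wake_q, p);   /* consumes the reference taken above */
}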
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -211,9 +211,7 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
                  * Ensure issuing the wakeup (either by us or someone else)
                  * after setting the reader waiter to nil.
                  */
-                wake_q_add(wake_q, tsk);
-                /* wake_q_add() already take the task ref */
-                put_task_struct(tsk);
+                wake_q_add_safe(wake_q, tsk);
         }
 
         adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
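The rwsem path is analogous: __rwsem_mark_wake() takes its own reference
on the reader task before clearing waiter->task, so the explicit
wake_q_add()/put_task_struct() pair collapses into one call. A condensed
sketch of that pairing after this patch (abbreviated; the surrounding
wakeup loop is elided):

        tsk = waiter->task;
        get_task_struct(tsk);
        /* ... list removal elided ... */
        smp_store_release(&waiter->task, NULL);  /* reader may now exit */
        wake_q_add_safe(wake_q, tsk);   /* hands the ref to the wake_q */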
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -396,6 +396,30 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+        struct wake_q_node *node = &task->wake_q;
+
+        /*
+         * Atomically grab the task, if ->wake_q is !nil already it means
+         * its already queued (either by us or someone else) and will get the
+         * wakeup due to that.
+         *
+         * In order to ensure that a pending wakeup will observe our pending
+         * state, even in the failed case, an explicit smp_mb() must be used.
+         */
+        smp_mb__before_atomic();
+        if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
+                return false;
+
+        /*
+         * The head is context local, there can be no concurrency.
+         */
+        *head->lastp = node;
+        head->lastp = &node->next;
+        return true;
+}
+
 /**
  * wake_q_add() - queue a wakeup for 'later' waking.
  * @head: the wake_q_head to add @task to
@@ -410,27 +434,31 @@ static bool set_nr_if_polling(struct task_struct *p)
  */
 void wake_q_add(struct wake_q_head *head, struct task_struct *task)
 {
-        struct wake_q_node *node = &task->wake_q;
-
-        /*
-         * Atomically grab the task, if ->wake_q is !nil already it means
-         * its already queued (either by us or someone else) and will get the
-         * wakeup due to that.
-         *
-         * In order to ensure that a pending wakeup will observe our pending
-         * state, even in the failed case, an explicit smp_mb() must be used.
-         */
-        smp_mb__before_atomic();
-        if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
-                return;
-
-        get_task_struct(task);
-
-        /*
-         * The head is context local, there can be no concurrency.
-         */
-        *head->lastp = node;
-        head->lastp = &node->next;
+        if (__wake_q_add(head, task))
+                get_task_struct(task);
+}
+
+/**
+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ *
+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
+ * that already hold reference to @task can call the 'safe' version and trust
+ * wake_q to do the right thing depending whether or not the @task is already
+ * queued for wakeup.
+ */
+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+{
+        if (!__wake_q_add(head, task))
+                put_task_struct(task);
 }
 
 void wake_up_q(struct wake_q_head *head)
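Putting it together, both entry points now funnel through __wake_q_add()
and differ only in how the task reference is settled on each path. A
hedged usage sketch of the resulting API (the caller context and task
variables are hypothetical; DEFINE_WAKE_Q() and wake_up_q() are the
existing wake_q interfaces):

        DEFINE_WAKE_Q(wake_q);

        /* Caller without its own reference: wake_q_add() takes one on
         * success, or relies on the pending wakeup if already queued. */
        wake_q_add(&wake_q, task1);

        /* Caller already holding a reference: wake_q_add_safe() hands
         * it to the wake_q, or drops it if @task2 is already queued. */
        get_task_struct(task2);
        wake_q_add_safe(&wake_q, task2);

        /* After releasing the relevant locks: */
        wake_up_q(&wake_q);   /* wakes each queued task and drops its ref */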