task_work: teach task_work_add() to do signal_wake_up()
So that the target task will exit the wait_event_interruptible-like loop and call task_work_run() as soon as possible.

The patch turns the "bool notify" argument into a 0 / TWA_RESUME / TWA_SIGNAL enum; the new TWA_SIGNAL flag implies signal_wake_up(). However, this needs to avoid a race with recalc_sigpending(), so the patch also adds the new JOBCTL_TASK_WORK bit, included in JOBCTL_PENDING_MASK.

TODO: once this patch is merged we need to change all current users of task_work_add(notify = true) to use TWA_RESUME.

Cc: stable@vger.kernel.org # v5.7
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit e91b481623
parent d60b5fbc1c
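Before the diff, a minimal sketch of how a caller would use the new interface. The helper queue_work_on_task() and the callback my_func() are illustrative names, not part of the patch; only task_work_add(), init_task_work() and the TWA_* values come from the change below.

#include <linux/sched.h>
#include <linux/task_work.h>

static void my_func(struct callback_head *cb)
{
        /* Runs in the context of the task the work was queued to. */
}

static int queue_work_on_task(struct task_struct *task, struct callback_head *cb)
{
        init_task_work(cb, my_func);

        /*
         * TWA_SIGNAL also sets JOBCTL_TASK_WORK and calls signal_wake_up(),
         * so a task blocked in a wait_event_interruptible()-like loop sees
         * TIF_SIGPENDING, breaks out, and runs the work from get_signal().
         * TWA_RESUME (the old "notify = true") only sets TIF_NOTIFY_RESUME,
         * which is handled on the way back to user mode.
         */
        return task_work_add(task, cb, TWA_SIGNAL);     /* 0 or -ESRCH */
}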
include/linux/sched/jobctl.h

@@ -19,6 +19,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING_BIT     21      /* switching to TRACED */
 #define JOBCTL_LISTENING_BIT    22      /* ptracer is listening for events */
 #define JOBCTL_TRAP_FREEZE_BIT  23      /* trap for cgroup freezer */
+#define JOBCTL_TASK_WORK_BIT    24      /* set by TWA_SIGNAL */
 
 #define JOBCTL_STOP_DEQUEUED    (1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING     (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,9 +29,10 @@ struct task_struct;
 #define JOBCTL_TRAPPING         (1UL << JOBCTL_TRAPPING_BIT)
 #define JOBCTL_LISTENING        (1UL << JOBCTL_LISTENING_BIT)
 #define JOBCTL_TRAP_FREEZE      (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_TASK_WORK        (1UL << JOBCTL_TASK_WORK_BIT)
 
 #define JOBCTL_TRAP_MASK        (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
-#define JOBCTL_PENDING_MASK     (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+#define JOBCTL_PENDING_MASK     (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK)
 
 extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
 extern void task_clear_jobctl_trapping(struct task_struct *task);
include/linux/task_work.h

@@ -13,7 +13,10 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
        twork->func = func;
 }
 
-int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
+#define TWA_RESUME      1
+#define TWA_SIGNAL      2
+int task_work_add(struct task_struct *task, struct callback_head *twork, int);
+
 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
 void task_work_run(void);
 
kernel/signal.c

@@ -2529,9 +2529,6 @@ bool get_signal(struct ksignal *ksig)
        struct signal_struct *signal = current->signal;
        int signr;
 
-       if (unlikely(current->task_works))
-               task_work_run();
-
        if (unlikely(uprobe_deny_signal()))
                return false;
 
@@ -2544,6 +2541,13 @@ bool get_signal(struct ksignal *ksig)
 
 relock:
        spin_lock_irq(&sighand->siglock);
+       current->jobctl &= ~JOBCTL_TASK_WORK;
+       if (unlikely(current->task_works)) {
+               spin_unlock_irq(&sighand->siglock);
+               task_work_run();
+               goto relock;
+       }
+
        /*
         * Every stopped thread goes here after wakeup. Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
kernel/task_work.c

@@ -25,9 +25,10 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
  * 0 if succeeds or -ESRCH.
  */
 int
-task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
+task_work_add(struct task_struct *task, struct callback_head *work, int notify)
 {
        struct callback_head *head;
+       unsigned long flags;
 
        do {
                head = READ_ONCE(task->task_works);
@@ -36,8 +37,19 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
                work->next = head;
        } while (cmpxchg(&task->task_works, head, work) != head);
 
-       if (notify)
+       switch (notify) {
+       case TWA_RESUME:
                set_notify_resume(task);
+               break;
+       case TWA_SIGNAL:
+               if (lock_task_sighand(task, &flags)) {
+                       task->jobctl |= JOBCTL_TASK_WORK;
+                       signal_wake_up(task, 0);
+                       unlock_task_sighand(task, &flags);
+               }
+               break;
+       }
+
        return 0;
 }
 
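The TODO in the commit message refers to existing call sites. The conversion below is illustrative only, not a hunk from this patch; queue_my_work() is a hypothetical caller.

#include <linux/task_work.h>

/*
 * Illustrative only: how a current user of task_work_add(..., true)
 * would be converted once TWA_RESUME exists.
 */
static int queue_my_work(struct task_struct *task, struct callback_head *work)
{
        /* Before this patch:  return task_work_add(task, work, true);     */
        /* After this patch:   "true" becomes TWA_RESUME; "false" stays 0. */
        return task_work_add(task, work, TWA_RESUME);
}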