kernel_optimize_test/kernel/task_work.c
Will Deacon 506458efaf locking/barriers: Convert users of lockless_dereference() to READ_ONCE()
READ_ONCE() now has an implicit smp_read_barrier_depends() call, so it
can be used instead of lockless_dereference() without any change in
semantics.
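
Illustrative before/after of the conversion (not a hunk from this patch):

    /* dependency ordering needed an explicit helper */
    work = lockless_dereference(task->task_works);

becomes:

    /* READ_ONCE() now implies smp_read_barrier_depends() */
    work = READ_ONCE(task->task_works);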

Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1508840570-22169-4-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-10-24 13:17:33 +02:00

#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */
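
/*
 * For reference, the list node type used throughout this file, as defined
 * in <linux/types.h> (alignment attribute omitted), plus the callback type
 * from <linux/task_work.h>:
 *
 *	struct callback_head {
 *		struct callback_head *next;
 *		void (*func)(struct callback_head *head);
 *	};
 *
 *	typedef void (*task_work_func_t)(struct callback_head *);
 *
 * task->task_works is a lock-free, LIFO singly-linked list of these nodes;
 * work_exited is a sentinel stored there once the task can no longer run
 * any works.
 */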
/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like a signal handler that runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * Note: there is no ordering guarantee on works queued here.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = READ_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}
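
/*
 * Example usage (a minimal sketch, not part of the upstream file; the names
 * my_cleanup() and queue_cleanup() are hypothetical): queueing a
 * self-owning work item against a task.
 *
 *	static void my_cleanup(struct callback_head *cb)
 *	{
 *		kfree(cb);
 *	}
 *
 *	static int queue_cleanup(struct task_struct *task)
 *	{
 *		struct callback_head *cb = kmalloc(sizeof(*cb), GFP_KERNEL);
 *
 *		if (!cb)
 *			return -ENOMEM;
 *		init_task_work(cb, my_cleanup);
 *		if (task_work_add(task, cb, true)) {
 *			kfree(cb);
 *			return -ESRCH;
 *		}
 *		return 0;
 *	}
 *
 * my_cleanup() runs in the target task's own context, so freeing the node
 * there is safe. Passing true for @notify makes task_work_add() call
 * set_notify_resume(), so the task runs the work on its next return to
 * user mode. Note that the cmpxchg() loop above is a lock-free stack push:
 * it retries until the new node is atomically spliced in at the list head.
 */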
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task->task_works))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, we will find it again. Or
	 * we raced with task_work_run(), *pprev == NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = READ_ONCE(*pprev))) {
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
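
/*
 * Example (sketch, reusing the hypothetical my_cleanup() above): reclaim a
 * work item that has not run yet.
 *
 *	struct callback_head *cb = task_work_cancel(task, my_cleanup);
 *
 *	if (cb)
 *		kfree(cb);
 *
 * A non-NULL return means the node was dequeued before task_work_run()
 * could reach it, so ownership passes back to the caller.
 */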
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when it
 * exits. In the latter case task_work_add() can no longer add new
 * work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		do {
			work = READ_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);
		raw_spin_unlock_irq(&task->pi_lock);

		if (!work)
			break;

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
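
/*
 * For context, a simplified sketch of how the core kernel reaches
 * task_work_run() on the way back to user mode (based on
 * tracehook_notify_resume() from <linux/tracehook.h> of this era):
 *
 *	static inline void tracehook_notify_resume(struct pt_regs *regs)
 *	{
 *		smp_mb__after_atomic();
 *		if (unlikely(current->task_works))
 *			task_work_run();
 *	}
 *
 * The exit path reaches it too: do_exit() calls exit_task_work(), which
 * calls task_work_run() with PF_EXITING set, installing the work_exited
 * sentinel so that later task_work_add() calls fail with -ESRCH.
 */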