kthread_worker: split code for canceling the delayed work timer
commit 34b3d5344719d14fd2185b2d9459b3abcb8cf9d8 upstream.

Patch series "kthread_worker: Fix race between kthread_mod_delayed_work()
and kthread_cancel_delayed_work_sync()".

This patchset fixes the race between kthread_mod_delayed_work() and
kthread_cancel_delayed_work_sync() including proper return value
handling.

This patch (of 2):

Simple code refactoring as a preparation step for fixing a race between
kthread_mod_delayed_work() and kthread_cancel_delayed_work_sync().

It does not modify the existing behavior.

Link: https://lkml.kernel.org/r/20210610133051.15337-2-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Cc: <jenhaochen@google.com>
Cc: Martin Liu <liumartin@google.com>
Cc: Minchan Kim <minchan@google.com>
Cc: Nathan Chancellor <nathan@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 02c303f3b9
commit bfe28af78a
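For context, below is a minimal, hypothetical sketch of how the kthread_worker delayed-work API touched by this series is typically driven from a caller's side. Only the kthread_*() calls are the existing <linux/kthread.h> interface; the example_* names and the module-style init/exit split are made up for illustration. The race the series fixes is between a kthread_mod_delayed_work() call like the one below and a concurrent kthread_cancel_delayed_work_sync().

#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/err.h>

static struct kthread_worker *example_worker;
static struct kthread_delayed_work example_dwork;

/* Runs in the worker thread; re-arms itself one second later. */
static void example_poll(struct kthread_work *work)
{
	kthread_queue_delayed_work(example_worker, &example_dwork, HZ);
}

static int example_init(void)
{
	example_worker = kthread_create_worker(0, "example");
	if (IS_ERR(example_worker))
		return PTR_ERR(example_worker);

	kthread_init_delayed_work(&example_dwork, example_poll);
	kthread_queue_delayed_work(example_worker, &example_dwork, HZ);

	/* Push the timeout out; this may race with a concurrent cancel. */
	kthread_mod_delayed_work(example_worker, &example_dwork, 10 * HZ);
	return 0;
}

static void example_exit(void)
{
	/* Must not return until the timer is gone and the callback is done. */
	kthread_cancel_delayed_work_sync(&example_dwork);
	kthread_destroy_worker(example_worker);
}

kthread_cancel_delayed_work_sync() has to guarantee that both the timer and the work callback are finished before it returns; the helper split out by this patch is the piece of that guarantee which deals with the timer.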
kernel/kthread.c

@@ -1043,6 +1043,33 @@ void kthread_flush_work(struct kthread_work *work)
 }
 EXPORT_SYMBOL_GPL(kthread_flush_work);
 
+/*
+ * Make sure that the timer is neither set nor running and could
+ * not manipulate the work list_head any longer.
+ *
+ * The function is called under worker->lock. The lock is temporary
+ * released but the timer can't be set again in the meantime.
+ */
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+					      unsigned long *flags)
+{
+	struct kthread_delayed_work *dwork =
+		container_of(work, struct kthread_delayed_work, work);
+	struct kthread_worker *worker = work->worker;
+
+	/*
+	 * del_timer_sync() must be called to make sure that the timer
+	 * callback is not running. The lock must be temporary released
+	 * to avoid a deadlock with the callback. In the meantime,
+	 * any queuing is blocked by setting the canceling counter.
+	 */
+	work->canceling++;
+	raw_spin_unlock_irqrestore(&worker->lock, *flags);
+	del_timer_sync(&dwork->timer);
+	raw_spin_lock_irqsave(&worker->lock, *flags);
+	work->canceling--;
+}
+
 /*
  * This function removes the work from the worker queue. Also it makes sure
  * that it won't get queued later via the delayed work's timer.
@@ -1057,23 +1084,8 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
 			      unsigned long *flags)
 {
 	/* Try to cancel the timer if exists. */
-	if (is_dwork) {
-		struct kthread_delayed_work *dwork =
-			container_of(work, struct kthread_delayed_work, work);
-		struct kthread_worker *worker = work->worker;
-
-		/*
-		 * del_timer_sync() must be called to make sure that the timer
-		 * callback is not running. The lock must be temporary released
-		 * to avoid a deadlock with the callback. In the meantime,
-		 * any queuing is blocked by setting the canceling counter.
-		 */
-		work->canceling++;
-		raw_spin_unlock_irqrestore(&worker->lock, *flags);
-		del_timer_sync(&dwork->timer);
-		raw_spin_lock_irqsave(&worker->lock, *flags);
-		work->canceling--;
-	}
+	if (is_dwork)
+		kthread_cancel_delayed_work_timer(work, flags);
 
 	/*
 	 * Try to remove the work from a worker list. It might either
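A note on the locking scheme in the split-out helper: its comment says the lock must be temporarily released to avoid a deadlock with the timer callback, and that any queuing is blocked by the canceling counter in the meantime. The sketch below is illustrative only, abridged rather than verbatim kernel source, and the illustrative_* names are made up; it shows why del_timer_sync() cannot run under worker->lock, and roughly how the canceling counter keeps the work from being re-queued while the lock is dropped.

#include <linux/kthread.h>
#include <linux/timer.h>
#include <linux/lockdep.h>

/*
 * The delayed work's timer fires kthread_delayed_work_timer_fn(), which
 * itself takes worker->lock to move the work onto the run list.  Waiting
 * for that callback with del_timer_sync() while still holding worker->lock
 * would therefore deadlock.
 */
static void illustrative_timer_fn(struct timer_list *t)
{
	struct kthread_delayed_work *dwork = from_timer(dwork, t, timer);
	struct kthread_worker *worker = dwork->work.worker;
	unsigned long flags;

	raw_spin_lock_irqsave(&worker->lock, flags);	/* same lock as the canceler */
	/* ... move dwork->work onto worker->work_list ... */
	raw_spin_unlock_irqrestore(&worker->lock, flags);
}

/*
 * While the canceler has dropped the lock, the queueing paths refuse to
 * re-arm the work because its canceling counter is non-zero, roughly:
 */
static inline bool illustrative_queuing_blocked(struct kthread_worker *worker,
						struct kthread_work *work)
{
	lockdep_assert_held(&worker->lock);

	return !list_empty(&work->node) || work->canceling;
}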