workqueue: remove @wakeup from worker_set_flags()
worker_set_flags() has only two callers, each specifying %true and
%false for @wakeup.  Let's push the wake up to the caller and remove
@wakeup from worker_set_flags().  The caller can use the following
instead if wakeup is necessary:

	worker_set_flags();
	if (need_more_worker(pool))
		wake_up_worker(pool);

This makes the code simpler.  This patch doesn't introduce behavior
changes.

tj: Updated description and comments.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
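Condensed for illustration, the transformation at the call sites looks
like this (a sketch distilled from the hunks below, not a complete
listing; worker, pool, and the WORKER_* flags come from the surrounding
kernel/workqueue.c code):

	/* Before: worker_set_flags() woke an idle worker itself when
	 * asked to via @wakeup. */
	worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
	worker_set_flags(worker, WORKER_PREP, false);

	/* After: worker_set_flags() only updates flags and nr_running;
	 * the caller that needs a wakeup does it explicitly. */
	worker_set_flags(worker, WORKER_CPU_INTENSIVE);
	if (need_more_worker(pool))
		wake_up_worker(pool);

	worker_set_flags(worker, WORKER_PREP);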
parent a489a03eca
commit 228f1d0018
@@ -867,35 +867,22 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
- * @wakeup: wakeup an idle worker if necessary
  *
- * Set @flags in @worker->flags and adjust nr_running accordingly.  If
- * nr_running becomes zero and @wakeup is %true, an idle worker is
- * woken up.
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock)
  */
-static inline void worker_set_flags(struct worker *worker, unsigned int flags,
-				    bool wakeup)
+static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
 	struct worker_pool *pool = worker->pool;
 
 	WARN_ON_ONCE(worker->task != current);
 
-	/*
-	 * If transitioning into NOT_RUNNING, adjust nr_running and
-	 * wake up an idle worker as necessary if requested by
-	 * @wakeup.
-	 */
+	/* If transitioning into NOT_RUNNING, adjust nr_running. */
 	if ((flags & WORKER_NOT_RUNNING) &&
 	    !(worker->flags & WORKER_NOT_RUNNING)) {
-		if (wakeup) {
-			if (atomic_dec_and_test(&pool->nr_running) &&
-			    !list_empty(&pool->worklist))
-				wake_up_worker(pool);
-		} else
-			atomic_dec(&pool->nr_running);
+		atomic_dec(&pool->nr_running);
 	}
 
 	worker->flags |= flags;
@@ -2041,18 +2028,20 @@ __acquires(&pool->lock)
 	list_del_init(&work->entry);
 
 	/*
-	 * CPU intensive works don't participate in concurrency
-	 * management.  They're the scheduler's responsibility.
+	 * CPU intensive works don't participate in concurrency management.
+	 * They're the scheduler's responsibility.  This takes @worker out
+	 * of concurrency management and the next code block will chain
+	 * execution of the pending work items.
 	 */
 	if (unlikely(cpu_intensive))
-		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/*
 	 * Wake up another worker if necessary.  The condition is always
 	 * false for normal per-cpu workers since nr_running would always
 	 * be >= 1 at this point.  This is used to chain execution of the
 	 * pending work items for WORKER_NOT_RUNNING workers such as the
-	 * UNBOUND ones.
+	 * UNBOUND and CPU_INTENSIVE ones.
 	 */
 	if (need_more_worker(pool))
 		wake_up_worker(pool);
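The explicit need_more_worker() check above is what makes dropping the
open-coded wakeup from worker_set_flags() safe: the removed path woke a
worker when nr_running dropped to zero while the worklist was non-empty,
and need_more_worker() tests exactly those two conditions.  For
reference, a sketch of the helpers as they read in kernel/workqueue.c of
this period (paraphrased, not part of this patch):

	/* Paraphrased from kernel/workqueue.c of this era; not part of
	 * this patch.  True when no worker of the pool is counted as
	 * running. */
	static bool __need_more_worker(struct worker_pool *pool)
	{
		return !atomic_read(&pool->nr_running);
	}

	/* True when work is pending and nobody is running it. */
	static bool need_more_worker(struct worker_pool *pool)
	{
		return !list_empty(&pool->worklist) && __need_more_worker(pool);
	}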
@@ -2210,7 +2199,7 @@ static int worker_thread(void *__worker)
 		}
 	} while (keep_working(pool));
 
-	worker_set_flags(worker, WORKER_PREP, false);
+	worker_set_flags(worker, WORKER_PREP);
 sleep:
 	/*
 	 * pool->lock is held and there's no work to process and no need to