diff options
| author | Se Wang (Patrick) Oh <sewango@codeaurora.org> | 2015-08-19 14:44:21 -0700 |
|---|---|---|
| committer | Moyster <oysterized@gmail.com> | 2017-12-05 18:05:29 +0100 |
| commit | 9e10bbcea8e5b5953640641dce2b7db0f1c3049b (patch) | |
| tree | 5f18ddeb87b210eda52eb2d02023d11e340adaa0 | |
| parent | 48e21688ff9068cd0148ee2ed78d0df38b0c3b7d (diff) | |
workqueue: Fix workqueue stall issue after cpu down failure
When the hotplug notifier call chain with CPU_DOWN_PREPARE
is broken before reaching workqueue_cpu_down_callback(),
rebind_workers() adds the WORKER_REBOUND flag to running workers.
Hence, the nr_running count of the pool is not increased when the
scheduler wakes up the worker. The fix is to skip adding the
WORKER_REBOUND flag when the worker doesn't have the WORKER_UNBOUND
flag in the CPU_DOWN_FAILED path.
Change-Id: I2528e9154f4913d9ec14b63adbcbcd1eaa8a8452
Signed-off-by: Se Wang (Patrick) Oh <sewango@codeaurora.org>
Signed-off-by: franciscofranco <franciscofranco.1990@gmail.com>
| -rw-r--r-- | kernel/workqueue.c | 24 |
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 45d890cb7..7e82480d9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4739,10 +4739,13 @@ static void wq_unbind_fn(struct work_struct *work)
 /**
  * rebind_workers - rebind all workers of a pool to the associated CPU
  * @pool: pool of interest
+ * @force: if it is true, replace WORKER_UNBOUND with WORKER_REBOUND
+ * irrespective of flags of workers. Otherwise, replace the flags only
+ * when workers have WORKER_UNBOUND flag.
  *
  * @pool->cpu is coming online. Rebind all workers to the CPU.
  */
-static void rebind_workers(struct worker_pool *pool)
+static void rebind_workers(struct worker_pool *pool, bool force)
 {
 	struct worker *worker;
 	int wi;
@@ -4792,10 +4795,12 @@ static void rebind_workers(struct worker_pool *pool)
 		 * fail incorrectly leading to premature concurrency
 		 * management operations.
 		 */
-		WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
-		worker_flags |= WORKER_REBOUND;
-		worker_flags &= ~WORKER_UNBOUND;
-		ACCESS_ONCE(worker->flags) = worker_flags;
+		if (force || (worker_flags & WORKER_UNBOUND)) {
+			WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+			worker_flags |= WORKER_REBOUND;
+			worker_flags &= ~WORKER_UNBOUND;
+			ACCESS_ONCE(worker->flags) = worker_flags;
+		}
 	}
 
 	spin_unlock_irq(&pool->lock);
@@ -4864,11 +4869,12 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 	for_each_pool(pool, pi) {
 		mutex_lock(&pool->manager_mutex);
 
-		if (pool->cpu == cpu) {
-			rebind_workers(pool);
-		} else if (pool->cpu < 0) {
+		if (pool->cpu == cpu)
+			rebind_workers(pool,
+				(action & ~CPU_TASKS_FROZEN)
+				!= CPU_DOWN_FAILED);
+		else if (pool->cpu < 0)
 			restore_unbound_workers_cpumask(pool, cpu);
-		}
 
 		mutex_unlock(&pool->manager_mutex);
 	}
