From 9e10bbcea8e5b5953640641dce2b7db0f1c3049b Mon Sep 17 00:00:00 2001 From: "Se Wang (Patrick) Oh" Date: Wed, 19 Aug 2015 14:44:21 -0700 Subject: workqueue: Fix workqueue stall issue after cpu down failure When the hotplug notifier call chain with CPU_DOWN_PREPARE is broken before reaching workqueue_cpu_down_callback(), rebind_workers() adds the WORKER_REBOUND flag to running workers. Hence, the nr_running of the pool is not increased when the scheduler wakes up the worker. The fix is to skip adding the WORKER_REBOUND flag when the worker doesn't have the WORKER_UNBOUND flag in the CPU_DOWN_FAILED path. Change-Id: I2528e9154f4913d9ec14b63adbcbcd1eaa8a8452 Signed-off-by: Se Wang (Patrick) Oh Signed-off-by: franciscofranco --- kernel/workqueue.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'kernel/workqueue.c') diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 45d890cb7..7e82480d9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4739,10 +4739,13 @@ static void wq_unbind_fn(struct work_struct *work) /** * rebind_workers - rebind all workers of a pool to the associated CPU * @pool: pool of interest + * @force: if it is true, replace WORKER_UNBOUND with WORKER_REBOUND + * irrespective of flags of workers. Otherwise, replace the flags only + * when workers have WORKER_UNBOUND flag. * * @pool->cpu is coming online. Rebind all workers to the CPU. */ -static void rebind_workers(struct worker_pool *pool) +static void rebind_workers(struct worker_pool *pool, bool force) { struct worker *worker; int wi; @@ -4792,10 +4795,12 @@ static void rebind_workers(struct worker_pool *pool) * fail incorrectly leading to premature concurrency * management operations. 
*/ - WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); - worker_flags |= WORKER_REBOUND; - worker_flags &= ~WORKER_UNBOUND; - ACCESS_ONCE(worker->flags) = worker_flags; + if (force || (worker_flags & WORKER_UNBOUND)) { + WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND)); + worker_flags |= WORKER_REBOUND; + worker_flags &= ~WORKER_UNBOUND; + ACCESS_ONCE(worker->flags) = worker_flags; + } } spin_unlock_irq(&pool->lock); @@ -4864,11 +4869,12 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb, for_each_pool(pool, pi) { mutex_lock(&pool->manager_mutex); - if (pool->cpu == cpu) { - rebind_workers(pool); - } else if (pool->cpu < 0) { + if (pool->cpu == cpu) + rebind_workers(pool, + (action & ~CPU_TASKS_FROZEN) + != CPU_DOWN_FAILED); + else if (pool->cpu < 0) restore_unbound_workers_cpumask(pool, cpu); - } mutex_unlock(&pool->manager_mutex); } -- cgit v1.2.3