about summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  24
1 files changed, 15 insertions, 9 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 45d890cb7..7e82480d9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4739,10 +4739,13 @@ static void wq_unbind_fn(struct work_struct *work)
/**
* rebind_workers - rebind all workers of a pool to the associated CPU
* @pool: pool of interest
+ * @force: if it is true, replace WORKER_UNBOUND with WORKER_REBOUND
+ * irrespective of flags of workers. Otherwise, replace the flags only
+ * when workers have WORKER_UNBOUND flag.
*
* @pool->cpu is coming online. Rebind all workers to the CPU.
*/
-static void rebind_workers(struct worker_pool *pool)
+static void rebind_workers(struct worker_pool *pool, bool force)
{
struct worker *worker;
int wi;
@@ -4792,10 +4795,12 @@ static void rebind_workers(struct worker_pool *pool)
* fail incorrectly leading to premature concurrency
* management operations.
*/
- WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
- worker_flags |= WORKER_REBOUND;
- worker_flags &= ~WORKER_UNBOUND;
- ACCESS_ONCE(worker->flags) = worker_flags;
+ if (force || (worker_flags & WORKER_UNBOUND)) {
+ WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
+ worker_flags |= WORKER_REBOUND;
+ worker_flags &= ~WORKER_UNBOUND;
+ ACCESS_ONCE(worker->flags) = worker_flags;
+ }
}
spin_unlock_irq(&pool->lock);
@@ -4864,11 +4869,12 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
for_each_pool(pool, pi) {
mutex_lock(&pool->manager_mutex);
- if (pool->cpu == cpu) {
- rebind_workers(pool);
- } else if (pool->cpu < 0) {
+ if (pool->cpu == cpu)
+ rebind_workers(pool,
+ (action & ~CPU_TASKS_FROZEN)
+ != CPU_DOWN_FAILED);
+ else if (pool->cpu < 0)
restore_unbound_workers_cpumask(pool, cpu);
- }
mutex_unlock(&pool->manager_mutex);
}