aboutsummaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorSteve Muckle <smuckle@codeaurora.org>2013-05-23 15:24:57 -0700
committerMoyster <oysterized@gmail.com>2016-09-28 15:15:21 +0200
commit9648bb0846a073b25d8341a6e12b0ba9193d8973 (patch)
treeebd6f3d6f8378e1c8ddfff86aef9512b9cc6a1ee /kernel
parent4c47f397c0636a7bc8202ce6fa274d37e4de2eb1 (diff)
sched: remove migration notification from RT class
Commit 88a7e37d265 (sched: provide per cpu-cgroup option to notify on migrations) added a notifier call when a task is moved to a different CPU. Unfortunately the two call sites in the RT sched class where this occurs happen with a runqueue lock held. This can result in a deadlock if the notifier call attempts to do something like wake up a task. Fortunately the benefit of 88a7e37d265 comes mainly from notifying on migration of non-RT tasks, so we can simply ignore the movements of RT tasks. CRs-Fixed: 491370 Change-Id: I8849d826bf1eeaf85a6f6ad872acb475247c5926 Signed-off-by: Steve Muckle <smuckle@codeaurora.org> Signed-off-by: W4TCH0UT <ateekujjawal@gmail.com> Conflicts: kernel/sched/rt.c
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/rt.c20
1 file changed, 1 insertion, 19 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c1b6096fc..692fb1e77 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2401,7 +2401,6 @@ static int push_rt_task(struct rq *rq)
struct task_struct *next_task;
struct rq *lowest_rq;
int ret = 0;
- bool moved = false;
if (!rq->rt.overloaded)
return 0;
@@ -2466,7 +2465,6 @@ retry:
deactivate_task(rq, next_task, 0);
set_task_cpu(next_task, lowest_rq->cpu);
- moved = true;
activate_task(lowest_rq, next_task, 0);
ret = 1;
@@ -2477,11 +2475,6 @@ retry:
out:
put_task_struct(next_task);
- if (moved && task_notify_on_migrate(next_task))
- atomic_notifier_call_chain(&migration_notifier_head,
- cpu_of(lowest_rq),
- (void *)cpu_of(rq));
-
return ret;
}
@@ -2621,11 +2614,9 @@ static int pull_rt_task(struct rq *this_rq)
int ret = 0;
#ifndef CONFIG_MT_RT_SCHED
int cpu;
- struct task_struct *p = NULL;
+ struct task_struct *p;
struct rq *src_rq;
#endif
- bool moved = false;
- int src_cpu = 0;
mt_sched_printf(sched_rt_info, "0. pull_rt_task %d %d ",
rt_overloaded(this_rq), this_rq->cpu);
@@ -2698,10 +2689,6 @@ static int pull_rt_task(struct rq *this_rq)
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
activate_task(this_rq, p, 0);
-
- moved = true;
- src_cpu = cpu_of(src_rq);
-
/*
* We continue with the search, just in
* case there's an even higher prio task
@@ -2714,11 +2701,6 @@ skip:
}
#endif
- if (moved && task_notify_on_migrate(p))
- atomic_notifier_call_chain(&migration_notifier_head,
- this_cpu,
- (void *)src_cpu);
-
return ret;
}