author     Peter Zijlstra <peterz@infradead.org>  2015-09-28 18:02:03 +0200
committer  Moyster <oysterized@gmail.com>         2019-05-02 18:30:05 +0200
commit     559fd8158b1f63102feece0dcc05dacb357bd0f6 (patch)
tree       a7caf11f7c93043748042027c4a1f0e05de5156d
parent     48db9eb1237317a087d36f5ecd6e7ac96ff23b81 (diff)
sched/core: Rework TASK_DEAD preemption exception
TASK_DEAD is special in that the final schedule call from do_exit() must be
done with preemption disabled. This means we end up scheduling with a
preempt_count() higher than usual (3 instead of the 'expected' 2).

Since future patches will want to rely on an invariant preempt_count() value
during schedule, fix this up.

Change-Id: Icd3a40be4171553bb2d71fd7a82b6fac55218a12
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
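Since later patches depend on this invariant, it may help to see where the counts in the message come from. The following is a minimal, self-contained userspace model, not kernel code: preempt_count is a plain global here, rq_lock() stands in for the runqueue lock acquisition that bumps the count inside __schedule(), and the fix-up line mirrors the preempt_enable_no_resched_notrace() call this patch adds. The real kernel accounting differs in detail, but the peak counts (2 on the ordinary path, 3 from do_exit() without the fix-up) match the ones cited above.

/*
 * Hypothetical model, not kernel code: every name below merely
 * mirrors its kernel counterpart.
 */
#include <assert.h>
#include <stdio.h>

static int preempt_count;	/* models the per-task preempt_count() */
static int task_dead;		/* stands in for prev->state == TASK_DEAD */

static void preempt_disable(void)		{ preempt_count++; }
static void preempt_enable_no_resched(void)	{ preempt_count--; }
static void rq_lock(void)			{ preempt_count++; } /* lock bumps the count */
static void rq_unlock(void)			{ preempt_count--; }

static void __schedule(void)
{
	/*
	 * The fix-up from this patch: drop do_exit()'s extra reference
	 * up front, so the rest of __schedule() always sees the same
	 * count regardless of who called it.
	 */
	if (task_dead)
		preempt_enable_no_resched();

	rq_lock();
	assert(preempt_count == 2);	/* the invariant later patches rely on */
	rq_unlock();
}

static void schedule(void)
{
	preempt_disable();
	__schedule();
	preempt_enable_no_resched();
}

int main(void)
{
	schedule();		/* ordinary path: count peaks at 2 */

	task_dead = 1;
	preempt_disable();	/* do_exit() schedules with preemption off... */
	schedule();		/* ...which would peak at 3 without the fix-up */
	printf("balanced again: preempt_count == %d\n", preempt_count);
	return 0;
}

(In the kernel, a TASK_DEAD task never returns from that final schedule() call; the model returns only so both paths can be checked in one run.)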
-rw-r--r--  kernel/sched/core.c  18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 65c3ff587..83259a8ad 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3250,12 +3250,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
  */
 static inline void schedule_debug(struct task_struct *prev)
 {
-	/*
-	 * Test if we are atomic. Since do_exit() needs to call into
-	 * schedule() atomically, we ignore that path for now.
-	 * Otherwise, whine if we are scheduling when we should not be.
-	 */
-	if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
+	if (unlikely(in_atomic_preempt_off()))
 		__schedule_bug(prev);
 	rcu_sleep_check();
 
@@ -3352,6 +3347,17 @@ need_resched:
 	rcu_note_context_switch(cpu);
 	prev = rq->curr;
 
+	/*
+	 * do_exit() calls schedule() with preemption disabled as an exception;
+	 * however we must fix that up, otherwise the next task will see an
+	 * inconsistent (higher) preempt count.
+	 *
+	 * It also avoids the below schedule_debug() test from complaining
+	 * about this.
+	 */
+	if (unlikely(prev->state == TASK_DEAD))
+		preempt_enable_no_resched_notrace();
+
 	schedule_debug(prev);
 
 	if (sched_feat(HRTICK))