author     Nicolas Pitre <nicolas.pitre@linaro.org>  2014-01-26 23:42:01 -0500
committer  Moyster <oysterized@gmail.com>            2017-12-05 18:06:10 +0100
commit     5084d13c2a2919bbcdafb5868241673ce83459fc (patch)
tree       005771e8b155e83521035fb6888f4fbbc059e61d /kernel/cpu
parent     b92b53b5a51b7402c36942cbfbde58701d87cdb7 (diff)
sched/idle: Move cpu/idle.c to sched/idle.c
Integration of cpuidle with the scheduler requires that the idle loop be
closely integrated with the scheduler proper. Moving cpu/idle.c into the
sched directory will allow for a smoother integration, and eliminate a
subdirectory which contained only one source file.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/alpine.LFD.2.11.1401301102210.1652@knanqh.ubzr
Signed-off-by: Ingo Molnar <mingo@kernel.org>
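The diffstat below is limited to 'kernel/cpu', so only the removal half of
the move is visible on this page. The receiving side implied by the commit
message would amount to roughly the following build wiring; this is a
sketch inferred from the message, not part of the diff shown here:

    # kernel/sched/Makefile (sketch; not shown in this diffstat):
    # idle.o is now built from the sched directory.
    obj-y += idle.o

    # kernel/Makefile would correspondingly stop descending into cpu/;
    # the exact line removed there is not shown on this page.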
Diffstat (limited to 'kernel/cpu')
-rw-r--r--  kernel/cpu/Makefile |   1 -
-rw-r--r--  kernel/cpu/idle.c   | 174 -
2 files changed, 0 insertions(+), 175 deletions(-)
diff --git a/kernel/cpu/Makefile b/kernel/cpu/Makefile
deleted file mode 100644
index 59ab052ef..000000000
--- a/kernel/cpu/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-y = idle.o
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
deleted file mode 100644
index c378dc659..000000000
--- a/kernel/cpu/idle.c
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Generic entry point for the idle threads
- */
-#include <linux/sched.h>
-#include <linux/cpu.h>
-#include <linux/tick.h>
-#include <linux/mm.h>
-#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
-#include <mtlbprof/mtlbprof.h>
-#endif
-#include <linux/stackprotector.h>
-#include <linux/percpu.h>
-
-#include <asm/tlb.h>
-
-#include <trace/events/power.h>
-
-static int __read_mostly cpu_idle_force_poll;
-
-void cpu_idle_poll_ctrl(bool enable)
-{
-        if (enable) {
-                cpu_idle_force_poll++;
-        } else {
-                cpu_idle_force_poll--;
-                WARN_ON_ONCE(cpu_idle_force_poll < 0);
-        }
-
-        /* Make sure poll mode is entered on all CPUs after the flag is set */
-        mb();
-}
-
-static DEFINE_PER_CPU(int, idle_force_poll);
-
-void per_cpu_idle_poll_ctrl(int cpu, bool enable)
-{
-        if (enable) {
-                per_cpu(idle_force_poll, cpu)++;
-        } else {
-                per_cpu(idle_force_poll, cpu)--;
-                WARN_ON_ONCE(per_cpu(idle_force_poll, cpu) < 0);
-        }
-
-        /*
-         * Make sure poll mode is entered on the relevant CPU after the flag is
-         * set
-         */
-        mb();
-}
-
-#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
-static int __init cpu_idle_poll_setup(char *__unused)
-{
-        cpu_idle_force_poll = 1;
-        return 1;
-}
-__setup("nohlt", cpu_idle_poll_setup);
-
-static int __init cpu_idle_nopoll_setup(char *__unused)
-{
-        cpu_idle_force_poll = 0;
-        return 1;
-}
-__setup("hlt", cpu_idle_nopoll_setup);
-#endif
-
-static inline int cpu_idle_poll(void)
-{
-        rcu_idle_enter();
-        trace_cpu_idle_rcuidle(0, smp_processor_id());
-        local_irq_enable();
-        while (!tif_need_resched() && (cpu_idle_force_poll ||
-                       __get_cpu_var(idle_force_poll)))
-                cpu_relax();
-        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-        rcu_idle_exit();
-        return 1;
-}
-
-/* Weak implementations for optional arch specific functions */
-void __weak arch_cpu_idle_prepare(void) { }
-void __weak arch_cpu_idle_enter(void) { }
-void __weak arch_cpu_idle_exit(void) { }
-void __weak arch_cpu_idle_dead(void) { }
-void __weak arch_cpu_idle(void)
-{
-        cpu_idle_force_poll = 1;
-        local_irq_enable();
-}
-
-/*
- * Generic idle loop implementation
- */
-static void cpu_idle_loop(void)
-{
-#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
-        mt_lbprof_update_state(smp_processor_id(), MT_LBPROF_NO_TASK_STATE);
-#endif
-
-        while (1) {
-                tick_nohz_idle_enter();
-
-                while (!need_resched()) {
-                        check_pgt_cache();
-                        rmb();
-
-                        if (cpu_is_offline(smp_processor_id()))
-                                arch_cpu_idle_dead();
-
-#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
-                        mt_lbprof_update_state(smp_processor_id(), MT_LBPROF_IDLE_STATE);
-#endif
-
-                        local_irq_disable();
-                        arch_cpu_idle_enter();
-
-                        /*
-                         * In poll mode we reenable interrupts and spin.
-                         *
-                         * Also if we detected in the wakeup from idle
-                         * path that the tick broadcast device expired
-                         * for us, we don't want to go deep idle as we
-                         * know that the IPI is going to arrive right
-                         * away
-                         */
-                        if (cpu_idle_force_poll ||
-                            tick_check_broadcast_expired() ||
-                            __get_cpu_var(idle_force_poll)) {
-                                cpu_idle_poll();
-                        } else {
-                                if (!current_clr_polling_and_test()) {
-                                        stop_critical_timings();
-                                        rcu_idle_enter();
-                                        arch_cpu_idle();
-                                        WARN_ON_ONCE(irqs_disabled());
-                                        rcu_idle_exit();
-                                        start_critical_timings();
-#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
-                                        mt_lbprof_update_state(smp_processor_id(), MT_LBPROF_NO_TASK_STATE);
-#endif
-                                } else {
-                                        local_irq_enable();
-                                }
-                                __current_set_polling();
-                        }
-                        arch_cpu_idle_exit();
-                }
-                tick_nohz_idle_exit();
-                schedule_preempt_disabled();
-        }
-}
-
-void cpu_startup_entry(enum cpuhp_state state)
-{
-        /*
-         * This #ifdef needs to die, but it's too late in the cycle to
-         * make this generic (arm and sh have never invoked the canary
-         * init for the non boot cpus!). Will be fixed in 3.11
-         */
-#ifdef CONFIG_X86
-        /*
-         * If we're the non-boot CPU, nothing set the stack canary up
-         * for us. The boot CPU already has it initialized but no harm
-         * in doing it again. This is a good place for updating it, as
-         * we wont ever return from this function (so the invalid
-         * canaries already on the stack wont ever trigger).
-         */
-        boot_init_stack_canary();
-#endif
-        __current_set_polling();
-        arch_cpu_idle_prepare();
-        per_cpu(idle_force_poll, smp_processor_id()) = 0;
-        cpu_idle_loop();
-}
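
A note on the poll-control entry points deleted above: callers elsewhere in
the kernel use cpu_idle_poll_ctrl() and per_cpu_idle_poll_ctrl() to keep
idle CPUs spinning in cpu_idle_poll() instead of entering a deeper idle
state, typically around latency-critical windows. A minimal sketch of such
a caller, assuming only the API above; the surrounding function and
do_critical_work() are hypothetical:

    /*
     * Hypothetical caller: force all idle CPUs into polling mode while
     * a latency-critical operation runs, then allow deep idle again.
     * Calls may nest; the counter in cpu_idle_poll_ctrl() handles that.
     */
    static void run_latency_critical(void)
    {
            cpu_idle_poll_ctrl(true);       /* idle CPUs spin in cpu_idle_poll() */
            do_critical_work();             /* hypothetical workload */
            cpu_idle_poll_ctrl(false);      /* deep idle permitted again */
    }

The same behaviour can be forced system-wide from boot with the 'nohlt'
kernel parameter handled by cpu_idle_poll_setup() above ('hlt' restores the
default).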
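
Similarly, the __weak arch_cpu_idle_*() stubs are the hook points an
architecture overrides; the generic fallback merely forces polling. As a
sketch of what a port might provide (not taken from this diff; wfi() stands
in for whatever low-power wait primitive the architecture actually has):

    /*
     * Hypothetical architecture override of the weak arch_cpu_idle()
     * above. It is entered with interrupts disabled and must return
     * with them enabled, since the generic loop warns via
     * WARN_ON_ONCE(irqs_disabled()) right after the call.
     */
    void arch_cpu_idle(void)
    {
            wfi();                  /* hypothetical wait-for-interrupt primitive */
            local_irq_enable();     /* honour the loop's IRQ contract */
    }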
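
Finally, cpu_startup_entry() is the common tail of every CPU's bring-up
path: the boot CPU reaches it at the end of rest_init(), and secondary CPUs
from their architecture's startup routine. A sketch of the secondary side,
with a hypothetical helper name; only cpu_startup_entry() and CPUHP_ONLINE
come from the code above:

    /*
     * Hypothetical secondary-CPU bring-up tail. arch_init_this_cpu()
     * is illustrative; cpu_startup_entry() never returns, it ends in
     * cpu_idle_loop().
     */
    static void secondary_start(void)
    {
            arch_init_this_cpu();            /* hypothetical per-CPU setup */
            local_irq_enable();              /* IRQs on before entering idle */
            cpu_startup_entry(CPUHP_ONLINE); /* never returns */
    }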