diff options
| author | Vikram Mulukutla <markivx@codeaurora.org> | 2014-10-13 21:29:33 -0700 |
|---|---|---|
| committer | Mister Oyster <oysterized@gmail.com> | 2017-04-13 12:32:11 +0200 |
| commit | 662287558f37ed46e0612a34d5e58b89f6af3a48 (patch) | |
| tree | caa1c648885d6e50b12eff95be9969664d51005f /kernel | |
| parent | 248d7100ffa9197cd7de996a31cb2edc637ca05f (diff) | |
idle: Implement a per-cpu idle-polling mode
cpu_idle_poll_ctrl provides a way of switching the
idle thread to use cpu_idle_poll instead of the arch
specific low power mode callbacks (arch_cpu_idle).
cpu_idle_poll spins on a flag in a tight loop with
interrupts enabled.
In some cases it may be useful to enter the tight loop
polling mode only on a particular CPU. This allows
other CPUs to continue using the arch specific low
power mode callbacks. Provide an API that allows this.
Change-Id: I7c47c3590eb63345996a1c780faa79dbd1d9fdb4
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/cpu/idle.c | 27 |
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index 2e45a5852..c378dc659 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -9,6 +9,7 @@
 #include <mtlbprof/mtlbprof.h>
 #endif
 #include <linux/stackprotector.h>
+#include <linux/percpu.h>
 #include <asm/tlb.h>
@@ -29,6 +30,24 @@ void cpu_idle_poll_ctrl(bool enable)
 	mb();
 }
 
+static DEFINE_PER_CPU(int, idle_force_poll);
+
+void per_cpu_idle_poll_ctrl(int cpu, bool enable)
+{
+	if (enable) {
+		per_cpu(idle_force_poll, cpu)++;
+	} else {
+		per_cpu(idle_force_poll, cpu)--;
+		WARN_ON_ONCE(per_cpu(idle_force_poll, cpu) < 0);
+	}
+
+	/*
+	 * Make sure poll mode is entered on the relevant CPU after the flag is
+	 * set
+	 */
+	mb();
+}
+
 #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
 static int __init cpu_idle_poll_setup(char *__unused)
 {
@@ -50,7 +69,8 @@ static inline int cpu_idle_poll(void)
 	rcu_idle_enter();
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
-	while (!tif_need_resched() && cpu_idle_force_poll)
+	while (!tif_need_resched() && (cpu_idle_force_poll ||
+		__get_cpu_var(idle_force_poll)))
 		cpu_relax();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	rcu_idle_exit();
@@ -103,7 +123,9 @@ static void cpu_idle_loop(void)
 			 * know that the IPI is going to arrive right
 			 * away
 			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+			if (cpu_idle_force_poll ||
+				tick_check_broadcast_expired() ||
+				__get_cpu_var(idle_force_poll)) {
 				cpu_idle_poll();
 			} else {
 				if (!current_clr_polling_and_test()) {
@@ -147,5 +169,6 @@ void cpu_startup_entry(enum cpuhp_state state)
 #endif
 	__current_set_polling();
 	arch_cpu_idle_prepare();
+	per_cpu(idle_force_poll, smp_processor_id()) = 0;
 	cpu_idle_loop();
 }
