diff options
| author | Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | 2016-07-11 16:24:34 +0100 |
|---|---|---|
| committer | Moyster <oysterized@gmail.com> | 2016-11-07 13:46:49 +0100 |
| commit | 2887bc5181e3ec626e0e2ebebeab40b0e88285e3 (patch) | |
| tree | b3bebbcc65d146bdd148a399f10be2d913d07802 /arch/arm64/kernel | |
| parent | 7db3ae6a43aac5e1fb2feeee53ddc1123cec50ec (diff) | |
arm64: kernel: perf: add cpu hotplug notifier
When a CPU is taken offline, the contents of its PMU registers are lost
and need to be restored on power up, since the contents of most PMU
registers are UNKNOWN upon CPU reset. This patch implements a CPU hotplug
notifier and hooks the PMU reset call into the respective notifier
callback function.
Cc: Will Deacon <will.deacon@arm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Change-Id: I779158c4edb4e88f15d9bfebf6cf8af208a64259
Ticket: PORRIDGE-450
Diffstat (limited to 'arch/arm64/kernel')
| -rw-r--r-- | arch/arm64/kernel/perf_event.c | 135 |
1 file changed, 134 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cea1594ff..90d813c29 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -21,6 +21,7 @@
 #define pr_fmt(fmt) "hw perfevents: " fmt
 
 #include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
@@ -1225,6 +1226,117 @@ cpu_pmu_reset(void)
 arch_initcall(cpu_pmu_reset);
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+	if (!cpumask_test_cpu(cpu, cpu_online_mask))
+		return NOTIFY_DONE;
+
+	/* Reset through the notifier's own pmu, not the cpu_pmu global */
+	if (pmu->reset)
+		pmu->reset(pmu);
+	else
+		return NOTIFY_DONE;
+	return NOTIFY_OK;
+}
+
+#ifdef CONFIG_CPU_PM
+static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
+{
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	struct perf_event *event;
+	int idx;
+
+	for (idx = 0; idx < armpmu->num_events; idx++) {
+		/*
+		 * If the counter is not used skip it, there is no
+		 * need of stopping/restarting it.
+		 */
+		if (!test_bit(idx, hw_events->used_mask))
+			continue;
+
+		event = hw_events->events[idx];
+
+		switch (cmd) {
+		case CPU_PM_ENTER:
+			/*
+			 * Stop and update the counter
+			 */
+			armpmu_stop(event, PERF_EF_UPDATE);
+			break;
+		case CPU_PM_EXIT:
+		case CPU_PM_ENTER_FAILED:
+			/* Restore and enable the counter */
+			armpmu_start(event, PERF_EF_RELOAD);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+			     void *v)
+{
+	struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
+	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+	if (!cpumask_test_cpu(smp_processor_id(), cpu_online_mask))
+		return NOTIFY_DONE;
+
+	/*
+	 * Always reset the PMU registers on power-up even if
+	 * there are no events running.
+	 */
+	if (cmd == CPU_PM_EXIT && armpmu->reset)
+		armpmu->reset(armpmu);
+
+	if (!enabled)
+		return NOTIFY_OK;
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		armpmu->stop();
+		cpu_pm_pmu_setup(armpmu, cmd);
+		break;
+	case CPU_PM_EXIT:
+		cpu_pm_pmu_setup(armpmu, cmd); /* fall through */
+	case CPU_PM_ENTER_FAILED:
+		armpmu->start();
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
+{
+	cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
+	return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
+}
+
+static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
+{
+	cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
+}
+#else
+static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
+static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+#endif
+
+/*
  * PMU platform driver and devicetree bindings.
  */
 static struct of_device_id armpmu_of_device_ids[] = {
@@ -1251,7 +1363,27 @@ static struct platform_driver armpmu_driver = {
 
 static int __init register_pmu_driver(void)
 {
-	return platform_driver_register(&armpmu_driver);
+	int err;
+
+	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+	if (err)
+		return err;
+
+	err = cpu_pm_pmu_register(cpu_pmu);
+	if (err)
+		goto err_cpu_pm;
+
+	err = platform_driver_register(&armpmu_driver);
+	if (err)
+		goto err_driver;
+	return 0;
+
+err_driver:
+	cpu_pm_pmu_unregister(cpu_pmu);
+err_cpu_pm:
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	return err;
 }
 device_initcall(register_pmu_driver);
@@ -1260,6 +1392,7 @@ static struct pmu_hw_events *armpmu_get_cpu_events(void)
 	return &__get_cpu_var(cpu_hw_events);
 }
 
+
 static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 {
 	int cpu;
