| author | Meizu OpenSource <patchwork@meizu.com> | 2016-08-15 10:19:42 +0800 |
|---|---|---|
| committer | Meizu OpenSource <patchwork@meizu.com> | 2016-08-15 10:19:42 +0800 |
| commit | d2e1446d81725c351dc73a03b397ce043fb18452 (patch) | |
| tree | 4dbc616b7f92aea39cd697a9084205ddb805e344 /kernel/time/sched_clock.c | |
first commit
Diffstat (limited to 'kernel/time/sched_clock.c')
| -rw-r--r-- | kernel/time/sched_clock.c | 241 |
1 file changed, 241 insertions, 0 deletions
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
new file mode 100644
index 000000000..6c46d6c0f
--- /dev/null
+++ b/kernel/time/sched_clock.c
@@ -0,0 +1,241 @@
+/*
+ * sched_clock.c: support for extending counters to full 64-bit ns counter
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/ktime.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/syscore_ops.h>
+#include <linux/hrtimer.h>
+#include <linux/sched_clock.h>
+#include <linux/seqlock.h>
+#include <linux/bitops.h>
+
+struct clock_data {
+	ktime_t wrap_kt;
+	u64 epoch_ns;
+	u64 epoch_cyc;
+	/*
+	 * Added to keep reader/writer access to the shared epoch data
+	 * (cd.epoch_ns/cd.epoch_cyc) in sync, same as arch/arm/sched_clock.c.
+	 */
+	u64 epoch_cyc_copy;
+	seqcount_t seq;
+	unsigned long rate;
+	u32 mult;
+	u32 shift;
+	bool suspended;
+};
+
+static struct hrtimer sched_clock_timer;
+static int irqtime = -1;
+
+core_param(irqtime, irqtime, int, 0400);
+
+static struct clock_data cd = {
+	.mult	= NSEC_PER_SEC / HZ,
+};
+
+static u64 __read_mostly sched_clock_mask;
+
+static u64 notrace jiffy_sched_clock_read(void)
+{
+	/*
+	 * We don't need to use get_jiffies_64 on 32-bit arches here
+	 * because we register with BITS_PER_LONG
+	 */
+	return (u64)(jiffies - INITIAL_JIFFIES);
+}
+
+static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+
+static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+{
+	return (cyc * mult) >> shift;
+}
+
+unsigned long long notrace sched_clock(void)
+{
+	u64 epoch_ns;
+	u64 epoch_cyc;
+	u64 cyc;
+	unsigned long seq;
+
+	if (cd.suspended)
+		return cd.epoch_ns;
+
+	/*
+	 * Use cd.epoch_cyc_copy instead of the read/write seqcount: the
+	 * seqcount cannot order this read path against the write path,
+	 * so a race on epoch_cyc/epoch_ns would be possible.
+	 */
+	do {
+		//seq = raw_read_seqcount_begin(&cd.seq);
+		//seq = read_seqcount_begin(&cd.seq);
+		epoch_cyc = cd.epoch_cyc;
+		smp_rmb();
+		epoch_ns = cd.epoch_ns;
+		smp_rmb();
+	} while (epoch_cyc != cd.epoch_cyc_copy);
+
+	cyc = read_sched_clock();
+	cyc = (cyc - epoch_cyc) & sched_clock_mask;
+	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
+}
+
+/*
+ * Atomically update the sched_clock epoch.
+ */
+static void notrace update_sched_clock(void)
+{
+	unsigned long flags;
+	u64 cyc;
+	u64 ns;
+
+	cyc = read_sched_clock();
+	ns = cd.epoch_ns +
+		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+			  cd.mult, cd.shift);
+
+	raw_local_irq_save(flags);
+	/*
+	 * Same reasoning as the comment in sched_clock().
+	 */
+	//raw_write_seqcount_begin(&cd.seq);
+	//write_seqcount_begin(&cd.seq);
+	cd.epoch_cyc_copy = cyc;
+	smp_wmb();
+	cd.epoch_ns = ns;
+	smp_wmb();
+	cd.epoch_cyc = cyc;
+	//raw_write_seqcount_end(&cd.seq);
+	//write_seqcount_end(&cd.seq);
+	raw_local_irq_restore(flags);
+}
+
+static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
+{
+	update_sched_clock();
+	hrtimer_forward_now(hrt, cd.wrap_kt);
+	return HRTIMER_RESTART;
+}
+
+void __init sched_clock_register(u64 (*read)(void), int bits,
+				 unsigned long rate)
+{
+	u64 res, wrap, new_mask, new_epoch, cyc, ns;
+	u32 new_mult, new_shift;
+	ktime_t new_wrap_kt;
+	unsigned long r;
+	char r_unit;
+
+	if (cd.rate > rate)
+		return;
+
+	WARN_ON(!irqs_disabled());
+
+	/* calculate the mult/shift to convert counter ticks to ns. */
+	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 0);
+
+	//new_mask = CLOCKSOURCE_MASK(bits);
+	/* calculate how many ns until we wrap */
+	//wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
+
+	/* open-coded fallback for kernel 3.10 */
+	new_mask = (1ULL << (bits)) - 1;
+	wrap = cyc_to_ns((1ULL << bits) - 1, new_mult, new_shift);
+
+	new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+
+	/* update epoch for new counter and update epoch_ns from old counter */
+	new_epoch = read();
+	cyc = read_sched_clock();
+	ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+				     cd.mult, cd.shift);
+
+	//raw_write_seqcount_begin(&cd.seq);
+	write_seqcount_begin(&cd.seq);
+	read_sched_clock = read;
+	sched_clock_mask = new_mask;
+	cd.rate = rate;
+	cd.wrap_kt = new_wrap_kt;
+	cd.mult = new_mult;
+	cd.shift = new_shift;
+	cd.epoch_cyc = new_epoch;
+	cd.epoch_ns = ns;
+	//raw_write_seqcount_end(&cd.seq);
+	write_seqcount_end(&cd.seq);
+
+	r = rate;
+	if (r >= 4000000) {
+		r /= 1000000;
+		r_unit = 'M';
+	} else if (r >= 1000) {
+		r /= 1000;
+		r_unit = 'k';
+	} else
+		r_unit = ' ';
+
+	/* calculate the ns resolution of this counter */
+	res = cyc_to_ns(1ULL, new_mult, new_shift);
+
+	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
+		bits, r, r_unit, res, wrap);
+
+	/* Enable IRQ time accounting if we have a fast enough sched_clock */
+	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
+		enable_sched_clock_irqtime();
+
+	pr_debug("Registered %pF as sched_clock source\n", read);
+}
+
+void __init sched_clock_postinit(void)
+{
+	/*
+	 * If no sched_clock function has been provided at that point,
+	 * make it the final one.
+	 */
+	if (read_sched_clock == jiffy_sched_clock_read)
+		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
+
+	update_sched_clock();
+
+	/*
+	 * Start the timer to keep sched_clock() properly updated and
+	 * set the initial epoch.
+ */
+	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	sched_clock_timer.function = sched_clock_poll;
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+}
+
+static int sched_clock_suspend(void)
+{
+	update_sched_clock();
+	hrtimer_cancel(&sched_clock_timer);
+	cd.suspended = true;
+	return 0;
+}
+
+static void sched_clock_resume(void)
+{
+	cd.epoch_cyc = read_sched_clock();
+	cd.epoch_cyc_copy = cd.epoch_cyc;
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
+	cd.suspended = false;
+}
+
+static struct syscore_ops sched_clock_ops = {
+	.suspend = sched_clock_suspend,
+	.resume = sched_clock_resume,
+};
+
+static int __init sched_clock_syscore_init(void)
+{
+	register_syscore_ops(&sched_clock_ops);
+	return 0;
+}
+device_initcall(sched_clock_syscore_init);
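The core of this patch is the `epoch_cyc_copy` handshake that stands in for the seqcount in `sched_clock()` and `update_sched_clock()`: the writer stores `epoch_cyc_copy` first and `epoch_cyc` last with write barriers in between, and the reader loads in the opposite order and retries until `epoch_cyc == epoch_cyc_copy`, so it can never return a torn `(epoch_cyc, epoch_ns)` pair. A minimal standalone sketch of the same ordering, using `__sync_synchronize()` as a portable stand-in for the kernel's `smp_wmb()`/`smp_rmb()`:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel barriers; a full fence is stronger than needed. */
#define smp_wmb() __sync_synchronize()
#define smp_rmb() __sync_synchronize()

static volatile uint64_t epoch_cyc, epoch_ns, epoch_cyc_copy;

/* Writer side, mirroring update_sched_clock() above. */
static void writer_update(uint64_t cyc, uint64_t ns)
{
	epoch_cyc_copy = cyc;	/* step 1: mark the update as in flight */
	smp_wmb();
	epoch_ns = ns;		/* step 2: publish the new ns epoch */
	smp_wmb();
	epoch_cyc = cyc;	/* step 3: mark the update as complete */
}

/* Reader side, mirroring the retry loop in sched_clock() above. */
static void reader_snapshot(uint64_t *cyc, uint64_t *ns)
{
	do {
		*cyc = epoch_cyc;	/* read in the opposite order... */
		smp_rmb();
		*ns = epoch_ns;
		smp_rmb();
	} while (*cyc != epoch_cyc_copy);	/* ...retry if a write raced us */
}

int main(void)
{
	uint64_t cyc, ns;

	writer_update(1000, 5000);
	reader_snapshot(&cyc, &ns);
	printf("cyc=%llu ns=%llu\n",
	       (unsigned long long)cyc, (unsigned long long)ns);
	return 0;
}
```

If a reader observes step 1 but not step 3, the two copies differ and the loop retries, which is exactly the consistency the commented-out seqcount calls were meant to provide.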
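`cyc_to_ns()` is plain fixed-point arithmetic: `clocks_calc_mult_shift()` chooses `mult` and `shift` so that `mult / 2^shift ≈ NSEC_PER_SEC / rate`. Below is a hedged userspace sketch of the conversion with a hypothetical 13 MHz counter and a hard-coded `shift` of 24; the kernel's real algorithm also bounds the conversion range, so this simplified picker only illustrates the arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same conversion as the kernel's cyc_to_ns() above. */
static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	uint64_t rate = 13000000;	/* hypothetical 13 MHz counter */
	uint32_t shift = 24;		/* hard-coded for this sketch */
	/* mult = round(NSEC_PER_SEC * 2^shift / rate) */
	uint32_t mult = (uint32_t)(((NSEC_PER_SEC << shift) + rate / 2) / rate);

	/* One second's worth of cycles should convert to ~1e9 ns. */
	printf("mult=%u shift=%u: %llu cycles -> %llu ns\n",
	       mult, shift, (unsigned long long)rate,
	       (unsigned long long)cyc_to_ns(rate, mult, shift));
	return 0;
}
```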
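The open-coded `wrap` math in `sched_clock_register()` sizes the poll interval so that `update_sched_clock()` refreshes the epoch before `(cyc - epoch_cyc) & mask` can alias: the counter's full range is converted to nanoseconds, and `wrap_kt` re-arms the hrtimer at 7/8 of that. A sketch under the same hypothetical values as the previous example (32-bit counter at ~13 MHz):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	int bits = 32;				/* hypothetical counter width */
	uint32_t mult = 1290555077, shift = 24;	/* ~13 MHz, as computed above */
	uint64_t mask = (1ULL << bits) - 1;
	uint64_t wrap = cyc_to_ns(mask, mult, shift);
	uint64_t rearm = wrap - (wrap >> 3);	/* wrap_kt: fire at 7/8 of wrap */

	printf("counter wraps every %llu ns, timer re-arms every %llu ns\n",
	       (unsigned long long)wrap, (unsigned long long)rearm);
	return 0;
}
```

For a 32-bit counter at 13 MHz this comes out to roughly 330 seconds per wrap, so the poll timer fires about every 289 seconds.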
