author     Meizu OpenSource <patchwork@meizu.com>  2016-08-15 10:19:42 +0800
committer  Meizu OpenSource <patchwork@meizu.com>  2016-08-15 10:19:42 +0800
commit     d2e1446d81725c351dc73a03b397ce043fb18452 (patch)
tree       4dbc616b7f92aea39cd697a9084205ddb805e344 /drivers/misc/mediatek/kernel
first commit
Diffstat (limited to 'drivers/misc/mediatek/kernel')
-rwxr-xr-x  drivers/misc/mediatek/kernel/Makefile              15
-rw-r--r--  drivers/misc/mediatek/kernel/fiq_smp_call.c       200
-rw-r--r--  drivers/misc/mediatek/kernel/kdb_enhance.c         31
-rw-r--r--  drivers/misc/mediatek/kernel/mt_cache_v7.S       1018
-rwxr-xr-x  drivers/misc/mediatek/kernel/mt_cache_v8.S        259
-rw-r--r--  drivers/misc/mediatek/kernel/mtk_memcfg.c         544
-rw-r--r--  drivers/misc/mediatek/kernel/mtk_meminfo.c        110
-rw-r--r--  drivers/misc/mediatek/kernel/mtk_trace.c          221
-rw-r--r--  drivers/misc/mediatek/kernel/sched/Makefile        13
-rw-r--r--  drivers/misc/mediatek/kernel/sched/cputopo.c      130
-rw-r--r--  drivers/misc/mediatek/kernel/sched/prio_tracer.c  518
-rw-r--r--  drivers/misc/mediatek/kernel/sched/rq_stats.c     712
-rw-r--r--  drivers/misc/mediatek/kernel/sched/sched_avg.c    135
-rw-r--r--  drivers/misc/mediatek/kernel/sec_osal.c           365
-rw-r--r--  drivers/misc/mediatek/kernel/system.c              48
15 files changed, 4319 insertions, 0 deletions
diff --git a/drivers/misc/mediatek/kernel/Makefile b/drivers/misc/mediatek/kernel/Makefile
new file mode 100755
index 000000000..0c4f28dea
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/Makefile
@@ -0,0 +1,15 @@
+include $(srctree)/drivers/misc/mediatek/Makefile.custom
+
+ccflags-y += -I$(srctree)
+obj-$(CONFIG_KGDB_KDB) += kdb_enhance.o
+obj-$(CONFIG_MT65XX_TRACER) += trace_mt65xx_mon.o
+obj-y += sched/
+obj-$(CONFIG_MTK_MEMCFG) += mtk_memcfg.o
+obj-$(CONFIG_TRACING) += mtk_trace.o
+obj-y += mtk_meminfo.o
+obj-$(CONFIG_MTK_MEMCFG) += mtk_memcfg.o
+obj-y += system.o
+obj-$(CONFIG_ARM) += mt_cache_v7.o
+obj-$(CONFIG_ARM64) += mt_cache_v8.o
+obj-y += fiq_smp_call.o
+obj-y += sec_osal.o
diff --git a/drivers/misc/mediatek/kernel/fiq_smp_call.c b/drivers/misc/mediatek/kernel/fiq_smp_call.c
new file mode 100644
index 000000000..880055f26
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/fiq_smp_call.c
@@ -0,0 +1,200 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <mach/smp.h>
+#include <mach/irqs.h>
+#include <mach/fiq_smp_call.h>
+
+#if defined(CONFIG_FIQ_GLUE)
+
+#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
+#include <mach/mt_secure_api.h>
+#endif
+
+enum {
+ CSD_FLAG_LOCK = 0x01,
+};
+
+struct call_function_data {
+ struct call_single_data csd;
+ fiq_smp_call_func_t func;
+ atomic_t refs;
+ cpumask_var_t cpumask;
+};
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, fiq_cfd_data);
+static struct call_function_data *current_cfd_data = NULL;
+
+extern void irq_raise_softirq(const struct cpumask *mask, unsigned int irq);
+
+static int __csd_lock_wait(struct call_single_data *data)
+{
+ int cpu, nr_online_cpus = 0;
+
+	while (data->flags & CSD_FLAG_LOCK) {
+		nr_online_cpus = 0;
+ for_each_cpu(cpu, data->cpumask) {
+ if (cpu_online(cpu)) {
+ nr_online_cpus++;
+ }
+ }
+ if (!nr_online_cpus)
+ return -ENXIO;
+ cpu_relax();
+ }
+
+ return 0;
+}
+
+static void __csd_lock(struct call_single_data *data)
+{
+ __csd_lock_wait(data);
+ data->flags = CSD_FLAG_LOCK;
+
+ /*
+ * prevent CPU from reordering the above assignment
+ * to ->flags with any subsequent assignments to other
+ * fields of the specified call_single_data structure:
+ */
+ smp_mb();
+}
+
+static void __csd_unlock(struct call_single_data *data)
+{
+ WARN_ON(!(data->flags & CSD_FLAG_LOCK));
+
+ /*
+ * ensure we're all done before releasing data:
+ */
+ smp_mb();
+
+ data->flags &= ~CSD_FLAG_LOCK;
+}
+
+#if defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
+static void fiq_security_fastcall(const struct cpumask *mask)
+{
+ unsigned long map = *cpus_addr(*mask);
+ mt_secure_call(MC_FC_MTK_AEEDUMP, map, 0, 0);
+}
+#endif
+
+/*
+ * fiq_smp_call_function: FIQ version of smp_call_function.
+ * @func: the function to run on each target CPU
+ * @info: opaque argument passed through to @func
+ * @wait: if non-zero, wait until @func has completed on all target CPUs
+ * Return 0 for success and error code for failure.
+ *
+ * This function is designed for the debugger only.
+ * Other kernel code or drivers should NOT use this function.
+ * This function can only be used in the FIQ-WDT handler.
+ */
+int fiq_smp_call_function(fiq_smp_call_func_t func, void *info, int wait)
+{
+ struct cpumask *mask = (struct cpumask *)cpu_online_mask;
+ struct call_function_data *data;
+ int refs, install_csd, this_cpu = 0;
+
+ this_cpu = get_HW_cpuid();
+ data = &__get_cpu_var(fiq_cfd_data);
+ __csd_lock(&data->csd);
+
+ atomic_set(&data->refs, 0);
+
+ data->func = func;
+ data->csd.info = info;
+
+ smp_wmb();
+
+ cpumask_and(data->cpumask, mask, cpu_online_mask);
+ cpumask_clear_cpu(this_cpu, data->cpumask);
+ refs = cpumask_weight(data->cpumask);
+	cpumask_copy(data->csd.cpumask, data->cpumask);
+
+ if (unlikely(!refs)) {
+ __csd_unlock(&data->csd);
+ goto fiq_smp_call_function_exit;
+ }
+
+ /* poll to install data on current_cfd_data */
+ install_csd = 0;
+ do {
+#if 0 /* no need to protect due to FIQ-WDT */
+ spin_lock(&fiq_smp_call_lock);
+#endif
+
+ if (!current_cfd_data) {
+ atomic_set(&data->refs, refs);
+ current_cfd_data = data;
+ install_csd = 1;
+ }
+#if 0
+ spin_unlock(&fiq_smp_call_lock);
+#endif
+ } while (!install_csd);
+
+ smp_mb();
+
+ /* send a message to all CPUs in the map */
+#if !defined(CONFIG_TRUSTONIC_TEE_SUPPORT)
+ irq_raise_softirq(data->cpumask, FIQ_SMP_CALL_SGI);
+#else
+ fiq_security_fastcall(data->cpumask);
+#endif
+
+ if (wait)
+ __csd_lock_wait(&data->csd);
+
+ fiq_smp_call_function_exit:
+ return 0;
+}
+
+static void fiq_smp_call_handler(void *arg, void *regs, void *svc_sp)
+{
+ struct call_function_data *data;
+ int cpu = 0, refs;
+ fiq_smp_call_func_t func;
+
+ /* get the current cpu id */
+ asm volatile ("MRC p15, 0, %0, c0, c0, 5\n" "AND %0, %0, #0xf\n":"+r" (cpu)
+ : : "cc");
+
+ data = current_cfd_data;
+ if (data) {
+ func = data->func;
+ func(data->csd.info, regs, svc_sp);
+
+ cpumask_clear_cpu(cpu, data->csd.cpumask);
+ refs = atomic_dec_return(&data->refs);
+
+ if (refs == 0) {
+ __csd_unlock(&data->csd);
+ current_cfd_data = NULL;
+ }
+ }
+}
+
+static void __fiq_smp_call_init(void *info)
+{
+ int err;
+
+ err = request_fiq(FIQ_SMP_CALL_SGI, fiq_smp_call_handler, 0, NULL);
+ if (err) {
+ pr_err("fail to request FIQ for FIQ_SMP_CALL_SGI\n");
+ } else {
+ pr_debug("Request FIQ for FIQ_SMP_CALL_SGI\n");
+ }
+}
+
+static int __init fiq_smp_call_init(void)
+{
+ __fiq_smp_call_init(NULL);
+ smp_call_function(__fiq_smp_call_init, NULL, 1);
+
+ return 0;
+}
+arch_initcall(fiq_smp_call_init);
+
+#endif /* CONFIG_FIQ_GLUE */
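
A minimal caller sketch for the API above (illustrative C, not part of this commit; the callback and handler names are hypothetical, and the fiq_smp_call_func_t signature is assumed from the way fiq_smp_call_handler() invokes it):

    /* Hypothetical example: dump state on all other CPUs from a FIQ-WDT handler. */
    static void dump_cpu_state(void *info, void *regs, void *svc_sp)
    {
    	/* runs on each target CPU, in FIQ context */
    	pr_emerg("FIQ dump on cpu %d\n", raw_smp_processor_id());
    }

    static void wdt_fiq(void *arg, void *regs, void *svc_sp)
    {
    	/* ask every other online CPU to run the dump, and wait for them */
    	fiq_smp_call_function(dump_cpu_state, NULL, 1);
    }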
diff --git a/drivers/misc/mediatek/kernel/kdb_enhance.c b/drivers/misc/mediatek/kernel/kdb_enhance.c
new file mode 100644
index 000000000..c29d7fdf1
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/kdb_enhance.c
@@ -0,0 +1,31 @@
+#include <linux/sched.h>
+#include <linux/utsname.h>
+#include <linux/kdb.h>
+
+#ifdef CONFIG_SCHED_DEBUG
+
+DEFINE_PER_CPU(int, kdb_in_use) = 0;
+
+extern int sysrq_sched_debug_show(void);
+
+/*
+ * Display sched_debug information
+ */
+static int kdb_sched_debug(int argc, const char **argv)
+{
+ sysrq_sched_debug_show();
+ return 0;
+}
+
+#endif
+
+static __init int kdb_enhance_register(void)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ kdb_register_repeat("sched_debug", kdb_sched_debug, "",
+ "Display sched_debug information", 0, KDB_REPEAT_NONE);
+#endif
+ return 0;
+}
+
+__initcall(kdb_enhance_register);
diff --git a/drivers/misc/mediatek/kernel/mt_cache_v7.S b/drivers/misc/mediatek/kernel/mt_cache_v7.S
new file mode 100644
index 000000000..e3487003c
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/mt_cache_v7.S
@@ -0,0 +1,1018 @@
+ .text
+ .global __inner_flush_dcache_all
+ .global __inner_flush_dcache_L1
+ .global __inner_flush_dcache_L2
+ .global __inner_clean_dcache_all
+ .global __inner_clean_dcache_L1
+ .global __inner_clean_dcache_L2
+ .global __inner_inv_dcache_all
+ .global __inner_inv_dcache_L1
+ .global __inner_inv_dcache_L2
+ .global __enable_dcache
+ .global __enable_icache
+ .global __enable_cache
+ .global __disable_dcache
+ .global __disable_icache
+ .global __disable_cache
+ .global __disable_dcache__inner_flush_dcache_L1
+ .global __disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2
+ .global __disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2
+ .global d_i_dis_flush_all
+.equ C1_IBIT , 0x00001000
+.equ C1_CBIT , 0x00000004
+.equ PSR_F_BIT, 0x00000040
+.equ PSR_I_BIT, 0x00000080
+
+__enable_icache:
+ MRC p15,0,r0,c1,c0,0
+ ORR r0,r0,#C1_IBIT
+ MCR p15,0,r0,c1,c0,0
+ BX lr
+__disable_icache:
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_IBIT
+ MCR p15,0,r0,c1,c0,0
+ BX lr
+__enable_dcache:
+ MRC p15,0,r0,c1,c0,0
+ ORR r0,r0,#C1_CBIT
+ dsb
+ MCR p15,0,r0,c1,c0,0
+ dsb
+ isb
+ BX lr
+__disable_dcache:
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_CBIT
+ dsb
+ MCR p15,0,r0,c1,c0,0
+ dsb
+ isb
+ /*
+Erratum 794322: an instruction fetch can be allocated into the L2 cache after the cache is disabled.
+This erratum can be avoided by inserting both of the following after the SCTLR.C bit is cleared to 0, and before the caches are cleaned or invalidated:
+1) A TLBIMVA operation to any address.
+2) A DSB instruction.
+*/
+ MCR p15,0,r0,c8,c7,1
+ dsb
+ isb
+ BX lr
+__enable_cache:
+ MRC p15,0,r0,c1,c0,0
+ ORR r0,r0,#C1_IBIT
+ ORR r0,r0,#C1_CBIT
+ MCR p15,0,r0,c1,c0,0
+ BX lr
+__disable_cache:
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_IBIT
+ BIC r0,r0,#C1_CBIT
+ MCR p15,0,r0,c1,c0,0
+/*
+Erratum 794322: an instruction fetch can be allocated into the L2 cache after the cache is disabled.
+This erratum can be avoided by inserting both of the following after the SCTLR.C bit is cleared to 0, and before the caches are cleaned or invalidated:
+1) A TLBIMVA operation to any address.
+2) A DSB instruction.
+*/
+ MCR p15,0,r0,c8,c7,1
+ dsb
+ BX lr
+
+
+__inner_flush_dcache_all:
+ push {r0,r1,r2,r3,r4,r5,r7,r8,r9,r10,r11,r14}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq all_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 0
+all_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt all_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+all_loop2:
+ mov r9, r4 @ create working copy of max way size
+all_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+#ifdef CONFIG_L1C_OPT
+@ replace DCCISW with DCISW+DCCSW
+ cmp r10, #2
+ mrsne r1, cpsr @disable IRQ and save flag to make clean and invalidate atomic
+ orrne r8, r1, #PSR_I_BIT | PSR_F_BIT
+ msrne cpsr_c, r8
+ mcrne p15, 0, r11, c7, c10, 2 @ clean by set/way
+ mcrne p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+ msrne cpsr_c, r1
+ mcreq p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+#else
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+#endif
+ subs r9, r9, #1 @ decrement the way
+ bge all_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge all_loop2
+all_skip:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt all_loop1
+all_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ pop {r0,r1,r2,r3,r4,r5,r7,r8,r9,r10,r11,r14}
+ bx lr
+
+__inner_flush_dcache_L1:
+ push {r0,r1,r2,r3,r4,r5,r7,r8,r9,r10,r11,r14}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq L1_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 1
+L1_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt L1_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+L1_loop2:
+ mov r9, r4 @ create working copy of max way size
+L1_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+#ifdef CONFIG_L1C_OPT
+@ replace DCCISW with DCISW+DCCSW
+ mrs r1, cpsr @disable IRQ and save flag to make clean and invalidate atomic
+ orr r8, r1, #PSR_I_BIT | PSR_F_BIT
+ msr cpsr_c, r8
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+ msr cpsr_c, r1
+#else
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+#endif
+ subs r9, r9, #1 @ decrement the way
+ bge L1_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge L1_loop2
+L1_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt L1_loop1
+L1_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ pop {r0,r1,r2,r3,r4,r5,r7,r8,r9,r10,r11,r14}
+ bx lr
+
+__inner_flush_dcache_L2:
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ @push {r4,r5,r7,r9,r10,r11}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq L2_finished @ if loc is 0, then no need to clean
+ mov r10, #2 @ start clean at cache level 2
+L2_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt L2_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+L2_loop2:
+ mov r9, r4 @ create working copy of max way size
+L2_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge L2_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge L2_loop2
+L2_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt L2_loop1
+L2_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ @pop {r4,r5,r7,r9,r10,r11}
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__inner_clean_dcache_all:
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ @push {r4,r5,r7,r9,r10,r11}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq all_cl_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 0
+all_cl_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt all_cl_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+all_cl_loop2:
+ mov r9, r4 @ create working copy of max way size
+all_cl_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+
+ subs r9, r9, #1 @ decrement the way
+ bge all_cl_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge all_cl_loop2
+all_cl_skip:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt all_cl_loop1
+all_cl_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ @pop {r4,r5,r7,r9,r10,r11}
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__inner_clean_dcache_L1:
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ @push {r4,r5,r7,r9,r10,r11}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq L1_cl_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 1
+L1_cl_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt L1_cl_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+L1_cl_loop2:
+ mov r9, r4 @ create working copy of max way size
+L1_cl_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+
+ subs r9, r9, #1 @ decrement the way
+ bge L1_cl_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge L1_cl_loop2
+L1_cl_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt L1_cl_loop1
+L1_cl_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ @pop {r4,r5,r7,r9,r10,r11}
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__inner_clean_dcache_L2:
+#if 0
+ mov r0, sp
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
+ dsb
+ sub r0, r0, #64
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
+ dsb
+#endif
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ @push {r4,r5,r7,r9,r10,r11}
+#if 0
+ mov r0, sp
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
+ dsb
+ sub r0, r0, #64
+ mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
+ dsb
+#endif
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq L2_cl_finished @ if loc is 0, then no need to clean
+ mov r10, #2 @ start clean at cache level 2
+L2_cl_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt L2_cl_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+L2_cl_loop2:
+ mov r9, r4 @ create working copy of max way size
+L2_cl_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge L2_cl_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge L2_cl_loop2
+L2_cl_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt L2_cl_loop1
+L2_cl_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ @pop {r4,r5,r7,r9,r10,r11}
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__inner_inv_dcache_all:
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ @push {r4,r5,r7,r9,r10,r11}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq all_inv_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 0
+all_inv_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt all_inv_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+all_inv_loop2:
+ mov r9, r4 @ create working copy of max way size
+all_inv_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+
+ subs r9, r9, #1 @ decrement the way
+ bge all_inv_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge all_inv_loop2
+all_inv_skip:
+ add r10, r10, #2 @ increment cache number
+ cmp r3, r10
+ bgt all_inv_loop1
+all_inv_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ @pop {r4,r5,r7,r9,r10,r11}
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__inner_inv_dcache_L1:
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ @push {r4,r5,r7,r9,r10,r11}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq L1_inv_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 1
+L1_inv_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt L1_inv_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+L1_inv_loop2:
+ mov r9, r4 @ create working copy of max way size
+L1_inv_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge L1_inv_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge L1_inv_loop2
+L1_inv_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt L1_inv_loop1
+L1_inv_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ @pop {r4,r5,r7,r9,r10,r11}
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__inner_inv_dcache_L2:
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ @push {r4,r5,r7,r9,r10,r11}
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq L2_inv_finished @ if loc is 0, then no need to clean
+ mov r10, #2 @ start clean at cache level 2
+L2_inv_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt L2_inv_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+L2_inv_loop2:
+ mov r9, r4 @ create working copy of max way size
+L2_inv_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge L2_inv_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge L2_inv_loop2
+L2_inv_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt L2_inv_loop1
+L2_inv_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+ @pop {r4,r5,r7,r9,r10,r11}
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__disable_dcache__inner_flush_dcache_L1:
+/*******************************************************************************
+ * push stack *
+ ******************************************************************************/
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+/*******************************************************************************
+ * __disable_dcache *
+ ******************************************************************************/
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_CBIT
+ dsb
+ MCR p15,0,r0,c1,c0,0
+ dsb
+ isb
+/*
+Erratum 794322: an instruction fetch can be allocated into the L2 cache after the cache is disabled.
+This erratum can be avoided by inserting both of the following after the SCTLR.C bit is cleared to 0, and before the caches are cleaned or invalidated:
+1) A TLBIMVA operation to any address.
+2) A DSB instruction.
+*/
+ MCR p15,0,r0,c8,c7,1
+ dsb
+ isb
+/*******************************************************************************
+ * __inner_flush_dcache_L1 *
+ ******************************************************************************/
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq DF1_L1_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 1
+DF1_L1_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt DF1_L1_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+DF1_L1_loop2:
+ mov r9, r4 @ create working copy of max way size
+DF1_L1_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+#if 1
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+#endif
+
+#if 0
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+#endif
+ subs r9, r9, #1 @ decrement the way
+ bge DF1_L1_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge DF1_L1_loop2
+DF1_L1_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt DF1_L1_loop1
+DF1_L1_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+/*******************************************************************************
+ * pop stack *
+ ******************************************************************************/
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2:
+/*******************************************************************************
+ * push stack *
+ ******************************************************************************/
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+/*******************************************************************************
+ * __disable_dcache *
+ ******************************************************************************/
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_CBIT
+ dsb
+ MCR p15,0,r0,c1,c0,0
+ dsb
+ isb
+/*
+Erratum 794322: an instruction fetch can be allocated into the L2 cache after the cache is disabled.
+This erratum can be avoided by inserting both of the following after the SCTLR.C bit is cleared to 0, and before the caches are cleaned or invalidated:
+1) A TLBIMVA operation to any address.
+2) A DSB instruction.
+*/
+ MCR p15,0,r0,c8,c7,1
+ dsb
+ isb
+/*******************************************************************************
+ * __inner_flush_dcache_L1 *
+ ******************************************************************************/
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq DF1F2_L1_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 1
+DF1F2_L1_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt DF1F2_L1_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+DF1F2_L1_loop2:
+ mov r9, r4 @ create working copy of max way size
+DF1F2_L1_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+#if 1
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+#endif
+
+#if 0
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+#endif
+ subs r9, r9, #1 @ decrement the way
+ bge DF1F2_L1_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge DF1F2_L1_loop2
+DF1F2_L1_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt DF1F2_L1_loop1
+DF1F2_L1_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+/*******************************************************************************
+ * clrex *
+ ******************************************************************************/
+ clrex
+/*******************************************************************************
+ * __inner_flush_dcache_L2 *
+ ******************************************************************************/
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq DF1F2_L2_finished @ if loc is 0, then no need to clean
+ mov r10, #2 @ start clean at cache level 2
+DF1F2_L2_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt DF1F2_L2_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+DF1F2_L2_loop2:
+ mov r9, r4 @ create working copy of max way size
+DF1F2_L2_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge DF1F2_L2_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge DF1F2_L2_loop2
+DF1F2_L2_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt DF1F2_L2_loop1
+DF1F2_L2_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+/*******************************************************************************
+ * pop stack *
+ ******************************************************************************/
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2:
+/*******************************************************************************
+ * push stack *
+ ******************************************************************************/
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+/*******************************************************************************
+ * __disable_dcache *
+ ******************************************************************************/
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_CBIT
+ dsb
+ MCR p15,0,r0,c1,c0,0
+ dsb
+ isb
+/*
+Erratum 794322: an instruction fetch can be allocated into the L2 cache after the cache is disabled.
+This erratum can be avoided by inserting both of the following after the SCTLR.C bit is cleared to 0, and before the caches are cleaned or invalidated:
+1) A TLBIMVA operation to any address.
+2) A DSB instruction.
+*/
+ MCR p15,0,r0,c8,c7,1
+ dsb
+ isb
+/*******************************************************************************
+ * __inner_flush_dcache_L1 *
+ ******************************************************************************/
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq DF1C2_L1_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 1
+DF1C2_L1_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt DF1C2_L1_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+DF1C2_L1_loop2:
+ mov r9, r4 @ create working copy of max way size
+DF1C2_L1_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+#if 1
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+#endif
+
+#if 0
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+#endif
+ subs r9, r9, #1 @ decrement the way
+ bge DF1C2_L1_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge DF1C2_L1_loop2
+DF1C2_L1_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt DF1C2_L1_loop1
+DF1C2_L1_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+/*******************************************************************************
+ * clrex *
+ ******************************************************************************/
+ clrex
+/*******************************************************************************
+ * __inner_clean_dcache_L2 *
+ ******************************************************************************/
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq DF1C2_L2_cl_finished @ if loc is 0, then no need to clean
+ mov r10, #2 @ start clean at cache level 2
+DF1C2_L2_cl_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt DF1C2_L2_cl_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+DF1C2_L2_cl_loop2:
+ mov r9, r4 @ create working copy of max way size
+DF1C2_L2_cl_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge DF1C2_L2_cl_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge DF1C2_L2_cl_loop2
+DF1C2_L2_cl_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt DF1C2_L2_cl_loop1
+DF1C2_L2_cl_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+/*******************************************************************************
+ * pop stack *
+ ******************************************************************************/
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+
+d_i_dis_flush_all:
+/*******************************************************************************
+ * push stack *
+ ******************************************************************************/
+ push {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+/*******************************************************************************
+ * __disable_dcache *
+ ******************************************************************************/
+ MRC p15,0,r0,c1,c0,0
+ BIC r0,r0,#C1_CBIT
+ BIC r0,r0,#C1_IBIT
+ dsb
+ MCR p15,0,r0,c1,c0,0
+ dsb
+ isb
+/*
+Erratum 794322: an instruction fetch can be allocated into the L2 cache after the cache is disabled.
+This erratum can be avoided by inserting both of the following after the SCTLR.C bit is cleared to 0, and before the caches are cleaned or invalidated:
+1) A TLBIMVA operation to any address.
+2) A DSB instruction.
+*/
+ MCR p15,0,r0,c8,c7,1
+ dsb
+ isb
+/*******************************************************************************
+ * __inner_flush_dcache_L1 *
+ ******************************************************************************/
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq DIF1F2_L1_finished @ if loc is 0, then no need to clean
+ mov r10, #0 @ start clean at cache level 1
+DIF1F2_L1_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt DIF1F2_L1_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+DIF1F2_L1_loop2:
+ mov r9, r4 @ create working copy of max way size
+DIF1F2_L1_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+#if 1
+ mcr p15, 0, r11, c7, c10, 2 @ clean by set/way
+ mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
+#endif
+
+#if 0
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+#endif
+ subs r9, r9, #1 @ decrement the way
+ bge DIF1F2_L1_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge DIF1F2_L1_loop2
+DIF1F2_L1_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt DIF1F2_L1_loop1
+DIF1F2_L1_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+/*******************************************************************************
+ * clrex *
+ ******************************************************************************/
+ clrex
+/*******************************************************************************
+ * __inner_flush_dcache_L2 *
+ ******************************************************************************/
+ dmb @ ensure ordering with previous memory accesses
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ ands r3, r0, #0x7000000 @ extract loc from clidr
+ mov r3, r3, lsr #23 @ left align loc bit field
+ beq DIF1F2_L2_finished @ if loc is 0, then no need to clean
+ mov r10, #2 @ start clean at cache level 2
+DIF1F2_L2_loop1:
+ add r2, r10, r10, lsr #1 @ work out 3x current cache level
+ mov r1, r0, lsr r2 @ extract cache type bits from clidr
+ and r1, r1, #7 @ mask of the bits for current cache only
+ cmp r1, #2 @ see what cache we have at this level
+ blt DIF1F2_L2_skip @ skip if no cache, or just i-cache
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ isb @ isb to sync the new cssr&csidr
+ mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+ and r2, r1, #7 @ extract the length of the cache lines
+ add r2, r2, #4 @ add 4 (line length offset)
+ ldr r4, =0x3ff
+ ands r4, r4, r1, lsr #3 @ find maximum number on the way size
+ clz r5, r4 @ find bit position of way size increment
+ ldr r7, =0x7fff
+ ands r7, r7, r1, lsr #13 @ extract max number of the index size
+DIF1F2_L2_loop2:
+ mov r9, r4 @ create working copy of max way size
+DIF1F2_L2_loop3:
+ orr r11, r10, r9, lsl r5 @ factor way and cache number into r11
+ orr r11, r11, r7, lsl r2 @ factor index number into r11
+ mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
+ subs r9, r9, #1 @ decrement the way
+ bge DIF1F2_L2_loop3
+ subs r7, r7, #1 @ decrement the index
+ bge DIF1F2_L2_loop2
+DIF1F2_L2_skip:
+ @add r10, r10, #2 @ increment cache number
+ @cmp r3, r10
+ @bgt DIF1F2_L2_loop1
+DIF1F2_L2_finished:
+ mov r10, #0 @ switch back to cache level 0
+ mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
+ dsb
+ isb
+/*******************************************************************************
+ * pop stack *
+ ******************************************************************************/
+ pop {r0,r1,r2,r3,r4,r5,r7,r9,r10,r11,r14}
+ bx lr
+
+
+ .end
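
Every set/way loop in the file above assembles the same operand in r11 before issuing a DCCISW/DCCSW/DCISW. As a reference model (illustrative C, not part of this commit), the value the loops compute is:

    /*
     * level_x2   = cache level << 1     (r10)
     * way        = current way number   (r9)
     * way_shift  = clz(max way number)  (r5)
     * set        = current set number   (r7)
     * line_shift = (ccsidr & 7) + 4     (r2)
     */
    static inline unsigned int setway_operand(unsigned int level_x2,
    					  unsigned int way, unsigned int way_shift,
    					  unsigned int set, unsigned int line_shift)
    {
    	return level_x2 | (way << way_shift) | (set << line_shift);
    }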
diff --git a/drivers/misc/mediatek/kernel/mt_cache_v8.S b/drivers/misc/mediatek/kernel/mt_cache_v8.S
new file mode 100755
index 000000000..1f098d700
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/mt_cache_v8.S
@@ -0,0 +1,259 @@
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+ .text
+.equ SCTLR_C_BIT, 0x00000004
+.equ SCTLR_I_BIT, 0x00001000
+.equ DCISW, 0x0
+.equ DCCISW, 0x1
+.equ DCCSW, 0x2
+.equ LOC_SHIFT, 24
+.equ CLIDR_FIELD_WIDTH, 3
+.equ LEVEL_SHIFT, 1
+
+ENTRY(__enable_icache)
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_I_BIT
+ msr SCTLR_EL1, x0
+ ret
+ENDPROC(__enable_icache)
+
+ENTRY(__disable_icache)
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_I_BIT
+ msr SCTLR_EL1, x0
+ ret
+ENDPROC(__disable_icache)
+
+/* might pollute x0 */
+.macro __dis_D
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_C_BIT
+ dsb sy
+ msr SCTLR_EL1, x0
+ dsb sy
+ isb sy
+.endm
+
+ENTRY(__enable_dcache)
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_C_BIT
+ dsb sy
+ msr SCTLR_EL1, x0
+ dsb sy
+ isb sy
+ ret
+ENDPROC(__enable_dcache)
+
+ENTRY(__disable_dcache)
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_C_BIT
+ dsb sy
+ msr SCTLR_EL1, x0
+ dsb sy
+ isb sy
+ ret
+ENDPROC(__disable_dcache)
+
+ENTRY(__enable_cache)
+ mrs x0, SCTLR_EL1
+ orr x0, x0, #SCTLR_I_BIT
+ orr x0, x0, #SCTLR_C_BIT
+ dsb sy
+ msr SCTLR_EL1, x0
+ dsb sy
+ isb sy
+ ret
+ENDPROC(__enable_cache)
+
+ENTRY(__disable_cache)
+ mrs x0, SCTLR_EL1
+ bic x0, x0, #SCTLR_I_BIT
+ bic x0, x0, #SCTLR_C_BIT
+ dsb sy
+ msr SCTLR_EL1, x0
+ dsb sy
+ isb sy
+ ret
+ENDPROC(__disable_cache)
+
+/* ---------------------------------------------------------------
+* Data cache operations by set/way to the level specified
+*
+* The main function, do_dcsw_op, requires:
+* x0: The operation type (0-2), as defined by the DCISW/DCCISW/DCCSW
+*     equates above
+* x1: The first cache level to operate on
+* x3: The last cache level to operate on
+* x9: clidr_el1
+* and will carry out the operation on each data cache level from
+* the one in x1 to the one in x3 in sequence
+*
+* The __inner_dcache_* macros below set up the x1, x3 and x9
+* parameters based on clidr_el1 cache information before invoking
+* the main function
+* ---------------------------------------------------------------
+*/
+ENTRY(do_dcsw_op)
+ lsl x3, x3, #1
+ cbz x3, exit
+ sub x1, x1, #1
+ lsl x1, x1, #1
+ mov x10, x1
+ adr x14, dcsw_loop_table // compute inner loop address
+ add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
+ mov x0, x9
+ mov w8, #1
+loop:
+ add x2, x10, x10, lsr #1 // work out 3x current cache level
+ lsr x1, x0, x2 // extract cache type bits from clidr
+ and x1, x1, #7 // mask the bits for current cache only
+ cmp x1, #2 // see what cache we have at this level
+ b.lt level_done // nothing to do if no cache or icache
+
+ msr csselr_el1, x10 // select current cache level in csselr
+ isb // isb to sync the new cssr&csidr
+ mrs x1, ccsidr_el1 // read the new ccsidr
+ and x2, x1, #7 // extract the length of the cache lines
+ add x2, x2, #4 // add 4 (line length offset)
+ ubfx x4, x1, #3, #10 // maximum way number
+ clz w5, w4 // bit position of way size increment
+ lsl w9, w4, w5 // w9 = aligned max way number
+ lsl w16, w8, w5 // w16 = way number loop decrement
+ orr w9, w10, w9 // w9 = combine way and cache number
+ ubfx w6, w1, #13, #15 // w6 = max set number
+ lsl w17, w8, w2 // w17 = set number loop decrement
+ dsb sy // barrier before we start this level
+ br x14 // jump to DC operation specific loop
+
+ .macro dcsw_loop _op
+loop2_\_op:
+ lsl w7, w6, w2 // w7 = aligned max set number
+
+loop3_\_op:
+ orr w11, w9, w7 // combine cache, way and set number
+ dc \_op, x11
+ subs w7, w7, w17 // decrement set number
+ b.ge loop3_\_op
+
+ subs x9, x9, x16 // decrement way number
+ b.ge loop2_\_op
+
+ b level_done
+ .endm
+
+level_done:
+ add x10, x10, #2 // increment cache number
+ cmp x3, x10
+ b.gt loop
+ msr csselr_el1, xzr // select cache level 0 in csselr
+ dsb sy // barrier to complete final cache operation
+ isb
+exit:
+ ret
+ENDPROC(do_dcsw_op)
+
+dcsw_loop_table:
+ dcsw_loop isw
+ dcsw_loop cisw
+ dcsw_loop csw
+
+.macro __inner_dcache_all mode
+ mov x0, \mode
+ mov x1, #1
+ mrs x9, clidr_el1
+	ubfx x3, x9, #LOC_SHIFT, #CLIDR_FIELD_WIDTH /* LoC as last cache level */
+ b do_dcsw_op
+.endm
+
+.macro __inner_dcache_L1 mode
+ mov x0, \mode
+ mov x1, #1
+ mov x3, #1
+ mrs x9, clidr_el1
+ b do_dcsw_op
+.endm
+
+.macro __inner_dcache_L2 mode
+ mov x0, \mode
+ mov x1, #2
+ mov x3, #2
+ mrs x9, clidr_el1
+ b do_dcsw_op
+.endm
+
+.macro __inner_dcache_L1_L2 mode
+ mov x0, \mode
+ mov x1, #1
+ mov x3, #2
+ mrs x9, clidr_el1
+ b do_dcsw_op
+.endm
+
+ENTRY(__inner_flush_dcache_all)
+ __inner_dcache_all #DCCISW
+ENDPROC(__inner_flush_dcache_all)
+
+ENTRY(__inner_flush_dcache_L1)
+ __inner_dcache_L1 #DCCISW
+ENDPROC(__inner_flush_dcache_L1)
+
+ENTRY(__inner_flush_dcache_L2)
+ __inner_dcache_L2 #DCCISW
+ENDPROC(__inner_flush_dcache_L2)
+
+ENTRY(__inner_clean_dcache_all)
+ __inner_dcache_all #DCCSW
+ENDPROC(__inner_clean_dcache_all)
+
+ENTRY(__inner_clean_dcache_L1)
+ __inner_dcache_L1 #DCCSW
+ENDPROC(__inner_clean_dcache_L1)
+
+ENTRY(__inner_clean_dcache_L2)
+ __inner_dcache_L2 #DCCSW
+ENDPROC(__inner_clean_dcache_L2)
+
+ENTRY(__inner_inv_dcache_all)
+ __inner_dcache_all #DCISW
+ENDPROC(__inner_inv_dcache_all)
+
+ENTRY(__inner_inv_dcache_L1)
+ __inner_dcache_L1 #DCISW
+ENDPROC(__inner_inv_dcache_L1)
+
+ENTRY(__inner_inv_dcache_L2)
+ __inner_dcache_L2 #DCISW
+ENDPROC(__inner_inv_dcache_L2)
+
+ENTRY(__disable_dcache__inner_flush_dcache_L1)
+ __dis_D
+ __inner_dcache_L1 #DCCISW
+ENDPROC(__disable_dcache__inner_flush_dcache_L1)
+
+ENTRY(__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2)
+ __dis_D
+ __inner_dcache_L1_L2 #DCCISW
+ENDPROC(__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2)
+
+ENTRY(__disable_dcache__inner_clean_dcache_L1__inner_clean_dcache_L2)
+ __dis_D
+ __inner_dcache_L1_L2 #DCCSW
+ENDPROC(__disable_dcache__inner_clean_dcache_L1__inner_clean_dcache_L2)
+
+ENTRY(__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2)
+ __dis_D
+	/* Since we need different operations for L1 and L2, and our current
+	 * implementation returns from do_dcsw_op directly to the caller of
+	 * the last bl, we have to construct a stack frame ourselves here.
+	 * We use two caller-saved registers, x12 & x13, to save x29 (fp)
+	 * and x30 (lr), so that no memory access occurs during the cache
+	 * operation.
+	 * NOTICE: no macro or function used here may corrupt x12 & x13.
+	 */
+ mov x12, x29
+ mov x13, x30
+ mov x29, sp
+ bl __inner_flush_dcache_L1
+ mov x29, x12
+ mov x30, x13
+ __inner_dcache_L2 #DCCSW
+ENDPROC(__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2)
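
For reference, the level walk inside do_dcsw_op can be modelled in C as below (illustrative only; names are hypothetical). CSSELR takes (level << 1), and a level is skipped unless its 3-bit CLIDR type field is at least 2 (data or unified cache):

    static void dcsw_op_model(unsigned long clidr,
    			  unsigned int first, unsigned int last,
    			  void (*op_on_level)(unsigned int csselr))
    {
    	unsigned int level;	/* 1-based: 1 = L1, 2 = L2, ... */

    	for (level = first; level <= last; level++) {
    		unsigned int type = (clidr >> (3 * (level - 1))) & 7;

    		if (type < 2)	/* no cache, or i-cache only: skip */
    			continue;
    		op_on_level((level - 1) << 1);	/* CSSELR level field */
    	}
    }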
diff --git a/drivers/misc/mediatek/kernel/mtk_memcfg.c b/drivers/misc/mediatek/kernel/mtk_memcfg.c
new file mode 100644
index 000000000..971d8e3c6
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/mtk_memcfg.c
@@ -0,0 +1,544 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/seq_file.h>
+#include <linux/aee.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+#include <asm/setup.h>
+#include <mach/mtk_memcfg.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/mod_devicetable.h>
+#include <asm/io.h>
+#include <linux/memblock.h>
+
+#define MTK_MEMCFG_SIMPLE_BUFFER_LEN 16
+#define MTK_MEMCFG_LARGE_BUFFER_LEN (2048) /* it should not be larger than 1 page */
+
+#if defined(CONFIG_MTK_FB)
+extern unsigned int DISP_GetVRamSizeBoot(char *cmdline);
+#ifdef CONFIG_OF
+extern phys_addr_t mtkfb_get_fb_base(void);
+#endif
+#endif
+
+struct mtk_memcfg_info_buf {
+ unsigned long max_len;
+ unsigned long curr_pos;
+ char buf[MTK_MEMCFG_LARGE_BUFFER_LEN];
+};
+
+static struct mtk_memcfg_info_buf mtk_memcfg_layout_buf = {
+ .buf = {[0 ... (MTK_MEMCFG_LARGE_BUFFER_LEN - 1)] = 0,},
+ .max_len = MTK_MEMCFG_LARGE_BUFFER_LEN,
+ .curr_pos = 0,
+};
+
+static unsigned long mtk_memcfg_late_warning_flag;
+
+void mtk_memcfg_write_memory_layout_buf(char *fmt, ...)
+{
+ va_list ap;
+ struct mtk_memcfg_info_buf *layout_buf = &mtk_memcfg_layout_buf;
+ if (layout_buf->curr_pos <= layout_buf->max_len) {
+ va_start(ap, fmt);
+ layout_buf->curr_pos +=
+ vsnprintf((layout_buf->buf + layout_buf->curr_pos),
+ (layout_buf->max_len - layout_buf->curr_pos), fmt,
+ ap);
+ va_end(ap);
+ }
+}
+
+void mtk_memcfg_late_warning(unsigned long flag)
+{
+ mtk_memcfg_late_warning_flag |= flag;
+}
+
+/* kernel memory information */
+
+static int mtk_memcfg_memory_layout_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "%s", mtk_memcfg_layout_buf.buf);
+ seq_printf(m, "buffer usage: %lu/%lu\n",
+ (mtk_memcfg_layout_buf.curr_pos <=
+ mtk_memcfg_layout_buf.max_len ?
+ mtk_memcfg_layout_buf.curr_pos :
+ mtk_memcfg_layout_buf.max_len),
+ mtk_memcfg_layout_buf.max_len);
+
+ return 0;
+}
+
+static int mtk_memcfg_memory_layout_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_memcfg_memory_layout_show, NULL);
+}
+
+/* end of kernel memory information */
+
+/* kernel memory fragmentation trigger */
+
+static LIST_HEAD(frag_page_list);
+static DEFINE_SPINLOCK(frag_page_list_lock);
+static unsigned long mtk_memcfg_frag_round;
+static struct kmem_cache *frag_page_cache;
+
+struct frag_page {
+ struct list_head list;
+ struct page *page;
+};
+
+static int mtk_memcfg_frag_show(struct seq_file *m, void *v)
+{
+ int cnt = 0;
+ struct frag_page *frag_page, *n_frag_page;
+ spin_lock(&frag_page_list_lock);
+ list_for_each_entry_safe(frag_page, n_frag_page, &frag_page_list, list) {
+ cnt++;
+ }
+ spin_unlock(&frag_page_list_lock);
+ seq_printf(m, "round: %lu, fragmentation-trigger held %d pages, %d MB\n",
+ mtk_memcfg_frag_round,
+ cnt, (cnt << PAGE_SHIFT) >> 20);
+
+ return 0;
+}
+
+static int mtk_memcfg_frag_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_memcfg_frag_show, NULL);
+}
+
+void split_page(struct page *page, unsigned int order);
+
+static int do_fragmentation(void *n)
+{
+ struct frag_page *frag_page, *n_frag_page;
+ struct page *page;
+ gfp_t gfp_mask = GFP_ATOMIC;
+ unsigned int max_order = 2;
+ int cnt = 0, i;
+
+ /* trigger fragmentation */
+	/*
+	 * Repeatedly allocate an order-2 page, split it into 4
+	 * order-0 pages, and free 3 of them.  In this way we break
+	 * all high-order pages down into order-0 and order-1 pages
+	 * to create a fragmentation scenario.
+	 *
+	 * At this stage, fragmentation is only triggered in the
+	 * normal zone.
+	 */
+ while (1) {
+#if 1
+ if (cnt >= 10000) {
+			/*
+			 * Release all memory and restart the fragmentation.
+			 * Keeping too many frag_page structures would itself
+			 * consume too many order-0 pages.
+			 */
+ spin_lock(&frag_page_list_lock);
+ list_for_each_entry_safe(frag_page, n_frag_page,
+ &frag_page_list, list) {
+ list_del(&frag_page->list);
+ __free_page(frag_page->page);
+ kmem_cache_free(frag_page_cache, frag_page);
+ cnt--;
+ }
+ spin_unlock(&frag_page_list_lock);
+			pr_alert("round: %lu, fragmentation-trigger freed pages, %d left\n",
+				 mtk_memcfg_frag_round, cnt);
+ }
+#endif
+ while (1) {
+ frag_page = kmem_cache_alloc(frag_page_cache, gfp_mask);
+ if (!frag_page)
+ break;
+ page = alloc_pages(gfp_mask, max_order);
+ if (!page) {
+				kmem_cache_free(frag_page_cache, frag_page);
+ break;
+ }
+			split_page(page, max_order);
+ INIT_LIST_HEAD(&frag_page->list);
+ frag_page->page = page;
+ spin_lock(&frag_page_list_lock);
+ list_add(&frag_page->list, &frag_page_list);
+ spin_unlock(&frag_page_list_lock);
+ for (i = 1; i < (1 << max_order); i++)
+ __free_page(page + i);
+ cnt++;
+ }
+ mtk_memcfg_frag_round++;
+ pr_alert("round: %lu, fragmentation-trigger allocate %d pages %d MB\n",
+ mtk_memcfg_frag_round, cnt, (cnt << PAGE_SHIFT) >> 20);
+ msleep(500);
+ }
+
+ return 0;
+}
+
+static ssize_t
+mtk_memcfg_frag_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
+{
+ static char state;
+ static struct task_struct *p;
+ if (count > 0) {
+ if (get_user(state, buffer))
+ return -EFAULT;
+ state -= '0';
+ pr_alert("%s state = %d\n", __func__, state);
+ if (state) {
+ pr_alert("activate do_fragmentation kthread\n");
+ p = kthread_create(do_fragmentation, NULL,
+ "fragmentationd");
+ if (!IS_ERR(p))
+ wake_up_process(p);
+ }
+ }
+ return count;
+}
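+
+/*
+ * Illustrative usage from user space (paths as created in
+ * mtk_memcfg_late_init() below; the numbers are made up):
+ *
+ *	# echo 1 > /proc/mtk_memcfg/frag-trigger	# start fragmentationd
+ *	# cat /proc/mtk_memcfg/frag-trigger		# pages currently held
+ *	round: 3, fragmentation-trigger held 9500 pages, 37 MB
+ */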
+
+/* end of kernel memory fragmentation trigger */
+
+static int __init mtk_memcfg_init(void)
+{
+ return 0;
+}
+
+static void __exit mtk_memcfg_exit(void)
+{
+}
+
+static const struct file_operations mtk_memcfg_memory_layout_operations = {
+ .open = mtk_memcfg_memory_layout_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations mtk_memcfg_frag_operations = {
+ .open = mtk_memcfg_frag_open,
+ .write = mtk_memcfg_frag_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#ifdef CONFIG_SLUB_DEBUG
+
+extern int slabtrace_open(struct inode *inode, struct file *file);
+static const struct file_operations proc_slabtrace_operations = {
+ .open = slabtrace_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+static int __init mtk_memcfg_late_init(void)
+{
+ struct proc_dir_entry *entry = NULL;
+ struct proc_dir_entry *mtk_memcfg_dir = NULL;
+
+ pr_info("[%s] start\n", __func__);
+
+ mtk_memcfg_dir = proc_mkdir("mtk_memcfg", NULL);
+
+ if (!mtk_memcfg_dir) {
+ pr_err("[%s]: mkdir /proc/mtk_memcfg failed\n", __func__);
+ } else {
+ /* display kernel memory layout */
+ entry = proc_create("memory_layout",
+ S_IRUGO | S_IWUSR, mtk_memcfg_dir,
+ &mtk_memcfg_memory_layout_operations);
+
+ if (!entry)
+ pr_err("create memory_layout proc entry failed\n");
+
+		/* kernel memory fragmentation trigger */
+ entry = proc_create("frag-trigger",
+ S_IRUGO | S_IWUSR, mtk_memcfg_dir,
+ &mtk_memcfg_frag_operations);
+
+ if (!entry)
+			pr_err("create frag-trigger proc entry failed\n");
+
+ frag_page_cache = kmem_cache_create("frag_page_cache",
+ sizeof(struct frag_page),
+ 0, SLAB_PANIC, NULL);
+
+ if (!frag_page_cache)
+ pr_err("create frag_page_cache failed\n");
+
+#ifdef CONFIG_SLUB_DEBUG
+ /* slabtrace - full slub object backtrace */
+ entry = proc_create("slabtrace",
+ S_IRUSR, mtk_memcfg_dir,
+ &proc_slabtrace_operations);
+
+ if (!entry)
+ pr_err("create slabtrace proc entry failed\n");
+#endif
+ }
+ return 0;
+}
+
+module_init(mtk_memcfg_init);
+module_exit(mtk_memcfg_exit);
+
+#ifdef CONFIG_HIGHMEM
+extern unsigned long totalhigh_pages;
+#endif /* end of CONFIG_HIGHMEM */
+static int __init mtk_memcfg_late_sanity_test(void)
+{
+ /* trigger kernel warning if warning flag is set */
+ if (mtk_memcfg_late_warning_flag & WARN_MEMBLOCK_CONFLICT) {
+ aee_kernel_warning("[memory layout conflict]",
+ mtk_memcfg_layout_buf.buf);
+ }
+
+ if (mtk_memcfg_late_warning_flag & WARN_MEMSIZE_CONFLICT) {
+ aee_kernel_warning("[memory size conflict]",
+ mtk_memcfg_layout_buf.buf);
+ }
+
+ if (mtk_memcfg_late_warning_flag & WARN_API_NOT_INIT) {
+ aee_kernel_warning("[API is not initialized]",
+ mtk_memcfg_layout_buf.buf);
+ }
+
+#ifdef CONFIG_HIGHMEM
+ /* check highmem zone size */
+ if (unlikely
+ (totalhigh_pages && (totalhigh_pages << PAGE_SHIFT) < SZ_8M)) {
+ aee_kernel_warning("[high zone lt 8MB]", __func__);
+ }
+#endif /* end of CONFIG_HIGHMEM */
+
+ return 0;
+}
+
+/* scan memory layout */
+#ifdef CONFIG_OF
+static int dt_scan_memory(unsigned long node, const char *uname, int depth, void *data)
+{
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ int i;
+ unsigned long l;
+ u64 kernel_mem_sz = 0;
+ u64 phone_dram_sz = 0x0; /* original phone DRAM size */
+ u64 dram_sz = 0; /* total DRAM size of all modules */
+ dram_info_t *dram_info;
+ mem_desc_t *mem_desc;
+ mblock_info_t *mblock_info;
+ __be32 *reg, *endp;
+ u64 fb_base = 0x12345678, fb_size = 0;
+
+ /* We are scanning "memory" nodes only */
+ if (type == NULL) {
+ /*
+ * The longtrail doesn't have a device_type on the
+ * /memory node, so look for the node called /memory@0.
+ */
+ if (depth != 1 || strcmp(uname, "memory@0") != 0)
+ return 0;
+ } else if (strcmp(type, "memory") != 0) {
+ return 0;
+ }
+
+ reg = of_get_flat_dt_prop(node, "reg", &l);
+ if (reg == NULL)
+ return 0;
+
+ endp = reg + (l / sizeof(__be32));
+
+ while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+ u64 base, size;
+
+ base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+ if (size == 0)
+ continue;
+
+ MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT
+ "[debug]DRAM size (dt) : 0x%llx - 0x%llx (0x%llx)\n",
+ (unsigned long long)base,
+ (unsigned long long)base + (unsigned long long)size - 1,
+ (unsigned long long)size);
+ kernel_mem_sz += size;
+ }
+
+ /* orig_dram_info */
+ dram_info = (dram_info_t *)of_get_flat_dt_prop(node,
+ "orig_dram_info", NULL);
+ if (dram_info) {
+ for (i = 0; i < dram_info->rank_num; i++) {
+ MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT
+ "[debug]orig_dram rank[%d] : "
+ "0x%08llx - 0x%08llx (0x%llx)\n",
+ i,
+ dram_info->rank_info[i].start,
+ dram_info->rank_info[i].start +
+ dram_info->rank_info[i].size - 1,
+ dram_info->rank_info[i].size
+ );
+ phone_dram_sz += dram_info->rank_info[i].size;
+ }
+ }
+
+ /* mblock_info */
+ mblock_info = (mblock_info_t *)of_get_flat_dt_prop(node,
+ "mblock_info", NULL);
+ if (mblock_info) {
+ for (i = 0; i < mblock_info->mblock_num; i++) {
+ MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT
+ "[debug]mblock[%d][r%d] : "
+ "0x%08llx - 0x%08llx (0x%llx)\n",
+ i,
+ mblock_info->mblock[i].rank,
+ mblock_info->mblock[i].start,
+ mblock_info->mblock[i].start +
+ mblock_info->mblock[i].size - 1,
+ mblock_info->mblock[i].size
+ );
+ dram_sz += mblock_info->mblock[i].size;
+ }
+ }
+
+ /* lca reserved memory */
+ mem_desc = (mem_desc_t *)of_get_flat_dt_prop(node,
+ "lca_reserved_mem", NULL);
+ if (mem_desc && mem_desc->size) {
+ MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT
+ "[PHY layout]lca_reserved_mem : "
+ "0x%08llx - 0x%08llx (0x%llx)\n",
+ mem_desc->start,
+ mem_desc->start +
+ mem_desc->size - 1,
+ mem_desc->size
+ );
+ dram_sz += mem_desc->size;
+ }
+
+ /* tee reserved memory */
+ mem_desc = (mem_desc_t *)of_get_flat_dt_prop(node,
+ "tee_reserved_mem", NULL);
+ if (mem_desc && mem_desc->size) {
+ MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT
+ "[PHY layout]tee_reserved_mem : "
+ "0x%08llx - 0x%08llx (0x%llx)\n",
+ mem_desc->start,
+ mem_desc->start +
+ mem_desc->size - 1,
+ mem_desc->size
+ );
+ dram_sz += mem_desc->size;
+ }
+
+	/* frame buffer */
+#if defined(CONFIG_MTK_FB)
+	fb_size = (u64)DISP_GetVRamSizeBoot(NULL);
+	fb_base = (u64)mtkfb_get_fb_base();
+#endif
+	dram_sz += fb_size;
+
+ /* verify memory size */
+#if 0
+ if (dram_sz != phone_dram_sz) {
+ MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT
+ "memory size not matched: dram_sz: 0x%llx, "
+ "phone_dram_sz: 0x%llx\n",
+ (unsigned long long)dram_sz,
+ (unsigned long long)phone_dram_sz);
+ mtk_memcfg_late_warning(WARN_MEMSIZE_CONFLICT);
+ }
+#endif
+
+ /* print memory information */
+ MTK_MEMCFG_LOG_AND_PRINTK(KERN_ALERT
+ "[debug]available DRAM size = 0x%llx\n"
+ "[PHY layout]FB (dt) : 0x%llx - 0x%llx (0x%llx)\n",
+ (unsigned long long)kernel_mem_sz,
+ (unsigned long long)fb_base,
+ (unsigned long long)fb_base + fb_size - 1,
+ (unsigned long long)fb_size);
+
+	pr_alert("fb base: 0x%llx, size: 0x%llx\n",
+		 (unsigned long long)fb_base,
+		 (unsigned long long)fb_size);
+
+ return node;
+}
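+
+/*
+ * For reference, the scanner above expects a flat-DT memory node of
+ * roughly this shape (illustrative values; the reg cell layout
+ * follows the root #address-cells/#size-cells):
+ *
+ *	memory {
+ *		device_type = "memory";
+ *		reg = <0x40000000 0x20000000>;	(base, size)
+ *	};
+ *
+ * plus the optional MTK-specific orig_dram_info, mblock_info,
+ * lca_reserved_mem and tee_reserved_mem properties handled above.
+ */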
+
+static int __init display_early_memory_info(void)
+{
+	/* system memory */
+	of_scan_flat_dt(dt_scan_memory, NULL);
+ return 0;
+}
+
+
+#endif /* end of CONFIG_OF */
+
+late_initcall(mtk_memcfg_late_init);
+late_initcall(mtk_memcfg_late_sanity_test);
+#ifdef CONFIG_OF
+pure_initcall(display_early_memory_info);
+#endif /* end of CONFIG_OF */
+
+
+#if 0 /* test code of of_reserve */
+/* test memory-reserved code */
+phys_addr_t test_base = 0;
+phys_addr_t test_size = 0;
+reservedmem_of_init_fn reserve_memory_test_fn(struct reserved_mem *rmem,
+ unsigned long node, const char *uname)
+{
+ pr_alert("%s, name: %s, uname: %s, base: 0x%llx, size: 0x%llx\n",
+ __func__, rmem->name, uname,
+ (unsigned long long)rmem->base,
+ (unsigned long long)rmem->size);
+ /* memblock_free(rmem->base, rmem->size); */
+ test_base = rmem->base;
+ test_size = rmem->size;
+
+ return 0;
+}
+
+static int __init init_test_reserve_memory(void)
+{
+ void *p = 0;
+ p = ioremap(test_base, (size_t)test_size);
+ if (p) {
+ pr_alert("%s:%d ioremap ok: %p\n", __func__, __LINE__,
+ p);
+ } else {
+ pr_alert("%s:%d ioremap failed\n", __func__, __LINE__);
+ }
+ return 0;
+}
+late_initcall(init_test_reserve_memory);
+
+reservedmem_of_init_fn mrdump_reserve_initfn(struct reserved_mem *rmem,
+ unsigned long node, const char *uname)
+{
+ pr_alert("%s, name: %s, uname: %s, base: 0x%llx, size: 0x%llx\n",
+ __func__, rmem->name, uname,
+ (unsigned long long)rmem->base,
+ (unsigned long long)rmem->size);
+
+ return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(reserve_memory_test1, "reserve-memory-test",
+ reserve_memory_test_fn);
+RESERVEDMEM_OF_DECLARE(mrdump_reserved_memory, "mrdump-reserved-memory",
+ mrdump_reserve_initfn);
+#endif /* end of test code of of_reserve */
diff --git a/drivers/misc/mediatek/kernel/mtk_meminfo.c b/drivers/misc/mediatek/kernel/mtk_meminfo.c
new file mode 100644
index 000000000..917af97ec
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/mtk_meminfo.c
@@ -0,0 +1,110 @@
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <linux/module.h>
+#include <linux/of_fdt.h>
+#include <mach/mtk_memcfg.h>
+
+#ifdef CONFIG_OF
+/* return the actual physical DRAM size */
+static u64 kernel_mem_sz;
+static u64 phone_dram_sz; /* original phone DRAM size */
+static int dt_scan_memory(unsigned long node, const char *uname, int depth, void *data)
+{
+ char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ int i;
+ __be32 *reg, *endp;
+ unsigned long l;
+ dram_info_t *dram_info;
+
+ /* We are scanning "memory" nodes only */
+ if (type == NULL) {
+ /*
+ * The longtrail doesn't have a device_type on the
+ * /memory node, so look for the node called /memory@0.
+ */
+ if (depth != 1 || strcmp(uname, "memory@0") != 0)
+ return 0;
+ } else if (strcmp(type, "memory") != 0) {
+ return 0;
+ }
+
+	/*
+	 * Use kernel_mem_sz if phone_dram_sz is not available (workaround).
+	 * Projects that use device tree should have an orig_dram_info entry
+	 * in their device tree.
+	 * Once that porting is done, kernel_mem_sz will be removed.
+	 */
+ reg = of_get_flat_dt_prop(node, "reg", &l);
+ if (reg == NULL)
+ return 0;
+
+ endp = reg + (l / sizeof(__be32));
+ while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+ u64 base, size;
+
+ base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+ if (size == 0)
+ continue;
+
+ kernel_mem_sz += size;
+ }
+
+ /* orig_dram_info */
+ dram_info = (dram_info_t *)of_get_flat_dt_prop(node,
+ "orig_dram_info", NULL);
+ if (dram_info) {
+ for (i = 0; i < dram_info->rank_num; i++)
+ phone_dram_sz += dram_info->rank_info[i].size;
+ }
+
+ return node;
+}
+
+static int __init init_get_max_DRAM_size(void)
+{
+ if (!phone_dram_sz && !kernel_mem_sz) {
+ if (of_scan_flat_dt(dt_scan_memory, NULL)) {
+ pr_alert("init_get_max_DRAM_size done. phone_dram_sz: 0x%llx, kernel_mem_sz: 0x%llx\n",
+ (unsigned long long)phone_dram_sz,
+ (unsigned long long)kernel_mem_sz);
+ } else {
+ pr_err("init_get_max_DRAM_size fail\n");
+ BUG();
+ }
+ }
+ return 0;
+}
+
+phys_addr_t get_max_DRAM_size(void)
+{
+ if (!phone_dram_sz && !kernel_mem_sz)
+ init_get_max_DRAM_size();
+ return phone_dram_sz ? (phys_addr_t)phone_dram_sz : (phys_addr_t)kernel_mem_sz;
+}
+early_initcall(init_get_max_DRAM_size);
+#else
+extern phys_addr_t mtk_get_max_DRAM_size(void);
+phys_addr_t get_max_DRAM_size(void)
+{
+ return mtk_get_max_DRAM_size();
+}
+#endif /* end of CONFIG_OF */
+EXPORT_SYMBOL(get_max_DRAM_size);
+
+/*
+ * Return the DRAM size used by Linux kernel.
+ * In current stage, use phone DRAM size directly
+ */
+phys_addr_t get_memory_size(void)
+{
+ return get_max_DRAM_size();
+}
+EXPORT_SYMBOL(get_memory_size);
+
+phys_addr_t get_phys_offset(void)
+{
+ return PHYS_OFFSET;
+}
+EXPORT_SYMBOL(get_phys_offset);
diff --git a/drivers/misc/mediatek/kernel/mtk_trace.c b/drivers/misc/mediatek/kernel/mtk_trace.c
new file mode 100644
index 000000000..17bb45e52
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/mtk_trace.c
@@ -0,0 +1,221 @@
+#include <linux/ring_buffer.h>
+#include "kernel/trace/trace.h"
+#include <linux/mtk_ftrace.h>
+
+#ifdef CONFIG_MTK_KERNEL_MARKER
+static unsigned long __read_mostly tracing_mark_write_addr;
+static int kernel_marker_on;
+
+static inline void update_tracing_mark_write_addr(void)
+{
+	if (unlikely(tracing_mark_write_addr == 0))
+		tracing_mark_write_addr = kallsyms_lookup_name("tracing_mark_write");
+}
+
+void mt_kernel_trace_begin(char *name)
+{
+ if (unlikely(kernel_marker_on) && name)
+ event_trace_printk(tracing_mark_write_addr, "B|%d|%s\n", current->tgid, name);
+}
+EXPORT_SYMBOL(mt_kernel_trace_begin);
+
+void mt_kernel_trace_counter(char *name, int count)
+{
+ if (unlikely(kernel_marker_on) && name)
+ event_trace_printk(tracing_mark_write_addr,
+ "C|%d|%s|%d\n", current->tgid, name, count);
+}
+EXPORT_SYMBOL(mt_kernel_trace_counter);
+
+void mt_kernel_trace_end(void)
+{
+ if (unlikely(kernel_marker_on))
+ event_trace_printk(tracing_mark_write_addr, "E\n");
+}
+EXPORT_SYMBOL(mt_kernel_trace_end);
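+
+/*
+ * Illustrative instrumentation (assumes kernel_marker_on has been
+ * enabled through <debugfs>/tracing/kernel_marker_on, see below):
+ *
+ *	mt_kernel_trace_begin("my_section");
+ *	... traced work ...
+ *	mt_kernel_trace_end();
+ *
+ * This emits systrace-style "B|<tgid>|my_section" / "E" markers into
+ * the ftrace ring buffer.
+ */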
+
+static ssize_t
+kernel_marker_on_simple_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+
+ r = sprintf(buf, "%d\n", kernel_marker_on);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+static ssize_t
+kernel_marker_on_simple_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ kernel_marker_on = !!val;
+
+ if (kernel_marker_on)
+ update_tracing_mark_write_addr();
+
+ (*ppos)++;
+
+ return cnt;
+}
+static const struct file_operations kernel_marker_on_simple_fops = {
+ .open = tracing_open_generic,
+ .read = kernel_marker_on_simple_read,
+ .write = kernel_marker_on_simple_write,
+ .llseek = default_llseek,
+};
+
+static __init int init_kernel_marker(void)
+{
+ struct dentry *d_tracer;
+
+ d_tracer = tracing_init_dentry();
+ if (!d_tracer)
+ return 0;
+
+ trace_create_file("kernel_marker_on", 0644, d_tracer,
+ NULL, &kernel_marker_on_simple_fops);
+
+ return 0;
+}
+fs_initcall(init_kernel_marker);
+#endif
+
+#if defined(CONFIG_MTK_HIBERNATION) && defined(CONFIG_MTK_SCHED_TRACERS)
+int resize_ring_buffer_for_hibernation(int enable)
+{
+ int ret = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+ struct trace_array *tr = NULL;
+#endif
+
+ if (enable) {
+ ring_buffer_expanded = 0;
+ ret = tracing_update_buffers();
+ } else {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+ ret = tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
+#else
+ tr = top_trace_array();
+ if (tr)
+ ret = tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
+#endif
+ }
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_MTK_SCHED_TRACERS
+void print_enabled_events(struct seq_file *m)
+{
+	struct ftrace_event_call *call;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+ struct ftrace_event_file *file;
+ struct trace_array *tr;
+#endif
+
+ seq_puts(m, "# enabled events:");
+ /* mutex_lock(&event_mutex); */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+ list_for_each_entry(call, &ftrace_events, list) {
+ if (call->flags & TRACE_EVENT_FL_ENABLED)
+ seq_printf(m, " %s:%s", call->class->system, call->name);
+ }
+#else
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ list_for_each_entry(file, &tr->events, list) {
+ call = file->event_call;
+ if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ seq_printf(m, " %s:%s", call->class->system, call->name);
+ }
+ }
+#endif
+ /* mutex_unlock(&event_mutex); */
+ seq_puts(m, "\n");
+}
+
+/* ftrace's switch function for MTK solution */
+void mt_ftrace_enable_disable(int enable)
+{
+ if (enable) {
+ trace_set_clr_event(NULL, "sched_switch", 1);
+ trace_set_clr_event(NULL, "sched_wakeup", 1);
+ trace_set_clr_event(NULL, "sched_wakeup_new", 1);
+ trace_set_clr_event(NULL, "softirq_entry", 1);
+ trace_set_clr_event(NULL, "softirq_exit", 1);
+ trace_set_clr_event(NULL, "softirq_raise", 1);
+#ifdef CONFIG_SMP
+ trace_set_clr_event(NULL, "sched_migrate_task", 1);
+#endif
+ trace_set_clr_event(NULL, "workqueue_execute_start", 1);
+ trace_set_clr_event(NULL, "workqueue_execute_end", 1);
+
+ trace_set_clr_event(NULL, "block_bio_frontmerge", 1);
+ trace_set_clr_event(NULL, "block_bio_backmerge", 1);
+ trace_set_clr_event(NULL, "block_rq_issue", 1);
+ trace_set_clr_event(NULL, "block_rq_insert", 1);
+ trace_set_clr_event(NULL, "block_rq_complete", 1);
+ trace_set_clr_event(NULL, "debug_allocate_large_pages", 1);
+ trace_set_clr_event(NULL, "dump_allocate_large_pages", 1);
+
+
+ trace_set_clr_event("mtk_events", NULL, 1);
+ trace_set_clr_event("ipi", NULL, 1);
+
+ trace_set_clr_event("met_bio", NULL, 1);
+ trace_set_clr_event("met_fuse", NULL, 1);
+
+ tracing_on();
+ } else {
+ tracing_off();
+ trace_set_clr_event(NULL, NULL, 0);
+ }
+}
+#endif
+
+#if defined(CONFIG_MTK_SCHED_TRACERS) && defined(CONFIG_HOTPLUG_CPU)
+#include <linux/cpu.h>
+#include <trace/events/mtk_events.h>
+
+static DEFINE_PER_CPU(unsigned long long, last_event_ts);
+static struct notifier_block hotplug_event_notifier;
+
+static int hotplug_event_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+ trace_cpu_hotplug(cpu, 1, per_cpu(last_event_ts, cpu));
+ per_cpu(last_event_ts, cpu) = ns2usecs(ftrace_now(cpu));
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ trace_cpu_hotplug(cpu, 0, per_cpu(last_event_ts, cpu));
+ per_cpu(last_event_ts, cpu) = ns2usecs(ftrace_now(cpu));
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static __init int hotplug_events_init(void)
+{
+ hotplug_event_notifier.notifier_call = hotplug_event_notify;
+ hotplug_event_notifier.priority = 0;
+ register_cpu_notifier(&hotplug_event_notifier);
+ return 0;
+}
+early_initcall(hotplug_events_init);
+#endif
diff --git a/drivers/misc/mediatek/kernel/sched/Makefile b/drivers/misc/mediatek/kernel/sched/Makefile
new file mode 100644
index 000000000..f40af42c0
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/sched/Makefile
@@ -0,0 +1,13 @@
+LINUXINCLUDE += -include $(srctree)/kernel/sched/sched.h
+
+obj-$(CONFIG_MT_PRIO_TRACER) += prio_tracer.o
+
+#
+# RQ stats for TLP estimation
+#
+# For user space operation
+obj-$(CONFIG_MTK_SCHED_RQAVG_US) += rq_stats.o
+# For kernel space operation
+obj-$(CONFIG_MTK_SCHED_RQAVG_KS) += sched_avg.o
+# For query cpu topology
+obj-y += cputopo.o
diff --git a/drivers/misc/mediatek/kernel/sched/cputopo.c b/drivers/misc/mediatek/kernel/sched/cputopo.c
new file mode 100644
index 000000000..76e29facd
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/sched/cputopo.c
@@ -0,0 +1,130 @@
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+#include <linux/topology.h>
+
+#define MAX_LONG_SIZE 24
+
+struct kobject *cputopo_glb_kobj;
+
+/*
+ * nr_clusters attribute
+ */
+static ssize_t nr_clusters_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, MAX_LONG_SIZE, "%u\n", arch_get_nr_clusters());
+}
+static struct kobj_attribute nr_clusters_attr = __ATTR_RO(nr_clusters);
+
+/*
+ * is_big_little attribute
+ */
+static ssize_t is_big_little_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, MAX_LONG_SIZE, "%u\n", arch_is_big_little());
+}
+static struct kobj_attribute is_big_little_attr = __ATTR_RO(is_big_little);
+
+/*
+ * is_multi_cluster attribute
+ */
+static ssize_t is_multi_cluster_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, MAX_LONG_SIZE, "%u\n", arch_is_multi_cluster());
+}
+static struct kobj_attribute is_multi_cluster_attr = __ATTR_RO(is_multi_cluster);
+
+/*
+ * little_cpumask attribute
+ */
+static ssize_t little_cpumask_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct cpumask big, little;
+
+ arch_get_big_little_cpus(&big, &little);
+ return snprintf(buf, MAX_LONG_SIZE, "%02lx\n", *cpumask_bits(&little));
+}
+static struct kobj_attribute little_cpumask_attr = __ATTR_RO(little_cpumask);
+
+/*
+ * big_cpumask attribute
+ */
+static ssize_t big_cpumask_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct cpumask big, little;
+
+ arch_get_big_little_cpus(&big, &little);
+ return snprintf(buf, MAX_LONG_SIZE, "%02lx\n", *cpumask_bits(&big));
+}
+static struct kobj_attribute big_cpumask_attr = __ATTR_RO(big_cpumask);
+
+
+/*
+ * glbinfo attribute
+ */
+static ssize_t glbinfo_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int len = 0;
+ struct cpumask big, little;
+
+ arch_get_big_little_cpus(&big, &little);
+ len += snprintf(buf + len, PAGE_SIZE - len - 1, "big/little arch: %s\n",
+ arch_is_big_little() ? "yes" : "no");
+ len += snprintf(buf + len, PAGE_SIZE - len - 1, "big/little cpumask:%0lx/%0lx\n",
+ *cpumask_bits(&big), *cpumask_bits(&little));
+	len += snprintf(buf + len, PAGE_SIZE - len - 1, "nr_cpus: %u\n",
+ nr_cpu_ids);
+ len += snprintf(buf + len, PAGE_SIZE - len - 1, "nr_clusters: %u\n",
+ arch_get_nr_clusters());
+
+ return len;
+}
+static struct kobj_attribute glbinfo_attr = __ATTR_RO(glbinfo);
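+
+/*
+ * Illustrative glbinfo read-out on a hypothetical 4+4 big.LITTLE
+ * system (actual values depend on the SoC):
+ *
+ *	# cat /sys/devices/system/cpu/cputopo/glbinfo
+ *	big/little arch: yes
+ *	big/little cpumask:f0/f
+ *	nr_cpus: 8
+ *	nr_clusters: 2
+ */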
+
+
+
+static struct attribute *cputopo_attrs[] = {
+ &nr_clusters_attr.attr,
+ &is_big_little_attr.attr,
+ &is_multi_cluster_attr.attr,
+ &little_cpumask_attr.attr,
+ &big_cpumask_attr.attr,
+ &glbinfo_attr.attr,
+ NULL,
+};
+
+static struct attribute_group cputopo_attr_group = {
+ .attrs = cputopo_attrs,
+};
+
+static int init_cputopo_attribs(void)
+{
+ int err;
+
+ /* Create /sys/devices/system/cpu/cputopo/... */
+ cputopo_glb_kobj = kobject_create_and_add("cputopo", &cpu_subsys.dev_root->kobj);
+ if (!cputopo_glb_kobj)
+ return -ENOMEM;
+
+ err = sysfs_create_group(cputopo_glb_kobj, &cputopo_attr_group);
+ if (err)
+ kobject_put(cputopo_glb_kobj);
+
+ return err;
+}
+
+static int __init cputopo_info_init(void)
+{
+ int ret = 0;
+
+ ret = init_cputopo_attribs();
+
+ return ret;
+}
+
+core_initcall(cputopo_info_init);
diff --git a/drivers/misc/mediatek/kernel/sched/prio_tracer.c b/drivers/misc/mediatek/kernel/sched/prio_tracer.c
new file mode 100644
index 000000000..67a371f7d
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/sched/prio_tracer.c
@@ -0,0 +1,518 @@
+/*
+ * The prefix "pts_" stands for Priority TracerS.
+ */
+#define PRIORITY_TRACER "v0.1"
+
+#ifdef CONFIG_MT_PRIO_TRACER
+
+#include <linux/types.h>
+#include <linux/prio_tracer.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/uaccess.h>
+
+DEFINE_SPINLOCK(pts_lock);
+
+static struct rb_root priority_tracers;
+static struct dentry *pts_debugfs_dir_root;
+static struct dentry *pts_debugfs_dir_proc;
+static const struct file_operations pts_proc_fops;
+static unsigned long pts_enable;
+
+struct prio_set {
+ int prio;
+ int policy;
+};
+
+struct prio_tracer {
+ pid_t tid;
+ struct rb_node rb_node;
+ struct dentry *debugfs_entry;
+
+ unsigned int count_usr;
+ unsigned int count_ker;
+ unsigned int change_usr;
+ unsigned int change_ker;
+ struct prio_set ps[4];
+ int prio_binder;
+};
+
+/*
+ * copy from kernel/kernel/sched/core.c
+ *
+ * __normal_prio - return the priority that is based on the static prio
+ */
+static inline int __normal_prio(struct task_struct *p)
+{
+ return p->static_prio;
+}
+
+/**
+ * select_set -
+ * The tracer records the latest 2 priority settings from syscall
+ * and kernel contexts respectively.  The records are stored as
+ * follows: syscall uses the first 2 slots, while kernel takes the
+ * remaining 2.
+ *
+ * MUST hold pts_lock.
+ *
+ * count: 1  2  3  4  5 ...
+ * set:   t1 t0 t1 t0 t1 ...
+ *
+ *      | next | latest
+ * ---------------------
+ * odd  |  t0  |  t1
+ * even |  t1  |  t0
+ *
+ * @next: has 2 meanings when set: it selects the next slot to
+ *        overwrite which, until the count is bumped, is also the
+ *        second latest record.
+ * @kernel: 0 for user, 1 for kernel, 2 for binder (binder is
+ *          treated the same as kernel here).
+ */
+static struct prio_set *select_set(struct prio_tracer *pt, int next, int kernel)
+{
+ int count = kernel ? pt->count_ker : pt->count_usr;
+ int i;
+
+ i = (((count % 2) == (!!next)) ? 0 : 1) + (kernel ? 2 : 0);
+ return &pt->ps[i];
+}
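+
+/*
+ * Worked example (user slots, i.e. kernel == 0): starting from
+ * count_usr == 0, the first update is written to ps[1], the second
+ * to ps[0], the third to ps[1] again, and so on.  After each count
+ * bump, select_set(pt, 0, 0) returns the slot written most recently
+ * and select_set(pt, 1, 0) the one to be overwritten next.
+ */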
+
+/**
+ * query_prio_tracer -
+ * note: called under @pts_lock protected
+ */
+static struct prio_tracer *query_prio_tracer(pid_t tid)
+{
+ struct rb_node *n = priority_tracers.rb_node;
+ struct prio_tracer *pt;
+
+ while (n) {
+ pt = rb_entry(n, struct prio_tracer, rb_node);
+
+ if (tid < pt->tid)
+ n = n->rb_left;
+ else if (tid > pt->tid)
+ n = n->rb_right;
+ else {
+ return pt;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * update_prio_tracer -
+ * @prio: equivalent to prio of task structure.
+ */
+void update_prio_tracer(pid_t tid, int prio, int policy, int kernel)
+{
+ struct prio_tracer *pt;
+ struct prio_set *ps, *ps_latest;
+ unsigned long flags;
+
+ if (!pts_enable)
+ return;
+
+ spin_lock_irqsave(&pts_lock, flags);
+ pt = query_prio_tracer(tid);
+ if (!pt) {
+ spin_unlock_irqrestore(&pts_lock, flags);
+ return;
+ }
+
+ ps = select_set(pt, 1, kernel);
+ ps->prio = prio;
+ ps->policy = policy;
+
+ ps_latest = select_set(pt, 0, kernel);
+ if (ps_latest->prio != prio || ps_latest->policy != policy)
+ kernel ? pt->change_ker++ : pt->change_usr++;
+
+ kernel ? pt->count_ker++ : pt->count_usr++;
+
+ /* binder priority inherit */
+ if (kernel == PTS_BNDR)
+ pt->prio_binder = prio;
+ spin_unlock_irqrestore(&pts_lock, flags);
+}
+
+void create_prio_tracer(pid_t tid)
+{
+ struct rb_node **p = &priority_tracers.rb_node;
+ struct rb_node *parent = NULL;
+ struct prio_tracer *new_pt, *pt;
+ struct dentry *d = NULL;
+ unsigned long flags;
+ int i;
+
+ new_pt = kzalloc(sizeof(struct prio_tracer), GFP_KERNEL);
+ if (!new_pt) {
+ pr_err("%s: alloc failed\n", __func__);
+ return;
+ }
+
+ if (pts_debugfs_dir_proc) {
+ char strbuf[11];
+ snprintf(strbuf, sizeof(strbuf), "%u", tid);
+ /* debugfs involves mutex... */
+ d = debugfs_create_file(strbuf,
+ S_IRUGO, pts_debugfs_dir_proc, new_pt, &pts_proc_fops);
+ }
+
+ spin_lock_irqsave(&pts_lock, flags);
+ while (*p) {
+ parent = *p;
+ pt = rb_entry(parent, struct prio_tracer, rb_node);
+
+ if (tid < pt->tid)
+ p = &(*p)->rb_left;
+ else if (tid > pt->tid)
+ p = &(*p)->rb_right;
+ else {
+ spin_unlock_irqrestore(&pts_lock, flags);
+ debugfs_remove(d);
+ kfree(new_pt);
+			pr_debug("%s: found same tid\n", __func__);
+ return;
+ }
+ }
+
+ new_pt->tid = tid;
+ for (i = 0; i < 4; i++)
+ new_pt->ps[i].policy = -1;
+ new_pt->prio_binder = PTS_DEFAULT_PRIO;
+ new_pt->debugfs_entry = d;
+
+ rb_link_node(&new_pt->rb_node, parent, p);
+ rb_insert_color(&new_pt->rb_node, &priority_tracers);
+ spin_unlock_irqrestore(&pts_lock, flags);
+}
+
+void delete_prio_tracer(pid_t tid)
+{
+ struct prio_tracer *pt;
+ struct dentry *d;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pts_lock, flags);
+ pt = query_prio_tracer(tid);
+ if (!pt) {
+ spin_unlock_irqrestore(&pts_lock, flags);
+ return;
+ }
+ d = pt->debugfs_entry;
+ spin_unlock_irqrestore(&pts_lock, flags);
+
+ /* debugfs involves mutex... */
+ debugfs_remove(d);
+
+ spin_lock_irqsave(&pts_lock, flags);
+ rb_erase(&pt->rb_node, &priority_tracers);
+ kfree(pt);
+ spin_unlock_irqrestore(&pts_lock, flags);
+}
+
+void set_user_nice_syscall(struct task_struct *p, long nice)
+{
+ set_user_nice_core(p, nice);
+ update_prio_tracer(task_pid_nr(p), NICE_TO_PRIO(nice), 0, PTS_USER);
+}
+
+void set_user_nice_binder(struct task_struct *p, long nice)
+{
+ set_user_nice_core(p, nice);
+ update_prio_tracer(task_pid_nr(p), NICE_TO_PRIO(nice), 0, PTS_BNDR);
+}
+
+int sched_setscheduler_syscall(struct task_struct *p, int policy, const struct sched_param *param)
+{
+ int retval;
+
+ retval = sched_setscheduler_core(p, policy, param);
+ if (!retval) {
+ int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
+ if (!rt_policy(policy))
+ prio = __normal_prio(p);
+ else
+ prio = MAX_RT_PRIO - 1 - prio;
+ update_prio_tracer(task_pid_nr(p), prio, policy, PTS_USER);
+ }
+ return retval;
+}
+
+int sched_setscheduler_nocheck_binder(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ int retval;
+
+ retval = sched_setscheduler_nocheck_core(p, policy, param);
+ if (!retval) {
+ int prio = param->sched_priority & ~MT_ALLOW_RT_PRIO_BIT;
+ if (!rt_policy(policy))
+ prio = __normal_prio(p);
+ else
+ prio = MAX_RT_PRIO - 1 - prio;
+ update_prio_tracer(task_pid_nr(p), prio, policy, PTS_BNDR);
+ }
+ return retval;
+}
+
+static void pts_proc_print(struct seq_file *m, struct prio_set *ps)
+{
+ int prio = ps->prio;
+
+ if (ps->policy == -1) {
+ seq_puts(m, "0 0 0 -1 ");
+ return;
+ }
+
+ if (rt_prio(prio))
+ seq_printf(m, "%d %d %d", (prio - MAX_RT_PRIO), 0, (MAX_RT_PRIO - 1 - prio));
+ else
+ seq_printf(m, "%d %d %d", USER_PRIO(prio), PRIO_TO_NICE(prio), 0);
+ seq_printf(m, " %d ", ps->policy);
+}
+
+static int pts_proc_show(struct seq_file *m, void *unused)
+{
+ struct prio_tracer *pt = m->private;
+ struct prio_set ps_copy[4];
+ unsigned int count_usr, count_ker;
+ unsigned int change_usr, change_ker;
+ int prio_binder;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pts_lock, flags);
+ count_usr = pt->count_usr;
+ count_ker = pt->count_ker;
+ change_usr = pt->change_usr;
+ change_ker = pt->change_ker;
+ prio_binder = pt->prio_binder;
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 2; j++) {
+ memcpy(&ps_copy[((i * 2) + j)], select_set(pt, j, i),
+ sizeof(struct prio_set));
+ }
+ }
+ spin_unlock_irqrestore(&pts_lock, flags);
+
+ seq_printf(m, "%u %u ", count_usr, change_usr);
+ for (i = 0; i < 2; i++)
+ pts_proc_print(m, &ps_copy[i]);
+
+ seq_printf(m, " %u %u ", count_ker, change_ker);
+ for (i = 2; i < 4; i++)
+ pts_proc_print(m, &ps_copy[i]);
+
+ if (prio_binder != PTS_DEFAULT_PRIO) {
+ int tmp = prio_binder;
+ prio_binder = rt_prio(tmp) ? (tmp - MAX_RT_PRIO) : USER_PRIO(tmp);
+ }
+ seq_printf(m, " %d\n", prio_binder);
+ return 0;
+}
+
+static int pts_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pts_proc_show, inode->i_private);
+}
+
+static const struct file_operations pts_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = pts_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pts_enable_show(struct seq_file *m, void *unused)
+{
+ seq_printf(m, "%lu\n", pts_enable);
+ return 0;
+}
+
+static int pts_enable_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pts_enable_show, inode->i_private);
+}
+
+static ssize_t pts_enable_write(struct file *flip, const char *ubuf, size_t cnt, loff_t *data)
+{
+ char buf[32];
+ size_t copy_size = cnt;
+ unsigned long val;
+ int ret;
+
+	if (cnt >= sizeof(buf))
+		copy_size = sizeof(buf) - 1;
+
+	if (copy_from_user(&buf, ubuf, copy_size))
+		return -EFAULT;
+	buf[copy_size] = '\0';
+
+	ret = strict_strtoul(buf, 10, &val);
+	if (ret)
+		return ret;
+	pts_enable = !!val;
+	pr_debug("%s: set %s\n", __func__, pts_enable ? "enable" : "disable");
+	return cnt;
+}
+
+static const struct file_operations pts_enable_fops = {
+ .owner = THIS_MODULE,
+ .open = pts_enable_open,
+ .read = seq_read,
+ .write = pts_enable_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int pts_utest_show(struct seq_file *m, void *unused)
+{
+ seq_printf(m, "usage: echo $type $prio $tid\n"
+ " $type 0 user / 1 kernel / 2 binder\n"
+ " $prio\n"
+ " H L\n"
+ " rt |<----------->|\n"
+ " 0 98\n"
+ " ((RT) 99 1)\n\n"
+ " H L\n"
+ " normal |<-------->|\n"
+ " 100 139\n"
+ " ((nice) -20 19)\n");
+ return 0;
+}
+
+static int pts_utest_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pts_utest_show, inode->i_private);
+}
+
+/* # echo @ut_type @ut_prio @ut_tid > utest */
+static ssize_t pts_utest_write(struct file *flip, const char *ubuf, size_t cnt, loff_t *data)
+{
+ char buf[32];
+ size_t copy_size = cnt;
+ unsigned long val;
+ int ut_type, ut_tid, ut_prio;
+ int ret, i = 0, j;
+ struct task_struct *p;
+
+
+ if (cnt >= sizeof(buf))
+		copy_size = sizeof(buf) - 1;
+ buf[copy_size] = '\0';
+
+ if (copy_from_user(&buf, ubuf, copy_size))
+ return -EFAULT;
+
+ do {
+ } while (buf[i++] != ' ');
+ buf[(i - 1)] = '\0';
+ ret = strict_strtoul(buf, 10, &val);
+ ut_type = (int)val;
+
+ j = i;
+ do {
+ } while (buf[i++] != ' ');
+ buf[(i - 1)] = '\0';
+ ret = strict_strtoul((const char *)(&buf[j]), 10, &val);
+ ut_prio = (int)val;
+
+ ret = strict_strtoul((const char *)(&buf[i]), 10, &val);
+ ut_tid = (int)val;
+
+ pr_debug("%s: unit test %s tid %d prio %d j %d i %d", __func__,
+ (ut_type == PTS_USER) ? "user" :
+ ((ut_type == PTS_KRNL) ? "kernel" :
+ ((ut_type == PTS_BNDR) ? "binder" : "unknown")), ut_tid, ut_prio, j, i);
+
+
+ /* start to test api */
+ p = find_task_by_vpid(ut_tid);
+ if (!p)
+ goto utest_out;
+
+ if ((ut_prio >= 0) && (ut_prio < MAX_RT_PRIO)) {
+ struct sched_param param;
+
+ /* sched_priority is rt priority rather than effective one */
+ ut_prio = MAX_RT_PRIO - 1 - ut_prio;
+ param.sched_priority = ut_prio | MT_ALLOW_RT_PRIO_BIT;
+
+ switch (ut_type) {
+ case PTS_USER:
+ sched_setscheduler_syscall(p, SCHED_RR, &param);
+ break;
+ case PTS_KRNL:
+ sched_setscheduler_nocheck(p, SCHED_RR, &param);
+ break;
+ case PTS_BNDR:
+ sched_setscheduler_nocheck_binder(p, SCHED_RR, &param);
+ break;
+ default:
+ break;
+ }
+ } else { /* assume normal */
+ switch (ut_type) {
+ case PTS_USER:
+ set_user_nice_syscall(p, PRIO_TO_NICE(ut_prio));
+ break;
+ case PTS_KRNL:
+ set_user_nice(p, PRIO_TO_NICE(ut_prio));
+ break;
+ case PTS_BNDR:
+ set_user_nice_binder(p, PRIO_TO_NICE(ut_prio));
+ break;
+ default:
+ break;
+ }
+ }
+
+ utest_out:
+ return cnt;
+}
+
+static const struct file_operations pts_utest_fops = {
+ .owner = THIS_MODULE,
+ .open = pts_utest_open,
+ .read = seq_read,
+ .write = pts_utest_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init prio_tracer_init(void)
+{
+ pts_debugfs_dir_root = debugfs_create_dir("prio_tracer", NULL);
+
+ if (pts_debugfs_dir_root) {
+ pts_debugfs_dir_proc = debugfs_create_dir("proc", pts_debugfs_dir_root);
+
+ debugfs_create_file("enable",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ pts_debugfs_dir_root, NULL, &pts_enable_fops);
+
+ debugfs_create_file("utest",
+ (S_IRUGO | S_IWUSR | S_IWGRP),
+ pts_debugfs_dir_root, NULL, &pts_utest_fops);
+ }
+
+ /* if built-in, default on */
+ pts_enable = 1;
+ return 0;
+}
+device_initcall(prio_tracer_init);
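+
+/*
+ * Illustrative usage (assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *	# echo 1 > /sys/kernel/debug/prio_tracer/enable
+ *	# cat /sys/kernel/debug/prio_tracer/proc/<tid>
+ *
+ * Each per-tid file prints the user/kernel update counts followed by
+ * the two most recent priority settings recorded for each side.
+ */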
+
+MODULE_LICENSE("GPL v2");
+
+#endif
diff --git a/drivers/misc/mediatek/kernel/sched/rq_stats.c b/drivers/misc/mediatek/kernel/sched/rq_stats.c
new file mode 100644
index 000000000..0ba011825
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/sched/rq_stats.c
@@ -0,0 +1,712 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/cpu.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/rq_stats.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/tick.h>
+#include <linux/suspend.h>
+#include <linux/version.h>
+#include <asm/smp_plat.h>
+
+#include <trace/events/sched.h>
+
+#define MAX_LONG_SIZE 24
+#define DEFAULT_RQ_POLL_JIFFIES 1
+#define DEFAULT_DEF_TIMER_JIFFIES 5
+#define CPU_FREQ_VARIANT 0
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+static unsigned int heavy_task_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
+#define task_low_priority(prio) (((prio) >= heavy_task_prio) ? 1 : 0)
+#endif
+
+//struct notifier_block freq_policy;
+struct notifier_block freq_transition;
+struct notifier_block cpu_hotplug;
+static unsigned int heavy_task_threshold = 650; // max=1023
+
+struct cpu_load_data {
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_iowait;
+ unsigned int avg_load_maxfreq;
+ unsigned int samples;
+ unsigned int window_size;
+ unsigned int cur_freq;
+ unsigned int policy_max;
+ cpumask_var_t related_cpus;
+ spinlock_t cpu_load_lock;
+};
+
+static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
+#define RQSTATS_USE_CPU_IDLE_INTERNAL 1
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) */
+
+#if defined(RQSTATS_USE_CPU_IDLE_INTERNAL) || !defined(CONFIG_CPU_FREQ)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
+{
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
+
+ cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+
+ idle_time = cur_wall_time - busy_time;
+ if (wall)
+ *wall = cputime_to_usecs(cur_wall_time);
+
+ return cputime_to_usecs(idle_time);
+}
+
+static inline cputime64_t get_cpu_idle_time_internal(unsigned int cpu, cputime64_t *wall)
+{
+ u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
+
+ if (idle_time == -1ULL)
+ return get_cpu_idle_time_jiffy(cpu, wall);
+ else
+ idle_time += get_cpu_iowait_time_us(cpu, wall);
+
+ return idle_time;
+}
+#else /* !RQSTATS_USE_CPU_IDLE_INTERNAL && CONFIG_CPU_FREQ */
+extern u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+#endif /* RQSTATS_USE_CPU_IDLE_INTERNAL || !CONFIG_CPU_FREQ */
+
+static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
+ cputime64_t *wall)
+{
+ u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
+
+ if (iowait_time == -1ULL)
+ return 0;
+
+ return iowait_time;
+}
+
+static int update_average_load(unsigned int freq, unsigned int cpu, bool use_maxfreq)
+{
+
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
+ cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
+ unsigned int idle_time, wall_time, iowait_time;
+ unsigned int cur_load, load_at_max_freq;
+
+#if defined(RQSTATS_USE_CPU_IDLE_INTERNAL) || !defined(CONFIG_CPU_FREQ)
+ cur_idle_time = get_cpu_idle_time_internal(cpu, &cur_wall_time);
+#else /* !RQSTATS_USE_CPU_IDLE_INTERNAL && CONFIG_CPU_FREQ */
+ cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);
+#endif /* RQSTATS_USE_CPU_IDLE_INTERNAL || !CONFIG_CPU_FREQ */
+ cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);
+
+ wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
+ pcpu->prev_cpu_wall = cur_wall_time;
+
+ idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
+ pcpu->prev_cpu_idle = cur_idle_time;
+
+ iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
+ pcpu->prev_cpu_iowait = cur_iowait_time;
+
+ if (idle_time >= iowait_time)
+ idle_time -= iowait_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ return 0;
+
+ if (freq)
+ cur_load = 100 * (wall_time - idle_time) / wall_time;
+ else
+ cur_load = 0;
+
+ /* Calculate the scaled load across CPU */
+	if (cpu_online(cpu)) {
+		if (use_maxfreq)
+			load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
+		else
+			load_at_max_freq = cur_load;
+	} else {
+		load_at_max_freq = 0;
+	}
+
+#if 1
+ if (!pcpu->avg_load_maxfreq) {
+		/* This is the first sample in this window */
+ pcpu->avg_load_maxfreq = load_at_max_freq;
+ pcpu->window_size = wall_time;
+ } else {
+		/*
+		 * There is already a sample available in this window.
+		 * Compute a weighted average with the previous entry so
+		 * that we get the precise weighted load.
+		 */
+ pcpu->avg_load_maxfreq =
+ ((pcpu->avg_load_maxfreq * pcpu->window_size) +
+ (load_at_max_freq * wall_time)) /
+ (wall_time + pcpu->window_size);
+
+ pcpu->window_size += wall_time;
+ }
+#else // debug
+ pcpu->avg_load_maxfreq = load_at_max_freq;
+ pcpu->window_size = wall_time;
+#endif
+
+ return 0;
+}
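+
+/*
+ * Worked example of the weighted window average above: a first
+ * sample of load 80 over a 10 ms wall window followed by load 20
+ * over 30 ms yields (80*10 + 20*30) / (10 + 30) = 35, i.e. each
+ * sample is weighted by the wall time it covers.
+ */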
+
+#if 0
+static unsigned int report_load_at_max_freq(bool reset)
+{
+ int cpu;
+ struct cpu_load_data *pcpu;
+ unsigned int total_load = 0;
+ unsigned long flags;
+
+ for_each_online_cpu(cpu) {
+ pcpu = &per_cpu(cpuload, cpu);
+ spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
+ update_average_load(pcpu->cur_freq, cpu, 0);
+ total_load += pcpu->avg_load_maxfreq;
+ if (reset)
+ pcpu->avg_load_maxfreq = 0;
+ spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);
+ }
+ return total_load;
+}
+#endif
+
+unsigned int sched_get_percpu_load(int cpu, bool reset, bool use_maxfreq)
+{
+ struct cpu_load_data *pcpu;
+ unsigned int load = 0;
+ unsigned long flags;
+
+#if 0
+ if (!cpu_online(cpu))
+ return 0;
+#endif
+
+ if (rq_info.init != 1)
+ return 100;
+
+ pcpu = &per_cpu(cpuload, cpu);
+ spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
+ update_average_load(pcpu->cur_freq, cpu, use_maxfreq);
+ load = pcpu->avg_load_maxfreq;
+ if (reset)
+ pcpu->avg_load_maxfreq = 0;
+ spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);
+
+ return load;
+}
+EXPORT_SYMBOL(sched_get_percpu_load);
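+
+/*
+ * Illustrative caller (hypothetical hotplug governor) sampling the
+ * normalized load once per poll period and resetting the window:
+ *
+ *	unsigned int load = sched_get_percpu_load(cpu, true, false);
+ *	(load is 0..100; 100 is also returned before rq_info init)
+ */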
+
+#define HMP_RATIO 10/17
+//#define DETECT_HTASK_HEAT
+
+#ifdef DETECT_HTASK_HEAT
+#define MAX_HTASK_TEMPERATURE 10
+static unsigned int htask_temperature = 0;
+static void __heat_refined(int *count)
+{
+ if (arch_is_big_little()) {
+ if (*count) {
+ htask_temperature += (htask_temperature < MAX_HTASK_TEMPERATURE) ? 1 : 0;
+ } else {
+ *count = (htask_temperature > 0) ? 1 : 0;
+ htask_temperature -= (htask_temperature > 0) ? 1 : 0;
+ }
+ }
+}
+#else
+static inline void __heat_refined(int *count) {}
+#endif
+
+static void __trace_out(int heavy, int cpu, struct task_struct *p)
+{
+#define TRACEBUF_LEN 128
+ char tracebuf[TRACEBUF_LEN];
+
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+ snprintf(tracebuf, TRACEBUF_LEN, " %s cpu=%d load=%4lu cpucap=%4lu/%4lu pid=%4d name=%s",
+ heavy ? "Y" : "N",
+ cpu, p->se.avg.load_avg_ratio,
+ topology_cpu_capacity(cpu), topology_max_cpu_capacity(cpu),
+ p->pid, p->comm);
+#else
+ snprintf(tracebuf, TRACEBUF_LEN, " %s cpu=%d load=%4lu pid=%4d name=%s",
+ heavy ? "Y" : "N",
+ cpu, p->se.avg.load_avg_ratio,
+ p->pid, p->comm);
+#endif
+ trace_sched_heavy_task(tracebuf);
+
+ if (unlikely(heavy))
+ trace_sched_task_entity_avg(5, p, &p->se.avg);
+}
+
+static unsigned int htask_statistic = 0;
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+#define OVER_L_TH(cpu) ((topology_cpu_capacity(cpu) >= topology_max_cpu_capacity(cpu)) ? 1:0)
+#define OVER_B_TH(cpu) ((topology_cpu_capacity(cpu)*8 > topology_max_cpu_capacity(cpu)*5) ? 1:0)
+#else
+#define OVER_L_TH(cpu) (1)
+#define OVER_B_TH(cpu) (1)
+#endif
+unsigned int sched_get_nr_heavy_task_by_threshold(unsigned int threshold)
+{
+ int cpu;
+ struct task_struct *p;
+ unsigned long flags;
+ unsigned int count = 0;
+ int is_heavy = 0;
+ unsigned int hmp_threshold;
+
+ if (rq_info.init != 1)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ int bigcore = arch_cpu_is_big(cpu);
+ hmp_threshold = bigcore ? threshold * HMP_RATIO : threshold;
+ raw_spin_lock_irqsave(&cpu_rq(cpu)->lock, flags);
+ list_for_each_entry(p, &cpu_rq(cpu)->cfs_tasks, se.group_node) {
+ is_heavy = 0;
+#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
+ if (task_low_priority(p->prio))
+ continue;
+#endif
+ if (p->se.avg.load_avg_ratio >= hmp_threshold) {
+ is_heavy = (!bigcore && OVER_L_TH(cpu)) || (bigcore && OVER_B_TH(cpu));
+ }
+ count += is_heavy ? 1 : 0;
+ __trace_out(is_heavy, cpu, p);
+ }
+ raw_spin_unlock_irqrestore(&cpu_rq(cpu)->lock, flags);
+ }
+
+ __heat_refined(&count);
+ if (count)
+ htask_statistic++;
+ return count;
+}
+EXPORT_SYMBOL(sched_get_nr_heavy_task_by_threshold);
+
+unsigned int sched_get_nr_heavy_task(void)
+{
+ return sched_get_nr_heavy_task_by_threshold(heavy_task_threshold);
+}
+EXPORT_SYMBOL(sched_get_nr_heavy_task);
+
+void sched_set_heavy_task_threshold(unsigned int val)
+{
+ heavy_task_threshold = val;
+}
+EXPORT_SYMBOL(sched_set_heavy_task_threshold);
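+
+/*
+ * Worked example: with the default threshold of 650 (on the 0..1023
+ * load_avg_ratio scale), a big core uses 650 * 10 / 17 = 382 via
+ * HMP_RATIO, so a task is counted as heavy there at a lower per-task
+ * load than on a little core.
+ */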
+
+#if 0
+static int cpufreq_policy_handler(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ int cpu = 0;
+ struct cpufreq_policy *policy = data;
+	unsigned long flags;
+
+ if (event == CPUFREQ_START)
+ return 0;
+
+ if (event != CPUFREQ_INCOMPATIBLE)
+ return 0;
+
+ /* Make sure that all CPUs impacted by this policy are
+ * updated since we will only get a notification when the
+ * user explicitly changes the policy on a CPU.
+ */
+ for_each_cpu(cpu, policy->cpus) {
+		struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
+ spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
+ pcpu->policy_max = policy->cpuinfo.max_freq;
+ spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);
+ }
+
+ return 0;
+}
+#endif
+
+static int cpufreq_transition_handler(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+#if 1
+ struct cpufreq_freqs *freqs = data;
+ struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
+ int j;
+ unsigned long flags;
+
+ if (rq_info.init != 1)
+ return 0;
+
+ switch (val) {
+ case CPUFREQ_POSTCHANGE:
+ for_each_cpu(j, this_cpu->related_cpus) {
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
+			// flush previous load
+ spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
+ if (cpu_online(j))
+ update_average_load(freqs->old, freqs->cpu, 0);
+ pcpu->cur_freq = freqs->new;
+ spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);
+ }
+ break;
+ }
+#endif
+ return 0;
+}
+
+static int cpu_hotplug_handler(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+#if 1
+ unsigned int cpu = (unsigned long)data;
+ struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+ unsigned long flags;
+
+ if (rq_info.init != 1)
+ return NOTIFY_OK;
+
+ switch (val) {
+ case CPU_UP_PREPARE:
+ // cpu_online()=0 here, count cpu offline period as idle
+ spin_lock_irqsave(&this_cpu->cpu_load_lock, flags);
+ update_average_load(0, cpu, 0);
+ spin_unlock_irqrestore(&this_cpu->cpu_load_lock, flags);
+ break;
+ case CPU_DOWN_PREPARE:
+ // cpu_online()=1 here, flush previous load
+ spin_lock_irqsave(&this_cpu->cpu_load_lock, flags);
+ update_average_load(this_cpu->cur_freq, cpu, 0);
+ spin_unlock_irqrestore(&this_cpu->cpu_load_lock, flags);
+ break;
+ }
+#endif
+ return NOTIFY_OK;
+}
+
+static int system_suspend_handler(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ switch (val) {
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ case PM_POST_RESTORE:
+ rq_info.hotplug_disabled = 0;
+ break;
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ rq_info.hotplug_disabled = 1;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return NOTIFY_OK;
+}
+
+
+static ssize_t hotplug_disable_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ unsigned int val = 0;
+ val = rq_info.hotplug_disabled;
+ return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
+}
+
+static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);
+
+#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
+static void def_work_fn(struct work_struct *work)
+{
+ int64_t diff;
+
+ diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
+ do_div(diff, 1000 * 1000);
+ rq_info.def_interval = (unsigned int) diff;
+
+ /* Notify polling threads on change of value */
+ sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
+}
+#endif
+
+static ssize_t run_queue_avg_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ unsigned int val = 0;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&rq_lock, flags);
+ /* rq avg currently available only on one core */
+ val = rq_info.rq_avg;
+ rq_info.rq_avg = 0;
+ spin_unlock_irqrestore(&rq_lock, flags);
+
+ return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
+}
+
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
+static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ret = 0;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&rq_lock, flags);
+ ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
+ jiffies_to_msecs(rq_info.rq_poll_jiffies));
+ spin_unlock_irqrestore(&rq_lock, flags);
+
+ return ret;
+}
+
+static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val = 0;
+ unsigned long flags = 0;
+ static DEFINE_MUTEX(lock_poll_ms);
+
+ mutex_lock(&lock_poll_ms);
+
+ spin_lock_irqsave(&rq_lock, flags);
+ sscanf(buf, "%u", &val);
+ rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
+ spin_unlock_irqrestore(&rq_lock, flags);
+
+ mutex_unlock(&lock_poll_ms);
+
+ return count;
+}
+
+static struct kobj_attribute run_queue_poll_ms_attr =
+ __ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+ store_run_queue_poll_ms);
+
+static ssize_t show_def_timer_ms(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, MAX_LONG_SIZE, "%u\n", rq_info.def_interval);
+}
+
+static ssize_t store_def_timer_ms(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int val = 0;
+
+ sscanf(buf, "%u", &val);
+ rq_info.def_timer_jiffies = msecs_to_jiffies(val);
+
+ rq_info.def_start_time = ktime_to_ns(ktime_get());
+ return count;
+}
+
+static ssize_t store_heavy_task_threshold(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ unsigned int val = 0;
+
+ sscanf(buf, "%u", &val);
+ sched_set_heavy_task_threshold(val);
+
+ return count;
+}
+
+static struct kobj_attribute def_timer_ms_attr =
+ __ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+ store_def_timer_ms);
+
+static ssize_t show_cpu_normalized_load(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int cpu;
+ unsigned int len = 0;
+ unsigned int load = 0;
+ unsigned int max_len = 4096;
+
+ //len = snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq(0));
+ for_each_possible_cpu(cpu) {
+ // reset cpu load
+ //load = sched_get_percpu_load(cpu, 1, 0);
+ // not reset
+ load = sched_get_percpu_load(cpu, 0, 0);
+ len += snprintf(buf+len, max_len-len, "cpu(%d)=%d\n", cpu, load);
+#if 0
+ unsigned int idle_time, wall_time, iowait_time;
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
+
+ idle_time = get_cpu_idle_time(cpu, &wall_time, 0);
+ iowait_time = get_cpu_iowait_time(cpu, &wall_time);
+ len += snprintf(buf+len, max_len-len, "curr idle=%u, io=%u, wall=%u\n",
+ (unsigned int)idle_time,
+ (unsigned int)iowait_time,
+ (unsigned int)wall_time);
+ len += snprintf(buf+len, max_len-len, "prev idle=%u, io=%u, wall=%u, l=%u, w=%u, f=%u m=%u, %s\n",
+ (unsigned int)pcpu->prev_cpu_idle,
+ (unsigned int)pcpu->prev_cpu_iowait,
+ (unsigned int)pcpu->prev_cpu_wall,
+ pcpu->avg_load_maxfreq,
+ pcpu->window_size,
+ pcpu->cur_freq,
+ pcpu->policy_max,
+ (unsigned int)(cpu_online(cpu))?"on":"off");
+#endif
+ }
+	len += snprintf(buf+len, max_len-len, "htask_threshold=%u, current_htask#=%u, total_htask#=%u\n",
+			heavy_task_threshold, sched_get_nr_heavy_task(), htask_statistic);
+
+ return len;
+}
+
+static struct kobj_attribute cpu_normalized_load_attr =
+ __ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
+ store_heavy_task_threshold);
+
+static struct attribute *rq_attrs[] = {
+ &cpu_normalized_load_attr.attr,
+ &def_timer_ms_attr.attr,
+ &run_queue_avg_attr.attr,
+ &run_queue_poll_ms_attr.attr,
+ &hotplug_disabled_attr.attr,
+ NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+ .attrs = rq_attrs,
+};
+
+static int init_rq_attribs(void)
+{
+ int err;
+
+ rq_info.rq_avg = 0;
+ rq_info.attr_group = &rq_attr_group;
+
+ /* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
+ rq_info.kobj = kobject_create_and_add("rq-stats",
+ &get_cpu_device(0)->kobj);
+ if (!rq_info.kobj)
+ return -ENOMEM;
+
+ err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
+ if (err)
+ kobject_put(rq_info.kobj);
+ else
+ kobject_uevent(rq_info.kobj, KOBJ_ADD);
+
+ return err;
+}
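+
+/*
+ * Illustrative usage of the attributes created above (values are
+ * made up):
+ *
+ *	# cat /sys/devices/system/cpu/cpu0/rq-stats/run_queue_avg
+ *	2.5
+ *	# echo 650 > /sys/devices/system/cpu/cpu0/rq-stats/cpu_normalized_load
+ *
+ * The cpu_normalized_load write path feeds
+ * sched_set_heavy_task_threshold(); reading it dumps the per-cpu
+ * normalized load plus the heavy-task counters.
+ */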
+
+static int __init rq_stats_init(void)
+{
+ int ret = 0;
+ int i;
+#if CPU_FREQ_VARIANT
+ struct cpufreq_policy cpu_policy;
+#endif
+ /* Bail out if this is not an SMP Target */
+/* FIX-ME : mark to avoid arm64 build error
+ if (!is_smp()) {
+ rq_info.init = 0;
+ return -ENOSYS;
+ }
+*/
+
+#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
+ rq_wq = create_singlethread_workqueue("rq_stats");
+ BUG_ON(!rq_wq);
+ INIT_WORK(&rq_info.def_timer_work, def_work_fn);
+#endif
+
+ spin_lock_init(&rq_lock);
+ rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
+ rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
+ rq_info.rq_poll_last_jiffy = 0;
+ rq_info.def_timer_last_jiffy = 0;
+ rq_info.hotplug_disabled = 0;
+ ret = init_rq_attribs();
+
+ for_each_possible_cpu(i) {
+ struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
+ spin_lock_init(&pcpu->cpu_load_lock);
+
+#if CPU_FREQ_VARIANT
+ cpufreq_get_policy(&cpu_policy, i);
+ pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
+ if (cpu_online(i))
+ pcpu->cur_freq = cpufreq_get(i);
+ cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
+#else
+ pcpu->policy_max = 1;
+ pcpu->cur_freq = 1;
+#endif
+
+ }
+ freq_transition.notifier_call = cpufreq_transition_handler;
+ //freq_policy.notifier_call = cpufreq_policy_handler;
+ cpu_hotplug.notifier_call = cpu_hotplug_handler;
+
+#if CPU_FREQ_VARIANT
+ cpufreq_register_notifier(&freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
+#endif
+ //cpufreq_register_notifier(&freq_policy, CPUFREQ_POLICY_NOTIFIER);
+ register_hotcpu_notifier(&cpu_hotplug);
+
+ rq_info.init = 1;
+
+ return ret;
+}
+late_initcall(rq_stats_init);
+
+static int __init rq_stats_early_init(void)
+{
+
+ /* Bail out if this is not an SMP Target */
+/* FIX-ME : mark to avoid arm64 build error
+ if (!is_smp()) {
+ rq_info.init = 0;
+ return -ENOSYS;
+ }
+*/
+ pm_notifier(system_suspend_handler, 0);
+ return 0;
+}
+core_initcall(rq_stats_early_init);
diff --git a/drivers/misc/mediatek/kernel/sched/sched_avg.c b/drivers/misc/mediatek/kernel/sched/sched_avg.c
new file mode 100644
index 000000000..e2b487e88
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/sched/sched_avg.c
@@ -0,0 +1,135 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Scheduler hook for average runqueue determination
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+
+static DEFINE_PER_CPU(u64, nr_prod_sum);
+static DEFINE_PER_CPU(u64, last_time);
+static DEFINE_PER_CPU(u64, nr);
+static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
+static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
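+/*
+ * Accounting scheme: every update adds nr_running * dt to nr_prod_sum
+ * (and nr_iowait * dt to iowait_prod_sum), so over a polling window T
+ * the time-weighted average run-queue depth is sum(nr * dt) / T; the
+ * getter below reports it scaled by 100.
+ */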
+static u64 last_get_time;
+
+/**
+ * sched_get_nr_running_avg
+ * @avg: Average nr_running since the last poll, scaled by 100 to
+ *	 give two decimal places of accuracy.
+ * @iowait_avg: Average nr_iowait since the last poll, scaled the
+ *	 same way.
+ *
+ * Obtains the average nr_running value since the last poll.
+ * This function must not be called concurrently with itself.
+ */
+void sched_get_nr_running_avg(int *avg, int *iowait_avg)
+{
+ int cpu;
+ u64 curr_time = sched_clock();
+ s64 diff = (s64) (curr_time - last_get_time);
+ u64 tmp_avg = 0, tmp_iowait = 0, old_lgt;
+	bool clk_faulty = false;
+ u32 cpumask = 0;
+
+ *avg = 0;
+ *iowait_avg = 0;
+
+ if (!diff)
+ return;
+	WARN(diff < 0, "[sched_get_nr_running_avg] time last:%llu curr:%llu\n",
+		last_get_time, curr_time);
+
+ old_lgt = last_get_time;
+ last_get_time = curr_time;
+ /* read and reset nr_running counts */
+ for_each_possible_cpu(cpu) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+ /* error handling for problematic clock violation */
+		if ((s64)(curr_time - per_cpu(last_time, cpu)) < 0) {
+			clk_faulty = true;
+			cpumask |= 1 << cpu;
+		}
+ tmp_avg += per_cpu(nr_prod_sum, cpu);
+ tmp_avg += per_cpu(nr, cpu) * (curr_time - per_cpu(last_time, cpu));
+		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
+ tmp_iowait += nr_iowait_cpu(cpu) * (curr_time - per_cpu(last_time, cpu));
+ per_cpu(last_time, cpu) = curr_time;
+ per_cpu(nr_prod_sum, cpu) = 0;
+ per_cpu(iowait_prod_sum, cpu) = 0;
+ spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+ }
+
+ /* error handling for problematic clock violation */
+ if (clk_faulty) {
+ *avg = 0;
+ *iowait_avg = 0;
+		pr_warn("[%s] CPU (0x%08x) clock may be unstable!\n", __func__, cpumask);
+ return;
+ }
+
+ *avg = (int)div64_u64(tmp_avg * 100, (u64) diff);
+ *iowait_avg = (int)div64_u64(tmp_iowait * 100, (u64) diff);
+
+	WARN(*avg < 0, "[sched_get_nr_running_avg] avg:%d(%llu/%lld), time last:%llu curr:%llu\n",
+		*avg, tmp_avg, diff, old_lgt, curr_time);
+	if (unlikely(*avg < 0))
+		*avg = 0;
+	WARN(*iowait_avg < 0, "[sched_get_nr_running_avg] iowait_avg:%d(%llu/%lld), time last:%llu curr:%llu\n",
+		*iowait_avg, tmp_iowait, diff, old_lgt, curr_time);
+	if (unlikely(*iowait_avg < 0))
+		*iowait_avg = 0;
+}
+EXPORT_SYMBOL(sched_get_nr_running_avg);
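+/*
+ * Typical polling usage (a sketch; in this tree the rq-stats poll
+ * timer is the expected caller):
+ *
+ *	int avg, iowait_avg;
+ *
+ *	sched_get_nr_running_avg(&avg, &iowait_avg);
+ *	if (avg >= 150)
+ *		; // on average >= 1.5 runnable tasks since the last poll
+ */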
+
+/**
+ * sched_update_nr_prod
+ * @cpu: CPU whose run-queue count is being updated.
+ * @nr_running: The runqueue's nr_running value before this update.
+ * @inc: Whether the count is being incremented or decremented.
+ *
+ * Update the time-weighted run-queue average with the latest
+ * nr_running value for @cpu.
+ */
+void sched_update_nr_prod(int cpu, unsigned long nr_running, bool inc)
+{
+ s64 diff;
+ u64 curr_time;
+ unsigned long flags;
+
+ spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+ curr_time = sched_clock();
+ diff = (s64) (curr_time - per_cpu(last_time, cpu));
+ /* skip this problematic clock violation */
+ if (diff < 0) {
+ spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+ return;
+ }
+
+ per_cpu(last_time, cpu) = curr_time;
+ per_cpu(nr, cpu) = nr_running + (inc ? 1 : -1);
+
+	/* nr is u64; cast so the sanity check can actually trip */
+	BUG_ON((s64)per_cpu(nr, cpu) < 0);
+
+ per_cpu(nr_prod_sum, cpu) += nr_running * diff;
+ per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
+ spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+}
+EXPORT_SYMBOL(sched_update_nr_prod);
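+/*
+ * Note: this is meant to be called from the scheduler's enqueue/dequeue
+ * paths with the pre-update nr_running value; @inc distinguishes add
+ * from remove, so per_cpu(nr, cpu) tracks nr_running between polls of
+ * sched_get_nr_running_avg().
+ */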
diff --git a/drivers/misc/mediatek/kernel/sec_osal.c b/drivers/misc/mediatek/kernel/sec_osal.c
new file mode 100644
index 000000000..10319fee8
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/sec_osal.c
@@ -0,0 +1,365 @@
+/******************************************************************************
+ * KERNEL HEADER
+ ******************************************************************************/
+#include <mach/sec_osal.h>
+
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/mtd/mtd.h>
+#include <linux/fs.h>
+#include <linux/mtd/partitions.h>
+#include <asm/uaccess.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+#include <linux/mtd/nand.h>
+#endif
+#include <linux/vmalloc.h>
+
+/*****************************************************************************
+ * MACRO
+ *****************************************************************************/
+#ifndef ASSERT
+#define ASSERT(expr) BUG_ON(!(expr))
+#endif
+
+/*****************************************************************************
+ * GLOBAL VARIABLE
+ *****************************************************************************/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36))
+DECLARE_MUTEX(hacc_sem);
+DECLARE_MUTEX(mtd_sem);
+DECLARE_MUTEX(rid_sem);
+DECLARE_MUTEX(sec_mm_sem);
+DECLARE_MUTEX(osal_fp_sem);
+DECLARE_MUTEX(osal_verify_sem);
+DECLARE_MUTEX(osal_secro_sem);
+DECLARE_MUTEX(osal_secro_v5_sem);
+#else /* (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)) */
+DEFINE_SEMAPHORE(hacc_sem);
+DEFINE_SEMAPHORE(mtd_sem);
+DEFINE_SEMAPHORE(rid_sem);
+DEFINE_SEMAPHORE(sec_mm_sem);
+DEFINE_SEMAPHORE(osal_fp_sem);
+DEFINE_SEMAPHORE(osal_verify_sem);
+DEFINE_SEMAPHORE(osal_secro_sem);
+DEFINE_SEMAPHORE(osal_secro_v5_sem);
+#endif
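+/*
+ * DECLARE_MUTEX() gave way to DEFINE_SEMAPHORE() around 2.6.36 (hence
+ * the version check above); both define a binary semaphore, which the
+ * lock/unlock wrappers below take with down_interruptible()/up().
+ */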
+
+/*****************************************************************************
+ * LOCAL VARIABLE
+ *****************************************************************************/
+static mm_segment_t curr_fs;
+#define OSAL_MAX_FP_COUNT 4096
+#define OSAL_FP_OVERFLOW OSAL_MAX_FP_COUNT
+/* Slot 0 is never used: fp_id == 0 is treated as the NULL file */
+static struct file *g_osal_fp[OSAL_MAX_FP_COUNT] = { 0 };
+
+/*****************************************************************************
+ * PORTING LAYER
+ *****************************************************************************/
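+/*
+ * These wrappers use vmalloc()/vfree() rather than the commented-out
+ * kmalloc()/kfree(), presumably so large buffers do not require
+ * physically contiguous pages; as a consequence they are only usable
+ * from sleepable context.
+ */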
+void osal_kfree(void *buf)
+{
+/* kfree(buf); */
+ vfree(buf);
+}
+
+void *osal_kmalloc(unsigned int size)
+{
+/* return kmalloc(size,GFP_KERNEL); */
+ return vmalloc(size);
+}
+
+unsigned long osal_copy_from_user(void *to, void *from, unsigned long size)
+{
+ return copy_from_user(to, from, size);
+}
+
+unsigned long osal_copy_to_user(void *to, void *from, unsigned long size)
+{
+ return copy_to_user(to, from, size);
+}
+
+int osal_hacc_lock(void)
+{
+ return down_interruptible(&hacc_sem);
+}
+
+void osal_hacc_unlock(void)
+{
+ up(&hacc_sem);
+}
+
+
+int osal_verify_lock(void)
+{
+ return down_interruptible(&osal_verify_sem);
+}
+
+void osal_verify_unlock(void)
+{
+ up(&osal_verify_sem);
+}
+
+int osal_secro_lock(void)
+{
+ return down_interruptible(&osal_secro_sem);
+}
+
+void osal_secro_unlock(void)
+{
+ up(&osal_secro_sem);
+}
+
+int osal_secro_v5_lock(void)
+{
+ return down_interruptible(&osal_secro_v5_sem);
+}
+
+void osal_secro_v5_unlock(void)
+{
+ up(&osal_secro_v5_sem);
+}
+
+int osal_mtd_lock(void)
+{
+ return down_interruptible(&mtd_sem);
+}
+
+void osal_mtd_unlock(void)
+{
+ up(&mtd_sem);
+}
+
+int osal_rid_lock(void)
+{
+ return down_interruptible(&rid_sem);
+}
+
+void osal_rid_unlock(void)
+{
+ up(&rid_sem);
+}
+
+void osal_msleep(unsigned int msec)
+{
+ msleep(msec);
+}
+
+void osal_assert(unsigned int val)
+{
+ ASSERT(val);
+}
+
+int osal_set_kernel_fs(void)
+{
+ int val = 0;
+ val = down_interruptible(&sec_mm_sem);
+ curr_fs = get_fs();
+ set_fs(KERNEL_DS);
+ return val;
+}
+
+void osal_restore_fs(void)
+{
+ set_fs(curr_fs);
+ up(&sec_mm_sem);
+}
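+/*
+ * osal_set_kernel_fs()/osal_restore_fs() bracket file I/O done with
+ * kernel buffers: raising the address limit to KERNEL_DS lets
+ * f_op->read() below accept kernel pointers, and sec_mm_sem keeps
+ * the saved curr_fs from being clobbered by a concurrent caller.
+ */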
+
+int osal_filp_open_read_only(const char *file_path)
+{
+ int filp_id = 0;
+ int val = 0;
+
+ val = down_interruptible(&osal_fp_sem);
+
+	for (filp_id = 1; filp_id < OSAL_MAX_FP_COUNT; filp_id++) {
+		if (g_osal_fp[filp_id] == NULL) {
+			break;
+		}
+	}
+
+	/* fp_id = 0 is treated as the NULL file pointer, so bail out on
+	 * table overflow *before* writing past the end of g_osal_fp */
+	if (filp_id >= OSAL_FP_OVERFLOW) {
+		g_osal_fp[OSAL_FILE_NULL] = (struct file *)(-ENOMEM); /* No free slot left */
+		up(&osal_fp_sem);
+		return OSAL_FILE_NULL;
+	}
+
+	g_osal_fp[filp_id] = filp_open(file_path, O_RDONLY, 0777);
+
+	if (IS_ERR(g_osal_fp[filp_id])) {
+		g_osal_fp[OSAL_FILE_NULL] = g_osal_fp[filp_id]; /* Record the failure reason in slot 0 */
+		g_osal_fp[filp_id] = NULL;
+		filp_id = OSAL_FILE_NULL;
+	}
+
+	up(&osal_fp_sem);
+
+	return filp_id;
+}
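+/*
+ * Typical caller sequence (a sketch using only the wrappers in this
+ * file; error handling abbreviated):
+ *
+ *	int fp_id = osal_filp_open_read_only("/path/to/file");
+ *
+ *	if (fp_id != OSAL_FILE_NULL && !osal_is_err(fp_id)) {
+ *		osal_set_kernel_fs();
+ *		osal_filp_read(fp_id, buf, len);
+ *		osal_restore_fs();
+ *		osal_filp_close(fp_id);
+ *	}
+ */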
+
+void *osal_get_filp_struct(int fp_id)
+{
+ int val = 0;
+ struct file *ret;
+
+ if (fp_id >= 1 && fp_id < OSAL_MAX_FP_COUNT) {
+ val = down_interruptible(&osal_fp_sem);
+
+ ret = g_osal_fp[fp_id];
+
+ up(&osal_fp_sem);
+
+ return (void *)ret;
+ }
+
+ return (struct file *)(-ENOENT); /* No such file or directory */
+}
+
+int osal_filp_close(int fp_id)
+{
+ int val = 0;
+ int ret = 0;
+
+ if (fp_id >= 1 && fp_id < OSAL_MAX_FP_COUNT) {
+ val = down_interruptible(&osal_fp_sem);
+
+ if (!IS_ERR(g_osal_fp[fp_id])) {
+ ret = filp_close(g_osal_fp[fp_id], NULL);
+ }
+ g_osal_fp[fp_id] = NULL;
+
+ up(&osal_fp_sem);
+
+ return ret;
+ }
+
+ return OSAL_FILE_CLOSE_FAIL;
+}
+
+loff_t osal_filp_seek_set(int fp_id, loff_t off)
+{
+ loff_t offset;
+ int val = 0;
+
+ if (fp_id >= 1 && fp_id < OSAL_MAX_FP_COUNT) {
+ val = down_interruptible(&osal_fp_sem);
+
+ offset = g_osal_fp[fp_id]->f_op->llseek(g_osal_fp[fp_id], off, SEEK_SET);
+
+ up(&osal_fp_sem);
+
+ return offset;
+ }
+
+ return OSAL_FILE_SEEK_FAIL;
+}
+
+loff_t osal_filp_seek_end(int fp_id, loff_t off)
+{
+ loff_t offset;
+ int val = 0;
+
+ if (fp_id >= 1 && fp_id < OSAL_MAX_FP_COUNT) {
+ val = down_interruptible(&osal_fp_sem);
+
+ offset = g_osal_fp[fp_id]->f_op->llseek(g_osal_fp[fp_id], off, SEEK_END);
+
+ up(&osal_fp_sem);
+
+ return offset;
+ }
+
+ return OSAL_FILE_SEEK_FAIL;
+}
+
+loff_t osal_filp_pos(int fp_id)
+{
+ loff_t offset;
+ int val = 0;
+
+ if (fp_id >= 1 && fp_id < OSAL_MAX_FP_COUNT) {
+ val = down_interruptible(&osal_fp_sem);
+
+ offset = g_osal_fp[fp_id]->f_pos;
+
+ up(&osal_fp_sem);
+
+ return offset;
+ }
+
+ return OSAL_FILE_GET_POS_FAIL;
+}
+
+long osal_filp_read(int fp_id, char *buf, unsigned long len)
+{
+ ssize_t read_len;
+ int val = 0;
+
+ if (fp_id >= 1 && fp_id < OSAL_MAX_FP_COUNT) {
+ val = down_interruptible(&osal_fp_sem);
+
+ read_len =
+ g_osal_fp[fp_id]->f_op->read(g_osal_fp[fp_id], buf, len,
+ &g_osal_fp[fp_id]->f_pos);
+
+ up(&osal_fp_sem);
+
+ return read_len;
+ }
+
+ return OSAL_FILE_READ_FAIL;
+}
+
+long osal_is_err(int fp_id)
+{
+ bool err;
+ int val = 0;
+
+ if (fp_id >= 1 && fp_id < OSAL_MAX_FP_COUNT) {
+ val = down_interruptible(&osal_fp_sem);
+
+ err = IS_ERR(g_osal_fp[fp_id]);
+
+ up(&osal_fp_sem);
+
+ return err;
+ }
+
+ osal_assert(0);
+ return 1;
+}
+EXPORT_SYMBOL(osal_kfree);
+EXPORT_SYMBOL(osal_kmalloc);
+EXPORT_SYMBOL(osal_copy_from_user);
+EXPORT_SYMBOL(osal_copy_to_user);
+EXPORT_SYMBOL(osal_hacc_lock);
+EXPORT_SYMBOL(osal_hacc_unlock);
+EXPORT_SYMBOL(osal_verify_lock);
+EXPORT_SYMBOL(osal_verify_unlock);
+EXPORT_SYMBOL(osal_secro_lock);
+EXPORT_SYMBOL(osal_secro_unlock);
+EXPORT_SYMBOL(osal_secro_v5_lock);
+EXPORT_SYMBOL(osal_secro_v5_unlock);
+EXPORT_SYMBOL(osal_mtd_lock);
+EXPORT_SYMBOL(osal_mtd_unlock);
+EXPORT_SYMBOL(osal_rid_lock);
+EXPORT_SYMBOL(osal_rid_unlock);
+EXPORT_SYMBOL(osal_msleep);
+EXPORT_SYMBOL(osal_assert);
+EXPORT_SYMBOL(osal_set_kernel_fs);
+EXPORT_SYMBOL(osal_restore_fs);
+EXPORT_SYMBOL(osal_get_filp_struct);
+EXPORT_SYMBOL(osal_filp_close);
+EXPORT_SYMBOL(osal_filp_seek_set);
+EXPORT_SYMBOL(osal_filp_seek_end);
+EXPORT_SYMBOL(osal_filp_pos);
+EXPORT_SYMBOL(osal_filp_read);
+EXPORT_SYMBOL(osal_is_err);
+EXPORT_SYMBOL(osal_filp_open_read_only);
diff --git a/drivers/misc/mediatek/kernel/system.c b/drivers/misc/mediatek/kernel/system.c
new file mode 100644
index 000000000..45296a2fb
--- /dev/null
+++ b/drivers/misc/mediatek/kernel/system.c
@@ -0,0 +1,48 @@
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <mach/mtk_rtc.h>
+#include <mach/wd_api.h>
+extern void wdt_arch_reset(char);
+
+
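+/*
+ * arch_reset() maps the reboot command onto an RTC flag that the boot
+ * chain inspects on the next start ("recovery", "bootloader", and
+ * "kpoc" for kernel power-off charging), then asks the watchdog API
+ * for a software reset; the reboot flag is set only for commands with
+ * no special marking.
+ */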
+void arch_reset(char mode, const char *cmd)
+{
+ char reboot = 0;
+ int res = 0;
+ struct wd_api *wd_api = NULL;
+#ifdef CONFIG_FPGA_EARLY_PORTING
+	return;
+#else
+
+ res = get_wd_api(&wd_api);
+ pr_notice("arch_reset: cmd = %s\n", cmd ? : "NULL");
+
+ if (cmd && !strcmp(cmd, "charger")) {
+ /* do nothing */
+ } else if (cmd && !strcmp(cmd, "recovery")) {
+ #ifndef CONFIG_MTK_FPGA
+ rtc_mark_recovery();
+ #endif
+ } else if (cmd && !strcmp(cmd, "bootloader")) {
+ #ifndef CONFIG_MTK_FPGA
+ rtc_mark_fast();
+ #endif
+ }
+#ifdef CONFIG_MTK_KERNEL_POWER_OFF_CHARGING
+ else if (cmd && !strcmp(cmd, "kpoc")) {
+ rtc_mark_kpoc();
+ }
+#endif
+ else {
+ reboot = 1;
+ }
+
+ if (res) {
+ pr_notice("arch_reset, get wd api error %d\n", res);
+ } else {
+ wd_api->wd_sw_reset(reboot);
+ }
+ #endif
+}