| author | Meizu OpenSource <patchwork@meizu.com> | 2016-08-15 10:19:42 +0800 |
|---|---|---|
| committer | Meizu OpenSource <patchwork@meizu.com> | 2016-08-15 10:19:42 +0800 |
| commit | d2e1446d81725c351dc73a03b397ce043fb18452 (patch) | |
| tree | 4dbc616b7f92aea39cd697a9084205ddb805e344 /arch/arm64/include/asm | |
first commit
Diffstat (limited to 'arch/arm64/include/asm')
93 files changed, 8688 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
new file mode 100644
index 000000000..85715a1d5
--- /dev/null
+++ b/arch/arm64/include/asm/Kbuild
@@ -0,0 +1,52 @@
+
+
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += checksum.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += ftrace.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += mutex.h
+generic-y += pci.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += simd.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += switch_to.h
+generic-y += swab.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += unaligned.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
new file mode 100644
index 000000000..bf6ab242f
--- /dev/null
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -0,0 +1,138 @@
+/*
+ * arch/arm64/include/asm/arch_timer.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARCH_TIMER_H
+#define __ASM_ARCH_TIMER_H
+
+#include <asm/barrier.h>
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+static inline void arch_timer_reg_write(int access, int reg, u32 val)
+{
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
+			break;
+		default:
+			BUILD_BUG();
+		}
+	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
+			break;
+		default:
+			BUILD_BUG();
+		}
+	} else {
+		BUILD_BUG();
+	}
+
+	isb();
+}
+
+static inline u32 arch_timer_reg_read(int access, int reg)
+{
+	u32 val;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
+			break;
+		default:
+			BUILD_BUG();
+		}
+	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
+			break;
+		default:
+			BUILD_BUG();
+		}
+	} else {
+		BUILD_BUG();
+	}
+
+	return val;
+}
+
+static inline u32 arch_timer_get_cntfrq(void)
+{
+	u32 val;
+	asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
+	return val;
+}
+
+static inline void __cpuinit arch_counter_set_user_access(void)
+{
+	u32 cntkctl;
+
+	/* Disable user access to the timers and the physical counter. */
+	asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
+	cntkctl &= ~((3 << 8) | (1 << 0));
+
+	/* Enable user access to the virtual counter and frequency. */
+	cntkctl |= (1 << 1);
+	asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
+}
+
+static inline u64 arch_counter_get_cntpct(void)
+{
+	u64 cval;
+
+	isb();
+	asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
+
+	return cval;
+}
+
+static inline u64 arch_counter_get_cntvct(void)
+{
+	u64 cval;
+
+	isb();
+	asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
+
+	return cval;
+}
+
+static inline int arch_timer_arch_init(void)
+{
+	return 0;
+}
+
+#endif
diff --git a/arch/arm64/include/asm/asm-offsets.h b/arch/arm64/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/arm64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
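[Editor's note, not part of the patch: a minimal sketch of how a clockevent backend might use the accessors above to program the virtual timer. It assumes the ARCH_TIMER_CTRL_ENABLE and ARCH_TIMER_CTRL_IT_MASK bit definitions from <clocksource/arm_arch_timer.h>; the function name is hypothetical.]

/* Arm the virtual timer to fire after "evt" ticks. */
static void example_set_next_event(unsigned long evt)
{
	u32 ctrl = arch_timer_reg_read(ARCH_TIMER_VIRT_ACCESS,
				       ARCH_TIMER_REG_CTRL);

	ctrl |= ARCH_TIMER_CTRL_ENABLE;		/* enable the timer... */
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;	/* ...and unmask its interrupt */

	arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_CTRL, ctrl);
}

Note the accessors end with an isb(), so the write is guaranteed to have taken effect before the function returns.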
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
new file mode 100644
index 000000000..fd3e39240
--- /dev/null
+++ b/arch/arm64/include/asm/assembler.h
@@ -0,0 +1,148 @@
+/*
+ * Based on arch/arm/include/asm/assembler.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#include <asm/ptrace.h>
+
+/*
+ * Stack pushing/popping (register pairs only). Equivalent to store decrement
+ * before, load increment after.
+ */
+	.macro	push, xreg1, xreg2
+	stp	\xreg1, \xreg2, [sp, #-16]!
+	.endm
+
+	.macro	pop, xreg1, xreg2
+	ldp	\xreg1, \xreg2, [sp], #16
+	.endm
+
+/*
+ * Enable and disable interrupts.
+ */
+	.macro	disable_irq
+	msr	daifset, #2
+	.endm
+
+	.macro	enable_irq
+	msr	daifclr, #2
+	.endm
+
+/*
+ * Save/disable and restore interrupts.
+ */
+	.macro	save_and_disable_irqs, olddaif
+	mrs	\olddaif, daif
+	disable_irq
+	.endm
+
+	.macro	restore_irqs, olddaif
+	msr	daif, \olddaif
+	.endm
+
+/*
+ * Enable and disable debug exceptions.
+ */
+	.macro	disable_dbg
+	msr	daifset, #8
+	.endm
+
+	.macro	enable_dbg
+	msr	daifclr, #8
+	.endm
+
+	.macro	disable_step, tmp
+	mrs	\tmp, mdscr_el1
+	bic	\tmp, \tmp, #1
+	msr	mdscr_el1, \tmp
+	.endm
+
+	.macro	enable_step, tmp
+	mrs	\tmp, mdscr_el1
+	orr	\tmp, \tmp, #1
+	msr	mdscr_el1, \tmp
+	.endm
+
+	.macro	enable_dbg_if_not_stepping, tmp
+	mrs	\tmp, mdscr_el1
+	tbnz	\tmp, #0, 9990f
+	enable_dbg
+9990:
+	.endm
+
+/*
+ * SMP data memory barrier
+ */
+	.macro	smp_dmb, opt
+#ifdef CONFIG_SMP
+	dmb	\opt
+#endif
+	.endm
+
+#define USER(l, x...) \
+9999:	x; \
+	.section __ex_table,"a"; \
+	.align	3; \
+	.quad	9999b,l; \
+	.previous
+
+/*
+ * Register aliases.
+ */
+lr	.req	x30		// link register
+
+/*
+ * Vector entry
+ */
+	.macro	ventry	label
+	.align	7
+	b	\label
+	.endm
+
+/*
+ * Select code when configured for BE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_BE(code...) code
+#else
+#define CPU_BE(code...)
+#endif
+
+/*
+ * Select code when configured for LE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+/*
+ * Define a macro that constructs a 64-bit value by concatenating two
+ * 32-bit registers. Note that on big endian systems the order of the
+ * registers is swapped.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+	.macro	regs_to_64, rd, lbits, hbits
+#else
+	.macro	regs_to_64, rd, hbits, lbits
+#endif
+	orr	\rd, \lbits, \hbits, lsl #32
+	.endm
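[Editor's note, not part of the patch: the regs_to_64 macro above is just a shifted OR; a C equivalent of the concatenation it performs, with a hypothetical function name, makes the endianness comment concrete — on big endian kernels only the argument order changes, not the computation.]

static inline u64 example_regs_to_64(u32 lbits, u32 hbits)
{
	/* low half in bits [31:0], high half shifted into bits [63:32] */
	return (u64)lbits | ((u64)hbits << 32);
}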
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
new file mode 100644
index 000000000..836364468
--- /dev/null
+++ b/arch/arm64/include/asm/atomic.h
@@ -0,0 +1,305 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ATOMIC_H
+#define __ASM_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
+
+#define ATOMIC_INIT(i)	{ (i) }
+
+#ifdef __KERNEL__
+
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
+#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+/*
+ * AArch64 UP and SMP safe atomic ops. We use load exclusive and
+ * store exclusive to ensure that these are atomic. We may loop
+ * to ensure that the update happens.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_add\n"
+"1:	ldxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_add_return\n"
+"1:	ldaxr	%w0, %2\n"
+"	add	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
+
+	return result;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_sub\n"
+"1:	ldxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stxr	%w1, %w0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	asm volatile("// atomic_sub_return\n"
+"1:	ldaxr	%w0, %2\n"
+"	sub	%w0, %w0, %w3\n"
+"	stlxr	%w1, %w0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
+
+	return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+	unsigned long tmp;
+	int oldval;
+
+	asm volatile("// atomic_cmpxchg\n"
+"1:	ldaxr	%w1, %2\n"
+"	cmp	%w1, %w3\n"
+"	b.ne	2f\n"
+"	stlxr	%w0, %w4, %2\n"
+"	cbnz	%w0, 1b\n"
+"2:"
+	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
+	: "Ir" (old), "r" (new)
+	: "cc", "memory");
+
+	return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long tmp, tmp2;
+
+	asm volatile("// atomic_clear_mask\n"
+"1:	ldxr	%0, %2\n"
+"	bic	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
+	: "Ir" (mask)
+	: "cc");
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+		c = old;
+	return c;
+}
+
+#define atomic_inc(v)		atomic_add(1, v)
+#define atomic_dec(v)		atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)	(atomic_add_return(1, v))
+#define atomic_dec_return(v)	(atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
+/*
+ * 64-bit atomic operations.
+ */
+#define ATOMIC64_INIT(i) { (i) }
+
+#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
+#define atomic64_set(v,i)	(((v)->counter) = (i))
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_add\n"
+"1:	ldxr	%0, %2\n"
+"	add	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc");
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_add_return\n"
+"1:	ldaxr	%0, %2\n"
+"	add	%0, %0, %3\n"
+"	stlxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
+
+	return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_sub\n"
+"1:	ldxr	%0, %2\n"
+"	sub	%0, %0, %3\n"
+"	stxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc");
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_sub_return\n"
+"1:	ldaxr	%0, %2\n"
+"	sub	%0, %0, %3\n"
+"	stlxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	: "Ir" (i)
+	: "cc", "memory");
+
+	return result;
+}
+
+static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
+{
+	long oldval;
+	unsigned long res;
+
+	asm volatile("// atomic64_cmpxchg\n"
+"1:	ldaxr	%1, %2\n"
+"	cmp	%1, %3\n"
+"	b.ne	2f\n"
+"	stlxr	%w0, %4, %2\n"
+"	cbnz	%w0, 1b\n"
+"2:"
+	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
+	: "Ir" (old), "r" (new)
+	: "cc", "memory");
+
+	return oldval;
+}
+
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long result;
+	unsigned long tmp;
+
+	asm volatile("// atomic64_dec_if_positive\n"
+"1:	ldaxr	%0, %2\n"
+"	subs	%0, %0, #1\n"
+"	b.mi	2f\n"
+"	stlxr	%w1, %0, %2\n"
+"	cbnz	%w1, 1b\n"
+"2:"
+	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+	:
+	: "cc", "memory");
+
+	return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long c, old;
+
+	c = atomic64_read(v);
+	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
+		c = old;
+
+	return c != u;
+}
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+#endif
+#endif
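[Editor's note, not part of the patch: the "we may loop to ensure that the update happens" comment above describes the standard compare-and-swap retry pattern. A minimal sketch in the same style as __atomic_add_unless(), with a hypothetical operation (atomic maximum), shows the idiom built on atomic_cmpxchg():]

static inline void example_atomic_max(atomic_t *v, int i)
{
	int old, c = atomic_read(v);

	/* retry whenever another cpu changed *v under us */
	while (c < i && (old = atomic_cmpxchg(v, c, i)) != c)
		c = old;
}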
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
new file mode 100644
index 000000000..78e20ba88
--- /dev/null
+++ b/arch/arm64/include/asm/barrier.h
@@ -0,0 +1,102 @@
+/*
+ * Based on arch/arm/include/asm/barrier.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define sev()		asm volatile("sev" : : : "memory")
+#define wfe()		asm volatile("wfe" : : : "memory")
+#define wfi()		asm volatile("wfi" : : : "memory")
+
+#define isb()		asm volatile("isb" : : : "memory")
+#define dsb()		asm volatile("dsb sy" : : : "memory")
+
+#define mb()		dsb()
+#define rmb()		asm volatile("dsb ld" : : : "memory")
+#define wmb()		asm volatile("dsb st" : : : "memory")
+
+#ifndef CONFIG_SMP
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+	compiletime_assert_atomic_type(*p); \
+	smp_mb(); \
+	___p1; \
+})
+
+#else
+
+#define smp_mb()	asm volatile("dmb ish" : : : "memory")
+#define smp_rmb()	asm volatile("dmb ishld" : : : "memory")
+#define smp_wmb()	asm volatile("dmb ishst" : : : "memory")
+
+#define smp_store_release(p, v) \
+do { \
+	compiletime_assert_atomic_type(*p); \
+	switch (sizeof(*p)) { \
+	case 4: \
+		asm volatile ("stlr %w1, %0" \
+				: "=Q" (*p) : "r" (v) : "memory"); \
+		break; \
+	case 8: \
+		asm volatile ("stlr %1, %0" \
+				: "=Q" (*p) : "r" (v) : "memory"); \
+		break; \
+	} \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+	typeof(*p) ___p1; \
+	compiletime_assert_atomic_type(*p); \
+	switch (sizeof(*p)) { \
+	case 4: \
+		asm volatile ("ldar %w0, %1" \
+			: "=r" (___p1) : "Q" (*p) : "memory"); \
+		break; \
+	case 8: \
+		asm volatile ("ldar %0, %1" \
+			: "=r" (___p1) : "Q" (*p) : "memory"); \
+		break; \
+	} \
+	___p1; \
+})
+
+#endif
+
+#define read_barrier_depends()		do { } while(0)
+#define smp_read_barrier_depends()	do { } while(0)
+
+#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
+#define nop()		asm volatile("nop");
+
+#endif	/* __ASSEMBLY__ */
+
+#endif	/* __ASM_BARRIER_H */
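[Editor's note, not part of the patch: smp_store_release()/smp_load_acquire() above map to the AArch64 STLR/LDAR instructions; the canonical use is the message-passing pattern, sketched here with hypothetical names. The release store orders the payload write before the flag write; the acquire load orders the flag read before the payload read.]

static int example_payload;
static int example_flag;

static void example_producer(int data)
{
	example_payload = data;			/* write payload first */
	smp_store_release(&example_flag, 1);	/* then publish it */
}

static int example_consumer(void)
{
	while (!smp_load_acquire(&example_flag))
		cpu_relax();			/* spin until published */
	return example_payload;			/* guaranteed to see data */
}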
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
new file mode 100644
index 000000000..aa5b59d6b
--- /dev/null
+++ b/arch/arm64/include/asm/bitops.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_BITOPS_H
+#define __ASM_BITOPS_H
+
+#include <linux/compiler.h>
+
+#include <asm/barrier.h>
+
+/*
+ * clear_bit may not imply a memory barrier
+ */
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
+#endif
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+/*
+ * Little endian assembly atomic bitops.
+ */
+extern void set_bit(int nr, volatile unsigned long *p);
+extern void clear_bit(int nr, volatile unsigned long *p);
+extern void change_bit(int nr, volatile unsigned long *p);
+extern int test_and_set_bit(int nr, volatile unsigned long *p);
+extern int test_and_clear_bit(int nr, volatile unsigned long *p);
+extern int test_and_change_bit(int nr, volatile unsigned long *p);
+
+#include <asm-generic/bitops/builtin-__ffs.h>
+#include <asm-generic/bitops/builtin-ffs.h>
+#include <asm-generic/bitops/builtin-__fls.h>
+#include <asm-generic/bitops/builtin-fls.h>
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+
+/*
+ * Ext2 is defined to use little-endian byte ordering.
+ */
+#define ext2_set_bit_atomic(lock, nr, p)	test_and_set_bit_le(nr, p)
+#define ext2_clear_bit_atomic(lock, nr, p)	test_and_clear_bit_le(nr, p)
+
+#endif /* __ASM_BITOPS_H */
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
new file mode 100644
index 000000000..390308a67
--- /dev/null
+++ b/arch/arm64/include/asm/cache.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CACHE_H
+#define __ASM_CACHE_H
+
+#define L1_CACHE_SHIFT		6
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+/*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+ * unrelated code may cause parts of the buffer to be read into the
+ * cache before the transfer is done, causing old data to be seen by
+ * the CPU.
+ */
+#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#define ARCH_SLAB_MINALIGN	8
+
+#endif
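[Editor's note, not part of the patch: the test_and_* bitops declared above are commonly used as one-word claim flags; a small hedged sketch with hypothetical names, using the smp_mb__before_clear_bit() helper this header defines:]

#define EXAMPLE_BUSY_BIT	0

static unsigned long example_flags;

static bool example_try_claim(void)
{
	/* test_and_set_bit() returns the old value: 0 means we won */
	return !test_and_set_bit(EXAMPLE_BUSY_BIT, &example_flags);
}

static void example_release(void)
{
	smp_mb__before_clear_bit();	/* order prior stores before release */
	clear_bit(EXAMPLE_BUSY_BIT, &example_flags);
}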
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
new file mode 100644
index 000000000..d6ac12e11
--- /dev/null
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -0,0 +1,163 @@
+/*
+ * Based on arch/arm/include/asm/cacheflush.h
+ *
+ * Copyright (C) 1999-2002 Russell King.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
+ */
+#define PG_dcache_clean PG_arch_1
+
+/*
+ *	MM Cache Management
+ *	===================
+ *
+ *	The arch/arm64/mm/cache.S implements these methods.
+ *
+ *	Start addresses are inclusive and end addresses are exclusive; start
+ *	addresses should be rounded down, end addresses up.
+ *
+ *	See Documentation/cachetlb.txt for more information. Please note that
+ *	the implementation assumes non-aliasing VIPT D-cache and (aliasing)
+ *	VIPT or ASID-tagged VIVT I-cache.
+ *
+ *	flush_cache_all()
+ *
+ *		Unconditionally clean and invalidate the entire cache.
+ *
+ *	flush_cache_mm(mm)
+ *
+ *		Clean and invalidate all user space cache entries
+ *		before a change of page tables.
+ *
+ *	flush_icache_range(start, end)
+ *
+ *		Ensure coherency between the I-cache and the D-cache in the
+ *		region described by start, end.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	__flush_cache_user_range(start, end)
+ *
+ *		Ensure coherency between the I-cache and the D-cache in the
+ *		region described by start, end.
+ *		- start  - virtual start address
+ *		- end    - virtual end address
+ *
+ *	__flush_dcache_area(kaddr, size)
+ *
+ *		Ensure that the data held in page is written back.
+ *		- kaddr  - page address
+ *		- size   - region size
+ */
+extern void flush_cache_all(void);
+extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void __flush_dcache_area(void *addr, size_t len);
+extern void __flush_cache_user_range(unsigned long start, unsigned long end);
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long user_addr, unsigned long pfn)
+{
+}
+
+/*
+ * Cache maintenance functions used by the DMA API. No to be used directly.
+ */
+extern void __dma_map_area(const void *, size_t, int);
+extern void __dma_unmap_area(const void *, size_t, int);
+extern void __dma_flush_range(const void *, const void *);
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space.  Really, we want to allow our "user
+ * space" model to handle this.
+ */
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+	unsigned long, void *, const void *, unsigned long);
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+	do {							\
+		memcpy(dst, src, len);				\
+	} while (0)
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/*
+ * flush_dcache_page is used when the kernel has written to the page
+ * cache page at virtual address page->virtual.
+ *
+ * If this page isn't mapped (ie, page_mapping == NULL), or it might
+ * have userspace mappings, then we _must_ always clean + invalidate
+ * the dcache entries associated with the kernel mapping.
+ *
+ * Otherwise we can defer the operation, and clean the cache when we are
+ * about to change to user space. This is the same method as used on SPARC64.
+ * See update_mmu_cache for the user space part.
+ */
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+extern void flush_dcache_page(struct page *);
+
+static inline void __flush_icache_all(void)
+{
+	asm("ic	ialluis");
+	dsb();
+}
+
+#define flush_dcache_mmap_lock(mapping) \
+	spin_lock_irq(&(mapping)->tree_lock)
+#define flush_dcache_mmap_unlock(mapping) \
+	spin_unlock_irq(&(mapping)->tree_lock)
+
+#define flush_icache_user_range(vma,page,addr,len) \
+	flush_dcache_page(page)
+
+/*
+ * We don't appear to need to do anything here. In fact, if we did, we'd
+ * duplicate cache flushing elsewhere performed by flush_dcache_page().
+ */
+#define flush_icache_page(vma,page)	do { } while (0)
+
+/*
+ * flush_cache_vmap() is used when creating mappings (eg, via vmap,
+ * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
+ * caches, since the direct-mappings of these pages may contain cached
+ * data, we need to do a full cache flush to ensure that writebacks
+ * don't corrupt data placed into these pages via the new mappings.
+ */
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	/*
+	 * set_pte_at() called from vmap_pte_range() does not
+	 * have a DSB after cleaning the cache line.
+	 */
+	dsb();
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+
+#endif
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
new file mode 100644
index 000000000..85f5f5113
--- /dev/null
+++ b/arch/arm64/include/asm/cachetype.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CACHETYPE_H
+#define __ASM_CACHETYPE_H
+
+#include <asm/cputype.h>
+
+#define CTR_L1IP_SHIFT		14
+#define CTR_L1IP_MASK		3
+
+#define ICACHE_POLICY_RESERVED	0
+#define ICACHE_POLICY_AIVIVT	1
+#define ICACHE_POLICY_VIPT	2
+#define ICACHE_POLICY_PIPT	3
+
+static inline u32 icache_policy(void)
+{
+	return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;
+}
+
+/*
+ * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
+ * permitted in the I-cache.
+ */
+static inline int icache_is_aliasing(void)
+{
+	return icache_policy() != ICACHE_POLICY_PIPT;
+}
+
+static inline int icache_is_aivivt(void)
+{
+	return icache_policy() == ICACHE_POLICY_AIVIVT;
+}
+
+#endif	/* __ASM_CACHETYPE_H */
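[Editor's note, not part of the patch: flush_icache_range(), documented above, is what code-patching paths use to make a freshly written instruction visible to the instruction stream. A hedged sketch with a hypothetical helper name; the write and range are illustrative, not a real patch site:]

static void example_patch_insn(u32 *addr, u32 insn)
{
	*addr = insn;	/* new instruction reaches the D-cache */

	/* clean D-cache, invalidate I-cache for that range */
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + sizeof(insn));
}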
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
new file mode 100644
index 000000000..a03583d47
--- /dev/null
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -0,0 +1,187 @@
+/*
+ * Based on arch/arm/include/asm/cmpxchg.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CMPXCHG_H
+#define __ASM_CMPXCHG_H
+
+#include <linux/bug.h>
+
+#include <asm/barrier.h>
+
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+	unsigned long ret, tmp;
+
+	switch (size) {
+	case 1:
+		asm volatile("//	__xchg1\n"
+		"1:	ldaxrb	%w0, %2\n"
+		"	stlxrb	%w1, %w3, %2\n"
+		"	cbnz	%w1, 1b\n"
+			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 2:
		asm volatile("//	__xchg2\n"
		"1:	ldaxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 4:
		asm volatile("//	__xchg4\n"
		"1:	ldaxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	case 8:
		asm volatile("//	__xchg8\n"
		"1:	ldaxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
			: "cc", "memory");
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval = 0, res;

	switch (size) {
	case 1:
		do {
			asm volatile("// __cmpxchg1\n"
			"	ldxrb	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrb	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 2:
		do {
			asm volatile("// __cmpxchg2\n"
			"	ldxrh	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxrh	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 4:
		do {
			asm volatile("// __cmpxchg4\n"
			"	ldxr	%w1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%w1, %w3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %w4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	case 8:
		do {
			asm volatile("// __cmpxchg8\n"
			"	ldxr	%1, %2\n"
			"	mov	%w0, #0\n"
			"	cmp	%1, %3\n"
			"	b.ne	1f\n"
			"	stxr	%w0, %4, %2\n"
			"1:\n"
				: "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
				: "Ir" (old), "r" (new)
				: "cc");
		} while (res);
		break;

	default:
		BUILD_BUG();
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})

#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#endif	/* __ASM_CMPXCHG_H */
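[Editor's note, not part of the patch: cmpxchg() above returns the old value, so a transition succeeded exactly when the return value equals the expected old state. A minimal sketch of lock-free state hand-off, with hypothetical names:]

enum example_state { EX_IDLE, EX_RUNNING };

static unsigned long example_state;

static bool example_start(void)
{
	/* EX_IDLE -> EX_RUNNING only if nobody beat us to it */
	return cmpxchg(&example_state, EX_IDLE, EX_RUNNING) == EX_IDLE;
}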
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
new file mode 100644
index 000000000..a203e973b
--- /dev/null
+++ b/arch/arm64/include/asm/compat.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_COMPAT_H
+#define __ASM_COMPAT_H
+#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+
+#define COMPAT_USER_HZ		100
+#define COMPAT_UTS_MACHINE	"armv8l\0\0"
+
+typedef u32		compat_size_t;
+typedef s32		compat_ssize_t;
+typedef s32		compat_time_t;
+typedef s32		compat_clock_t;
+typedef s32		compat_pid_t;
+typedef u16		__compat_uid_t;
+typedef u16		__compat_gid_t;
+typedef u16		__compat_uid16_t;
+typedef u16		__compat_gid16_t;
+typedef u32		__compat_uid32_t;
+typedef u32		__compat_gid32_t;
+typedef u16		compat_mode_t;
+typedef u32		compat_ino_t;
+typedef u32		compat_dev_t;
+typedef s32		compat_off_t;
+typedef s64		compat_loff_t;
+typedef s32		compat_nlink_t;
+typedef u16		compat_ipc_pid_t;
+typedef s32		compat_daddr_t;
+typedef u32		compat_caddr_t;
+typedef __kernel_fsid_t	compat_fsid_t;
+typedef s32		compat_key_t;
+typedef s32		compat_timer_t;
+
+typedef s16		compat_short_t;
+typedef s32		compat_int_t;
+typedef s32		compat_long_t;
+typedef s64		compat_s64;
+typedef u16		compat_ushort_t;
+typedef u32		compat_uint_t;
+typedef u32		compat_ulong_t;
+typedef u64		compat_u64;
+typedef u32		compat_uptr_t;
+
+struct compat_timespec {
+	compat_time_t	tv_sec;
+	s32		tv_nsec;
+};
+
+struct compat_timeval {
+	compat_time_t	tv_sec;
+	s32		tv_usec;
+};
+
+struct compat_stat {
+	compat_dev_t	st_dev;
+	compat_ino_t	st_ino;
+	compat_mode_t	st_mode;
+	compat_ushort_t	st_nlink;
+	__compat_uid16_t	st_uid;
+	__compat_gid16_t	st_gid;
+	compat_dev_t	st_rdev;
+	compat_off_t	st_size;
+	compat_off_t	st_blksize;
+	compat_off_t	st_blocks;
+	compat_time_t	st_atime;
+	compat_ulong_t	st_atime_nsec;
+	compat_time_t	st_mtime;
+	compat_ulong_t	st_mtime_nsec;
+	compat_time_t	st_ctime;
+	compat_ulong_t	st_ctime_nsec;
+	compat_ulong_t	__unused4[2];
+};
+
+struct compat_flock {
+	short		l_type;
+	short		l_whence;
+	compat_off_t	l_start;
+	compat_off_t	l_len;
+	compat_pid_t	l_pid;
+};
+
+#define F_GETLK64	12	/*  using 'struct flock64' */
+#define F_SETLK64	13
+#define F_SETLKW64	14
+
+struct compat_flock64 {
+	short		l_type;
+	short		l_whence;
+	compat_loff_t	l_start;
+	compat_loff_t	l_len;
+	compat_pid_t	l_pid;
+};
+
+struct compat_statfs {
+	int		f_type;
+	int		f_bsize;
+	int		f_blocks;
+	int		f_bfree;
+	int		f_bavail;
+	int		f_files;
+	int		f_ffree;
+	compat_fsid_t	f_fsid;
+	int		f_namelen;	/* SunOS ignores this field. */
+	int		f_frsize;
+	int		f_flags;
+	int		f_spare[4];
+};
+
+#define COMPAT_RLIM_INFINITY		0xffffffff
+
+typedef u32		compat_old_sigset_t;
+
+#define _COMPAT_NSIG		64
+#define _COMPAT_NSIG_BPW	32
+
+typedef u32		compat_sigset_word;
+
+typedef union compat_sigval {
+	compat_int_t	sival_int;
+	compat_uptr_t	sival_ptr;
+} compat_sigval_t;
+
+typedef struct compat_siginfo {
+	int si_signo;
+	int si_errno;
+	int si_code;
+
+	union {
+		/* The padding is the same size as AArch64. */
+		int _pad[128/sizeof(int) - 3];
+
+		/* kill() */
+		struct {
+			compat_pid_t _pid;	/* sender's pid */
+			__compat_uid32_t _uid;	/* sender's uid */
+		} _kill;
+
+		/* POSIX.1b timers */
+		struct {
+			compat_timer_t _tid;	/* timer id */
+			int _overrun;		/* overrun count */
+			compat_sigval_t _sigval;	/* same as below */
+			int _sys_private;	/* not to be passed to user */
+		} _timer;
+
+		/* POSIX.1b signals */
+		struct {
+			compat_pid_t _pid;	/* sender's pid */
+			__compat_uid32_t _uid;	/* sender's uid */
+			compat_sigval_t _sigval;
+		} _rt;
+
+		/* SIGCHLD */
+		struct {
+			compat_pid_t _pid;	/* which child */
+			__compat_uid32_t _uid;	/* sender's uid */
+			int _status;		/* exit code */
+			compat_clock_t _utime;
+			compat_clock_t _stime;
+		} _sigchld;
+
+		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+		struct {
+			compat_uptr_t _addr; /* faulting insn/memory ref. */
+			short _addr_lsb; /* LSB of the reported address */
+		} _sigfault;
+
+		/* SIGPOLL */
+		struct {
+			compat_long_t _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
+			int _fd;
+		} _sigpoll;
+
+		/* SIGSYS */
+		struct {
+			compat_uptr_t _call_addr; /* calling user insn */
+			int _syscall;	/* triggering system call number */
+			unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
+		} _sigsys;
+	} _sifields;
+} compat_siginfo_t;
+
+#define COMPAT_OFF_T_MAX	0x7fffffff
+#define COMPAT_LOFF_T_MAX	0x7fffffffffffffffL
+
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+	return (void __user *)(unsigned long)uptr;
+}
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+	return (u32)(unsigned long)uptr;
+}
+
+#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+
+static inline void __user *arch_compat_alloc_user_space(long len)
+{
+	return (void __user *)compat_user_stack_pointer() - len;
+}
+
+struct compat_ipc64_perm {
+	compat_key_t key;
+	__compat_uid32_t uid;
+	__compat_gid32_t gid;
+	__compat_uid32_t cuid;
+	__compat_gid32_t cgid;
+	unsigned short mode;
+	unsigned short __pad1;
+	unsigned short seq;
+	unsigned short __pad2;
+	compat_ulong_t unused1;
+	compat_ulong_t unused2;
+};
+
+struct compat_semid64_ds {
+	struct compat_ipc64_perm sem_perm;
+	compat_time_t  sem_otime;
+	compat_ulong_t __unused1;
+	compat_time_t  sem_ctime;
+	compat_ulong_t __unused2;
+	compat_ulong_t sem_nsems;
+	compat_ulong_t __unused3;
+	compat_ulong_t __unused4;
+};
+
+struct compat_msqid64_ds {
+	struct compat_ipc64_perm msg_perm;
+	compat_time_t  msg_stime;
+	compat_ulong_t __unused1;
+	compat_time_t  msg_rtime;
+	compat_ulong_t __unused2;
+	compat_time_t  msg_ctime;
+	compat_ulong_t __unused3;
+	compat_ulong_t msg_cbytes;
+	compat_ulong_t msg_qnum;
+	compat_ulong_t msg_qbytes;
+	compat_pid_t   msg_lspid;
+	compat_pid_t   msg_lrpid;
+	compat_ulong_t __unused4;
+	compat_ulong_t __unused5;
+};
+
+struct compat_shmid64_ds {
+	struct compat_ipc64_perm shm_perm;
+	compat_size_t  shm_segsz;
+	compat_time_t  shm_atime;
+	compat_ulong_t __unused1;
+	compat_time_t  shm_dtime;
+	compat_ulong_t __unused2;
+	compat_time_t  shm_ctime;
+	compat_ulong_t __unused3;
+	compat_pid_t   shm_cpid;
+	compat_pid_t   shm_lpid;
+	compat_ulong_t shm_nattch;
+	compat_ulong_t __unused4;
+	compat_ulong_t __unused5;
+};
+
+static inline int is_compat_task(void)
+{
+	return test_thread_flag(TIF_32BIT);
+}
+
+static inline int is_compat_thread(struct thread_info *thread)
+{
+	return test_ti_thread_flag(thread, TIF_32BIT);
+}
+
+#else /* !CONFIG_COMPAT */
+
+static inline int is_compat_thread(struct thread_info *thread)
+{
+	return 0;
+}
+
+#endif /* CONFIG_COMPAT */
+#endif /* __KERNEL__ */
+#endif /* __ASM_COMPAT_H */
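[Editor's note, not part of the patch: compat_ptr() above is how a compat syscall handler widens a 32-bit user pointer before dereferencing it. A hedged sketch assuming copy_from_user() from <linux/uaccess.h>; the function name and arguments are hypothetical:]

static int example_compat_get(compat_uptr_t uaddr, struct compat_timespec *ts)
{
	struct compat_timespec __user *uts = compat_ptr(uaddr);

	/* widen the 32-bit pointer, then copy the compat layout in */
	return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
}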
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
new file mode 100644
index 000000000..ee35fd0f2
--- /dev/null
+++ b/arch/arm64/include/asm/compiler.h
@@ -0,0 +1,30 @@
+/*
+ * Based on arch/arm/include/asm/compiler.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_COMPILER_H
+#define __ASM_COMPILER_H
+
+/*
+ * This is used to ensure the compiler did actually allocate the register we
+ * asked it for some inline assembly sequences. Apparently we can't trust the
+ * compiler from one version to another so a bit of paranoia won't hurt. This
+ * string is meant to be concatenated with the inline asm string and will
+ * cause compilation to stop on mismatch. (for details, see gcc PR 15089)
+ */
+#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
+
+#endif	/* __ASM_COMPILER_H */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
new file mode 100644
index 000000000..d7b4b38a8
--- /dev/null
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+
+struct device_node;
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name:	Name of the property as appears in a devicetree cpu node's
+ *		enable-method property.
+ * @cpu_init:	Reads any data necessary for a specific enable-method from the
+ *		devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ *		mechanism for doing so, tests whether it is possible to boot
+ *		the given CPU.
+ * @cpu_boot:	Boots a cpu into the kernel.
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary
+ *		synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ *		reason, which will cause the hot unplug to be aborted. Called
+ *		from the cpu to be killed.
+ * @cpu_die:	Makes a cpu leave the kernel. Must not fail. Called from the
+ *		cpu being killed.
+ * @cpu_kill:  Ensures a cpu has left the kernel. Called from another cpu.
+ * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
+ *               to wrong parameters or error conditions. Called from the
+ *               CPU being suspended. Must be called with IRQs disabled.
+ */
+struct cpu_operations {
+	const char	*name;
+	int		(*cpu_init)(struct device_node *, unsigned int);
+	int		(*cpu_prepare)(unsigned int);
+	int		(*cpu_boot)(unsigned int);
+	void		(*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+	int		(*cpu_disable)(unsigned int cpu);
+	void		(*cpu_die)(unsigned int cpu);
+	int		(*cpu_kill)(unsigned int cpu);
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+	int		(*cpu_suspend)(unsigned long);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+extern int __init cpu_read_ops(struct device_node *dn, int cpu);
+extern void __init cpu_read_bootcpu_ops(void);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
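[Editor's note, not part of the patch: the shape of an enable-method backend implementing the callbacks documented above. The names and stub bodies are hypothetical; the real backends in this tree (spin-table, PSCI) live under arch/arm64/kernel/.]

static int example_cpu_init(struct device_node *dn, unsigned int cpu)
{
	return 0;	/* parse per-cpu properties from dn here */
}

static int example_cpu_prepare(unsigned int cpu)
{
	return 0;	/* verify this cpu can actually be booted */
}

static int example_cpu_boot(unsigned int cpu)
{
	return 0;	/* kick the cpu towards the kernel entry point */
}

static const struct cpu_operations example_cpu_ops = {
	.name		= "example-method",	/* matches the DT enable-method */
	.cpu_init	= example_cpu_init,
	.cpu_prepare	= example_cpu_prepare,
	.cpu_boot	= example_cpu_boot,
};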
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
new file mode 100644
index 000000000..cd4ac0516
--- /dev/null
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_CPUFEATURE_H
+#define __ASM_CPUFEATURE_H
+
+#include <asm/hwcap.h>
+
+/*
+ * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
+ * in the kernel and for user space to keep track of which optional features
+ * are supported by the current system. So let's map feature 'x' to HWCAP_x.
+ * Note that HWCAP_x constants are bit fields so we need to take the log.
+ */
+
+#define MAX_CPU_FEATURES	(8 * sizeof(elf_hwcap))
+#define cpu_feature(x)		ilog2(HWCAP_ ## x)
+
+static inline bool cpu_have_feature(unsigned int num)
+{
+	return elf_hwcap & (1UL << num);
+}
+
+#endif
diff --git a/arch/arm64/include/asm/cputable.h b/arch/arm64/include/asm/cputable.h
new file mode 100644
index 000000000..e3bd983d3
--- /dev/null
+++ b/arch/arm64/include/asm/cputable.h
@@ -0,0 +1,30 @@
+/*
+ * arch/arm64/include/asm/cputable.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUTABLE_H
+#define __ASM_CPUTABLE_H
+
+struct cpu_info {
+	unsigned int	cpu_id_val;
+	unsigned int	cpu_id_mask;
+	const char	*cpu_name;
+	unsigned long	(*cpu_setup)(void);
+};
+
+extern struct cpu_info *lookup_processor_type(unsigned int);
+
+#endif
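[Editor's note, not part of the patch: the cpu_feature() mapping above turns an ELF hwcap into a feature number by taking the log of the bit. A one-line sketch, assuming the HWCAP_FP hwcap defined by arm64's <asm/hwcap.h>:]

static bool example_have_fp(void)
{
	/* cpu_feature(FP) == ilog2(HWCAP_FP) */
	return cpu_have_feature(cpu_feature(FP));
}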
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
new file mode 100644
index 000000000..3c1f29592
--- /dev/null
+++ b/arch/arm64/include/asm/cputype.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPUTYPE_H
+#define __ASM_CPUTYPE_H
+
+#define INVALID_HWID		ULONG_MAX
+
+#define MPIDR_SMP_BITMASK	(0x3 << 30)
+#define MPIDR_SMP_VALUE		(0x2 << 30)
+
+#define MPIDR_MT_BITMASK	(0x1 << 24)
+
+#define MPIDR_HWID_BITMASK	0xff00ffffff
+
+#define MPIDR_INVALID		(~MPIDR_HWID_BITMASK)
+
+#define MPIDR_LEVEL_BITS_SHIFT	3
+#define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
+#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_LEVEL_SHIFT(level) \
+	(((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+	((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+
+#define read_cpuid(reg) ({ \
+	u64 __val; \
+	asm("mrs	%0, " #reg : "=r" (__val)); \
+	__val; \
+})
+
+#define ARM_CPU_IMP_ARM		0x41
+#define ARM_CPU_IMP_APM		0x50
+
+#define ARM_CPU_PART_AEM_V8	0xD0F0
+#define ARM_CPU_PART_FOUNDATION	0xD000
+#define ARM_CPU_PART_CORTEX_A53	0xD030
+#define ARM_CPU_PART_CORTEX_A57	0xD070
+
+#define APM_CPU_PART_POTENZA	0x0000
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than directly reading processor_id or read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+	return read_cpuid(MIDR_EL1);
+}
+
+static inline u64 __attribute_const__ read_cpuid_mpidr(void)
+{
+	return read_cpuid(MPIDR_EL1);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+	return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+	return (read_cpuid_id() & 0xFFF0);
+}
+
+static inline u32 __attribute_const__ read_cpuid_cachetype(void)
+{
+	return read_cpuid(CTR_EL0);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
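[Editor's note, not part of the patch: pulling affinity fields out of MPIDR_EL1 with the helpers above is how topology code identifies a cpu's core and cluster. A small sketch with hypothetical names:]

static void example_read_affinity(u32 *core, u32 *cluster)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

	*core    = MPIDR_AFFINITY_LEVEL(mpidr, 0);	/* Aff0 */
	*cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);	/* Aff1 */
}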
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
new file mode 100644
index 000000000..f779fa429
--- /dev/null
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_DEBUG_MONITORS_H
+#define __ASM_DEBUG_MONITORS_H
+
+#ifdef __KERNEL__
+
+#define	DBG_ESR_EVT(x)		(((x) >> 27) & 0x7)
+
+/* AArch64 */
+#define DBG_ESR_EVT_HWBP	0x0
+#define DBG_ESR_EVT_HWSS	0x1
+#define DBG_ESR_EVT_HWWP	0x2
+#define DBG_ESR_EVT_BRK		0x6
+
+/*
+ * Break point instruction encoding
+ */
+#define BREAK_INSTR_SIZE	4
+
+/*
+ * ESR values expected for dynamic and compile time BRK instruction
+ */
+#define DBG_ESR_VAL_BRK(x)	(0xf2000000 | ((x) & 0xfffff))
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgbd are 0x400 - 0x7ff
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ */
+#define KGDB_DYN_DBG_BRK_IMM		0x400
+#define KGDB_COMPILED_DBG_BRK_IMM	0x401
+
+
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within BRK ins
+ */
+#define AARCH64_BREAK_MON	0xd4200000
+
+/*
+ * Extract byte from BRK instruction
+ */
+#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
+	((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
+
+/*
+ * Extract byte from BRK #imm16
+ */
+#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
+	(((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+
+#define KGDB_DYN_DBG_BRK_BYTE(x) \
+	(KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
+
+#define  KGDB_DYN_BRK_INS_BYTE0  KGDB_DYN_DBG_BRK_BYTE(0)
+#define  KGDB_DYN_BRK_INS_BYTE1  KGDB_DYN_DBG_BRK_BYTE(1)
+#define  KGDB_DYN_BRK_INS_BYTE2  KGDB_DYN_DBG_BRK_BYTE(2)
+#define  KGDB_DYN_BRK_INS_BYTE3  KGDB_DYN_DBG_BRK_BYTE(3)
+
+#define CACHE_FLUSH_IS_SAFE		1
+
+enum debug_el {
+	DBG_ACTIVE_EL0 = 0,
+	DBG_ACTIVE_EL1,
+};
+
+/* AArch32 */
+#define DBG_ESR_EVT_BKPT	0x4
+#define DBG_ESR_EVT_VECC	0x5
+
+#define AARCH32_BREAK_ARM	0x07f001f0
+#define AARCH32_BREAK_THUMB	0xde01
+#define AARCH32_BREAK_THUMB2_LO	0xf7f0
+#define AARCH32_BREAK_THUMB2_HI	0xa000
+
+#ifndef __ASSEMBLY__
+struct task_struct;
+
+#define DBG_ARCH_ID_RESERVED	0	/* In case of ptrace ABI updates. */
+
+#define DBG_HOOK_HANDLED	0
+#define DBG_HOOK_ERROR		1
+
+struct step_hook {
+	struct list_head node;
+	int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_step_hook(struct step_hook *hook);
+void unregister_step_hook(struct step_hook *hook);
+
+struct break_hook {
+	struct list_head node;
+	u32 esr_val;
+	u32 esr_mask;
+	int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_break_hook(struct break_hook *hook);
+void unregister_break_hook(struct break_hook *hook);
+
+u8 debug_monitors_arch(void);
+
+void enable_debug_monitors(enum debug_el el);
+void disable_debug_monitors(enum debug_el el);
+
+void user_rewind_single_step(struct task_struct *task);
+void user_fastforward_single_step(struct task_struct *task);
+
+void kernel_enable_single_step(struct pt_regs *regs);
+void kernel_disable_single_step(void);
+int kernel_active_single_step(void);
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+int reinstall_suspended_bps(struct pt_regs *regs);
+#else
+static inline int reinstall_suspended_bps(struct pt_regs *regs)
+{
+	return -ENODEV;
+}
+#endif
+
+#ifdef CONFIG_COMPAT
+int aarch32_break_handler(struct pt_regs *regs);
+#else
+static int aarch32_break_handler(struct pt_regs *regs)
+{
+	return -EFAULT;
+}
+#endif
+
+#endif	/* __ASSEMBLY */
+#endif	/* __KERNEL__ */
+#endif	/* __ASM_DEBUG_MONITORS_H */
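[Editor's note, not part of the patch: registering a BRK handler through the hook API above. The immediate 0x404 is an arbitrary value inside the 0x400-0x7ff range the comment reserves; names are hypothetical.]

static int example_brk_fn(struct pt_regs *regs, unsigned int esr)
{
	/* claim the exception so the generic handler does not escalate it */
	return DBG_HOOK_HANDLED;
}

static struct break_hook example_brk_hook = {
	.esr_val	= DBG_ESR_VAL_BRK(0x404),
	.esr_mask	= 0xffffffff,
	.fn		= example_brk_fn,
};

/* somewhere in init code: register_break_hook(&example_brk_hook); */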
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
new file mode 100644
index 000000000..0d8453c75
--- /dev/null
+++ b/arch/arm64/include/asm/device.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_DEVICE_H
+#define __ASM_DEVICE_H
+
+struct dev_archdata {
+	struct dma_map_ops *dma_ops;
+};
+
+struct pdev_archdata {
+};
+
+#endif
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 000000000..72bc72197
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
+
+#endif
+ */ +#ifndef __ASM_DMA_MAPPING_H +#define __ASM_DMA_MAPPING_H + +#ifdef __KERNEL__ + +#include <linux/types.h> +#include <linux/vmalloc.h> + +#include <asm-generic/dma-coherent.h> + +#define ARCH_HAS_DMA_GET_REQUIRED_MASK + +extern struct dma_map_ops *dma_ops; +extern struct dma_map_ops coherent_swiotlb_dma_ops; +extern struct dma_map_ops noncoherent_swiotlb_dma_ops; + +static inline struct dma_map_ops *get_dma_ops(struct device *dev) +{ + if (unlikely(!dev) || !dev->archdata.dma_ops) + return dma_ops; + else + return dev->archdata.dma_ops; +} + +static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) +{ + dev->archdata.dma_ops = ops; +} + + +#include <asm-generic/dma-mapping-common.h> + +static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + return (dma_addr_t)paddr; +} + +static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) +{ + return (phys_addr_t)dev_addr; +} + +static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + debug_dma_mapping_error(dev, dev_addr); + return ops->mapping_error(dev, dev_addr); +} + +static inline int dma_supported(struct device *dev, u64 mask) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + return ops->dma_supported(dev, mask); +} + +static inline int dma_set_mask(struct device *dev, u64 mask) +{ + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; + *dev->dma_mask = mask; + + return 0; +} + +static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) +{ + if (!dev->dma_mask) + return 0; + + return addr + size - 1 <= *dev->dma_mask; +} + +static inline void dma_mark_clean(void *addr, size_t size) +{ +} + +static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + void *vaddr; + + if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) + return vaddr; + + vaddr = ops->alloc(dev, size, dma_handle, flags, NULL); + debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); + return vaddr; +} + +static inline void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dev_addr) +{ + struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_release_from_coherent(dev, get_order(size), vaddr)) + return; + + debug_dma_free_coherent(dev, size, vaddr, dev_addr); + ops->free(dev, size, vaddr, dev_addr, NULL); +} + +/* + * There is no dma_cache_sync() implementation, so just return NULL here. + */ +static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t flags) +{ + return NULL; +} + +static inline void dma_free_noncoherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t handle) +{ +} + +#endif /* __KERNEL__ */ +#endif /* __ASM_DMA_MAPPING_H */ diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h new file mode 100644 index 000000000..e7fa87f92 --- /dev/null +++ b/arch/arm64/include/asm/elf.h @@ -0,0 +1,176 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_ELF_H +#define __ASM_ELF_H + +#include <asm/hwcap.h> + +/* + * ELF register definitions.. + */ +#include <asm/ptrace.h> +#include <asm/user.h> + +typedef unsigned long elf_greg_t; + +#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) +#define ELF_CORE_COPY_REGS(dest, regs) \ + *(struct user_pt_regs *)&(dest) = (regs)->user_regs; + +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; +typedef struct user_fpsimd_state elf_fpregset_t; + +/* + * AArch64 static relocation types. + */ + +/* Miscellaneous. */ +#define R_ARM_NONE 0 +#define R_AARCH64_NONE 256 + +/* Data. */ +#define R_AARCH64_ABS64 257 +#define R_AARCH64_ABS32 258 +#define R_AARCH64_ABS16 259 +#define R_AARCH64_PREL64 260 +#define R_AARCH64_PREL32 261 +#define R_AARCH64_PREL16 262 + +/* Instructions. */ +#define R_AARCH64_MOVW_UABS_G0 263 +#define R_AARCH64_MOVW_UABS_G0_NC 264 +#define R_AARCH64_MOVW_UABS_G1 265 +#define R_AARCH64_MOVW_UABS_G1_NC 266 +#define R_AARCH64_MOVW_UABS_G2 267 +#define R_AARCH64_MOVW_UABS_G2_NC 268 +#define R_AARCH64_MOVW_UABS_G3 269 + +#define R_AARCH64_MOVW_SABS_G0 270 +#define R_AARCH64_MOVW_SABS_G1 271 +#define R_AARCH64_MOVW_SABS_G2 272 + +#define R_AARCH64_LD_PREL_LO19 273 +#define R_AARCH64_ADR_PREL_LO21 274 +#define R_AARCH64_ADR_PREL_PG_HI21 275 +#define R_AARCH64_ADR_PREL_PG_HI21_NC 276 +#define R_AARCH64_ADD_ABS_LO12_NC 277 +#define R_AARCH64_LDST8_ABS_LO12_NC 278 + +#define R_AARCH64_TSTBR14 279 +#define R_AARCH64_CONDBR19 280 +#define R_AARCH64_JUMP26 282 +#define R_AARCH64_CALL26 283 +#define R_AARCH64_LDST16_ABS_LO12_NC 284 +#define R_AARCH64_LDST32_ABS_LO12_NC 285 +#define R_AARCH64_LDST64_ABS_LO12_NC 286 +#define R_AARCH64_LDST128_ABS_LO12_NC 299 + +#define R_AARCH64_MOVW_PREL_G0 287 +#define R_AARCH64_MOVW_PREL_G0_NC 288 +#define R_AARCH64_MOVW_PREL_G1 289 +#define R_AARCH64_MOVW_PREL_G1_NC 290 +#define R_AARCH64_MOVW_PREL_G2 291 +#define R_AARCH64_MOVW_PREL_G2_NC 292 +#define R_AARCH64_MOVW_PREL_G3 293 + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_AARCH64 + +#define ELF_PLATFORM_SIZE 16 +#define ELF_PLATFORM ("aarch64") + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_AARCH64) + +#define elf_read_implies_exec(ex,stk) (stk != EXSTACK_DISABLE_X) + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ +extern unsigned long randomize_et_dyn(unsigned long base); +#define ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_64 / 3)) + +/* + * When the program starts, a1 contains a pointer to a function to be + * registered with atexit, as per the SVR4 ABI. A value of 0 means we have no + * such handler. 
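+ *
+ * ELF_PLAT_INIT below therefore only has to clear x0 (regs[0]) when a new
+ * process starts, signalling that no such handler is registered.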
+ */ +#define ELF_PLAT_INIT(_r, load_addr) (_r)->regs[0] = 0 + +#define SET_PERSONALITY(ex) clear_thread_flag(TIF_32BIT); + +#define ARCH_DLINFO \ +do { \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (elf_addr_t)current->mm->context.vdso); \ +} while (0) + +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); + +/* 1GB of VA */ +#ifdef CONFIG_COMPAT +#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ + 0x7ff >> (PAGE_SHIFT - 12) : \ + 0x3ffff >> (PAGE_SHIFT - 12)) +#else +#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) +#endif + +struct mm_struct; +extern unsigned long arch_randomize_brk(struct mm_struct *mm); +#define arch_randomize_brk arch_randomize_brk + +#ifdef CONFIG_COMPAT +#define COMPAT_ELF_PLATFORM ("v8l") + +#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3)) + +/* AArch32 registers. */ +#define COMPAT_ELF_NGREG 18 +typedef unsigned int compat_elf_greg_t; +typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; + +/* AArch32 EABI. */ +#define EF_ARM_EABI_MASK 0xff000000 +#define compat_elf_check_arch(x) (((x)->e_machine == EM_ARM) && \ + ((x)->e_flags & EF_ARM_EABI_MASK)) + +#define compat_start_thread compat_start_thread +#define COMPAT_SET_PERSONALITY(ex) set_thread_flag(TIF_32BIT); +#define COMPAT_ARCH_DLINFO +extern int aarch32_setup_vectors_page(struct linux_binprm *bprm, + int uses_interp); +#define compat_arch_setup_additional_pages \ + aarch32_setup_vectors_page + +#endif /* CONFIG_COMPAT */ + +#endif diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h new file mode 100644 index 000000000..78834123a --- /dev/null +++ b/arch/arm64/include/asm/esr.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2013 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */
+
+#ifndef __ASM_ESR_H
+#define __ASM_ESR_H
+
+#define ESR_EL1_EC_SHIFT (26)
+#define ESR_EL1_IL (1U << 25)
+
+#define ESR_EL1_EC_UNKNOWN (0x00)
+#define ESR_EL1_EC_WFI (0x01)
+#define ESR_EL1_EC_CP15_32 (0x03)
+#define ESR_EL1_EC_CP15_64 (0x04)
+#define ESR_EL1_EC_CP14_MR (0x05)
+#define ESR_EL1_EC_CP14_LS (0x06)
+#define ESR_EL1_EC_FP_ASIMD (0x07)
+#define ESR_EL1_EC_CP10_ID (0x08)
+#define ESR_EL1_EC_CP14_64 (0x0C)
+#define ESR_EL1_EC_ILL_ISS (0x0E)
+#define ESR_EL1_EC_SVC32 (0x11)
+#define ESR_EL1_EC_SVC64 (0x15)
+#define ESR_EL1_EC_SYS64 (0x18)
+#define ESR_EL1_EC_IABT_EL0 (0x20)
+#define ESR_EL1_EC_IABT_EL1 (0x21)
+#define ESR_EL1_EC_PC_ALIGN (0x22)
+#define ESR_EL1_EC_DABT_EL0 (0x24)
+#define ESR_EL1_EC_DABT_EL1 (0x25)
+#define ESR_EL1_EC_SP_ALIGN (0x26)
+#define ESR_EL1_EC_FP_EXC32 (0x28)
+#define ESR_EL1_EC_FP_EXC64 (0x2C)
+#define ESR_EL1_EC_SERROR (0x2F)
+#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
+#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
+#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
+#define ESR_EL1_EC_SOFTSTP_EL1 (0x33)
+#define ESR_EL1_EC_WATCHPT_EL0 (0x34)
+#define ESR_EL1_EC_WATCHPT_EL1 (0x35)
+#define ESR_EL1_EC_BKPT32 (0x38)
+#define ESR_EL1_EC_BRK64 (0x3C)
+
+#endif /* __ASM_ESR_H */
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
new file mode 100644
index 000000000..0303705fc
--- /dev/null
+++ b/arch/arm64/include/asm/exception.h
@@ -0,0 +1,24 @@
+/*
+ * Based on arch/arm/include/asm/exception.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_EXCEPTION_H
+#define __ASM_EXCEPTION_H
+
+#define __exception __attribute__((section(".exception.text")))
+#define __exception_irq_entry __exception
+
+#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/exec.h b/arch/arm64/include/asm/exec.h
new file mode 100644
index 000000000..db0563c23
--- /dev/null
+++ b/arch/arm64/include/asm/exec.h
@@ -0,0 +1,23 @@
+/*
+ * Based on arch/arm/include/asm/exec.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_EXEC_H
+#define __ASM_EXEC_H
+
+extern unsigned long arch_align_stack(unsigned long sp);
+
+#endif /* __ASM_EXEC_H */
diff --git a/arch/arm64/include/asm/fb.h b/arch/arm64/include/asm/fb.h
new file mode 100644
index 000000000..adb88a64b
--- /dev/null
+++ b/arch/arm64/include/asm/fb.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_FB_H_ +#define __ASM_FB_H_ + +#include <linux/fb.h> +#include <linux/fs.h> +#include <asm/page.h> + +static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma, + unsigned long off) +{ + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); +} + +static inline int fb_is_primary_device(struct fb_info *info) +{ + return 0; +} + +#endif /* __ASM_FB_H_ */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h new file mode 100644 index 000000000..50f559f57 --- /dev/null +++ b/arch/arm64/include/asm/fpsimd.h @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_FP_H +#define __ASM_FP_H + +#include <asm/ptrace.h> + +#ifndef __ASSEMBLY__ + +/* + * FP/SIMD storage area has: + * - FPSR and FPCR + * - 32 128-bit data registers + * + * Note that user_fpsimd forms a prefix of this structure, which is + * relied upon in the ptrace FP/SIMD accessors. + */ +struct fpsimd_state { + union { + struct user_fpsimd_state user_fpsimd; + struct { + __uint128_t vregs[32]; + u32 fpsr; + u32 fpcr; + }; + }; + /* the id of the last cpu to have restored this state */ + unsigned int cpu; +}; + +/* + * Struct for stacking the bottom 'n' FP/SIMD registers. + */ +struct fpsimd_partial_state { + u32 fpsr; + u32 fpcr; + u32 num_regs; + __uint128_t vregs[32]; +}; + + +#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +/* Masks for extracting the FPSR and FPCR from the FPSCR */ +#define VFP_FPSCR_STAT_MASK 0xf800009f +#define VFP_FPSCR_CTRL_MASK 0x07f79f00 +/* + * The VFP state has 32x64-bit registers and a single 32-bit + * control/status register. 
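+ *
+ * VFP_STATE_SIZE below follows directly from that layout: 32 doubleword
+ * registers (32 * 8 bytes) plus 4 bytes for the combined FPSCR word.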
+ */ +#define VFP_STATE_SIZE ((32 * 8) + 4) +#endif + +struct task_struct; + +extern void fpsimd_save_state(struct fpsimd_state *state); +extern void fpsimd_load_state(struct fpsimd_state *state); + +extern void fpsimd_thread_switch(struct task_struct *next); +extern void fpsimd_flush_thread(void); + +extern void fpsimd_preserve_current_state(void); +extern void fpsimd_restore_current_state(void); +extern void fpsimd_update_current_state(struct fpsimd_state *state); + +extern void fpsimd_flush_task_state(struct task_struct *target); + +extern void fpsimd_save_partial_state(struct fpsimd_partial_state *state, + u32 num_regs); +extern void fpsimd_load_partial_state(struct fpsimd_partial_state *state); + +#endif + +#endif diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h new file mode 100644 index 000000000..768414d55 --- /dev/null +++ b/arch/arm64/include/asm/fpsimdmacros.h @@ -0,0 +1,99 @@ +/* + * FP/SIMD state saving and restoring macros + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +.macro fpsimd_save state, tmpnr + stp q0, q1, [\state, #16 * 0] + stp q2, q3, [\state, #16 * 2] + stp q4, q5, [\state, #16 * 4] + stp q6, q7, [\state, #16 * 6] + stp q8, q9, [\state, #16 * 8] + stp q10, q11, [\state, #16 * 10] + stp q12, q13, [\state, #16 * 12] + stp q14, q15, [\state, #16 * 14] + stp q16, q17, [\state, #16 * 16] + stp q18, q19, [\state, #16 * 18] + stp q20, q21, [\state, #16 * 20] + stp q22, q23, [\state, #16 * 22] + stp q24, q25, [\state, #16 * 24] + stp q26, q27, [\state, #16 * 26] + stp q28, q29, [\state, #16 * 28] + stp q30, q31, [\state, #16 * 30]! + mrs x\tmpnr, fpsr + str w\tmpnr, [\state, #16 * 2] + mrs x\tmpnr, fpcr + str w\tmpnr, [\state, #16 * 2 + 4] +.endm + +.macro fpsimd_restore state, tmpnr + ldp q0, q1, [\state, #16 * 0] + ldp q2, q3, [\state, #16 * 2] + ldp q4, q5, [\state, #16 * 4] + ldp q6, q7, [\state, #16 * 6] + ldp q8, q9, [\state, #16 * 8] + ldp q10, q11, [\state, #16 * 10] + ldp q12, q13, [\state, #16 * 12] + ldp q14, q15, [\state, #16 * 14] + ldp q16, q17, [\state, #16 * 16] + ldp q18, q19, [\state, #16 * 18] + ldp q20, q21, [\state, #16 * 20] + ldp q22, q23, [\state, #16 * 22] + ldp q24, q25, [\state, #16 * 24] + ldp q26, q27, [\state, #16 * 26] + ldp q28, q29, [\state, #16 * 28] + ldp q30, q31, [\state, #16 * 30]! 
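+ // Note: the writeback on the ldp above has advanced \state by #16 * 30,
+ // so the fpsr/fpcr words that follow the 32 vregs are now reachable at
+ // offsets #16 * 2 and #16 * 2 + 4 (cf. struct fpsimd_state).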
+ ldr w\tmpnr, [\state, #16 * 2]
+ msr fpsr, x\tmpnr
+ ldr w\tmpnr, [\state, #16 * 2 + 4]
+ msr fpcr, x\tmpnr
+.endm
+
+.altmacro
+.macro fpsimd_save_partial state, numnr, tmpnr1, tmpnr2
+ mrs x\tmpnr1, fpsr
+ str w\numnr, [\state, #8]
+ mrs x\tmpnr2, fpcr
+ stp w\tmpnr1, w\tmpnr2, [\state]
+ adr x\tmpnr1, 0f
+ add \state, \state, x\numnr, lsl #4
+ sub x\tmpnr1, x\tmpnr1, x\numnr, lsl #1
+ br x\tmpnr1
+ .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+ .irp qb, %(qa + 1)
+ stp q\qa, q\qb, [\state, # -16 * \qa - 16]
+ .endr
+ .endr
+0:
+.endm
+
+.macro fpsimd_restore_partial state, tmpnr1, tmpnr2
+ ldp w\tmpnr1, w\tmpnr2, [\state]
+ msr fpsr, x\tmpnr1
+ msr fpcr, x\tmpnr2
+ adr x\tmpnr1, 0f
+ ldr w\tmpnr2, [\state, #8]
+ add \state, \state, x\tmpnr2, lsl #4
+ sub x\tmpnr1, x\tmpnr1, x\tmpnr2, lsl #1
+ br x\tmpnr1
+ .irp qa, 30, 28, 26, 24, 22, 20, 18, 16, 14, 12, 10, 8, 6, 4, 2, 0
+ .irp qb, %(qa + 1)
+ ldp q\qa, q\qb, [\state, # -16 * \qa - 16]
+ .endr
+ .endr
+0:
+.endm
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
new file mode 100644
index 000000000..c5534facf
--- /dev/null
+++ b/arch/arm64/include/asm/ftrace.h
@@ -0,0 +1,59 @@
+/*
+ * arch/arm64/include/asm/ftrace.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_FTRACE_H
+#define __ASM_FTRACE_H
+
+#include <asm/insn.h>
+
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
+
+#ifndef __ASSEMBLY__
+#include <linux/compat.h>
+
+extern void _mcount(unsigned long);
+extern void *return_address(unsigned int);
+
+struct dyn_arch_ftrace {
+ /* No extra data needed for arm64 */
+};
+
+extern unsigned long ftrace_graph_call;
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /*
+ * addr is the address of the mcount call instruction.
+ * recordmcount does the necessary offset calculation.
+ */
+ return addr;
+}
+
+#define ftrace_return_address(n) return_address(n)
+
+/*
+ * Because AArch32 mode does not share the same syscall table with AArch64,
+ * tracing compat syscalls may result in reporting bogus syscalls or even
+ * hang-up, so just do not trace them.
+ * See kernel/trace/trace_syscalls.c
+ *
+ * x86 code says:
+ * If the user really wants these, then they should use the
+ * raw syscall tracepoints with filtering.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ return is_compat_task();
+}
+#endif /* ifndef __ASSEMBLY__ */
+
+#endif /* __ASM_FTRACE_H */
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
new file mode 100644
index 000000000..c582fa316
--- /dev/null
+++ b/arch/arm64/include/asm/futex.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_FUTEX_H +#define __ASM_FUTEX_H + +#ifdef __KERNEL__ + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <asm/errno.h> + +#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ + asm volatile( \ +"1: ldaxr %w1, %2\n" \ + insn "\n" \ +"2: stlxr %w3, %w0, %2\n" \ +" cbnz %w3, 1b\n" \ +"3:\n" \ +" .pushsection .fixup,\"ax\"\n" \ +"4: mov %w0, %w5\n" \ +" b 3b\n" \ +" .popsection\n" \ +" .pushsection __ex_table,\"a\"\n" \ +" .align 3\n" \ +" .quad 1b, 4b, 2b, 4b\n" \ +" .popsection\n" \ + : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ + : "r" (oparg), "Ir" (-EFAULT) \ + : "cc", "memory") + +static inline int +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +{ + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; + int oparg = (encoded_op << 8) >> 20; + int cmparg = (encoded_op << 20) >> 20; + int oldval = 0, ret, tmp; + + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) + oparg = 1 << oparg; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + pagefault_disable(); /* implies preempt_disable() */ + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %w0, %w4", + ret, oldval, uaddr, tmp, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("add %w0, %w1, %w4", + ret, oldval, uaddr, tmp, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("orr %w0, %w1, %w4", + ret, oldval, uaddr, tmp, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("and %w0, %w1, %w4", + ret, oldval, uaddr, tmp, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("eor %w0, %w1, %w4", + ret, oldval, uaddr, tmp, oparg); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); /* subsumes preempt_enable() */ + + if (!ret) { + switch (cmp) { + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; + default: ret = -ENOSYS; + } + } + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 val, tmp; + + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + + asm volatile("// futex_atomic_cmpxchg_inatomic\n" +"1: ldaxr %w1, %2\n" +" sub %w3, %w1, %w4\n" +" cbnz %w3, 3f\n" +"2: stlxr %w3, %w5, %2\n" +" cbnz %w3, 1b\n" +"3:\n" +" .pushsection .fixup,\"ax\"\n" +"4: mov %w0, %w6\n" +" b 3b\n" +" .popsection\n" +" .pushsection __ex_table,\"a\"\n" +" .align 3\n" +" .quad 1b, 4b, 2b, 4b\n" +" .popsection\n" + : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) + : "r" (oldval), "r" (newval), "Ir" (-EFAULT) + : "cc", "memory"); + + *uval = val; + return ret; +} + +#endif /* __KERNEL__ */ +#endif /* __ASM_FUTEX_H */ diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h new file mode 100644 index 000000000..990c051e7 --- /dev/null +++ b/arch/arm64/include/asm/hardirq.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_HARDIRQ_H +#define __ASM_HARDIRQ_H + +#include <linux/cache.h> +#include <linux/threads.h> +#include <asm/irq.h> + +#define NR_IPI 4 + +typedef struct { + unsigned int __softirq_pending; +#ifdef CONFIG_SMP + unsigned int ipi_irqs[NR_IPI]; +#endif +} ____cacheline_aligned irq_cpustat_t; + +#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ + +#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++ +#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member) + +#ifdef CONFIG_SMP +u64 smp_irq_stat_cpu(unsigned int cpu); +#define arch_irq_stat_cpu smp_irq_stat_cpu +#endif + +#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 + +static inline void ack_bad_irq(unsigned int irq) +{ + extern unsigned long irq_err_count; + irq_err_count++; +} + +extern void handle_IRQ(unsigned int, struct pt_regs *); + +/* + * No arch-specific IRQ flags. + */ +#define set_irq_flags(irq, flags) + +#endif /* __ASM_HARDIRQ_H */ diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h new file mode 100644 index 000000000..52b484b6a --- /dev/null +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_HW_BREAKPOINT_H +#define __ASM_HW_BREAKPOINT_H + +#ifdef __KERNEL__ + +struct arch_hw_breakpoint_ctrl { + u32 __reserved : 19, + len : 8, + type : 2, + privilege : 2, + enabled : 1; +}; + +struct arch_hw_breakpoint { + u64 address; + u64 trigger; + struct arch_hw_breakpoint_ctrl ctrl; +}; + +static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl) +{ + return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) | + ctrl.enabled; +} + +static inline void decode_ctrl_reg(u32 reg, + struct arch_hw_breakpoint_ctrl *ctrl) +{ + ctrl->enabled = reg & 0x1; + reg >>= 1; + ctrl->privilege = reg & 0x3; + reg >>= 2; + ctrl->type = reg & 0x3; + reg >>= 2; + ctrl->len = reg & 0xff; +} + +/* Breakpoint */ +#define ARM_BREAKPOINT_EXECUTE 0 + +/* Watchpoints */ +#define ARM_BREAKPOINT_LOAD 1 +#define ARM_BREAKPOINT_STORE 2 +#define AARCH64_ESR_ACCESS_MASK (1 << 6) + +/* Privilege Levels */ +#define AARCH64_BREAKPOINT_EL1 1 +#define AARCH64_BREAKPOINT_EL0 2 + +/* Lengths */ +#define ARM_BREAKPOINT_LEN_1 0x1 +#define ARM_BREAKPOINT_LEN_2 0x3 +#define ARM_BREAKPOINT_LEN_4 0xf +#define ARM_BREAKPOINT_LEN_8 0xff + +/* Kernel stepping */ +#define ARM_KERNEL_STEP_NONE 0 +#define ARM_KERNEL_STEP_ACTIVE 1 +#define ARM_KERNEL_STEP_SUSPEND 2 + +/* + * Limits. 
+ * Changing these will require modifications to the register accessors.
+ */
+#define ARM_MAX_BRP 16
+#define ARM_MAX_WRP 16
+
+/* Virtual debug register bases. */
+#define AARCH64_DBG_REG_BVR 0
+#define AARCH64_DBG_REG_BCR (AARCH64_DBG_REG_BVR + ARM_MAX_BRP)
+#define AARCH64_DBG_REG_WVR (AARCH64_DBG_REG_BCR + ARM_MAX_BRP)
+#define AARCH64_DBG_REG_WCR (AARCH64_DBG_REG_WVR + ARM_MAX_WRP)
+
+/* Debug register names. */
+#define AARCH64_DBG_REG_NAME_BVR "bvr"
+#define AARCH64_DBG_REG_NAME_BCR "bcr"
+#define AARCH64_DBG_REG_NAME_WVR "wvr"
+#define AARCH64_DBG_REG_NAME_WCR "wcr"
+
+/* Accessor macros for the debug registers. */
+#define AARCH64_DBG_READ(N, REG, VAL) do {\
+ asm volatile("mrs %0, dbg" REG #N "_el1" : "=r" (VAL));\
+} while (0)
+
+#define AARCH64_DBG_WRITE(N, REG, VAL) do {\
+ asm volatile("msr dbg" REG #N "_el1, %0" :: "r" (VAL));\
+} while (0)
+
+struct task_struct;
+struct notifier_block;
+struct perf_event;
+struct pmu;
+
+extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+ int *gen_len, int *gen_type);
+extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
+extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
+extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data);
+
+extern int arch_install_hw_breakpoint(struct perf_event *bp);
+extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
+extern void hw_breakpoint_pmu_read(struct perf_event *bp);
+extern int hw_breakpoint_slots(int type);
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+extern void hw_breakpoint_thread_switch(struct task_struct *next);
+extern void ptrace_hw_copy_thread(struct task_struct *task);
+#else
+static inline void hw_breakpoint_thread_switch(struct task_struct *next)
+{
+}
+static inline void ptrace_hw_copy_thread(struct task_struct *task)
+{
+}
+#endif
+
+extern struct pmu perf_ops_bp;
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_HW_BREAKPOINT_H */
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
new file mode 100644
index 000000000..9a4cbd60c
--- /dev/null
+++ b/arch/arm64/include/asm/hwcap.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_HWCAP_H
+#define __ASM_HWCAP_H
+
+#include <uapi/asm/hwcap.h>
+
+#define COMPAT_HWCAP_HALF (1 << 1)
+#define COMPAT_HWCAP_THUMB (1 << 2)
+#define COMPAT_HWCAP_FAST_MULT (1 << 4)
+#define COMPAT_HWCAP_VFP (1 << 6)
+#define COMPAT_HWCAP_EDSP (1 << 7)
+#define COMPAT_HWCAP_NEON (1 << 12)
+#define COMPAT_HWCAP_VFPv3 (1 << 13)
+#define COMPAT_HWCAP_TLS (1 << 15)
+#define COMPAT_HWCAP_VFPv4 (1 << 16)
+#define COMPAT_HWCAP_IDIVA (1 << 17)
+#define COMPAT_HWCAP_IDIVT (1 << 18)
+#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM (1 << 21)
+
+#ifndef __ASSEMBLY__
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports.
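+ *
+ * For example, a user program would typically read AT_HWCAP from the
+ * auxiliary vector (e.g. via getauxval(AT_HWCAP)) and test these bits;
+ * a compat (AArch32) task is shown the COMPAT_HWCAP_* bits above instead.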
+ */ +#define ELF_HWCAP (elf_hwcap) + +#ifdef CONFIG_COMPAT +#define COMPAT_ELF_HWCAP (compat_elf_hwcap) +#define COMPAT_ELF_HWCAP2 (compat_elf_hwcap2) +extern unsigned int compat_elf_hwcap, compat_elf_hwcap2; +#endif + +extern unsigned long elf_hwcap; +#endif +#endif diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h new file mode 100644 index 000000000..dc1f73b13 --- /dev/null +++ b/arch/arm64/include/asm/insn.h @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu <liuj97@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_INSN_H +#define __ASM_INSN_H +#include <linux/types.h> + +/* A64 instructions are always 32 bits. */ +#define AARCH64_INSN_SIZE 4 + +#ifndef __ASSEMBLY__ +/* + * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a + * Section C3.1 "A64 instruction index by encoding": + * AArch64 main encoding table + * Bit position + * 28 27 26 25 Encoding Group + * 0 0 - - Unallocated + * 1 0 0 - Data processing, immediate + * 1 0 1 - Branch, exception generation and system instructions + * - 1 - 0 Loads and stores + * - 1 0 1 Data processing - register + * 0 1 1 1 Data processing - SIMD and floating point + * 1 1 1 1 Data processing - SIMD and floating point + * "-" means "don't care" + */ +enum aarch64_insn_encoding_class { + AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */ + AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */ + AARCH64_INSN_CLS_DP_REG, /* Data processing - register */ + AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */ + AARCH64_INSN_CLS_LDST, /* Loads and stores */ + AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and + * system instructions */ +}; + +enum aarch64_insn_hint_op { + AARCH64_INSN_HINT_NOP = 0x0 << 5, + AARCH64_INSN_HINT_YIELD = 0x1 << 5, + AARCH64_INSN_HINT_WFE = 0x2 << 5, + AARCH64_INSN_HINT_WFI = 0x3 << 5, + AARCH64_INSN_HINT_SEV = 0x4 << 5, + AARCH64_INSN_HINT_SEVL = 0x5 << 5, +}; + +enum aarch64_insn_imm_type { + AARCH64_INSN_IMM_ADR, + AARCH64_INSN_IMM_26, + AARCH64_INSN_IMM_19, + AARCH64_INSN_IMM_16, + AARCH64_INSN_IMM_14, + AARCH64_INSN_IMM_12, + AARCH64_INSN_IMM_9, + AARCH64_INSN_IMM_MAX +}; + +enum aarch64_insn_branch_type { + AARCH64_INSN_BRANCH_NOLINK, + AARCH64_INSN_BRANCH_LINK, +}; + +#define __AARCH64_INSN_FUNCS(abbr, mask, val) \ +static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ +{ return (code & (mask)) == (val); } \ +static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ +{ return (val); } + +__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) +__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) +__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) +__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) +__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) +__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) +__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) + +#undef __AARCH64_INSN_FUNCS + +bool aarch64_insn_is_nop(u32 insn); + +int aarch64_insn_read(void *addr, u32 *insnp); 
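+/*
+ * Illustrative sketch of how the declarations here combine (assuming only
+ * what this header itself declares): to replace one known instruction with
+ * a NOP, a caller might do roughly
+ *
+ *	u32 old;
+ *	if (!aarch64_insn_read(addr, &old) &&
+ *	    aarch64_insn_hotpatch_safe(old, aarch64_insn_gen_nop()))
+ *		aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop());
+ */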
+int aarch64_insn_write(void *addr, u32 insn); +enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); +u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm); +u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr, + enum aarch64_insn_branch_type type); +u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op); +u32 aarch64_insn_gen_nop(void); + +bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); + +int aarch64_insn_patch_text_nosync(void *addr, u32 insn); +int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt); +int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt); +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_INSN_H */ diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h new file mode 100644 index 000000000..e0c0b1f1f --- /dev/null +++ b/arch/arm64/include/asm/io.h @@ -0,0 +1,267 @@ +/* + * Based on arch/arm/include/asm/io.h + * + * Copyright (C) 1996-2000 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_IO_H +#define __ASM_IO_H + +#ifdef __KERNEL__ + +#include <linux/types.h> + +#include <asm/byteorder.h> +#include <asm/barrier.h> +#include <asm/pgtable.h> + +/* + * Generic IO read/write. These perform native-endian accesses. + */ +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) +{ + asm volatile("str %0, [%1]" : : "r" (val), "r" (addr)); +} + +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + asm volatile("ldrh %w0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + u64 val; + asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr)); + return val; +} + +/* IO barriers */ +#define __iormb() rmb() +#define __iowmb() wmb() + +#define mmiowb() do { } while (0) + +/* + * Relaxed I/O memory access primitives. These follow the Device memory + * ordering rules but do not guarantee any ordering relative to Normal memory + * accesses. 
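+ *
+ * For example, polling a device status register with readl_relaxed() does
+ * not order that read against subsequent loads from a DMA buffer in Normal
+ * memory; the non-relaxed readl() below adds the __iormb() required there.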
+ */ +#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) +#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) +#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) +#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; }) + +#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) +#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) +#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c))) +#define writeq_relaxed(v,c) ((void)__raw_writeq((__force u64)cpu_to_le64(v),(c))) + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. Writes are ordered relative to any prior + * Normal memory access. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) + +#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); }) +#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); }) +#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c)); }) +#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); }) + +/* + * I/O port access primitives. + */ +#define IO_SPACE_LIMIT 0xffff +#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M)) + +#define IOMEM(x) ((void __force __iomem *)(x)) + +static inline u8 inb(unsigned long addr) +{ + return readb(addr + PCI_IOBASE); +} + +static inline u16 inw(unsigned long addr) +{ + return readw(addr + PCI_IOBASE); +} + +static inline u32 inl(unsigned long addr) +{ + return readl(addr + PCI_IOBASE); +} + +static inline void outb(u8 b, unsigned long addr) +{ + writeb(b, addr + PCI_IOBASE); +} + +static inline void outw(u16 b, unsigned long addr) +{ + writew(b, addr + PCI_IOBASE); +} + +static inline void outl(u32 b, unsigned long addr) +{ + writel(b, addr + PCI_IOBASE); +} + +#define inb_p(addr) inb(addr) +#define inw_p(addr) inw(addr) +#define inl_p(addr) inl(addr) + +#define outb_p(x, addr) outb((x), (addr)) +#define outw_p(x, addr) outw((x), (addr)) +#define outl_p(x, addr) outl((x), (addr)) + +static inline void insb(unsigned long addr, void *buffer, int count) +{ + u8 *buf = buffer; + while (count--) + *buf++ = __raw_readb(addr + PCI_IOBASE); +} + +static inline void insw(unsigned long addr, void *buffer, int count) +{ + u16 *buf = buffer; + while (count--) + *buf++ = __raw_readw(addr + PCI_IOBASE); +} + +static inline void insl(unsigned long addr, void *buffer, int count) +{ + u32 *buf = buffer; + while (count--) + *buf++ = __raw_readl(addr + PCI_IOBASE); +} + +static inline void outsb(unsigned long addr, const void *buffer, int count) +{ + const u8 *buf = buffer; + while (count--) + __raw_writeb(*buf++, addr + PCI_IOBASE); +} + +static inline void outsw(unsigned long addr, const void *buffer, int count) +{ + const u16 *buf = buffer; + while (count--) + __raw_writew(*buf++, addr + PCI_IOBASE); +} + +static inline void outsl(unsigned long addr, const void *buffer, int count) +{ + const u32 *buf = buffer; + while (count--) + __raw_writel(*buf++, addr + PCI_IOBASE); +} + +#define insb_p(port,to,len) insb(port,to,len) +#define insw_p(port,to,len) insw(port,to,len) +#define insl_p(port,to,len) insl(port,to,len) + +#define outsb_p(port,from,len) outsb(port,from,len) +#define outsw_p(port,from,len) outsw(port,from,len) 
+#define outsl_p(port,from,len) outsl(port,from,len) + +/* + * String version of I/O memory access operations. + */ +extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t); +extern void __memcpy_toio(volatile void __iomem *, const void *, size_t); +extern void __memset_io(volatile void __iomem *, int, size_t); + +#define memset_io(c,v,l) __memset_io((c),(v),(l)) +#define memcpy_fromio(a,c,l) __memcpy_fromio((a),(c),(l)) +#define memcpy_toio(c,a,l) __memcpy_toio((c),(a),(l)) + +/* + * I/O memory mapping functions. + */ +extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot); +extern void __iounmap(volatile void __iomem *addr); + +#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY) +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC)) + +#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) +#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) +#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) +#define iounmap __iounmap + +#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF) +#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) + +#define ARCH_HAS_IOREMAP_WC +#include <asm-generic/iomap.h> + +/* + * More restrictive address range checking than the default implementation + * (PHYS_OFFSET and PHYS_MASK taken into account). + */ +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE +extern int valid_phys_addr_range(unsigned long addr, size_t size); +extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); + +extern int devmem_is_allowed(unsigned long pfn); + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +#endif /* __KERNEL__ */ +#endif /* __ASM_IO_H */ diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h new file mode 100644 index 000000000..e1f7ecdde --- /dev/null +++ b/arch/arm64/include/asm/irq.h @@ -0,0 +1,10 @@ +#ifndef __ASM_IRQ_H +#define __ASM_IRQ_H + +#include <asm-generic/irq.h> + +extern void (*handle_arch_irq)(struct pt_regs *); +extern void migrate_irqs(void); +extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); + +#endif diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h new file mode 100644 index 000000000..0ed52c691 --- /dev/null +++ b/arch/arm64/include/asm/irqflags.h @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_IRQFLAGS_H +#define __ASM_IRQFLAGS_H + +#ifdef __KERNEL__ + +#include <asm/ptrace.h> + +/* + * CPU interrupt mask handling. 
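+ *
+ * The daifset/daifclr immediate is a 4-bit D-A-I-F mask: #8 selects Debug,
+ * #4 SError, #2 IRQ and #1 FIQ. Hence the helpers below use #2 for IRQs,
+ * #1 for FIQs and #8 for the debug state.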
+ */ +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + asm volatile( + "mrs %0, daif // arch_local_irq_save\n" + "msr daifset, #2" + : "=r" (flags) + : + : "memory"); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + asm volatile( + "msr daifclr, #2 // arch_local_irq_enable" + : + : + : "memory"); +} + +static inline void arch_local_irq_disable(void) +{ + asm volatile( + "msr daifset, #2 // arch_local_irq_disable" + : + : + : "memory"); +} + +#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory") +#define local_fiq_disable() asm("msr daifset, #1" : : : "memory") + +/* + * Save the current interrupt enable state. + */ +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; + asm volatile( + "mrs %0, daif // arch_local_save_flags" + : "=r" (flags) + : + : "memory"); + return flags; +} + +/* + * restore saved IRQ state + */ +static inline void arch_local_irq_restore(unsigned long flags) +{ + asm volatile( + "msr daif, %0 // arch_local_irq_restore" + : + : "r" (flags) + : "memory"); +} + +static inline int arch_irqs_disabled_flags(unsigned long flags) +{ + return flags & PSR_I_BIT; +} + +/* + * save and restore debug state + */ +#define local_dbg_save(flags) \ + do { \ + typecheck(unsigned long, flags); \ + asm volatile( \ + "mrs %0, daif // local_dbg_save\n" \ + "msr daifset, #8" \ + : "=r" (flags) : : "memory"); \ + } while (0) + +#define local_dbg_restore(flags) \ + do { \ + typecheck(unsigned long, flags); \ + asm volatile( \ + "msr daif, %0 // local_dbg_restore\n" \ + : : "r" (flags) : "memory"); \ + } while (0) + +#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory") +#define local_dbg_disable() asm("msr daifset, #8" : : : "memory") + +#endif +#endif diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h new file mode 100644 index 000000000..076a1c714 --- /dev/null +++ b/arch/arm64/include/asm/jump_label.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2013 Huawei Ltd. + * Author: Jiang Liu <liuj97@gmail.com> + * + * Based on arch/arm/include/asm/jump_label.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+#include <linux/types.h>
+#include <asm/insn.h>
+
+#ifdef __KERNEL__
+
+#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm goto("1: nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(key) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
new file mode 100644
index 000000000..f69f69c81
--- /dev/null
+++ b/arch/arm64/include/asm/kgdb.h
@@ -0,0 +1,84 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/include/asm/kgdb.h
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KGDB_H
+#define __ARM_KGDB_H
+
+#include <linux/ptrace.h>
+#include <asm/debug-monitors.h>
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm ("brk %0" : : "I" (KGDB_COMPILED_DBG_BRK_IMM));
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * gdb expects the following register layout.
+ *
+ * General purpose regs:
+ * r0-r30: 64 bit
+ * sp,pc : 64 bit
+ * pstate : 64 bit
+ * Total: 34
+ * FPU regs:
+ * f0-f31: 128 bit
+ * Total: 32
+ * Extra regs:
+ * fpsr & fpcr: 32 bit
+ * Total: 2
+ */
+
+#define _GP_REGS 34
+#define _FP_REGS 32
+#define _EXTRA_REGS 2
+/*
+ * General purpose registers size in bytes; per the layout above, each of
+ * the 34 registers is reported to gdb as 64 bits wide.
+ */
+#define GP_REG_BYTES (_GP_REGS * 8)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
+
+/*
+ * Size of the I/O buffer for a gdb packet, chosen to hold the contents of
+ * all registers.
+ */
+#define BUFMAX 2048
+
+/*
+ * Number of bytes required for the gdb_regs buffer: the _GP_REGS take
+ * 8 bytes, the _FP_REGS 16 bytes and the _EXTRA_REGS 4 bytes each.
+ * GDB fails to connect, with the error "'g' packet reply is too long",
+ * when the reply exceeds this size.
+ */
+#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \
+ (_EXTRA_REGS * 4))
+
+#endif /* __ARM_KGDB_H */
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
new file mode 100644
index 000000000..636c1bced
--- /dev/null
+++ b/arch/arm64/include/asm/linkage.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#define __ALIGN .align 4
+#define __ALIGN_STR ".align 4"
+
+#endif
diff --git a/arch/arm64/include/asm/memblock.h b/arch/arm64/include/asm/memblock.h
new file mode 100644
index 000000000..6afeed246
--- /dev/null
+++ b/arch/arm64/include/asm/memblock.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_MEMBLOCK_H +#define __ASM_MEMBLOCK_H + +extern void arm64_memblock_init(void); + +#endif diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h new file mode 100644 index 000000000..34d4cb5e9 --- /dev/null +++ b/arch/arm64/include/asm/memory.h @@ -0,0 +1,147 @@ +/* + * Based on arch/arm/include/asm/memory.h + * + * Copyright (C) 2000-2002 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Note: this file should not be included by non-asm/.h files + */ +#ifndef __ASM_MEMORY_H +#define __ASM_MEMORY_H + +#include <linux/compiler.h> +#include <linux/const.h> +#include <linux/types.h> +#include <asm/sizes.h> + +/* + * Allow for constants defined here to be used from assembly code + * by prepending the UL suffix only with actual C code compilation. + */ +#define UL(x) _AC(x, UL) + +/* + * PAGE_OFFSET - the virtual address of the start of the kernel image. + * VA_BITS - the maximum number of bits for virtual addresses. + * TASK_SIZE - the maximum size of a user space task. + * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. + * The module space lives between the addresses given by TASK_SIZE + * and PAGE_OFFSET - it must be within 128MB of the kernel text. + */ +#define PAGE_OFFSET UL(0xffffffc000000000) +#define MODULES_END (PAGE_OFFSET) +#define MODULES_VADDR (MODULES_END - SZ_64M) +#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M) +#define VA_BITS (39) +#define TASK_SIZE_64 (UL(1) << VA_BITS) + +#ifdef CONFIG_COMPAT +#define TASK_SIZE_32 UL(0x100000000) +#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#else +#define TASK_SIZE TASK_SIZE_64 +#endif /* CONFIG_COMPAT */ + +#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4)) + +#if TASK_SIZE_64 > MODULES_VADDR +#error Top of 64-bit user space clashes with start of module space +#endif + +/* + * Physical vs virtual RAM address space conversion. These are + * private definitions which should NOT be used outside memory.h + * files. Use virt_to_phys/phys_to_virt/__pa/__va instead. 
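+ *
+ * As a worked example, with a hypothetical PHYS_OFFSET of 0x80000000 the
+ * kernel virtual address 0xffffffc000001000 translates to physical address
+ * 0x80001000, and back again, via the two macros below.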
+ */ +#define __virt_to_phys(x) (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET)) +#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET)) + +/* + * Convert a physical address to a Page Frame Number and back + */ +#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) +#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) + +/* + * Convert a page to/from a physical address + */ +#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) +#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys))) + +/* + * Memory types available. + */ +#define MT_DEVICE_nGnRnE 0 +#define MT_DEVICE_nGnRE 1 +#define MT_DEVICE_GRE 2 +#define MT_NORMAL_NC 3 +#define MT_NORMAL 4 + +#ifndef __ASSEMBLY__ + +extern phys_addr_t memstart_addr; +/* PHYS_OFFSET - the physical address of the start of memory. */ +#define PHYS_OFFSET ({ memstart_addr; }) + +/* + * PFNs are used to describe any physical page; this means + * PFN 0 == physical address 0. + * + * This is the PFN of the first RAM page in the kernel + * direct-mapped view. We assume this is the first page + * of RAM in the mem_map as well. + */ +#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) + +/* + * Note: Drivers should NOT use these. They are the wrong + * translation for translating DMA addresses. Use the driver + * DMA support - see dma-mapping.h. + */ +static inline phys_addr_t virt_to_phys(const volatile void *x) +{ + return __virt_to_phys((unsigned long)(x)); +} + +static inline void *phys_to_virt(phys_addr_t x) +{ + return (void *)(__phys_to_virt(x)); +} + +/* + * Drivers should NOT use these either. + */ +#define __pa(x) __virt_to_phys((unsigned long)(x)) +#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) +#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) + +/* + * virt_to_page(k) convert a _valid_ virtual address to struct page * + * virt_addr_valid(k) indicates whether a virtual address is valid + */ +#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ + ((void *)(kaddr) < (void *)high_memory)) + +#endif + +#include <asm-generic/memory_model.h> + +#endif diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h new file mode 100644 index 000000000..b0c99e089 --- /dev/null +++ b/arch/arm64/include/asm/mmu.h @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_MMU_H +#define __ASM_MMU_H + +typedef struct { + unsigned int id; + raw_spinlock_t id_lock; + void *vdso; +} mm_context_t; + +#define INIT_MM_CONTEXT(name) \ + .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), + +#define ASID(mm) ((mm)->context.id & 0xffff) + +extern void paging_init(void); +extern void setup_mm_for_reboot(void); +extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt); + +#endif diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h new file mode 100644 index 000000000..e2bc385ad --- /dev/null +++ b/arch/arm64/include/asm/mmu_context.h @@ -0,0 +1,167 @@ +/* + * Based on arch/arm/include/asm/mmu_context.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_MMU_CONTEXT_H +#define __ASM_MMU_CONTEXT_H + +#include <linux/compiler.h> +#include <linux/sched.h> + +#include <asm/cacheflush.h> +#include <asm/proc-fns.h> +#include <asm-generic/mm_hooks.h> +#include <asm/cputype.h> +#include <asm/pgtable.h> + +#define MAX_ASID_BITS 16 + +extern unsigned int cpu_last_asid; + +void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); +void __new_context(struct mm_struct *mm); + +#ifdef CONFIG_PID_IN_CONTEXTIDR +static inline void contextidr_thread_switch(struct task_struct *next) +{ + asm( + " msr contextidr_el1, %0\n" + " isb" + : + : "r" (task_pid_nr(next))); +} +#else +static inline void contextidr_thread_switch(struct task_struct *next) +{ +} +#endif + +/* + * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0. + */ +static inline void cpu_set_reserved_ttbr0(void) +{ + unsigned long ttbr = page_to_phys(empty_zero_page); + + asm( + " msr ttbr0_el1, %0 // set TTBR0\n" + " isb" + : + : "r" (ttbr)); +} + +static inline void switch_new_context(struct mm_struct *mm) +{ + unsigned long flags; + + __new_context(mm); + + local_irq_save(flags); + cpu_switch_mm(mm->pgd, mm); + local_irq_restore(flags); +} + +static inline void check_and_switch_context(struct mm_struct *mm, + struct task_struct *tsk) +{ + /* + * Required during context switch to avoid speculative page table + * walking with the wrong TTBR. + */ + cpu_set_reserved_ttbr0(); + + if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) + /* + * The ASID is from the current generation, just switch to the + * new pgd. This condition is only true for calls from + * context_switch() and interrupts are already disabled. + */ + cpu_switch_mm(mm->pgd, mm); + else if (irqs_disabled()) + /* + * Defer the new ASID allocation until after the context + * switch critical region since __new_context() cannot be + * called with interrupts disabled. + */ + set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM); + else + /* + * That is a direct call to switch_mm() or activate_mm() with + * interrupts enabled and a new context. 
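+ * switch_new_context() allocates a fresh ASID via __new_context() and
+ * only then performs the pgd switch with interrupts disabled.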
+ */ + switch_new_context(mm); +} + +#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0) +#define destroy_context(mm) do { } while(0) + +#define finish_arch_post_lock_switch \ + finish_arch_post_lock_switch +static inline void finish_arch_post_lock_switch(void) +{ + if (test_and_clear_thread_flag(TIF_SWITCH_MM)) { + struct mm_struct *mm = current->mm; + unsigned long flags; + + __new_context(mm); + + local_irq_save(flags); + cpu_switch_mm(mm->pgd, mm); + local_irq_restore(flags); + } +} + +/* + * This is called when "tsk" is about to enter lazy TLB mode. + * + * mm: describes the currently active mm context + * tsk: task which is entering lazy tlb + * cpu: cpu number which is entering lazy tlb + * + * tsk->mm will be NULL + */ +static inline void +enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) +{ +} + +/* + * This is the actual mm switch as far as the scheduler + * is concerned. No registers are touched. We avoid + * calling the CPU specific function when the mm hasn't + * actually changed. + */ +static inline void +switch_mm(struct mm_struct *prev, struct mm_struct *next, + struct task_struct *tsk) +{ + unsigned int cpu = smp_processor_id(); + +#ifdef CONFIG_SMP + /* check for possible thread migration */ + if (!cpumask_empty(mm_cpumask(next)) && + !cpumask_test_cpu(cpu, mm_cpumask(next))) + __flush_icache_all(); +#endif + if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) + check_and_switch_context(next, tsk); +} + +#define deactivate_mm(tsk,mm) do { } while (0) +#define activate_mm(prev,next) switch_mm(prev, next, NULL) + +#endif diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h new file mode 100644 index 000000000..e80e232b7 --- /dev/null +++ b/arch/arm64/include/asm/module.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_MODULE_H +#define __ASM_MODULE_H + +#include <asm-generic/module.h> + +#define MODULE_ARCH_VERMAGIC "aarch64" + +#endif /* __ASM_MODULE_H */ diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h new file mode 100644 index 000000000..13ce4cc18 --- /dev/null +++ b/arch/arm64/include/asm/neon.h @@ -0,0 +1,18 @@ +/* + * linux/arch/arm64/include/asm/neon.h + * + * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/types.h> + +#define cpu_has_neon() (1) + +#define kernel_neon_begin() kernel_neon_begin_partial(32) + +void kernel_neon_begin_partial(u32 num_regs); +void kernel_neon_end(void); diff --git a/arch/arm64/include/asm/opcodes.h b/arch/arm64/include/asm/opcodes.h new file mode 100644 index 000000000..fd189a522 --- /dev/null +++ b/arch/arm64/include/asm/opcodes.h @@ -0,0 +1,231 @@ +/* + * Copied from arch/arm/include/asm/opcodes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ASM_ARM_OPCODES_H +#define __ASM_ARM_OPCODES_H + +#ifndef __ASSEMBLY__ +#include <linux/linkage.h> +extern asmlinkage unsigned int arm_check_condition(u32 opcode, u64 psr); +#endif + +#define ARM_OPCODE_CONDTEST_FAIL 0 +#define ARM_OPCODE_CONDTEST_PASS 1 +#define ARM_OPCODE_CONDTEST_UNCOND 2 + + +/* + * Assembler opcode byteswap helpers. + * These are only intended for use by this header: don't use them directly, + * because they will be suboptimal in most cases. + */ +#define ___asm_opcode_swab32(x) ( \ + (((x) << 24) & 0xFF000000) \ + | (((x) << 8) & 0x00FF0000) \ + | (((x) >> 8) & 0x0000FF00) \ + | (((x) >> 24) & 0x000000FF) \ +) +#define ___asm_opcode_swab16(x) ( \ + (((x) << 8) & 0xFF00) \ + | (((x) >> 8) & 0x00FF) \ +) +#define ___asm_opcode_swahb32(x) ( \ + (((x) << 8) & 0xFF00FF00) \ + | (((x) >> 8) & 0x00FF00FF) \ +) +#define ___asm_opcode_swahw32(x) ( \ + (((x) << 16) & 0xFFFF0000) \ + | (((x) >> 16) & 0x0000FFFF) \ +) +#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF) +#define ___asm_opcode_identity16(x) ((x) & 0xFFFF) + + +/* + * Opcode byteswap helpers + * + * These macros help with converting instructions between a canonical integer + * format and in-memory representation, in an endianness-agnostic manner. + * + * __mem_to_opcode_*() convert from in-memory representation to canonical form. + * __opcode_to_mem_*() convert from canonical form to in-memory representation. + * + * + * Canonical instruction representation: + * + * ARM: 0xKKLLMMNN + * Thumb 16-bit: 0x0000KKLL, where KK < 0xE8 + * Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8 + * + * There is no way to distinguish an ARM instruction in canonical representation + * from a Thumb instruction (just as these cannot be distinguished in memory). + * Where this distinction is important, it needs to be tracked separately. + * + * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not + * represent any valid Thumb-2 instruction. For this range, + * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false. + * + * The ___asm variants are intended only for use by this header, in situations + * involving inline assembler. For .S files, the normal __opcode_*() macros + * should do the right thing. + */ +#ifdef __ASSEMBLY__ + +#define ___opcode_swab32(x) ___asm_opcode_swab32(x) +#define ___opcode_swab16(x) ___asm_opcode_swab16(x) +#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x) +#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x) +#define ___opcode_identity32(x) ___asm_opcode_identity32(x) +#define ___opcode_identity16(x) ___asm_opcode_identity16(x) + +#else /* ! 
__ASSEMBLY__ */ + +#include <linux/types.h> +#include <linux/swab.h> + +#define ___opcode_swab32(x) swab32(x) +#define ___opcode_swab16(x) swab16(x) +#define ___opcode_swahb32(x) swahb32(x) +#define ___opcode_swahw32(x) swahw32(x) +#define ___opcode_identity32(x) ((u32)(x)) +#define ___opcode_identity16(x) ((u16)(x)) + +#endif /* ! __ASSEMBLY__ */ + + +#ifdef CONFIG_CPU_ENDIAN_BE8 + +#define __opcode_to_mem_arm(x) ___opcode_swab32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x) +#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x) + +#else /* ! CONFIG_CPU_ENDIAN_BE8 */ + +#define __opcode_to_mem_arm(x) ___opcode_identity32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x) +#ifndef CONFIG_CPU_ENDIAN_BE32 +/* + * On BE32 systems, using 32-bit accesses to store Thumb instructions will not + * work in all cases, due to alignment constraints. For now, a correct + * version is not provided for BE32. + */ +#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x) +#endif + +#endif /* ! CONFIG_CPU_ENDIAN_BE8 */ + +#define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x) +#define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x) +#ifndef CONFIG_CPU_ENDIAN_BE32 +#define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x) +#endif + +/* Operations specific to Thumb opcodes */ + +/* Instruction size checks: */ +#define __opcode_is_thumb32(x) ( \ + ((x) & 0xF8000000) == 0xE8000000 \ + || ((x) & 0xF0000000) == 0xF0000000 \ +) +#define __opcode_is_thumb16(x) ( \ + ((x) & 0xFFFF0000) == 0 \ + && !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000) \ +) + +/* Operations to construct or split 32-bit Thumb instructions: */ +#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16)) +#define __opcode_thumb32_second(x) (___opcode_identity16(x)) +#define __opcode_thumb32_compose(first, second) ( \ + (___opcode_identity32(___opcode_identity16(first)) << 16) \ + | ___opcode_identity32(___opcode_identity16(second)) \ +) +#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16)) +#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x)) +#define ___asm_opcode_thumb32_compose(first, second) ( \ + (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \ + | ___asm_opcode_identity32(___asm_opcode_identity16(second)) \ +) + +/* + * Opcode injection helpers + * + * In rare cases it is necessary to assemble an opcode which the + * assembler does not support directly, or which would normally be + * rejected because of the CFLAGS or AFLAGS used to build the affected + * file. + * + * Before using these macros, consider carefully whether it is feasible + * instead to change the build flags for your file, or whether it really + * makes sense to support old assembler versions when building that + * particular kernel feature. + * + * The macros defined here should only be used where there is no viable + * alternative. 
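+ *
+ * As a sketch of intended use (modelled on opcodes-virt.h and shown for
+ * illustration only), a wrapper emitting HVC #0 could be defined as:
+ *
+ * #define __HVC_0() __inst_arm(0xe1400070)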
+ * + * + * __inst_arm(x): emit the specified ARM opcode + * __inst_thumb16(x): emit the specified 16-bit Thumb opcode + * __inst_thumb32(x): emit the specified 32-bit Thumb opcode + * + * __inst_arm_thumb16(arm, thumb): emit either the specified arm or + * 16-bit Thumb opcode, depending on whether an ARM or Thumb-2 + * kernel is being built + * + * __inst_arm_thumb32(arm, thumb): emit either the specified arm or + * 32-bit Thumb opcode, depending on whether an ARM or Thumb-2 + * kernel is being built + * + * + * Note that using these macros directly is poor practice. Instead, you + * should use them to define human-readable wrapper macros to encode the + * instructions that you care about. In code which might run on ARMv7 or + * above, you can usually use the __inst_arm_thumb{16,32} macros to + * specify the ARM and Thumb alternatives at the same time. This ensures + * that the correct opcode gets emitted depending on the instruction set + * used for the kernel build. + * + * Look at opcodes-virt.h for an example of how to use these macros. + */ +#include <linux/stringify.h> + +#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x)) +#define __inst_thumb32(x) ___inst_thumb32( \ + ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)), \ + ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x)) \ +) +#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x)) + +#ifdef CONFIG_THUMB2_KERNEL +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \ + __inst_thumb16(thumb_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \ + __inst_thumb32(thumb_opcode) +#else +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#endif + +/* Helpers for the helpers. Don't use these directly. */ +#ifdef __ASSEMBLY__ +#define ___inst_arm(x) .long x +#define ___inst_thumb16(x) .short x +#define ___inst_thumb32(first, second) .short first, second +#else +#define ___inst_arm(x) ".long " __stringify(x) "\n\t" +#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t" +#define ___inst_thumb32(first, second) \ + ".short " __stringify(first) ", " __stringify(second) "\n\t" +#endif + +#endif /* __ASM_ARM_OPCODES_H */ diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h new file mode 100644 index 000000000..46bf66628 --- /dev/null +++ b/arch/arm64/include/asm/page.h @@ -0,0 +1,67 @@ +/* + * Based on arch/arm/include/asm/page.h + * + * Copyright (C) 1995-2003 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_PAGE_H +#define __ASM_PAGE_H + +/* PAGE_SHIFT determines the page size */ +#ifdef CONFIG_ARM64_64K_PAGES +#define PAGE_SHIFT 16 +#else +#define PAGE_SHIFT 12 +#endif +#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE-1)) + +/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ +#define __HAVE_ARCH_GATE_AREA 1 + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_ARM64_64K_PAGES +#include <asm/pgtable-2level-types.h> +#else +#include <asm/pgtable-3level-types.h> +#endif + +extern void __cpu_clear_user_page(void *p, unsigned long user); +extern void __cpu_copy_user_page(void *to, const void *from, + unsigned long user); +extern void copy_page(void *to, const void *from); +extern void clear_page(void *to); + +#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) +#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) + +typedef struct page *pgtable_t; + +#ifdef CONFIG_HAVE_ARCH_PFN_VALID +extern int pfn_valid(unsigned long); +#endif + +#include <asm/memory.h> + +#endif /* !__ASSEMBLY__ */ + +#define VM_DATA_DEFAULT_FLAGS \ + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#include <asm-generic/getorder.h> + +#endif diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h new file mode 100644 index 000000000..73c86f3f7 --- /dev/null +++ b/arch/arm64/include/asm/percpu.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PERCPU_H +#define __ASM_PERCPU_H + +#if 0 //def CONFIG_SMP + +static inline void set_my_cpu_offset(unsigned long off) +{ + asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); +} + +static inline unsigned long __my_cpu_offset(void) +{ + unsigned long off; + register unsigned long *sp asm ("sp"); + + /* + * We want to allow caching the value, so avoid using volatile and + * instead use a fake stack read to hazard against barrier(). + */ + asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp)); + + return off; +} +#define __my_cpu_offset __my_cpu_offset() + +#else /* !CONFIG_SMP */ + +#define set_my_cpu_offset(x) do { } while (0) + +#endif /* CONFIG_SMP */ + +#include <asm-generic/percpu.h> + +#endif /* __ASM_PERCPU_H */ diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h new file mode 100644 index 000000000..d26d1d53c --- /dev/null +++ b/arch/arm64/include/asm/perf_event.h @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_PERF_EVENT_H +#define __ASM_PERF_EVENT_H + +#ifdef CONFIG_HW_PERF_EVENTS +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) +#endif + +#endif diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h new file mode 100644 index 000000000..f214069ec --- /dev/null +++ b/arch/arm64/include/asm/pgalloc.h @@ -0,0 +1,113 @@ +/* + * Based on arch/arm/include/asm/pgalloc.h + * + * Copyright (C) 2000-2001 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PGALLOC_H +#define __ASM_PGALLOC_H + +#include <asm/pgtable-hwdef.h> +#include <asm/processor.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> + +#define check_pgt_cache() do { } while (0) + +#ifndef CONFIG_ARM64_64K_PAGES + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); + free_page((unsigned long)pmd); +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); +} + +#endif /* CONFIG_ARM64_64K_PAGES */ + +extern pgd_t *pgd_alloc(struct mm_struct *mm); +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); + +#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) + +static inline pte_t * +pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) +{ + return (pte_t *)__get_free_page(PGALLOC_GFP); +} + +static inline pgtable_t +pte_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + struct page *pte; + + pte = alloc_pages(PGALLOC_GFP, 0); + if (pte) + pgtable_page_ctor(pte); + + return pte; +} + +/* + * Free a PTE table. + */ +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) +{ + if (pte) + free_page((unsigned long)pte); +} + +static inline void pte_free(struct mm_struct *mm, pgtable_t pte) +{ + pgtable_page_dtor(pte); + __free_page(pte); +} + +static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, + pmdval_t prot) +{ + set_pmd(pmdp, __pmd(pte | prot)); +} + +/* + * Populate the pmdp entry with a pointer to the pte. This pmd is part + * of the mm address space. 
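+ *
+ * (For scale, with the 4KB page configuration: PMD_SHIFT == 21, so each
+ * populated pmd makes 2MB of address space worth of PTEs reachable.)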
+ */ +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep) +{ + /* + * The pmd must be loaded with the physical address of the PTE table + */ + __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE); +} + +static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) +{ + __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE); +} +#define pmd_pgtable(pmd) pmd_page(pmd) + +#endif diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h new file mode 100644 index 000000000..0a8ed3f94 --- /dev/null +++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PGTABLE_2LEVEL_HWDEF_H +#define __ASM_PGTABLE_2LEVEL_HWDEF_H + +/* + * With LPAE and 64KB pages, there are 2 levels of page tables. Each level has + * 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not + * used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each + * entry representing 512MB. The user and kernel address spaces are limited to + * 512GB and therefore we only use 1024 entries in the PGD. + */ +#define PTRS_PER_PTE 8192 +#define PTRS_PER_PGD 1024 + +/* + * PGDIR_SHIFT determines the size a top-level page table entry can map. + */ +#define PGDIR_SHIFT 29 +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * section address mask and size definitions. + */ +#define SECTION_SHIFT 29 +#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) +#define SECTION_MASK (~(SECTION_SIZE-1)) + +#endif diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h new file mode 100644 index 000000000..5f101e63d --- /dev/null +++ b/arch/arm64/include/asm/pgtable-2level-types.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H +#define __ASM_PGTABLE_2LEVEL_TYPES_H + +#include <asm/types.h> + +typedef u64 pteval_t; +typedef u64 pgdval_t; +typedef pgdval_t pmdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS + +/* + * These are used to make use of C type-checking.. 
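+ * (With the struct wrappers below, passing a pgd_t where a pte_t is
+ * expected fails to compile; the plain-typedef variant further down
+ * accepts it silently.)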
+ */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pgdval_t pgd; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else /* !STRICT_MM_TYPECHECKS */ + +typedef pteval_t pte_t; +typedef pgdval_t pgd_t; +typedef pteval_t pgprot_t; + +#define pte_val(x) (x) +#define pgd_val(x) (x) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pgd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +#include <asm-generic/pgtable-nopmd.h> + +#endif /* __ASM_PGTABLE_2LEVEL_TYPES_H */ diff --git a/arch/arm64/include/asm/pgtable-3level-hwdef.h b/arch/arm64/include/asm/pgtable-3level-hwdef.h new file mode 100644 index 000000000..3dbf941d7 --- /dev/null +++ b/arch/arm64/include/asm/pgtable-3level-hwdef.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PGTABLE_3LEVEL_HWDEF_H +#define __ASM_PGTABLE_3LEVEL_HWDEF_H + +/* + * With LPAE and 4KB pages, there are 3 levels of page tables. Each level has + * 512 entries of 8 bytes each, occupying a 4K page. The first level table + * covers a range of 512GB, each entry representing 1GB. The user and kernel + * address spaces are limited to 512GB each. + */ +#define PTRS_PER_PTE 512 +#define PTRS_PER_PMD 512 +#define PTRS_PER_PGD 512 + +/* + * PGDIR_SHIFT determines the size a top-level page table entry can map. + */ +#define PGDIR_SHIFT 30 +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* + * PMD_SHIFT determines the size a middle-level page table entry can map. + */ +#define PMD_SHIFT 21 +#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* + * section address mask and size definitions. + */ +#define SECTION_SHIFT 21 +#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) +#define SECTION_MASK (~(SECTION_SIZE-1)) + +#endif diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h new file mode 100644 index 000000000..4e9442493 --- /dev/null +++ b/arch/arm64/include/asm/pgtable-3level-types.h @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_PGTABLE_3LEVEL_TYPES_H +#define __ASM_PGTABLE_3LEVEL_TYPES_H + +#include <asm/types.h> + +typedef u64 pteval_t; +typedef u64 pmdval_t; +typedef u64 pgdval_t; + +#undef STRICT_MM_TYPECHECKS + +#ifdef STRICT_MM_TYPECHECKS + +/* + * These are used to make use of C type-checking.. + */ +typedef struct { pteval_t pte; } pte_t; +typedef struct { pmdval_t pmd; } pmd_t; +typedef struct { pgdval_t pgd; } pgd_t; +typedef struct { pteval_t pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) } ) +#define __pmd(x) ((pmd_t) { (x) } ) +#define __pgd(x) ((pgd_t) { (x) } ) +#define __pgprot(x) ((pgprot_t) { (x) } ) + +#else /* !STRICT_MM_TYPECHECKS */ + +typedef pteval_t pte_t; +typedef pmdval_t pmd_t; +typedef pgdval_t pgd_t; +typedef pteval_t pgprot_t; + +#define pte_val(x) (x) +#define pmd_val(x) (x) +#define pgd_val(x) (x) +#define pgprot_val(x) (x) + +#define __pte(x) (x) +#define __pmd(x) (x) +#define __pgd(x) (x) +#define __pgprot(x) (x) + +#endif /* STRICT_MM_TYPECHECKS */ + +#include <asm-generic/pgtable-nopud.h> + +#endif /* __ASM_PGTABLE_3LEVEL_TYPES_H */ diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h new file mode 100644 index 000000000..7eeed1ae2 --- /dev/null +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PGTABLE_HWDEF_H +#define __ASM_PGTABLE_HWDEF_H + +#ifdef CONFIG_ARM64_64K_PAGES +#include <asm/pgtable-2level-hwdef.h> +#else +#include <asm/pgtable-3level-hwdef.h> +#endif + +/* + * Hardware page table definitions. + * + * Level 2 descriptor (PMD). + */ +#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) +#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0) +#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) + +/* + * Section + */ +#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) +#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) +#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11) +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) +#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54) + +/* + * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). + */ +#define PMD_ATTRINDX(t) (_AT(pmdval_t, (t)) << 2) +#define PMD_ATTRINDX_MASK (_AT(pmdval_t, 7) << 2) + +/* + * Level 3 descriptor (PTE). 
+ */ +#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0) +#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0) +#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0) +#define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ +#define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */ +#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ +#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ +#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */ +#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */ +#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */ + +/* + * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers). + */ +#define PTE_ATTRINDX(t) (_AT(pteval_t, (t)) << 2) +#define PTE_ATTRINDX_MASK (_AT(pteval_t, 7) << 2) + +/* + * 40-bit physical address supported. + */ +#define PHYS_MASK_SHIFT (40) +#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1) + +/* + * TCR flags. + */ +#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0)) +#define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24)) +#define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24)) +#define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24)) +#define TCR_IRGN_WBnWA ((UL(3) << 8) | (UL(3) << 24)) +#define TCR_IRGN_MASK ((UL(3) << 8) | (UL(3) << 24)) +#define TCR_ORGN_NC ((UL(0) << 10) | (UL(0) << 26)) +#define TCR_ORGN_WBWA ((UL(1) << 10) | (UL(1) << 26)) +#define TCR_ORGN_WT ((UL(2) << 10) | (UL(2) << 26)) +#define TCR_ORGN_WBnWA ((UL(3) << 10) | (UL(3) << 26)) +#define TCR_ORGN_MASK ((UL(3) << 10) | (UL(3) << 26)) +#define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28)) +#define TCR_TG0_64K (UL(1) << 14) +#define TCR_TG1_64K (UL(1) << 30) +#define TCR_IPS_40BIT (UL(2) << 32) +#define TCR_ASID16 (UL(1) << 36) +#define TCR_TBI0 (UL(1) << 37) + +#endif diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h new file mode 100644 index 000000000..3a710d7b1 --- /dev/null +++ b/arch/arm64/include/asm/pgtable.h @@ -0,0 +1,334 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PGTABLE_H +#define __ASM_PGTABLE_H + +#include <asm/proc-fns.h> + +#include <asm/memory.h> +#include <asm/pgtable-hwdef.h> + +/* + * Software defined PTE bits definition. + */ +#define PTE_VALID (_AT(pteval_t, 1) << 0) +#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */ +#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ +#define PTE_DIRTY (_AT(pteval_t, 1) << 55) +#define PTE_SPECIAL (_AT(pteval_t, 1) << 56) + +/* + * VMALLOC and SPARSEMEM_VMEMMAP ranges. 
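+ *
+ * (Derived from the constants below, for illustration: with VA_BITS == 39
+ * this leaves roughly 240GB of vmalloc space; the UL(0x400000000) hole
+ * below PAGE_OFFSET backs the vmemmap array, separated by a SZ_64K guard.)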
+ */ +#define VMALLOC_START UL(0xffffff8000000000) +#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K) + +#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) + +#define FIRST_USER_ADDRESS 0 + +#ifndef __ASSEMBLY__ +extern void __pte_error(const char *file, int line, unsigned long val); +extern void __pmd_error(const char *file, int line, unsigned long val); +extern void __pgd_error(const char *file, int line, unsigned long val); + +#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) +#ifndef CONFIG_ARM64_64K_PAGES +#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) +#endif +#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) + +/* + * The pgprot_* and protection_map entries will be fixed up at runtime to + * include the cachable and bufferable bits based on memory policy, as well as + * any architecture dependent bits like global/ASID and SMP shared mapping + * bits. + */ +#define _PAGE_DEFAULT PTE_TYPE_PAGE | PTE_AF + +extern pgprot_t pgprot_default; + +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + +#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) + +#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE) +#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) +#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) +#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) + +#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE) +#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) +#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) +#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) +#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) +#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY) + +#endif /* __ASSEMBLY__ */ + +#define __P000 __PAGE_NONE +#define __P001 __PAGE_READONLY +#define __P010 __PAGE_COPY +#define __P011 __PAGE_COPY +#define __P100 __PAGE_READONLY_EXEC +#define __P101 __PAGE_READONLY_EXEC +#define __P110 __PAGE_COPY_EXEC +#define __P111 __PAGE_COPY_EXEC + +#define __S000 __PAGE_NONE +#define __S001 __PAGE_READONLY +#define __S010 __PAGE_SHARED +#define __S011 __PAGE_SHARED +#define __S100 __PAGE_READONLY_EXEC +#define __S101 __PAGE_READONLY_EXEC +#define __S110 __PAGE_SHARED_EXEC +#define __S111 __PAGE_SHARED_EXEC + +#ifndef __ASSEMBLY__ +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. 
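+ * (For example, a read fault on an untouched anonymous mapping can be
+ * serviced by mapping this page read-only rather than allocating memory.)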
+ */ +extern struct page *empty_zero_page; +#define ZERO_PAGE(vaddr) (empty_zero_page) + +#define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) + +#define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) + +#define pte_none(pte) (!pte_val(pte)) +#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) +#define pte_page(pte) (pfn_to_page(pte_pfn(pte))) +#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr)) + +#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) +#define pte_unmap(pte) do { } while (0) +#define pte_unmap_nested(pte) do { } while (0) + +/* + * The following only work if pte_present(). Undefined behaviour otherwise. + */ +#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) +#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & PTE_AF) +#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) +#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) +#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) + +#define pte_valid_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) + +#define PTE_BIT_FUNC(fn,op) \ +static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } + +PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY); +PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY); +PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY); +PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY); +PTE_BIT_FUNC(mkold, &= ~PTE_AF); +PTE_BIT_FUNC(mkyoung, |= PTE_AF); +PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL); + +static inline void set_pte(pte_t *ptep, pte_t pte) +{ + *ptep = pte; +} + +extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); + +static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + if (pte_valid_user(pte)) { + if (!pte_special(pte) && pte_exec(pte)) + __sync_icache_dcache(pte, addr); + if (!pte_dirty(pte)) + pte = pte_wrprotect(pte); + } + + set_pte(ptep, pte); +} + +/* + * Huge pte definitions. + */ +#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE) +#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE)) + +#define __HAVE_ARCH_PTE_SPECIAL + +/* + * Mark the prot value as uncacheable and unbufferable. + */ +#define pgprot_noncached(prot) \ + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) +#define pgprot_writecombine(prot) \ + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) +#define pgprot_dmacoherent(prot) \ + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) +#define __HAVE_PHYS_MEM_ACCESS_PROT +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_present(pmd) (pmd_val(pmd)) + +#define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) + +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; + dsb(); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + set_pmd(pmdp, __pmd(0)); +} + +static inline pte_t *pmd_page_vaddr(pmd_t pmd) +{ + return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK); +} + +#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK)) + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
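+ *
+ * Typical use (illustrative; "vma", "ptep" and friends are assumed
+ * context, not defined in this header):
+ *
+ * set_pte_at(mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
+ *
+ * pte_page() then performs the reverse lookup from a pte to its page.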
+ */
+#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)
+
+#ifndef CONFIG_ARM64_64K_PAGES
+
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (!(pud_val(pud) & 2))
+#define pud_present(pud) (pud_val(pud))
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+ dsb();
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+}
+
+static inline pmd_t *pud_page_vaddr(pud_t pud)
+{
+ return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+#endif /* CONFIG_ARM64_64K_PAGES */
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+
+#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
+
+/* Find an entry in the second-level page table.. */
+#ifndef CONFIG_ARM64_64K_PAGES
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+#endif
+
+/* Find an entry in the third-level page table.. */
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+ PTE_PROT_NONE | PTE_VALID;
+ pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ return pte;
+}
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+
+#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
+#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+
+/*
+ * Encode and decode a swap entry:
+ * bits 0-1: present (must be zero)
+ * bit 2: PTE_FILE
+ * bits 3-8: swap type
+ * bits 9-63: swap offset
+ */
+#define __SWP_TYPE_SHIFT 3
+#define __SWP_TYPE_BITS 6
+#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
+
+/*
+ * Ensure that there are not more swap files than can be encoded in the kernel
+ * PTEs.
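+ * (The 6-bit type field above can encode 64 values; the BUILD_BUG_ON
+ * below checks this against MAX_SWAPFILES_SHIFT.)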
+ */ +#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) + +/* + * Encode and decode a file entry: + * bits 0-1: present (must be zero) + * bit 2: PTE_FILE + * bits 3-63: file offset / PAGE_SIZE + */ +#define pte_file(pte) (pte_val(pte) & PTE_FILE) +#define pte_to_pgoff(x) (pte_val(x) >> 3) +#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE) + +#define PTE_FILE_MAX_BITS 61 + +extern int kern_addr_valid(unsigned long addr); + +#include <asm-generic/pgtable.h> + +/* + * remap a physical page `pfn' of size `size' with page protection `prot' + * into virtual address `from' + */ +#define io_remap_pfn_range(vma,from,pfn,size,prot) \ + remap_pfn_range(vma, from, pfn, size, prot) + +#define pgtable_cache_init() do { } while (0) + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_PGTABLE_H */ diff --git a/arch/arm64/include/asm/pmu.h b/arch/arm64/include/asm/pmu.h new file mode 100644 index 000000000..e6f087806 --- /dev/null +++ b/arch/arm64/include/asm/pmu.h @@ -0,0 +1,82 @@ +/* + * Based on arch/arm/include/asm/pmu.h + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PMU_H +#define __ASM_PMU_H + +#ifdef CONFIG_HW_PERF_EVENTS + +/* The events for a given PMU register set. */ +struct pmu_hw_events { + /* + * The events that are active on the PMU for the given index. + */ + struct perf_event **events; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long *used_mask; + + /* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. 
+ */ + raw_spinlock_t pmu_lock; +}; + +struct arm_pmu { + struct pmu pmu; + cpumask_t active_irqs; + const char *name; + irqreturn_t (*handle_irq)(int irq_num, void *dev); + void (*enable)(struct hw_perf_event *evt, int idx); + void (*disable)(struct hw_perf_event *evt, int idx); + int (*get_event_idx)(struct pmu_hw_events *hw_events, + struct hw_perf_event *hwc); + int (*set_event_filter)(struct hw_perf_event *evt, + struct perf_event_attr *attr); + u32 (*read_counter)(int idx); + void (*write_counter)(int idx, u32 val); + void (*start)(void); + void (*stop)(void); + void (*reset)(void *); + int (*map_event)(struct perf_event *event); + int num_events; + atomic_t active_events; + struct mutex reserve_mutex; + u64 max_period; + struct platform_device *plat_device; + struct pmu_hw_events *(*get_hw_events)(void); +}; + +#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) + +int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); + +u64 armpmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx); + +int armpmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx); + +#endif /* CONFIG_HW_PERF_EVENTS */ +#endif /* __ASM_PMU_H */ diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h new file mode 100644 index 000000000..0c657bb54 --- /dev/null +++ b/arch/arm64/include/asm/proc-fns.h @@ -0,0 +1,53 @@ +/* + * Based on arch/arm/include/asm/proc-fns.h + * + * Copyright (C) 1997-1999 Russell King + * Copyright (C) 2000 Deep Blue Solutions Ltd + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PROCFNS_H +#define __ASM_PROCFNS_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#include <asm/page.h> + +struct mm_struct; +struct cpu_suspend_ctx; + +extern void cpu_cache_off(void); +extern void cpu_do_idle(void); +extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); +extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); +extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); +extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); + +#include <asm/memory.h> + +#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) + +#define cpu_get_pgd() \ +({ \ + unsigned long pg; \ + asm("mrs %0, ttbr0_el1\n" \ + : "=r" (pg)); \ + pg &= ~0xffff000000003ffful; \ + (pgd_t *)phys_to_virt(pg); \ +}) + +#endif /* __ASSEMBLY__ */ +#endif /* __KERNEL__ */ +#endif /* __ASM_PROCFNS_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h new file mode 100644 index 000000000..3b7bb031f --- /dev/null +++ b/arch/arm64/include/asm/processor.h @@ -0,0 +1,162 @@ +/* + * Based on arch/arm/include/asm/processor.h + * + * Copyright (C) 1995-1999 Russell King + * Copyright (C) 2012 ARM Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PROCESSOR_H +#define __ASM_PROCESSOR_H + +/* + * Default implementation of macro that returns current + * instruction pointer ("program counter"). + */ +#define current_text_addr() ({ __label__ _l; _l: &&_l;}) + +#ifdef __KERNEL__ + +#include <linux/string.h> + +#include <asm/fpsimd.h> +#include <asm/hw_breakpoint.h> +#include <asm/ptrace.h> +#include <asm/types.h> + +#ifdef __KERNEL__ +#define STACK_TOP_MAX TASK_SIZE_64 +#ifdef CONFIG_COMPAT +#define AARCH32_VECTORS_BASE 0xffff0000 +#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ + AARCH32_VECTORS_BASE : STACK_TOP_MAX) +#else +#define STACK_TOP STACK_TOP_MAX +#endif /* CONFIG_COMPAT */ + +#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK +#endif /* __KERNEL__ */ + +struct debug_info { + /* Have we suspended stepping by a debugger? */ + int suspended_step; + /* Allow breakpoints and watchpoints to be disabled for this thread. */ + int bps_disabled; + int wps_disabled; + /* Hardware breakpoints pinned to this task. */ + struct perf_event *hbp_break[ARM_MAX_BRP]; + struct perf_event *hbp_watch[ARM_MAX_WRP]; +}; + +struct cpu_context { + unsigned long x19; + unsigned long x20; + unsigned long x21; + unsigned long x22; + unsigned long x23; + unsigned long x24; + unsigned long x25; + unsigned long x26; + unsigned long x27; + unsigned long x28; + unsigned long fp; + unsigned long sp; + unsigned long pc; +}; + +struct thread_struct { + struct cpu_context cpu_context; /* cpu context */ + unsigned long tp_value; + struct fpsimd_state fpsimd_state; + unsigned long fault_address; /* fault info */ + struct debug_info debug; /* debugging */ +}; + +#define INIT_THREAD { } + +static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) +{ + memset(regs, 0, sizeof(*regs)); + regs->syscallno = ~0UL; + regs->pc = pc; +} + +static inline void start_thread(struct pt_regs *regs, unsigned long pc, + unsigned long sp) +{ + start_thread_common(regs, pc); + regs->pstate = PSR_MODE_EL0t; + regs->sp = sp; +} + +#ifdef CONFIG_COMPAT +static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc, + unsigned long sp) +{ + start_thread_common(regs, pc); + regs->pstate = COMPAT_PSR_MODE_USR; + if (pc & 1) + regs->pstate |= COMPAT_PSR_T_BIT; + regs->compat_sp = sp; +} +#endif + +/* Forward declaration, a strange C thing */ +struct task_struct; + +/* Free all resources held by a thread. 
*/ +extern void release_thread(struct task_struct *); + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +unsigned long get_wchan(struct task_struct *p); + +#define cpu_relax() barrier() + +/* Thread switching */ +extern struct task_struct *cpu_switch_to(struct task_struct *prev, + struct task_struct *next); + +#define task_pt_regs(p) \ + ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) + +#define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc) +#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk)) + +/* + * Prefetching support + */ +#define ARCH_HAS_PREFETCH +static inline void prefetch(const void *ptr) +{ + asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr)); +} + +#define ARCH_HAS_PREFETCHW +static inline void prefetchw(const void *ptr) +{ + asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr)); +} + +#define ARCH_HAS_SPINLOCK_PREFETCH +static inline void spin_lock_prefetch(const void *x) +{ + prefetchw(x); +} + +#define HAVE_ARCH_PICK_MMAP_LAYOUT + +#endif + +#endif /* __ASM_PROCESSOR_H */ diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h new file mode 100644 index 000000000..68b90e682 --- /dev/null +++ b/arch/arm64/include/asm/prom.h @@ -0,0 +1 @@ +/* Empty for now */ diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h new file mode 100644 index 000000000..a5922961e --- /dev/null +++ b/arch/arm64/include/asm/psci.h @@ -0,0 +1,42 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2013 ARM Limited + */ + +#ifndef __ASM_PSCI_H +#define __ASM_PSCI_H + +#define PSCI_POWER_STATE_TYPE_STANDBY 0 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 + +struct psci_power_state { + u16 id; + u8 type; + u8 affinity_level; +}; + + +struct psci_operations { + int (*cpu_suspend)(struct psci_power_state state, + unsigned long entry_point); + int (*cpu_off)(struct psci_power_state state); + int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); + int (*migrate)(unsigned long cpuid); + int (*affinity_info)(unsigned long target_affinity, + unsigned long lowest_affinity_level); + int (*migrate_info_type)(void); +}; + +extern struct psci_operations psci_ops; + +int psci_init(void); + +#endif /* __ASM_PSCI_H */ diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h new file mode 100644 index 000000000..2861aed5a --- /dev/null +++ b/arch/arm64/include/asm/ptrace.h @@ -0,0 +1,197 @@ +/* + * Based on arch/arm/include/asm/ptrace.h + * + * Copyright (C) 1996-2003 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. 
If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_PTRACE_H +#define __ASM_PTRACE_H + +#include <uapi/asm/ptrace.h> + +/* AArch32-specific ptrace requests */ +#define COMPAT_PTRACE_GETREGS 12 +#define COMPAT_PTRACE_SETREGS 13 +#define COMPAT_PTRACE_GET_THREAD_AREA 22 +#define COMPAT_PTRACE_SET_SYSCALL 23 +#define COMPAT_PTRACE_GETVFPREGS 27 +#define COMPAT_PTRACE_SETVFPREGS 28 +#define COMPAT_PTRACE_GETHBPREGS 29 +#define COMPAT_PTRACE_SETHBPREGS 30 + +/* AArch32 CPSR bits */ +#define COMPAT_PSR_MODE_MASK 0x0000001f +#define COMPAT_PSR_MODE_USR 0x00000010 +#define COMPAT_PSR_MODE_FIQ 0x00000011 +#define COMPAT_PSR_MODE_IRQ 0x00000012 +#define COMPAT_PSR_MODE_SVC 0x00000013 +#define COMPAT_PSR_MODE_ABT 0x00000017 +#define COMPAT_PSR_MODE_HYP 0x0000001a +#define COMPAT_PSR_MODE_UND 0x0000001b +#define COMPAT_PSR_MODE_SYS 0x0000001f +#define COMPAT_PSR_T_BIT 0x00000020 +#define COMPAT_PSR_F_BIT 0x00000040 +#define COMPAT_PSR_I_BIT 0x00000080 +#define COMPAT_PSR_A_BIT 0x00000100 +#define COMPAT_PSR_E_BIT 0x00000200 +#define COMPAT_PSR_J_BIT 0x01000000 +#define COMPAT_PSR_Q_BIT 0x08000000 +#define COMPAT_PSR_V_BIT 0x10000000 +#define COMPAT_PSR_C_BIT 0x20000000 +#define COMPAT_PSR_Z_BIT 0x40000000 +#define COMPAT_PSR_N_BIT 0x80000000 +#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ +/* + * These are 'magic' values for PTRACE_PEEKUSR that return info about where a + * process is located in memory. + */ +#define COMPAT_PT_TEXT_ADDR 0x10000 +#define COMPAT_PT_DATA_ADDR 0x10004 +#define COMPAT_PT_TEXT_END_ADDR 0x10008 + +/* + * used to skip a system call when tracer changes its number to -1 + * with ptrace(PTRACE_SET_SYSCALL) + */ +#define RET_SKIP_SYSCALL -1 +#define RET_SKIP_SYSCALL_TRACE -2 +#define IS_SKIP_SYSCALL(no) ((int)(no & 0xffffffff) == -1) + +#ifndef __ASSEMBLY__ + +/* sizeof(struct user) for AArch32 */ +#define COMPAT_USER_SZ 296 + +/* Architecturally defined mapping between AArch32 and AArch64 registers */ +#define compat_usr(x) regs[(x)] +#define compat_sp regs[13] +#define compat_lr regs[14] +#define compat_sp_hyp regs[15] +#define compat_sp_irq regs[16] +#define compat_lr_irq regs[17] +#define compat_sp_svc regs[18] +#define compat_lr_svc regs[19] +#define compat_sp_abt regs[20] +#define compat_lr_abt regs[21] +#define compat_sp_und regs[22] +#define compat_lr_und regs[23] +#define compat_r8_fiq regs[24] +#define compat_r9_fiq regs[25] +#define compat_r10_fiq regs[26] +#define compat_r11_fiq regs[27] +#define compat_r12_fiq regs[28] +#define compat_sp_fiq regs[29] +#define compat_lr_fiq regs[30] + +/* + * This struct defines the way the registers are stored on the stack during an + * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for + * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs. 
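+ *
+ * For example, given a struct pt_regs *regs, the two views of the union
+ * below are interchangeable ways to reach the same storage:
+ *
+ *	regs->pc		- via the anonymous struct
+ *	regs->user_regs.pc	- the same field via the user_pt_regs prefix
+ *	regs->regs[0]		- x0, the first argument / return value slot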
+ */ +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + u64 regs[31]; + u64 sp; + u64 pc; + u64 pstate; + }; + }; + u64 orig_x0; + u64 syscallno; +}; + +#define arch_has_single_step() (1) + +#ifdef CONFIG_COMPAT +#define compat_thumb_mode(regs) \ + (((regs)->pstate & COMPAT_PSR_T_BIT)) +#else +#define compat_thumb_mode(regs) (0) +#endif + +#define user_mode(regs) \ + (((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t) + +#define compat_user_mode(regs) \ + (((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \ + (PSR_MODE32_BIT | PSR_MODE_EL0t)) + +#define processor_mode(regs) \ + ((regs)->pstate & PSR_MODE_MASK) + +#define interrupts_enabled(regs) \ + (!((regs)->pstate & PSR_I_BIT)) + +#define fast_interrupts_enabled(regs) \ + (!((regs)->pstate & PSR_F_BIT)) + +#define user_stack_pointer(regs) \ + (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) + +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->regs[0]; +} + +/* + * Are the current registers suitable for user mode? (used to maintain + * security in signal handlers) + */ +static inline int valid_user_regs(struct user_pt_regs *regs) +{ + if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) { + regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT); + + /* The T bit is reserved for AArch64 */ + if (!(regs->pstate & PSR_MODE32_BIT)) + regs->pstate &= ~COMPAT_PSR_T_BIT; + + return 1; + } + + /* + * Force PSR to something logical... + */ + regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \ + COMPAT_PSR_T_BIT | PSR_MODE32_BIT; + + if (!(regs->pstate & PSR_MODE32_BIT)) { + regs->pstate &= ~COMPAT_PSR_T_BIT; + regs->pstate |= PSR_MODE_EL0t; + } + + return 0; +} + +#define instruction_pointer(regs) ((unsigned long)(regs)->pc) + +#ifdef CONFIG_SMP +extern unsigned long profile_pc(struct pt_regs *regs); +#else +#define profile_pc(regs) instruction_pointer(regs) +#endif + +/* + * True if instr is a 32-bit thumb instruction. This works if instr + * is the first or only half-word of a thumb instruction. It also works + * when instr holds all 32-bits of a wide thumb instruction if stored + * in the form (first_half<<16)|(second_half) + */ +#define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800) + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/arm64/include/asm/scatterlist.h b/arch/arm64/include/asm/scatterlist.h new file mode 100644 index 000000000..cefdb8f89 --- /dev/null +++ b/arch/arm64/include/asm/scatterlist.h @@ -0,0 +1,12 @@ +#ifndef _ASMARM_SCATTERLIST_H +#define _ASMARM_SCATTERLIST_H + +#ifdef CONFIG_ARM_HAS_SG_CHAIN +#define ARCH_HAS_SG_CHAIN +#endif + +#include <asm/memory.h> +#include <asm/types.h> +#include <asm-generic/scatterlist.h> + +#endif /* _ASMARM_SCATTERLIST_H */ diff --git a/arch/arm64/include/asm/seccomp.h b/arch/arm64/include/asm/seccomp.h new file mode 100644 index 000000000..bec3a43f7 --- /dev/null +++ b/arch/arm64/include/asm/seccomp.h @@ -0,0 +1,25 @@ +/* + * arch/arm64/include/asm/seccomp.h + * + * Copyright (C) 2014 Linaro Limited + * Author: AKASHI Takahiro <takahiro.akashi <at> linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include <asm/unistd.h> + +#ifdef CONFIG_COMPAT +#define __NR_seccomp_read_32 __NR_compat_read +#define __NR_seccomp_write_32 __NR_compat_write +#define __NR_seccomp_exit_32 __NR_compat_exit +#define __NR_seccomp_sigreturn_32 __NR_compat_rt_sigreturn +#endif /* CONFIG_COMPAT */ + +#include <asm-generic/seccomp.h> + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h new file mode 100644 index 000000000..4df608a84 --- /dev/null +++ b/arch/arm64/include/asm/shmparam.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_SHMPARAM_H +#define __ASM_SHMPARAM_H + +/* + * For IPC syscalls from compat tasks, we need to use the legacy 16k + * alignment value. Since we don't have aliasing D-caches, the rest of + * the time we can safely use PAGE_SIZE. + */ +#define COMPAT_SHMLBA 0x4000 + +#include <asm-generic/shmparam.h> + +#endif /* __ASM_SHMPARAM_H */ diff --git a/arch/arm64/include/asm/sigcontext.h b/arch/arm64/include/asm/sigcontext.h new file mode 100644 index 000000000..dca1094ac --- /dev/null +++ b/arch/arm64/include/asm/sigcontext.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_SIGCONTEXT_H +#define __ASM_SIGCONTEXT_H + +#include <uapi/asm/sigcontext.h> + +/* + * Auxiliary context saved in the sigcontext.__reserved array. Not exported to + * user space as it will change with the addition of new context. User space + * should check the magic/size information. + */ +struct aux_context { + struct fpsimd_context fpsimd; + /* additional context to be added before "end" */ + struct _aarch64_ctx end; +}; +#endif diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h new file mode 100644 index 000000000..7c275e3b6 --- /dev/null +++ b/arch/arm64/include/asm/signal32.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SIGNAL32_H
+#define __ASM_SIGNAL32_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+#define AARCH32_KERN_SIGRET_CODE_OFFSET	0x500
+
+extern const compat_ulong_t aarch32_sigret_code[6];
+
+int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set,
+		       struct pt_regs *regs);
+int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
+			  sigset_t *set, struct pt_regs *regs);
+
+void compat_setup_restart_syscall(struct pt_regs *regs);
+#else
+
+static inline int compat_setup_frame(int usig, struct k_sigaction *ka,
+				     sigset_t *set, struct pt_regs *regs)
+{
+	return -ENOSYS;
+}
+
+static inline int compat_setup_rt_frame(int usig, struct k_sigaction *ka,
+					siginfo_t *info, sigset_t *set,
+					struct pt_regs *regs)
+{
+	return -ENOSYS;
+}
+
+static inline void compat_setup_restart_syscall(struct pt_regs *regs)
+{
+}
+#endif /* CONFIG_COMPAT */
+#endif /* __KERNEL__ */
+#endif /* __ASM_SIGNAL32_H */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
new file mode 100644
index 000000000..a498f2cd2
--- /dev/null
+++ b/arch/arm64/include/asm/smp.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SMP_H
+#define __ASM_SMP_H
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/thread_info.h>
+
+#ifndef CONFIG_SMP
+# error "<asm/smp.h> included in non-SMP build"
+#endif
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+struct seq_file;
+
+/*
+ * generate IPI list text
+ */
+extern void show_ipi_list(struct seq_file *p, int prec);
+
+/*
+ * Called from C code, this handles an IPI.
+ */
+extern void handle_IPI(int ipinr, struct pt_regs *regs);
+
+/*
+ * Setup the set of possible CPUs (via set_cpu_possible)
+ */
+extern void smp_init_cpus(void);
+
+/*
+ * Provide a function to raise an IPI cross call on CPUs in callmap.
+ */
+extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+
+/*
+ * Called from the secondary holding pen, this is the secondary CPU entry point.
+ */
+asmlinkage void secondary_start_kernel(void);
+
+/*
+ * Initial data for bringing up a secondary CPU.
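+ *
+ * Rough sketch of the intended use (simplified; the real sequence lives
+ * in the SMP boot code, not in this header): the boot CPU publishes the
+ * new CPU's stack here before releasing it into secondary_entry():
+ *
+ *	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
+ *	... kick the CPU (e.g. PSCI CPU_ON) at secondary_entry() ...
+ *	... which ends up calling secondary_start_kernel() ...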
+ */ +struct secondary_data { + void *stack; +}; +extern struct secondary_data secondary_data; +extern void secondary_entry(void); + +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +extern int __cpu_disable(void); + +extern void __cpu_die(unsigned int cpu); +extern void cpu_die(void); + +#endif /* ifndef __ASM_SMP_H */ diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h new file mode 100644 index 000000000..59e282311 --- /dev/null +++ b/arch/arm64/include/asm/smp_plat.h @@ -0,0 +1,43 @@ +/* + * Definitions specific to SMP platforms. + * + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_SMP_PLAT_H +#define __ASM_SMP_PLAT_H + +#include <asm/types.h> + +struct mpidr_hash { + u64 mask; + u32 shift_aff[4]; + u32 bits; +}; + +extern struct mpidr_hash mpidr_hash; + +static inline u32 mpidr_hash_size(void) +{ + return 1 << mpidr_hash.bits; +} + +/* + * Logical CPU mapping. + */ +extern u64 __cpu_logical_map[NR_CPUS]; +#define cpu_logical_map(cpu) __cpu_logical_map[cpu] + +#endif /* __ASM_SMP_PLAT_H */ diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h new file mode 100644 index 000000000..1be62bcb9 --- /dev/null +++ b/arch/arm64/include/asm/sparsemem.h @@ -0,0 +1,24 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_SPARSEMEM_H +#define __ASM_SPARSEMEM_H + +#ifdef CONFIG_SPARSEMEM +#define MAX_PHYSMEM_BITS 40 +#define SECTION_SIZE_BITS 30 +#endif + +#endif diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h new file mode 100644 index 000000000..0defa0728 --- /dev/null +++ b/arch/arm64/include/asm/spinlock.h @@ -0,0 +1,203 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. 
If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_SPINLOCK_H +#define __ASM_SPINLOCK_H + +#include <asm/spinlock_types.h> +#include <asm/processor.h> + +/* + * Spinlock implementation. + * + * The old value is read exclusively and the new one, if unlocked, is written + * exclusively. In case of failure, the loop is restarted. + * + * The memory barriers are implicit with the load-acquire and store-release + * instructions. + * + * Unlocked value: 0 + * Locked value: 1 + */ + +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) + +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) + +static inline void arch_spin_lock(arch_spinlock_t *lock) +{ + unsigned int tmp; + + asm volatile( + " sevl\n" + "1: wfe\n" + "2: ldaxr %w0, %1\n" + " cbnz %w0, 1b\n" + " stxr %w0, %w2, %1\n" + " cbnz %w0, 2b\n" + : "=&r" (tmp), "+Q" (lock->lock) + : "r" (1) + : "cc", "memory"); +} + +static inline int arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned int tmp; + + asm volatile( + "2: ldaxr %w0, %1\n" + " cbnz %w0, 1f\n" + " stxr %w0, %w2, %1\n" + " cbnz %w0, 2b\n" + "1:\n" + : "=&r" (tmp), "+Q" (lock->lock) + : "r" (1) + : "cc", "memory"); + + return !tmp; +} + +static inline void arch_spin_unlock(arch_spinlock_t *lock) +{ + asm volatile( + " stlr %w1, %0\n" + : "=Q" (lock->lock) : "r" (0) : "memory"); +} + +/* + * Write lock implementation. + * + * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is + * exclusively held. + * + * The memory barriers are implicit with the load-acquire and store-release + * instructions. + */ + +static inline void arch_write_lock(arch_rwlock_t *rw) +{ + unsigned int tmp; + + asm volatile( + " sevl\n" + "1: wfe\n" + "2: ldaxr %w0, %1\n" + " cbnz %w0, 1b\n" + " stxr %w0, %w2, %1\n" + " cbnz %w0, 2b\n" + : "=&r" (tmp), "+Q" (rw->lock) + : "r" (0x80000000) + : "cc", "memory"); +} + +static inline int arch_write_trylock(arch_rwlock_t *rw) +{ + unsigned int tmp; + + asm volatile( + " ldaxr %w0, %1\n" + " cbnz %w0, 1f\n" + " stxr %w0, %w2, %1\n" + "1:\n" + : "=&r" (tmp), "+Q" (rw->lock) + : "r" (0x80000000) + : "cc", "memory"); + + return !tmp; +} + +static inline void arch_write_unlock(arch_rwlock_t *rw) +{ + asm volatile( + " stlr %w1, %0\n" + : "=Q" (rw->lock) : "r" (0) : "memory"); +} + +/* write_can_lock - would write_trylock() succeed? */ +#define arch_write_can_lock(x) ((x)->lock == 0) + +/* + * Read lock implementation. + * + * It exclusively loads the lock value, increments it and stores the new value + * back if positive and the CPU still exclusively owns the location. If the + * value is negative, the lock is already held. + * + * During unlocking there may be multiple active read locks but no write lock. + * + * The memory barriers are implicit with the load-acquire and store-release + * instructions. 
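+ *
+ * Resulting encoding of the single 32-bit lock word, shared with the
+ * write side:
+ *
+ *	0		unlocked
+ *	n > 0		held by n readers
+ *	0x80000000	write-locked (bit 31 set, so readers see a
+ *			negative value and spin)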
+ */ +static inline void arch_read_lock(arch_rwlock_t *rw) +{ + unsigned int tmp, tmp2; + + asm volatile( + " sevl\n" + "1: wfe\n" + "2: ldaxr %w0, %2\n" + " add %w0, %w0, #1\n" + " tbnz %w0, #31, 1b\n" + " stxr %w1, %w0, %2\n" + " cbnz %w1, 2b\n" + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); +} + +static inline void arch_read_unlock(arch_rwlock_t *rw) +{ + unsigned int tmp, tmp2; + + asm volatile( + "1: ldxr %w0, %2\n" + " sub %w0, %w0, #1\n" + " stlxr %w1, %w0, %2\n" + " cbnz %w1, 1b\n" + : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); +} + +static inline int arch_read_trylock(arch_rwlock_t *rw) +{ + unsigned int tmp, tmp2 = 1; + + asm volatile( + " ldaxr %w0, %2\n" + " add %w0, %w0, #1\n" + " tbnz %w0, #31, 1f\n" + " stxr %w1, %w0, %2\n" + "1:\n" + : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock) + : + : "cc", "memory"); + + return !tmp2; +} + +/* read_can_lock - would read_trylock() succeed? */ +#define arch_read_can_lock(x) ((x)->lock < 0x80000000) + +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) + +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() + +#endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h new file mode 100644 index 000000000..9a494346e --- /dev/null +++ b/arch/arm64/include/asm/spinlock_types.h @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_SPINLOCK_TYPES_H +#define __ASM_SPINLOCK_TYPES_H + +#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H) +# error "please don't include this file directly" +#endif + +/* We only require natural alignment for exclusive accesses. */ +#define __lock_aligned + +typedef struct { + volatile unsigned int lock; +} arch_spinlock_t; + +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } + +typedef struct { + volatile unsigned int lock; +} arch_rwlock_t; + +#define __ARCH_RW_LOCK_UNLOCKED { 0 } + +#endif diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h new file mode 100644 index 000000000..7318f6d54 --- /dev/null +++ b/arch/arm64/include/asm/stacktrace.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_STACKTRACE_H +#define __ASM_STACKTRACE_H + +struct stackframe { + unsigned long fp; + unsigned long sp; + unsigned long pc; +}; + +extern int unwind_frame(struct stackframe *frame); +extern void walk_stackframe(struct stackframe *frame, + int (*fn)(struct stackframe *, void *), void *data); + +#endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/include/asm/stat.h b/arch/arm64/include/asm/stat.h new file mode 100644 index 000000000..15e35598a --- /dev/null +++ b/arch/arm64/include/asm/stat.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_STAT_H +#define __ASM_STAT_H + +#include <uapi/asm/stat.h> + +#ifdef CONFIG_COMPAT + +#include <asm/compat.h> + +/* + * struct stat64 is needed for compat tasks only. Its definition is different + * from the generic struct stat64. + */ +struct stat64 { + compat_u64 st_dev; + unsigned char __pad0[4]; + +#define STAT64_HAS_BROKEN_ST_INO 1 + compat_ulong_t __st_ino; + compat_uint_t st_mode; + compat_uint_t st_nlink; + + compat_ulong_t st_uid; + compat_ulong_t st_gid; + + compat_u64 st_rdev; + unsigned char __pad3[4]; + + compat_s64 st_size; + compat_ulong_t st_blksize; + compat_u64 st_blocks; /* Number of 512-byte blocks allocated. */ + + compat_ulong_t st_atime; + compat_ulong_t st_atime_nsec; + + compat_ulong_t st_mtime; + compat_ulong_t st_mtime_nsec; + + compat_ulong_t st_ctime; + compat_ulong_t st_ctime_nsec; + + compat_u64 st_ino; +}; + +#endif +#endif diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h new file mode 100644 index 000000000..3ee8b303d --- /dev/null +++ b/arch/arm64/include/asm/string.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2013 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_STRING_H +#define __ASM_STRING_H + +#define __HAVE_ARCH_STRRCHR +extern char *strrchr(const char *, int c); + +#define __HAVE_ARCH_STRCHR +extern char *strchr(const char *, int c); + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *, const void *, __kernel_size_t); + +#define __HAVE_ARCH_MEMCHR +extern void *memchr(const void *, int, __kernel_size_t); + +#define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, __kernel_size_t); + +#endif diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h new file mode 100644 index 000000000..69de2b2bd --- /dev/null +++ b/arch/arm64/include/asm/suspend.h @@ -0,0 +1,27 @@ +#ifndef __ASM_SUSPEND_H +#define __ASM_SUSPEND_H + +#define NR_CTX_REGS (11+2+2) + +/* + * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on + * the stack, which must be 16-byte aligned on v8 + */ +struct cpu_suspend_ctx { + /* + * This struct must be kept in sync with + * cpu_do_{suspend/resume} in mm/proc.S + */ + u64 ctx_regs[NR_CTX_REGS]; + u64 sp; +} __aligned(16); + +struct sleep_save_sp { + phys_addr_t *save_ptr_stash; + phys_addr_t save_ptr_stash_phys; +}; + +extern void cpu_resume(void); +extern int cpu_suspend(unsigned long); + +#endif diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h new file mode 100644 index 000000000..709a57446 --- /dev/null +++ b/arch/arm64/include/asm/syscall.h @@ -0,0 +1,122 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_SYSCALL_H +#define __ASM_SYSCALL_H + +#include <uapi/linux/audit.h> +#include <linux/compat.h> +#include <linux/err.h> + +extern const void *sys_call_table[]; + +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->syscallno; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->regs[0] = regs->orig_x0; +} + + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->regs[0]; + return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->regs[0] = (long) error ? 
error : val;
+}
+
+#define SYSCALL_MAX_ARGS 6
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 unsigned long *args)
+{
+	if (n == 0)
+		return;
+
+	if (i + n > SYSCALL_MAX_ARGS) {
+		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
+		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		memset(args_bad, 0, n_bad * sizeof(args[0]));
+	}
+
+	if (i == 0) {
+		args[0] = regs->orig_x0;
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(args, &regs->regs[i], n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned int i, unsigned int n,
+					 const unsigned long *args)
+{
+	if (n == 0)
+		return;
+
+	if (i + n > SYSCALL_MAX_ARGS) {
+		pr_warning("%s called with max args %d, handling only %d\n",
+			   __func__, i + n, SYSCALL_MAX_ARGS);
+		n = SYSCALL_MAX_ARGS - i;
+	}
+
+	if (i == 0) {
+		regs->orig_x0 = args[0];
+		args++;
+		i++;
+		n--;
+	}
+
+	memcpy(&regs->regs[i], args, n * sizeof(args[0]));
+}
+
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * AArch64 has the same system calls both on little- and big-endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_ARM;
+
+	return AUDIT_ARCH_AARCH64;
+}
+
+#endif /* __ASM_SYSCALL_H */
diff --git a/arch/arm64/include/asm/syscalls.h b/arch/arm64/include/asm/syscalls.h
new file mode 100644
index 000000000..48fe7c600
--- /dev/null
+++ b/arch/arm64/include/asm/syscalls.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_SYSCALLS_H
+#define __ASM_SYSCALLS_H
+
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+#include <linux/signal.h>
+
+/*
+ * System call wrappers implemented in kernel/entry.S.
+ */
+asmlinkage long sys_rt_sigreturn_wrapper(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __ASM_SYSCALLS_H */
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
new file mode 100644
index 000000000..b3b903a66
--- /dev/null
+++ b/arch/arm64/include/asm/system_misc.h
@@ -0,0 +1,60 @@
+/*
+ * Based on arch/arm/include/asm/system_misc.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ +#ifndef __ASM_SYSTEM_MISC_H +#define __ASM_SYSTEM_MISC_H + +#ifndef __ASSEMBLY__ + +#include <linux/compiler.h> +#include <linux/linkage.h> +#include <linux/irqflags.h> + +struct pt_regs; + +void die(const char *msg, struct pt_regs *regs, int err); + +struct siginfo; +void arm64_notify_die(const char *str, struct pt_regs *regs, + struct siginfo *info, int err); + +void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int, + struct pt_regs *), + int sig, int code, const char *name); +#ifdef CONFIG_MEDIATEK_SOLUTION +void hook_fault_code(int nr, + int (*fn)(unsigned long, unsigned int, struct pt_regs *), + int sig, int code, const char *name); +#endif + +struct mm_struct; +extern void show_pte(struct mm_struct *mm, unsigned long addr); +extern void __show_regs(struct pt_regs *); + +void soft_restart(unsigned long); +extern void (*arm_pm_restart)(char str, const char *cmd); +extern void (*arm_pm_idle)(void); + +#define UDBG_UNDEFINED (1 << 0) +#define UDBG_SYSCALL (1 << 1) +#define UDBG_BADABORT (1 << 2) +#define UDBG_SEGV (1 << 3) +#define UDBG_BUS (1 << 4) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_SYSTEM_MISC_H */ diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h new file mode 100644 index 000000000..5d23739f7 --- /dev/null +++ b/arch/arm64/include/asm/thread_info.h @@ -0,0 +1,149 @@ +/* + * Based on arch/arm/include/asm/thread_info.h + * + * Copyright (C) 2002 Russell King. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_THREAD_INFO_H +#define __ASM_THREAD_INFO_H + +#ifdef __KERNEL__ + +#include <linux/compiler.h> + +#ifndef CONFIG_ARM64_64K_PAGES +#define THREAD_SIZE_ORDER 2 +#endif + +#define THREAD_SIZE 16384 +#define THREAD_START_SP (THREAD_SIZE - 16) + +#ifndef __ASSEMBLY__ + +struct task_struct; +struct exec_domain; + +#include <asm/types.h> + +typedef unsigned long mm_segment_t; + +/* + * low level task data that entry.S needs immediate access to. + * __switch_to() assumes cpu_context follows immediately after cpu_domain. 
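+ *
+ * The struct sits at the base of the 16K kernel stack, so a pointer to
+ * it can be recovered from the stack pointer alone; current_thread_info()
+ * below does exactly that:
+ *
+ *	(struct thread_info *)(sp & ~(THREAD_SIZE - 1))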
+ */
+struct thread_info {
+	unsigned long		flags;		/* low level flags */
+	mm_segment_t		addr_limit;	/* address limit */
+	struct task_struct	*task;		/* main task structure */
+	struct exec_domain	*exec_domain;	/* execution domain */
+	struct restart_block	restart_block;
+	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			cpu;		/* cpu */
+	void			*regs_on_excp;
+	int			cpu_excp;
+};
+
+#define INIT_THREAD_INFO(tsk)						\
+{									\
+	.task		= &tsk,						\
+	.exec_domain	= &default_exec_domain,				\
+	.flags		= 0,						\
+	.preempt_count	= INIT_PREEMPT_COUNT,				\
+	.addr_limit	= KERNEL_DS,					\
+	.restart_block	= {						\
+		.fn	= do_no_restart_syscall,			\
+	},								\
+	.cpu_excp	= 0,						\
+}
+
+#define init_thread_info	(init_thread_union.thread_info)
+#define init_stack		(init_thread_union.stack)
+
+/*
+ * how to get the thread information struct from C
+ */
+static inline struct thread_info *current_thread_info(void) __attribute_const__;
+
+static inline struct thread_info *current_thread_info(void)
+{
+	register unsigned long sp asm ("sp");
+	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#define thread_saved_pc(tsk)	\
+	((unsigned long)(tsk->thread.cpu_context.pc))
+#define thread_saved_sp(tsk)	\
+	((unsigned long)(tsk->thread.cpu_context.sp))
+#define thread_saved_fp(tsk)	\
+	((unsigned long)(tsk->thread.cpu_context.fp))
+
+#endif
+
+/*
+ * We use bit 30 of the preempt_count to indicate that kernel
+ * preemption is occurring. See <asm/hardirq.h>.
+ */
+#define PREEMPT_ACTIVE	0x40000000
+
+/*
+ * thread information flags:
+ *  TIF_SYSCALL_TRACE	- syscall trace active
+ *  TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
+ *  TIF_SYSCALL_AUDIT	- syscall auditing
+ *  TIF_SECCOMP		- syscall secure computing
+ *  TIF_SIGPENDING	- signal pending
+ *  TIF_NEED_RESCHED	- rescheduling necessary
+ *  TIF_NOTIFY_RESUME	- callback before returning to user
+ *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
+ *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
+ */
+#define TIF_SIGPENDING		0
+#define TIF_NEED_RESCHED	1
+#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+#define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
+#define TIF_SYSCALL_TRACE	8
+#define TIF_SYSCALL_AUDIT	9
+#define TIF_SYSCALL_TRACEPOINT	10
+#define TIF_SECCOMP		11
+#define TIF_POLLING_NRFLAG	16
+#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
+#define TIF_FREEZE		19
+#define TIF_RESTORE_SIGMASK	20
+#define TIF_SINGLESTEP		21
+#define TIF_32BIT		22	/* 32bit process */
+#define TIF_SWITCH_MM		23	/* deferred switch_mm */
+#if defined(CONFIG_MT_RT_SCHED)
+#define TIF_NEED_RELEASED	31
+#endif
+
+
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_32BIT		(1 << TIF_32BIT)
+
+#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+
+#define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h
new file mode 100644
index 000000000..b24a31a7e
--- /dev/null
+++ 
b/arch/arm64/include/asm/timex.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_TIMEX_H +#define __ASM_TIMEX_H + +/* + * Use the current timer as a cycle counter since this is what we use for + * the delay loop. + */ +#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; }) + +#include <asm-generic/timex.h> + +#define ARCH_HAS_READ_CURRENT_TIMER + +#endif diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h new file mode 100644 index 000000000..cd78b5c3c --- /dev/null +++ b/arch/arm64/include/asm/tlb.h @@ -0,0 +1,193 @@ +/* + * Based on arch/arm/include/asm/tlb.h + * + * Copyright (C) 2002 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_TLB_H +#define __ASM_TLB_H + +#include <linux/pagemap.h> +#include <linux/swap.h> + +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> + +#define MMU_GATHER_BUNDLE 8 + +/* + * TLB handling. This allows us to remove pages from the page + * tables, and efficiently handle the TLB issues. + */ +struct mmu_gather { + struct mm_struct *mm; + unsigned int fullmm; + struct vm_area_struct *vma; + unsigned long start, end; + unsigned long range_start; + unsigned long range_end; + unsigned int nr; + unsigned int max; + struct page **pages; + struct page *local[MMU_GATHER_BUNDLE]; +}; + +/* + * This is unnecessarily complex. There's three ways the TLB shootdown + * code is used: + * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). + * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. + * 2. Unmapping all vmas. See exit_mmap(). + * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. + * tlb->vma will be non-NULL. Additionally, page tables will be freed. + * 3. Unmapping argument pages. See shift_arg_pages(). + * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. + * tlb->vma will be NULL. 
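+ *
+ * Case 1, for instance, corresponds to a caller sequence along these
+ * lines (simplified):
+ *
+ *	tlb_gather_mmu(&tlb, mm, start, end);
+ *	tlb_start_vma(&tlb, vma);
+ *	...	tlb_remove_tlb_entry(&tlb, ptep, addr) for each PTE ...
+ *	tlb_end_vma(&tlb, vma);
+ *	tlb_finish_mmu(&tlb, start, end);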
+ */ +static inline void tlb_flush(struct mmu_gather *tlb) +{ + if (tlb->fullmm || !tlb->vma) + flush_tlb_mm(tlb->mm); + else if (tlb->range_end > 0) { + flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); + tlb->range_start = TASK_SIZE; + tlb->range_end = 0; + } +} + +static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) +{ + if (!tlb->fullmm) { + if (addr < tlb->range_start) + tlb->range_start = addr; + if (addr + PAGE_SIZE > tlb->range_end) + tlb->range_end = addr + PAGE_SIZE; + } +} + +static inline void __tlb_alloc_page(struct mmu_gather *tlb) +{ + unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); + + if (addr) { + tlb->pages = (void *)addr; + tlb->max = PAGE_SIZE / sizeof(struct page *); + } +} + +static inline void tlb_flush_mmu(struct mmu_gather *tlb) +{ + tlb_flush(tlb); + free_pages_and_swap_cache(tlb->pages, tlb->nr); + tlb->nr = 0; + if (tlb->pages == tlb->local) + __tlb_alloc_page(tlb); +} + +static inline void +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) +{ + tlb->mm = mm; + tlb->fullmm = !(start | (end+1)); + tlb->start = start; + tlb->end = end; + tlb->vma = NULL; + tlb->max = ARRAY_SIZE(tlb->local); + tlb->pages = tlb->local; + tlb->nr = 0; + __tlb_alloc_page(tlb); +} + +static inline void +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) +{ + tlb_flush_mmu(tlb); + + /* keep the page table cache within bounds */ + check_pgt_cache(); + + if (tlb->pages != tlb->local) + free_pages((unsigned long)tlb->pages, 0); +} + +/* + * Memorize the range for the TLB flush. + */ +static inline void +tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) +{ + tlb_add_flush(tlb, addr); +} + +/* + * In the case of tlb vma handling, we can optimise these away in the + * case where we're doing a full MM flush. When we're doing a munmap, + * the vmas are adjusted to only cover the region to be torn down. 
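+ *
+ * Concretely, tlb_start_vma() below resets the pending range to empty
+ * (range_start = ULONG_MAX, range_end = 0) and tlb_add_flush() then
+ * grows it around every address it is handed; unmapping two pages at
+ * addr and addr + PAGE_SIZE therefore ends with range_start == addr and
+ * range_end == addr + 2 * PAGE_SIZE, so tlb_end_vma() flushes only that
+ * window.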
+ */ +static inline void +tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (!tlb->fullmm) { + tlb->vma = vma; + tlb->range_start = ULONG_MAX; + tlb->range_end = 0; + } +} + +static inline void +tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) +{ + if (!tlb->fullmm) + tlb_flush(tlb); +} + +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + tlb->pages[tlb->nr++] = page; + VM_BUG_ON(tlb->nr > tlb->max); + return tlb->max - tlb->nr; +} + +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) +{ + if (!__tlb_remove_page(tlb, page)) + tlb_flush_mmu(tlb); +} + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, + unsigned long addr) +{ + pgtable_page_dtor(pte); + tlb_add_flush(tlb, addr); + tlb_remove_page(tlb, pte); +} + +#ifndef CONFIG_ARM64_64K_PAGES +static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, + unsigned long addr) +{ + tlb_add_flush(tlb, addr); + tlb_remove_page(tlb, virt_to_page(pmdp)); +} +#endif + +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr) +#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) + +#define tlb_migrate_finish(mm) do { } while (0) + +#endif diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h new file mode 100644 index 000000000..122d6320f --- /dev/null +++ b/arch/arm64/include/asm/tlbflush.h @@ -0,0 +1,122 @@ +/* + * Based on arch/arm/include/asm/tlbflush.h + * + * Copyright (C) 1999-2003 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_TLBFLUSH_H +#define __ASM_TLBFLUSH_H + +#ifndef __ASSEMBLY__ + +#include <linux/sched.h> +#include <asm/cputype.h> + +extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); +extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); + +extern struct cpu_tlb_fns cpu_tlb; + +/* + * TLB Management + * ============== + * + * The arch/arm64/mm/tlb.S files implement these methods. + * + * The TLB specific code is expected to perform whatever tests it needs + * to determine if it should invalidate the TLB for each call. Start + * addresses are inclusive and end addresses are exclusive; it is safe to + * round these addresses down. + * + * flush_tlb_all() + * + * Invalidate the entire TLB. + * + * flush_tlb_mm(mm) + * + * Invalidate all TLB entries in a particular address space. + * - mm - mm_struct describing address space + * + * flush_tlb_range(mm,start,end) + * + * Invalidate a range of TLB entries in the specified address + * space. + * - mm - mm_struct describing address space + * - start - start address (may not be aligned) + * - end - end address (exclusive, may not be aligned) + * + * flush_tlb_page(vaddr,vma) + * + * Invalidate the specified page in the specified address range. 
+ * - vaddr - virtual address (may not be aligned)
+ * - vma - vma_struct describing address range
+ *
+ * flush_kern_tlb_page(kaddr)
+ *
+ * Invalidate the TLB entry for the specified page. The address
+ * will be in the kernel's virtual memory space. Current uses
+ * only require the D-TLB to be invalidated.
+ * - kaddr - Kernel virtual memory address
+ */
+static inline void flush_tlb_all(void)
+{
+	dsb();
+	asm("tlbi vmalle1is");
+	dsb();
+	isb();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	unsigned long asid = (unsigned long)ASID(mm) << 48;
+
+	dsb();
+	asm("tlbi aside1is, %0" : : "r" (asid));
+	dsb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 |
+		((unsigned long)ASID(vma->vm_mm) << 48);
+
+	dsb();
+	asm("tlbi vae1is, %0" : : "r" (addr));
+	dsb();
+}
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
+#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+
+/*
+ * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+	/*
+	 * set_pte() does not have a DSB, so make sure that the page table
+	 * write is visible.
+	 */
+	dsb();
+}
+
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
new file mode 100644
index 000000000..4ca109701
--- /dev/null
+++ b/arch/arm64/include/asm/topology.h
@@ -0,0 +1,127 @@
+#ifndef _ASM_ARM_TOPOLOGY_H
+#define _ASM_ARM_TOPOLOGY_H
+
+#ifdef CONFIG_ARM_CPU_TOPOLOGY
+
+#include <linux/cpumask.h>
+
+struct cputopo_arm {
+	int thread_id;
+	int core_id;
+	int socket_id;
+	cpumask_t thread_sibling;
+	cpumask_t core_sibling;
+};
+
+extern struct cputopo_arm cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
+#define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
+#define CPUPOWER_FREQSCALE_SHIFT	10
+#define CPUPOWER_FREQSCALE_DEFAULT	(1L << CPUPOWER_FREQSCALE_SHIFT)
+extern unsigned long arch_get_max_cpu_capacity(int);
+extern unsigned long arch_get_cpu_capacity(int);
+extern int arch_get_invariant_power_enabled(void);
+extern int arch_get_cpu_throttling(int);
+
+#define topology_max_cpu_capacity(cpu)	(arch_get_max_cpu_capacity(cpu))
+#define topology_cpu_capacity(cpu)	(arch_get_cpu_capacity(cpu))
+#define topology_cpu_throttling(cpu)	(arch_get_cpu_throttling(cpu))
+#define topology_cpu_inv_power_en(void)	(arch_get_invariant_power_enabled())
+#endif /* CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY */
+
+#define mc_capable()	(cpu_topology[0].socket_id != -1)
+#define smt_capable()	(cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask);
+
+#ifdef CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE
+
+#if defined (CONFIG_HMP_PACK_SMALL_TASK) && !defined(CONFIG_MTK_SCHED_CMP)
+/* Common values for CPUs */
+#ifndef SD_CPU_INIT
+#define SD_CPU_INIT (struct sched_domain) {	\
+	.min_interval		= 1,		\
+	.max_interval		= 4,		\
+	.busy_factor		= 64,		\
+	.imbalance_pct		= 125,		\
+	.cache_nice_tries	= 1,
\ + .busy_idx = 2, \ + .idle_idx = 1, \ + .newidle_idx = 0, \ + .wake_idx = 0, \ + .forkexec_idx = 0, \ + \ + .flags = 0*SD_LOAD_BALANCE \ + | 1*SD_BALANCE_NEWIDLE \ + | 1*SD_BALANCE_EXEC \ + | 1*SD_BALANCE_FORK \ + | 0*SD_BALANCE_WAKE \ + | 1*SD_WAKE_AFFINE \ + | 0*SD_SHARE_CPUPOWER \ + | 0*SD_SHARE_PKG_RESOURCES \ + | arch_sd_share_power_line() \ + | 0*SD_SERIALIZE \ + , \ + .last_balance = jiffies, \ + .balance_interval = 1, \ +} +#endif +#endif /* CONFIG_HMP_PACK_SMALL_TASK */ + +#endif /* CONFIG_DISABLE_CPU_SCHED_DOMAIN_BALANCE */ + +/* CPU cluster functions */ +extern void arch_build_cpu_topology_domain(void); +extern int arch_cpu_is_big(unsigned int cpu); +extern int arch_cpu_is_little(unsigned int cpu); +extern int arch_is_multi_cluster(void); +extern int arch_is_big_little(void); +extern int arch_get_nr_clusters(void); +extern int arch_get_cluster_id(unsigned int cpu); +extern void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id); +extern void arch_get_big_little_cpus(struct cpumask *big, struct cpumask *little); + +#else /* !CONFIG_ARM_CPU_TOPOLOGY */ + +static inline void init_cpu_topology(void) { } +static inline void store_cpu_topology(unsigned int cpuid) { } +static inline int cluster_to_logical_mask(unsigned int socket_id, + cpumask_t *cluster_mask) { return -EINVAL; } + +static inline void arch_build_cpu_topology_domain(void) {} +static inline int arch_cpu_is_big(unsigned int cpu) { return 0; } +static inline int arch_cpu_is_little(unsigned int cpu) { return 1; } +static inline int arch_is_multi_cluster(void) { return 0; } +static inline int arch_is_big_little(void) { return 0; } +static inline int arch_get_nr_clusters(void) { return 1; } +static inline int arch_get_cluster_id(unsigned int cpu) { return 0; } +static inline void arch_get_cluster_cpus(struct cpumask *cpus, int cluster_id) +{ + cpumask_clear(cpus); + if (0 == cluster_id) { + unsigned int cpu; + for_each_possible_cpu(cpu) + cpumask_set_cpu(cpu, cpus); + } +} +static inline void arch_get_big_little_cpus(struct cpumask *big,struct cpumask *little) +{ + unsigned int cpu; + cpumask_clear(big); + cpumask_clear(little); + for_each_possible_cpu(cpu) + cpumask_set_cpu(cpu, little); +} + +#endif /* CONFIG_ARM_CPU_TOPOLOGY */ + +#include <asm-generic/topology.h> + +#endif /* _ASM_ARM_TOPOLOGY_H */ diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h new file mode 100644 index 000000000..5921cf1c7 --- /dev/null +++ b/arch/arm64/include/asm/traps.h @@ -0,0 +1,49 @@ +/* + * Based on arch/arm/include/asm/traps.h + * + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#ifndef __ASM_TRAP_H +#define __ASM_TRAP_H + +#include <linux/list.h> + +struct undef_hook { + struct list_head node; + u32 instr_mask; + u32 instr_val; + u32 pstate_mask; + u32 pstate_val; + int (*fn)(struct pt_regs *regs, unsigned int instr); +}; + +void register_undef_hook(struct undef_hook *hook); + +#ifdef CONFIG_MEDIATEK_SOLUTION +struct pt_regs; + +int register_async_abort_handler(void (*fn)(struct pt_regs *regs, void *), void *priv); +#endif + +static inline int in_exception_text(unsigned long ptr) +{ + extern char __exception_text_start[]; + extern char __exception_text_end[]; + + return ptr >= (unsigned long)&__exception_text_start && + ptr < (unsigned long)&__exception_text_end; +} + +#endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h new file mode 100644 index 000000000..7ecc2b238 --- /dev/null +++ b/arch/arm64/include/asm/uaccess.h @@ -0,0 +1,299 @@ +/* + * Based on arch/arm/include/asm/uaccess.h + * + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_UACCESS_H +#define __ASM_UACCESS_H + +/* + * User space memory access functions + */ +#include <linux/string.h> +#include <linux/thread_info.h> + +#include <asm/ptrace.h> +#include <asm/errno.h> +#include <asm/memory.h> +#include <asm/compiler.h> + +#define VERIFY_READ 0 +#define VERIFY_WRITE 1 + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + unsigned long insn, fixup; +}; + +extern int fixup_exception(struct pt_regs *regs); + +#define KERNEL_DS (-1UL) +#define get_ds() (KERNEL_DS) + +#define USER_DS TASK_SIZE_64 +#define get_fs() (current_thread_info()->addr_limit) + +static inline void set_fs(mm_segment_t fs) +{ + current_thread_info()->addr_limit = fs; +} + +#define segment_eq(a,b) ((a) == (b)) + +/* + * Return 1 if addr < current->addr_limit, 0 otherwise. + */ +#define __addr_ok(addr) \ +({ \ + unsigned long flag; \ + asm("cmp %1, %0; cset %0, lo" \ + : "=&r" (flag) \ + : "r" (addr), "0" (current_thread_info()->addr_limit) \ + : "cc"); \ + flag; \ +}) + +/* + * Test whether a block of memory is a valid user space address. + * Returns 1 if the range is valid, 0 otherwise. + * + * This is equivalent to the following test: + * (u65)addr + (u65)size < (u65)current->addr_limit + * + * This needs 65-bit arithmetic. 
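+ *
+ * The asm below gets that extra bit from the carry flag: "adds"
+ * computes addr + size and sets C on unsigned overflow, "ccmp ... cc"
+ * compares the sum against addr_limit only if the add did not carry
+ * (and otherwise forces a failing result by setting nzcv to #2, i.e. C),
+ * and "cset ... cc" yields 1 exactly when the whole block fits below
+ * addr_limit.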
+ */ +#define __range_ok(addr, size) \ +({ \ + unsigned long flag, roksum; \ + __chk_user_ptr(addr); \ + asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc" \ + : "=&r" (flag), "=&r" (roksum) \ + : "1" (addr), "Ir" (size), \ + "r" (current_thread_info()->addr_limit) \ + : "cc"); \ + flag; \ +}) + +#define access_ok(type, addr, size) __range_ok(addr, size) + +/* + * The "__xxx" versions of the user access functions do not verify the address + * space - it must have been done previously with a separate "access_ok()" + * call. + * + * The "__xxx_error" versions set the third argument to -EFAULT if an error + * occurs, and leave it unchanged on success. + */ +#define __get_user_asm(instr, reg, x, addr, err) \ + asm volatile( \ + "1: " instr " " reg "1, [%2]\n" \ + "2:\n" \ + " .section .fixup, \"ax\"\n" \ + " .align 2\n" \ + "3: mov %w0, %3\n" \ + " mov %1, #0\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .quad 1b, 3b\n" \ + " .previous" \ + : "+r" (err), "=&r" (x) \ + : "r" (addr), "i" (-EFAULT)) + +#define __get_user_err(x, ptr, err) \ +do { \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \ + break; \ + case 2: \ + __get_user_asm("ldrh", "%w", __gu_val, (ptr), (err)); \ + break; \ + case 4: \ + __get_user_asm("ldr", "%w", __gu_val, (ptr), (err)); \ + break; \ + case 8: \ + __get_user_asm("ldr", "%", __gu_val, (ptr), (err)); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + (x) = (__typeof__(*(ptr)))__gu_val; \ +} while (0) + +#define __get_user(x, ptr) \ +({ \ + int __gu_err = 0; \ + __get_user_err((x), (ptr), __gu_err); \ + __gu_err; \ +}) + +#define __get_user_error(x, ptr, err) \ +({ \ + __get_user_err((x), (ptr), (err)); \ + (void)0; \ +}) + +#define __get_user_unaligned __get_user + +#define get_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ + __get_user((x), __p) : \ + ((x) = 0, -EFAULT); \ +}) + +#define __put_user_asm(instr, reg, x, addr, err) \ + asm volatile( \ + "1: " instr " " reg "1, [%2]\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %w0, %3\n" \ + " b 2b\n" \ + " .previous\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 3\n" \ + " .quad 1b, 3b\n" \ + " .previous" \ + : "+r" (err) \ + : "r" (x), "r" (addr), "i" (-EFAULT)) + +#define __put_user_err(x, ptr, err) \ +do { \ + __typeof__(*(ptr)) __pu_val = (x); \ + __chk_user_ptr(ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + __put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \ + break; \ + case 2: \ + __put_user_asm("strh", "%w", __pu_val, (ptr), (err)); \ + break; \ + case 4: \ + __put_user_asm("str", "%w", __pu_val, (ptr), (err)); \ + break; \ + case 8: \ + __put_user_asm("str", "%", __pu_val, (ptr), (err)); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +} while (0) + +#define __put_user(x, ptr) \ +({ \ + int __pu_err = 0; \ + __put_user_err((x), (ptr), __pu_err); \ + __pu_err; \ +}) + +#define __put_user_error(x, ptr, err) \ +({ \ + __put_user_err((x), (ptr), (err)); \ + (void)0; \ +}) + +#define __put_user_unaligned __put_user + +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? 
\ + __put_user((x), __p) : \ + -EFAULT; \ +}) + +extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n); +extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n); +extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); + +extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); +extern unsigned long __must_check __strnlen_user(const char __user *s, long n); + +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n)) + n = __copy_from_user(to, from, n); + else /* security hole - plug it */ + memset(to, 0, n); + return n; +} + +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +} + +static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) +{ + if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)) + n = __copy_in_user(to, from, n); + return n; +} + +#define __copy_to_user_inatomic __copy_to_user +#define __copy_from_user_inatomic __copy_from_user + +static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) +{ + if (access_ok(VERIFY_WRITE, to, n)) + n = __clear_user(to, n); + return n; +} + +static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count) +{ + long res = -EFAULT; + if (access_ok(VERIFY_READ, src, 1)) + res = __strncpy_from_user(dst, src, count); + return res; +} + +#define strlen_user(s) strnlen_user(s, ~0UL >> 1) + +static inline long __must_check strnlen_user(const char __user *s, long n) +{ + unsigned long res = 0; + + if (__addr_ok(s)) + res = __strnlen_user(s, n); + + return res; +} + +#endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h new file mode 100644 index 000000000..42e04c877 --- /dev/null +++ b/arch/arm64/include/asm/ucontext.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_UCONTEXT_H +#define __ASM_UCONTEXT_H + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + sigset_t uc_sigmask; + /* glibc uses a 1024-bit sigset_t */ + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; + /* last for future expansion */ + struct sigcontext uc_mcontext; +}; + +#endif /* __ASM_UCONTEXT_H */ diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h new file mode 100644 index 000000000..f67d0ec20 --- /dev/null +++ b/arch/arm64/include/asm/unistd.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2012 ARM Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifdef CONFIG_COMPAT +#define __ARCH_WANT_COMPAT_STAT64 +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_PAUSE +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_LLSEEK +#define __ARCH_WANT_SYS_NICE +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_COMPAT_SYS_SENDFILE +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK + +/* + * Compat syscall numbers used by the AArch64 kernel. + */ +#define __NR_compat_restart_syscall 0 +#define __NR_compat_exit 1 +#define __NR_compat_read 3 +#define __NR_compat_write 4 +#define __NR_compat_sigreturn 119 +#define __NR_compat_rt_sigreturn 173 + +/* + * The following SVCs are ARM private. + */ +#define __ARM_NR_COMPAT_BASE 0x0f0000 +#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) +#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) + +#define __NR_compat_syscalls 384 +#endif + +#define __ARCH_WANT_SYS_CLONE +#include <uapi/asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h new file mode 100644 index 000000000..76d094565 --- /dev/null +++ b/arch/arm64/include/asm/unistd32.h @@ -0,0 +1,791 @@ +/* + * AArch32 (compat) system call definitions. + * + * Copyright (C) 2001-2005 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __SYSCALL +#define __SYSCALL(x, y) +#endif + +#define __NR_restart_syscall 0 +__SYSCALL(__NR_restart_syscall, sys_restart_syscall) +#define __NR_exit 1 +__SYSCALL(__NR_exit, sys_exit) +#define __NR_fork 2 +__SYSCALL(__NR_fork, sys_fork) +#define __NR_read 3 +__SYSCALL(__NR_read, sys_read) +#define __NR_write 4 +__SYSCALL(__NR_write, sys_write) +#define __NR_open 5 +__SYSCALL(__NR_open, compat_sys_open) +#define __NR_close 6 +__SYSCALL(__NR_close, sys_close) + /* 7 was sys_waitpid */ +__SYSCALL(7, sys_ni_syscall) +#define __NR_creat 8 +__SYSCALL(__NR_creat, sys_creat) +#define __NR_link 9 +__SYSCALL(__NR_link, sys_link) +#define __NR_unlink 10 +__SYSCALL(__NR_unlink, sys_unlink) +#define __NR_execve 11 +__SYSCALL(__NR_execve, compat_sys_execve) +#define __NR_chdir 12 +__SYSCALL(__NR_chdir, sys_chdir) + /* 13 was sys_time */ +__SYSCALL(13, sys_ni_syscall) +#define __NR_mknod 14 +__SYSCALL(__NR_mknod, sys_mknod) +#define __NR_chmod 15 +__SYSCALL(__NR_chmod, sys_chmod) +#define __NR_lchown 16 +__SYSCALL(__NR_lchown, sys_lchown16) + /* 17 was sys_break */ +__SYSCALL(17, sys_ni_syscall) + /* 18 was sys_stat */ +__SYSCALL(18, sys_ni_syscall) +#define __NR_lseek 19 +__SYSCALL(__NR_lseek, compat_sys_lseek) +#define __NR_getpid 20 +__SYSCALL(__NR_getpid, sys_getpid) +#define __NR_mount 21 +__SYSCALL(__NR_mount, compat_sys_mount) + /* 22 was sys_umount */ +__SYSCALL(22, sys_ni_syscall) +#define __NR_setuid 23 +__SYSCALL(__NR_setuid, sys_setuid16) +#define __NR_getuid 24 +__SYSCALL(__NR_getuid, sys_getuid16) + /* 25 was sys_stime */ +__SYSCALL(25, sys_ni_syscall) +#define __NR_ptrace 26 +__SYSCALL(__NR_ptrace, compat_sys_ptrace) + /* 27 was sys_alarm */ +__SYSCALL(27, sys_ni_syscall) + /* 28 was sys_fstat */ +__SYSCALL(28, sys_ni_syscall) +#define __NR_pause 29 +__SYSCALL(__NR_pause, sys_pause) + /* 30 was sys_utime */ +__SYSCALL(30, sys_ni_syscall) + /* 31 was sys_stty */ +__SYSCALL(31, sys_ni_syscall) + /* 32 was sys_gtty */ +__SYSCALL(32, sys_ni_syscall) +#define __NR_access 33 +__SYSCALL(__NR_access, sys_access) +#define __NR_nice 34 +__SYSCALL(__NR_nice, sys_nice) + /* 35 was sys_ftime */ +__SYSCALL(35, sys_ni_syscall) +#define __NR_sync 36 +__SYSCALL(__NR_sync, sys_sync) +#define __NR_kill 37 +__SYSCALL(__NR_kill, sys_kill) +#define __NR_rename 38 +__SYSCALL(__NR_rename, sys_rename) +#define __NR_mkdir 39 +__SYSCALL(__NR_mkdir, sys_mkdir) +#define __NR_rmdir 40 +__SYSCALL(__NR_rmdir, sys_rmdir) +#define __NR_dup 41 +__SYSCALL(__NR_dup, sys_dup) +#define __NR_pipe 42 +__SYSCALL(__NR_pipe, sys_pipe) +#define __NR_times 43 +__SYSCALL(__NR_times, compat_sys_times) + /* 44 was sys_prof */ +__SYSCALL(44, sys_ni_syscall) +#define __NR_brk 45 +__SYSCALL(__NR_brk, sys_brk) +#define __NR_setgid 46 +__SYSCALL(__NR_setgid, sys_setgid16) +#define __NR_getgid 47 +__SYSCALL(__NR_getgid, sys_getgid16) + /* 48 was sys_signal */ +__SYSCALL(48, sys_ni_syscall) +#define __NR_geteuid 49 +__SYSCALL(__NR_geteuid, sys_geteuid16) +#define __NR_getegid 50 +__SYSCALL(__NR_getegid, sys_getegid16) +#define __NR_acct 51 +__SYSCALL(__NR_acct, sys_acct) +#define __NR_umount2 52 +__SYSCALL(__NR_umount2, sys_umount) + /* 53 was sys_lock */ +__SYSCALL(53, sys_ni_syscall) +#define __NR_ioctl 54 +__SYSCALL(__NR_ioctl, compat_sys_ioctl) +#define __NR_fcntl 55 +__SYSCALL(__NR_fcntl, compat_sys_fcntl) + /* 56 was sys_mpx */ +__SYSCALL(56, sys_ni_syscall) +#define __NR_setpgid 57 +__SYSCALL(__NR_setpgid, sys_setpgid) + /* 58 was sys_ulimit */ +__SYSCALL(58, sys_ni_syscall) + /* 59 was sys_olduname */ +__SYSCALL(59, 
sys_ni_syscall) +#define __NR_umask 60 +__SYSCALL(__NR_umask, sys_umask) +#define __NR_chroot 61 +__SYSCALL(__NR_chroot, sys_chroot) +#define __NR_ustat 62 +__SYSCALL(__NR_ustat, compat_sys_ustat) +#define __NR_dup2 63 +__SYSCALL(__NR_dup2, sys_dup2) +#define __NR_getppid 64 +__SYSCALL(__NR_getppid, sys_getppid) +#define __NR_getpgrp 65 +__SYSCALL(__NR_getpgrp, sys_getpgrp) +#define __NR_setsid 66 +__SYSCALL(__NR_setsid, sys_setsid) +#define __NR_sigaction 67 +__SYSCALL(__NR_sigaction, compat_sys_sigaction) + /* 68 was sys_sgetmask */ +__SYSCALL(68, sys_ni_syscall) + /* 69 was sys_ssetmask */ +__SYSCALL(69, sys_ni_syscall) +#define __NR_setreuid 70 +__SYSCALL(__NR_setreuid, sys_setreuid16) +#define __NR_setregid 71 +__SYSCALL(__NR_setregid, sys_setregid16) +#define __NR_sigsuspend 72 +__SYSCALL(__NR_sigsuspend, sys_sigsuspend) +#define __NR_sigpending 73 +__SYSCALL(__NR_sigpending, compat_sys_sigpending) +#define __NR_sethostname 74 +__SYSCALL(__NR_sethostname, sys_sethostname) +#define __NR_setrlimit 75 +__SYSCALL(__NR_setrlimit, compat_sys_setrlimit) + /* 76 was compat_sys_getrlimit */ +__SYSCALL(76, sys_ni_syscall) +#define __NR_getrusage 77 +__SYSCALL(__NR_getrusage, compat_sys_getrusage) +#define __NR_gettimeofday 78 +__SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday) +#define __NR_settimeofday 79 +__SYSCALL(__NR_settimeofday, compat_sys_settimeofday) +#define __NR_getgroups 80 +__SYSCALL(__NR_getgroups, sys_getgroups16) +#define __NR_setgroups 81 +__SYSCALL(__NR_setgroups, sys_setgroups16) + /* 82 was compat_sys_select */ +__SYSCALL(82, sys_ni_syscall) +#define __NR_symlink 83 +__SYSCALL(__NR_symlink, sys_symlink) + /* 84 was sys_lstat */ +__SYSCALL(84, sys_ni_syscall) +#define __NR_readlink 85 +__SYSCALL(__NR_readlink, sys_readlink) +#define __NR_uselib 86 +__SYSCALL(__NR_uselib, sys_uselib) +#define __NR_swapon 87 +__SYSCALL(__NR_swapon, sys_swapon) +#define __NR_reboot 88 +__SYSCALL(__NR_reboot, sys_reboot) + /* 89 was sys_readdir */ +__SYSCALL(89, sys_ni_syscall) + /* 90 was sys_mmap */ +__SYSCALL(90, sys_ni_syscall) +#define __NR_munmap 91 +__SYSCALL(__NR_munmap, sys_munmap) +#define __NR_truncate 92 +__SYSCALL(__NR_truncate, compat_sys_truncate) +#define __NR_ftruncate 93 +__SYSCALL(__NR_ftruncate, compat_sys_ftruncate) +#define __NR_fchmod 94 +__SYSCALL(__NR_fchmod, sys_fchmod) +#define __NR_fchown 95 +__SYSCALL(__NR_fchown, sys_fchown16) +#define __NR_getpriority 96 +__SYSCALL(__NR_getpriority, sys_getpriority) +#define __NR_setpriority 97 +__SYSCALL(__NR_setpriority, sys_setpriority) + /* 98 was sys_profil */ +__SYSCALL(98, sys_ni_syscall) +#define __NR_statfs 99 +__SYSCALL(__NR_statfs, compat_sys_statfs) +#define __NR_fstatfs 100 +__SYSCALL(__NR_fstatfs, compat_sys_fstatfs) + /* 101 was sys_ioperm */ +__SYSCALL(101, sys_ni_syscall) + /* 102 was sys_socketcall */ +__SYSCALL(102, sys_ni_syscall) +#define __NR_syslog 103 +__SYSCALL(__NR_syslog, sys_syslog) +#define __NR_setitimer 104 +__SYSCALL(__NR_setitimer, compat_sys_setitimer) +#define __NR_getitimer 105 +__SYSCALL(__NR_getitimer, compat_sys_getitimer) +#define __NR_stat 106 +__SYSCALL(__NR_stat, compat_sys_newstat) +#define __NR_lstat 107 +__SYSCALL(__NR_lstat, compat_sys_newlstat) +#define __NR_fstat 108 +__SYSCALL(__NR_fstat, compat_sys_newfstat) + /* 109 was sys_uname */ +__SYSCALL(109, sys_ni_syscall) + /* 110 was sys_iopl */ +__SYSCALL(110, sys_ni_syscall) +#define __NR_vhangup 111 +__SYSCALL(__NR_vhangup, sys_vhangup) + /* 112 was sys_idle */ +__SYSCALL(112, sys_ni_syscall) + /* 113 was sys_syscall */ 
+__SYSCALL(113, sys_ni_syscall) +#define __NR_wait4 114 +__SYSCALL(__NR_wait4, compat_sys_wait4) +#define __NR_swapoff 115 +__SYSCALL(__NR_swapoff, sys_swapoff) +#define __NR_sysinfo 116 +__SYSCALL(__NR_sysinfo, compat_sys_sysinfo) + /* 117 was sys_ipc */ +__SYSCALL(117, sys_ni_syscall) +#define __NR_fsync 118 +__SYSCALL(__NR_fsync, sys_fsync) +#define __NR_sigreturn 119 +__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper) +#define __NR_clone 120 +__SYSCALL(__NR_clone, sys_clone) +#define __NR_setdomainname 121 +__SYSCALL(__NR_setdomainname, sys_setdomainname) +#define __NR_uname 122 +__SYSCALL(__NR_uname, sys_newuname) + /* 123 was sys_modify_ldt */ +__SYSCALL(123, sys_ni_syscall) +#define __NR_adjtimex 124 +__SYSCALL(__NR_adjtimex, compat_sys_adjtimex) +#define __NR_mprotect 125 +__SYSCALL(__NR_mprotect, sys_mprotect) +#define __NR_sigprocmask 126 +__SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask) + /* 127 was sys_create_module */ +__SYSCALL(127, sys_ni_syscall) +#define __NR_init_module 128 +__SYSCALL(__NR_init_module, sys_init_module) +#define __NR_delete_module 129 +__SYSCALL(__NR_delete_module, sys_delete_module) + /* 130 was sys_get_kernel_syms */ +__SYSCALL(130, sys_ni_syscall) +#define __NR_quotactl 131 +__SYSCALL(__NR_quotactl, sys_quotactl) +#define __NR_getpgid 132 +__SYSCALL(__NR_getpgid, sys_getpgid) +#define __NR_fchdir 133 +__SYSCALL(__NR_fchdir, sys_fchdir) +#define __NR_bdflush 134 +__SYSCALL(__NR_bdflush, sys_bdflush) +#define __NR_sysfs 135 +__SYSCALL(__NR_sysfs, sys_sysfs) +#define __NR_personality 136 +__SYSCALL(__NR_personality, sys_personality) + /* 137 was sys_afs_syscall */ +__SYSCALL(137, sys_ni_syscall) +#define __NR_setfsuid 138 +__SYSCALL(__NR_setfsuid, sys_setfsuid16) +#define __NR_setfsgid 139 +__SYSCALL(__NR_setfsgid, sys_setfsgid16) +#define __NR__llseek 140 +__SYSCALL(__NR__llseek, sys_llseek) +#define __NR_getdents 141 +__SYSCALL(__NR_getdents, compat_sys_getdents) +#define __NR__newselect 142 +__SYSCALL(__NR__newselect, compat_sys_select) +#define __NR_flock 143 +__SYSCALL(__NR_flock, sys_flock) +#define __NR_msync 144 +__SYSCALL(__NR_msync, sys_msync) +#define __NR_readv 145 +__SYSCALL(__NR_readv, compat_sys_readv) +#define __NR_writev 146 +__SYSCALL(__NR_writev, compat_sys_writev) +#define __NR_getsid 147 +__SYSCALL(__NR_getsid, sys_getsid) +#define __NR_fdatasync 148 +__SYSCALL(__NR_fdatasync, sys_fdatasync) +#define __NR__sysctl 149 +__SYSCALL(__NR__sysctl, compat_sys_sysctl) +#define __NR_mlock 150 +__SYSCALL(__NR_mlock, sys_mlock) +#define __NR_munlock 151 +__SYSCALL(__NR_munlock, sys_munlock) +#define __NR_mlockall 152 +__SYSCALL(__NR_mlockall, sys_mlockall) +#define __NR_munlockall 153 +__SYSCALL(__NR_munlockall, sys_munlockall) +#define __NR_sched_setparam 154 +__SYSCALL(__NR_sched_setparam, sys_sched_setparam) +#define __NR_sched_getparam 155 +__SYSCALL(__NR_sched_getparam, sys_sched_getparam) +#define __NR_sched_setscheduler 156 +__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler) +#define __NR_sched_getscheduler 157 +__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler) +#define __NR_sched_yield 158 +__SYSCALL(__NR_sched_yield, sys_sched_yield) +#define __NR_sched_get_priority_max 159 +__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) +#define __NR_sched_get_priority_min 160 +__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) +#define __NR_sched_rr_get_interval 161 +__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval) +#define __NR_nanosleep 162 +__SYSCALL(__NR_nanosleep, 
compat_sys_nanosleep) +#define __NR_mremap 163 +__SYSCALL(__NR_mremap, sys_mremap) +#define __NR_setresuid 164 +__SYSCALL(__NR_setresuid, sys_setresuid16) +#define __NR_getresuid 165 +__SYSCALL(__NR_getresuid, sys_getresuid16) + /* 166 was sys_vm86 */ +__SYSCALL(166, sys_ni_syscall) + /* 167 was sys_query_module */ +__SYSCALL(167, sys_ni_syscall) +#define __NR_poll 168 +__SYSCALL(__NR_poll, sys_poll) +#define __NR_nfsservctl 169 +__SYSCALL(__NR_nfsservctl, sys_ni_syscall) +#define __NR_setresgid 170 +__SYSCALL(__NR_setresgid, sys_setresgid16) +#define __NR_getresgid 171 +__SYSCALL(__NR_getresgid, sys_getresgid16) +#define __NR_prctl 172 +__SYSCALL(__NR_prctl, sys_prctl) +#define __NR_rt_sigreturn 173 +__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper) +#define __NR_rt_sigaction 174 +__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction) +#define __NR_rt_sigprocmask 175 +__SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask) +#define __NR_rt_sigpending 176 +__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending) +#define __NR_rt_sigtimedwait 177 +__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait) +#define __NR_rt_sigqueueinfo 178 +__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo) +#define __NR_rt_sigsuspend 179 +__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend) +#define __NR_pread64 180 +__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper) +#define __NR_pwrite64 181 +__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper) +#define __NR_chown 182 +__SYSCALL(__NR_chown, sys_chown16) +#define __NR_getcwd 183 +__SYSCALL(__NR_getcwd, sys_getcwd) +#define __NR_capget 184 +__SYSCALL(__NR_capget, sys_capget) +#define __NR_capset 185 +__SYSCALL(__NR_capset, sys_capset) +#define __NR_sigaltstack 186 +__SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack) +#define __NR_sendfile 187 +__SYSCALL(__NR_sendfile, compat_sys_sendfile) + /* 188 reserved */ +__SYSCALL(188, sys_ni_syscall) + /* 189 reserved */ +__SYSCALL(189, sys_ni_syscall) +#define __NR_vfork 190 +__SYSCALL(__NR_vfork, sys_vfork) +#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ +__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */ +#define __NR_mmap2 192 +__SYSCALL(__NR_mmap2, sys_mmap_pgoff) +#define __NR_truncate64 193 +__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper) +#define __NR_ftruncate64 194 +__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper) +#define __NR_stat64 195 +__SYSCALL(__NR_stat64, sys_stat64) +#define __NR_lstat64 196 +__SYSCALL(__NR_lstat64, sys_lstat64) +#define __NR_fstat64 197 +__SYSCALL(__NR_fstat64, sys_fstat64) +#define __NR_lchown32 198 +__SYSCALL(__NR_lchown32, sys_lchown) +#define __NR_getuid32 199 +__SYSCALL(__NR_getuid32, sys_getuid) +#define __NR_getgid32 200 +__SYSCALL(__NR_getgid32, sys_getgid) +#define __NR_geteuid32 201 +__SYSCALL(__NR_geteuid32, sys_geteuid) +#define __NR_getegid32 202 +__SYSCALL(__NR_getegid32, sys_getegid) +#define __NR_setreuid32 203 +__SYSCALL(__NR_setreuid32, sys_setreuid) +#define __NR_setregid32 204 +__SYSCALL(__NR_setregid32, sys_setregid) +#define __NR_getgroups32 205 +__SYSCALL(__NR_getgroups32, sys_getgroups) +#define __NR_setgroups32 206 +__SYSCALL(__NR_setgroups32, sys_setgroups) +#define __NR_fchown32 207 +__SYSCALL(__NR_fchown32, sys_fchown) +#define __NR_setresuid32 208 +__SYSCALL(__NR_setresuid32, sys_setresuid) +#define __NR_getresuid32 209 +__SYSCALL(__NR_getresuid32, sys_getresuid) +#define __NR_setresgid32 210 +__SYSCALL(__NR_setresgid32, sys_setresgid) +#define __NR_getresgid32 211 
+__SYSCALL(__NR_getresgid32, sys_getresgid) +#define __NR_chown32 212 +__SYSCALL(__NR_chown32, sys_chown) +#define __NR_setuid32 213 +__SYSCALL(__NR_setuid32, sys_setuid) +#define __NR_setgid32 214 +__SYSCALL(__NR_setgid32, sys_setgid) +#define __NR_setfsuid32 215 +__SYSCALL(__NR_setfsuid32, sys_setfsuid) +#define __NR_setfsgid32 216 +__SYSCALL(__NR_setfsgid32, sys_setfsgid) +#define __NR_getdents64 217 +__SYSCALL(__NR_getdents64, compat_sys_getdents64) +#define __NR_pivot_root 218 +__SYSCALL(__NR_pivot_root, sys_pivot_root) +#define __NR_mincore 219 +__SYSCALL(__NR_mincore, sys_mincore) +#define __NR_madvise 220 +__SYSCALL(__NR_madvise, sys_madvise) +#define __NR_fcntl64 221 +__SYSCALL(__NR_fcntl64, compat_sys_fcntl64) + /* 222 for tux */ +__SYSCALL(222, sys_ni_syscall) + /* 223 is unused */ +__SYSCALL(223, sys_ni_syscall) +#define __NR_gettid 224 +__SYSCALL(__NR_gettid, sys_gettid) +#define __NR_readahead 225 +__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper) +#define __NR_setxattr 226 +__SYSCALL(__NR_setxattr, sys_setxattr) +#define __NR_lsetxattr 227 +__SYSCALL(__NR_lsetxattr, sys_lsetxattr) +#define __NR_fsetxattr 228 +__SYSCALL(__NR_fsetxattr, sys_fsetxattr) +#define __NR_getxattr 229 +__SYSCALL(__NR_getxattr, sys_getxattr) +#define __NR_lgetxattr 230 +__SYSCALL(__NR_lgetxattr, sys_lgetxattr) +#define __NR_fgetxattr 231 +__SYSCALL(__NR_fgetxattr, sys_fgetxattr) +#define __NR_listxattr 232 +__SYSCALL(__NR_listxattr, sys_listxattr) +#define __NR_llistxattr 233 +__SYSCALL(__NR_llistxattr, sys_llistxattr) +#define __NR_flistxattr 234 +__SYSCALL(__NR_flistxattr, sys_flistxattr) +#define __NR_removexattr 235 +__SYSCALL(__NR_removexattr, sys_removexattr) +#define __NR_lremovexattr 236 +__SYSCALL(__NR_lremovexattr, sys_lremovexattr) +#define __NR_fremovexattr 237 +__SYSCALL(__NR_fremovexattr, sys_fremovexattr) +#define __NR_tkill 238 +__SYSCALL(__NR_tkill, sys_tkill) +#define __NR_sendfile64 239 +__SYSCALL(__NR_sendfile64, sys_sendfile64) +#define __NR_futex 240 +__SYSCALL(__NR_futex, compat_sys_futex) +#define __NR_sched_setaffinity 241 +__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity) +#define __NR_sched_getaffinity 242 +__SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity) +#define __NR_io_setup 243 +__SYSCALL(__NR_io_setup, compat_sys_io_setup) +#define __NR_io_destroy 244 +__SYSCALL(__NR_io_destroy, sys_io_destroy) +#define __NR_io_getevents 245 +__SYSCALL(__NR_io_getevents, compat_sys_io_getevents) +#define __NR_io_submit 246 +__SYSCALL(__NR_io_submit, compat_sys_io_submit) +#define __NR_io_cancel 247 +__SYSCALL(__NR_io_cancel, sys_io_cancel) +#define __NR_exit_group 248 +__SYSCALL(__NR_exit_group, sys_exit_group) +#define __NR_lookup_dcookie 249 +__SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie) +#define __NR_epoll_create 250 +__SYSCALL(__NR_epoll_create, sys_epoll_create) +#define __NR_epoll_ctl 251 +__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) +#define __NR_epoll_wait 252 +__SYSCALL(__NR_epoll_wait, sys_epoll_wait) +#define __NR_remap_file_pages 253 +__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) + /* 254 for set_thread_area */ +__SYSCALL(254, sys_ni_syscall) + /* 255 for get_thread_area */ +__SYSCALL(255, sys_ni_syscall) +#define __NR_set_tid_address 256 +__SYSCALL(__NR_set_tid_address, sys_set_tid_address) +#define __NR_timer_create 257 +__SYSCALL(__NR_timer_create, compat_sys_timer_create) +#define __NR_timer_settime 258 +__SYSCALL(__NR_timer_settime, compat_sys_timer_settime) +#define __NR_timer_gettime 259 
+__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime) +#define __NR_timer_getoverrun 260 +__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) +#define __NR_timer_delete 261 +__SYSCALL(__NR_timer_delete, sys_timer_delete) +#define __NR_clock_settime 262 +__SYSCALL(__NR_clock_settime, compat_sys_clock_settime) +#define __NR_clock_gettime 263 +__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime) +#define __NR_clock_getres 264 +__SYSCALL(__NR_clock_getres, compat_sys_clock_getres) +#define __NR_clock_nanosleep 265 +__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep) +#define __NR_statfs64 266 +__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper) +#define __NR_fstatfs64 267 +__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper) +#define __NR_tgkill 268 +__SYSCALL(__NR_tgkill, sys_tgkill) +#define __NR_utimes 269 +__SYSCALL(__NR_utimes, compat_sys_utimes) +#define __NR_arm_fadvise64_64 270 +__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper) +#define __NR_pciconfig_iobase 271 +__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase) +#define __NR_pciconfig_read 272 +__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read) +#define __NR_pciconfig_write 273 +__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write) +#define __NR_mq_open 274 +__SYSCALL(__NR_mq_open, compat_sys_mq_open) +#define __NR_mq_unlink 275 +__SYSCALL(__NR_mq_unlink, sys_mq_unlink) +#define __NR_mq_timedsend 276 +__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend) +#define __NR_mq_timedreceive 277 +__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive) +#define __NR_mq_notify 278 +__SYSCALL(__NR_mq_notify, compat_sys_mq_notify) +#define __NR_mq_getsetattr 279 +__SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr) +#define __NR_waitid 280 +__SYSCALL(__NR_waitid, compat_sys_waitid) +#define __NR_socket 281 +__SYSCALL(__NR_socket, sys_socket) +#define __NR_bind 282 +__SYSCALL(__NR_bind, sys_bind) +#define __NR_connect 283 +__SYSCALL(__NR_connect, sys_connect) +#define __NR_listen 284 +__SYSCALL(__NR_listen, sys_listen) +#define __NR_accept 285 +__SYSCALL(__NR_accept, sys_accept) +#define __NR_getsockname 286 +__SYSCALL(__NR_getsockname, sys_getsockname) +#define __NR_getpeername 287 +__SYSCALL(__NR_getpeername, sys_getpeername) +#define __NR_socketpair 288 +__SYSCALL(__NR_socketpair, sys_socketpair) +#define __NR_send 289 +__SYSCALL(__NR_send, sys_send) +#define __NR_sendto 290 +__SYSCALL(__NR_sendto, sys_sendto) +#define __NR_recv 291 +__SYSCALL(__NR_recv, compat_sys_recv) +#define __NR_recvfrom 292 +__SYSCALL(__NR_recvfrom, compat_sys_recvfrom) +#define __NR_shutdown 293 +__SYSCALL(__NR_shutdown, sys_shutdown) +#define __NR_setsockopt 294 +__SYSCALL(__NR_setsockopt, compat_sys_setsockopt) +#define __NR_getsockopt 295 +__SYSCALL(__NR_getsockopt, compat_sys_getsockopt) +#define __NR_sendmsg 296 +__SYSCALL(__NR_sendmsg, compat_sys_sendmsg) +#define __NR_recvmsg 297 +__SYSCALL(__NR_recvmsg, compat_sys_recvmsg) +#define __NR_semop 298 +__SYSCALL(__NR_semop, sys_semop) +#define __NR_semget 299 +__SYSCALL(__NR_semget, sys_semget) +#define __NR_semctl 300 +__SYSCALL(__NR_semctl, compat_sys_semctl) +#define __NR_msgsnd 301 +__SYSCALL(__NR_msgsnd, compat_sys_msgsnd) +#define __NR_msgrcv 302 +__SYSCALL(__NR_msgrcv, compat_sys_msgrcv) +#define __NR_msgget 303 +__SYSCALL(__NR_msgget, sys_msgget) +#define __NR_msgctl 304 +__SYSCALL(__NR_msgctl, compat_sys_msgctl) +#define __NR_shmat 305 +__SYSCALL(__NR_shmat, compat_sys_shmat) +#define __NR_shmdt 306 +__SYSCALL(__NR_shmdt, sys_shmdt) +#define __NR_shmget 307 
+__SYSCALL(__NR_shmget, sys_shmget) +#define __NR_shmctl 308 +__SYSCALL(__NR_shmctl, compat_sys_shmctl) +#define __NR_add_key 309 +__SYSCALL(__NR_add_key, sys_add_key) +#define __NR_request_key 310 +__SYSCALL(__NR_request_key, sys_request_key) +#define __NR_keyctl 311 +__SYSCALL(__NR_keyctl, compat_sys_keyctl) +#define __NR_semtimedop 312 +__SYSCALL(__NR_semtimedop, compat_sys_semtimedop) +#define __NR_vserver 313 +__SYSCALL(__NR_vserver, sys_ni_syscall) +#define __NR_ioprio_set 314 +__SYSCALL(__NR_ioprio_set, sys_ioprio_set) +#define __NR_ioprio_get 315 +__SYSCALL(__NR_ioprio_get, sys_ioprio_get) +#define __NR_inotify_init 316 +__SYSCALL(__NR_inotify_init, sys_inotify_init) +#define __NR_inotify_add_watch 317 +__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) +#define __NR_inotify_rm_watch 318 +__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) +#define __NR_mbind 319 +__SYSCALL(__NR_mbind, compat_sys_mbind) +#define __NR_get_mempolicy 320 +__SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy) +#define __NR_set_mempolicy 321 +__SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy) +#define __NR_openat 322 +__SYSCALL(__NR_openat, compat_sys_openat) +#define __NR_mkdirat 323 +__SYSCALL(__NR_mkdirat, sys_mkdirat) +#define __NR_mknodat 324 +__SYSCALL(__NR_mknodat, sys_mknodat) +#define __NR_fchownat 325 +__SYSCALL(__NR_fchownat, sys_fchownat) +#define __NR_futimesat 326 +__SYSCALL(__NR_futimesat, compat_sys_futimesat) +#define __NR_fstatat64 327 +__SYSCALL(__NR_fstatat64, sys_fstatat64) +#define __NR_unlinkat 328 +__SYSCALL(__NR_unlinkat, sys_unlinkat) +#define __NR_renameat 329 +__SYSCALL(__NR_renameat, sys_renameat) +#define __NR_linkat 330 +__SYSCALL(__NR_linkat, sys_linkat) +#define __NR_symlinkat 331 +__SYSCALL(__NR_symlinkat, sys_symlinkat) +#define __NR_readlinkat 332 +__SYSCALL(__NR_readlinkat, sys_readlinkat) +#define __NR_fchmodat 333 +__SYSCALL(__NR_fchmodat, sys_fchmodat) +#define __NR_faccessat 334 +__SYSCALL(__NR_faccessat, sys_faccessat) +#define __NR_pselect6 335 +__SYSCALL(__NR_pselect6, compat_sys_pselect6) +#define __NR_ppoll 336 +__SYSCALL(__NR_ppoll, compat_sys_ppoll) +#define __NR_unshare 337 +__SYSCALL(__NR_unshare, sys_unshare) +#define __NR_set_robust_list 338 +__SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list) +#define __NR_get_robust_list 339 +__SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list) +#define __NR_splice 340 +__SYSCALL(__NR_splice, sys_splice) +#define __NR_sync_file_range2 341 +__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper) +#define __NR_tee 342 +__SYSCALL(__NR_tee, sys_tee) +#define __NR_vmsplice 343 +__SYSCALL(__NR_vmsplice, compat_sys_vmsplice) +#define __NR_move_pages 344 +__SYSCALL(__NR_move_pages, compat_sys_move_pages) +#define __NR_getcpu 345 +__SYSCALL(__NR_getcpu, sys_getcpu) +#define __NR_epoll_pwait 346 +__SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait) +#define __NR_kexec_load 347 +__SYSCALL(__NR_kexec_load, compat_sys_kexec_load) +#define __NR_utimensat 348 +__SYSCALL(__NR_utimensat, compat_sys_utimensat) +#define __NR_signalfd 349 +__SYSCALL(__NR_signalfd, compat_sys_signalfd) +#define __NR_timerfd_create 350 +__SYSCALL(__NR_timerfd_create, sys_timerfd_create) +#define __NR_eventfd 351 +__SYSCALL(__NR_eventfd, sys_eventfd) +#define __NR_fallocate 352 +__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper) +#define __NR_timerfd_settime 353 +__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime) +#define __NR_timerfd_gettime 354 +__SYSCALL(__NR_timerfd_gettime, 
compat_sys_timerfd_gettime) +#define __NR_signalfd4 355 +__SYSCALL(__NR_signalfd4, compat_sys_signalfd4) +#define __NR_eventfd2 356 +__SYSCALL(__NR_eventfd2, sys_eventfd2) +#define __NR_epoll_create1 357 +__SYSCALL(__NR_epoll_create1, sys_epoll_create1) +#define __NR_dup3 358 +__SYSCALL(__NR_dup3, sys_dup3) +#define __NR_pipe2 359 +__SYSCALL(__NR_pipe2, sys_pipe2) +#define __NR_inotify_init1 360 +__SYSCALL(__NR_inotify_init1, sys_inotify_init1) +#define __NR_preadv 361 +__SYSCALL(__NR_preadv, compat_sys_preadv) +#define __NR_pwritev 362 +__SYSCALL(__NR_pwritev, compat_sys_pwritev) +#define __NR_rt_tgsigqueueinfo 363 +__SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo) +#define __NR_perf_event_open 364 +__SYSCALL(__NR_perf_event_open, sys_perf_event_open) +#define __NR_recvmmsg 365 +__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg) +#define __NR_accept4 366 +__SYSCALL(__NR_accept4, sys_accept4) +#define __NR_fanotify_init 367 +__SYSCALL(__NR_fanotify_init, sys_fanotify_init) +#define __NR_fanotify_mark 368 +__SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark) +#define __NR_prlimit64 369 +__SYSCALL(__NR_prlimit64, sys_prlimit64) +#define __NR_name_to_handle_at 370 +__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) +#define __NR_open_by_handle_at 371 +__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at) +#define __NR_clock_adjtime 372 +__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime) +#define __NR_syncfs 373 +__SYSCALL(__NR_syncfs, sys_syncfs) +#define __NR_sendmmsg 374 +__SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg) +#define __NR_setns 375 +__SYSCALL(__NR_setns, sys_setns) +#define __NR_process_vm_readv 376 +__SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv) +#define __NR_process_vm_writev 377 +__SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev) +#define __NR_kcmp 378 +__SYSCALL(__NR_kcmp, sys_kcmp) +#define __NR_finit_module 379 +__SYSCALL(__NR_finit_module, sys_finit_module) +/* #define __NR_sched_setattr 380 */ +__SYSCALL(380, sys_ni_syscall) +/* #define __NR_sched_getattr 381 */ +__SYSCALL(381, sys_ni_syscall) +/* #define __NR_renameat2 382 */ +__SYSCALL(382, sys_ni_syscall) +#define __NR_seccomp 383 +__SYSCALL(__NR_seccomp, sys_seccomp) diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h new file mode 100644 index 000000000..839ce0031 --- /dev/null +++ b/arch/arm64/include/asm/vdso.h @@ -0,0 +1,41 @@ +/* + * Copyright (C) 2012 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_VDSO_H +#define __ASM_VDSO_H + +#ifdef __KERNEL__ + +/* + * Default link address for the vDSO. + * Since we randomise the VDSO mapping, there's little point in trying + * to prelink this. 
+ */ +#define VDSO_LBASE 0x0 + +#ifndef __ASSEMBLY__ + +#include <generated/vdso-offsets.h> + +#define VDSO_SYMBOL(base, name) \ +({ \ + (void *)(vdso_offset_##name - VDSO_LBASE + (unsigned long)(base)); \ +}) + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_VDSO_H */ diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h new file mode 100644 index 000000000..de6619967 --- /dev/null +++ b/arch/arm64/include/asm/vdso_datapage.h @@ -0,0 +1,43 @@ +/* + * Copyright (C) 2012 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __ASM_VDSO_DATAPAGE_H +#define __ASM_VDSO_DATAPAGE_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ + +struct vdso_data { + __u64 cs_cycle_last; /* Timebase at clocksource init */ + __u64 xtime_clock_sec; /* Kernel time */ + __u64 xtime_clock_nsec; + __u64 xtime_coarse_sec; /* Coarse time */ + __u64 xtime_coarse_nsec; + __u64 wtm_clock_sec; /* Wall to monotonic time */ + __u64 wtm_clock_nsec; + __u32 tb_seq_count; /* Timebase sequence counter */ + __u32 cs_mult; /* Clocksource multiplier */ + __u32 cs_shift; /* Clocksource shift */ + __u32 tz_minuteswest; /* Whacky timezone stuff */ + __u32 tz_dsttime; + __u32 use_syscall; +}; + +#endif /* !__ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* __ASM_VDSO_DATAPAGE_H */ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h new file mode 100644 index 000000000..26e310c54 --- /dev/null +++ b/arch/arm64/include/asm/virt.h @@ -0,0 +1,67 @@ +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM__VIRT_H +#define __ASM__VIRT_H + +#define BOOT_CPU_MODE_EL2 (0x0e12b007) + +#ifndef __ASSEMBLY__ +#include <asm/cacheflush.h> + +/* + * __boot_cpu_mode records what mode CPUs were booted in. + * A correctly-implemented bootloader must start all CPUs in the same mode: + * In this case, both 32bit halves of __boot_cpu_mode will contain the + * same value (either 0 if booted in EL1, BOOT_CPU_MODE_EL2 if booted in EL2). + * + * Should the bootloader fail to do this, the two values will be different. + * This allows the kernel to flag an error when the secondaries have come up. 
+ */ +extern u32 __boot_cpu_mode[2]; + +void __hyp_set_vectors(phys_addr_t phys_vector_base); +phys_addr_t __hyp_get_vectors(void); + +static inline void sync_boot_mode(void) +{ + /* + * As secondaries write to __boot_cpu_mode with caches disabled, we + * must flush the corresponding cache entries to ensure the visibility + * of their writes. + */ + __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode)); +} + +/* Reports the availability of HYP mode */ +static inline bool is_hyp_mode_available(void) +{ + sync_boot_mode(); + return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 && + __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2); +} + +/* Check if the bootloader has booted CPUs in different modes */ +static inline bool is_hyp_mode_mismatched(void) +{ + sync_boot_mode(); + return __boot_cpu_mode[0] != __boot_cpu_mode[1]; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* ! __ASM__VIRT_H */
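
A note on the __boot_cpu_mode convention in virt.h above: the two recorded halves gate is_hyp_mode_available() and is_hyp_mode_mismatched(), with a CPU booted in EL1 recorded as 0 and one booted in EL2 recorded as BOOT_CPU_MODE_EL2, per the header comment. The following is a minimal, self-contained C sketch of that logic — illustrative only, not kernel code; the simulated boot values are invented:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BOOT_CPU_MODE_EL2 0x0e12b007    /* value from virt.h above */

    /* Stand-in for the kernel's __boot_cpu_mode[2]: [0] is written by the
     * boot CPU, [1] by any secondary that came up in a different mode. */
    static uint32_t boot_cpu_mode[2] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL2 };

    static bool hyp_available(void)
    {
        return boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
               boot_cpu_mode[1] == BOOT_CPU_MODE_EL2;
    }

    static bool hyp_mismatched(void)
    {
        return boot_cpu_mode[0] != boot_cpu_mode[1];
    }

    int main(void)
    {
        printf("available=%d mismatched=%d\n", hyp_available(), hyp_mismatched());
        boot_cpu_mode[1] = 0;    /* simulate a secondary booted in EL1 */
        printf("available=%d mismatched=%d\n", hyp_available(), hyp_mismatched());
        return 0;
    }

The real helpers additionally call sync_boot_mode() first: secondaries write the array with caches disabled, so the reader must flush the corresponding cache lines before trusting the values.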

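Two further notes on the uaccess.h file above.

First, its opening comment describes the exception table as pairs of (faulting-instruction, fixup) addresses — the ".quad 1b, 3b" entries emitted by __get_user_asm and __put_user_asm. Conceptually, the fault handler resolves a faulting PC to a fixup address with a lookup like this hypothetical helper (a linear sketch; the real kernel sorts __ex_table and binary-searches it, and the addresses below are invented stand-ins for the "1b"/"3b" labels):

    #include <stdio.h>

    struct exception_table_entry { unsigned long insn, fixup; };

    static unsigned long search_fixup(const struct exception_table_entry *tbl,
                                      int n, unsigned long fault_pc)
    {
        for (int i = 0; i < n; i++)
            if (tbl[i].insn == fault_pc)
                return tbl[i].fixup;    /* resume execution here */
        return 0;                       /* no fixup: genuine fault */
    }

    int main(void)
    {
        const struct exception_table_entry table[] = { { 0x1000, 0x3000 } };

        printf("fixup for 0x1000: 0x%lx\n", search_fixup(table, 1, 0x1000));
        printf("fixup for 0x2000: 0x%lx\n", search_fixup(table, 1, 0x2000));
        return 0;
    }

Second, the "65-bit arithmetic" that __range_ok() performs with its adds/ccmp/cset sequence can be written portably: the unsigned sum addr + size must not wrap around 64 bits and, mirroring the header comment (u65)addr + (u65)size < (u65)addr_limit, must stay strictly below the limit. A minimal sketch, assuming a 39-bit user address limit as a stand-in for TASK_SIZE_64:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool range_ok(uint64_t addr, uint64_t size, uint64_t limit)
    {
        uint64_t end = addr + size;

        /* end < addr means the add carried out of bit 63, i.e. the true
         * 65-bit sum reached 2^64 or more; reject that, then compare the
         * sum against the limit. */
        return end >= addr && end < limit;
    }

    int main(void)
    {
        const uint64_t limit = 1ULL << 39;    /* assumed user VA limit */

        printf("%d\n", range_ok(0x1000, 0x100, limit));   /* 1: in range */
        printf("%d\n", range_ok(limit - 8, 16, limit));   /* 0: crosses limit */
        printf("%d\n", range_ok(~0ULL, 2, limit));        /* 0: wraps */
        return 0;
    }

In the header this is done branchlessly in three instructions: adds computes addr + size and sets the carry flag on wrap, ccmp compares the sum with addr_limit only when carry is clear (otherwise forcing a failing flag state), and cset materialises the boolean result.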