author	franciscofranco <franciscofranco.1990@gmail.com>	2015-12-04 07:09:49 +0000
committer	Moyster <oysterized@gmail.com>	2016-09-10 12:07:12 +0200
commit	10e50e57efc8edc1a6d83c8aeb9089b0b3626779 (patch)
tree	d710d13992879ff916bc12752efdf784d2e700d7 /arch
parent	456d57a4dffc3231b7fe10714189736cf53c3320 (diff)
arm64: atomic: add missing macros
Signed-off-by: franciscofranco <franciscofranco.1990@gmail.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm64/include/asm/atomic.h	64
1 file changed, 64 insertions, 0 deletions
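
The hunk below generates a family of inline load/store helpers (ldx*/stx*, ldax*/stlx*, lda*/stl* at 64/32/16/8-bit widths) from three macros. For orientation only (a sketch of the preprocessor output, assuming the kernel's u32 from <linux/types.h>; not part of the patch itself), the instantiation _LD( ldax32, u32, "ldaxr", "w") expands to roughly:

/* Sketch of the expansion of _LD( ldax32, u32, "ldaxr", "w").
 * ldaxr is a load-acquire exclusive; the "w" register prefix
 * selects the 32-bit view of the register. */
static inline u32 ldax32(volatile u32 *p)
{
	u32 ret;

	asm volatile("ldaxr %w0, %1" : "=&r" (ret) : "Q" (*p) : "memory");
	return ret;
}

This is the helper that the existing cpu_relaxed_read_atomic() wrapper, visible as context at the top of the hunk, already relies on.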
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 76d5c219d..f40a4808c 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -41,6 +41,70 @@
#define cpu_relaxed_read_atomic(v) ldax32((volatile int *)&(v->counter))
/*
+ * Macros for generating inline functions to use special load and store
+ * instructions (exclusive and acquire/release).
+ */
+
+#define _LD(_name, _type, _inst, _reg) \
+static inline _type _name (volatile _type *p) \
+{ \
+ _type ret; \
+ asm volatile( \
+ _inst " %" _reg "0, %1": "=&r" (ret) : "Q" (*p) : "memory"); \
+ return ret; \
+}
+
+#define _STX(_name, _type, _inst, _reg) \
+static inline int _name (volatile _type *p, _type v) \
+{ \
+ int ret; \
+ asm volatile( \
+ _inst " %" _reg "0, %" _reg "1, %2" \
+ : "=&r" (ret) \
+ : "r" (v), "Q" (*p) \
+ : "memory"); \
+ return ret; \
+}
+
+#define _STL(_name, _type, _inst, _reg) \
+static inline void _name (volatile _type *p, _type v) \
+{ \
+ asm volatile( \
+ _inst " %" _reg "0, %1" \
+ : \
+ : "r" (v), "Q" (*p) \
+ : "memory"); \
+}
+
+_LD( ldx64, u64, "ldxr", "x")
+_STX( stx64, u64, "stxr", "x")
+_LD( ldax64, u64, "ldaxr", "x")
+_STX(stlx64, u64, "stlxr", "x")
+_LD( lda64, u64, "ldar", "x")
+_STL( stl64, u64, "stlr", "x")
+
+_LD( ldx32, u32, "ldxr", "w")
+_STX( stx32, u32, "stxr", "w")
+_LD( ldax32, u32, "ldaxr", "w")
+_STX(stlx32, u32, "stlxr", "w")
+_LD( lda32, u32, "ldar", "w")
+_STL( stl32, u32, "stlr", "w")
+
+_LD( ldx16, u16, "ldxrh", "w")
+_STX( stx16, u16, "stxrh", "w")
+_LD( ldax16, u16, "ldaxrh", "w")
+_STX(stlx16, u16, "stlxrh", "w")
+_LD( lda16, u16, "ldarh", "w")
+_STL( stl16, u16, "stlrh", "w")
+
+_LD( ldx8, u8, "ldxrb", "w")
+_STX( stx8, u8, "stxrb", "w")
+_LD( ldax8, u8, "ldaxrb", "w")
+_STX( stlx8, u8, "stlxrb", "w")
+_LD( lda8, u8, "ldarb", "w")
+_STL( stl8, u8, "stlrb", "w")
+
+/*
* AArch64 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
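
Taken together, the exclusive pairs compose into exactly the LL/SC loops the comment above describes: load-exclusive a value, compute, store-exclusive, and retry if exclusivity was lost. A minimal sketch of such a loop (hypothetical function name, not part of this commit; assumes the kernel's u32 type):

/* Hypothetical illustration built on the generated helpers: an
 * LL/SC increment. stx32() returns the stxr status flag, which is
 * 0 on success and nonzero if the exclusive monitor was lost, so
 * the loop retries until the store-exclusive succeeds. */
static inline u32 example_inc_return(volatile u32 *p)
{
	u32 old;

	do {
		old = ldx32(p);		/* load-exclusive: claim the monitor */
	} while (stx32(p, old + 1));	/* store-exclusive: retry on failure */

	return old + 1;
}

The acquire/release variants (ldax*/stlx*, lda*/stl*) follow the same pattern but additionally order the access against surrounding loads and stores, which is what lets callers such as cpu_relaxed_read_atomic() avoid separate barrier instructions.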