about summary refs log tree commit diff
diff options
context:
space:
mode:
author	franciscofranco <franciscofranco.1990@gmail.com>	2015-12-05 04:01:17 +0000
committer	Mister Oyster <oysterized@gmail.com>	2017-12-25 16:11:14 +0100
commit	2d6a958ab8f0aaa9c20738a0ac22ba94ae000362 (patch)
tree	1c2fafd423477f54ef91480567b8937d9a5215dc
parent	e5a41b03c4e0a299623929f41a3a1e17fa82076a (diff)
arm64: use the new *_relaxed macros for lower power usage
Signed-off-by: franciscofranco <franciscofranco.1990@gmail.com>
Signed-off-by: Joe Maples <joe@frap129.org>
Signed-off-by: Mister Oyster <oysterized@gmail.com>
-rw-r--r--	arch/arm64/include/asm/compat.h	| 4
-rw-r--r--	arch/arm64/include/asm/elf.h	| 2
-rw-r--r--	arch/arm64/include/asm/memory.h	| 2
-rw-r--r--	arch/arm64/include/asm/processor.h	| 2
-rw-r--r--	arch/arm64/kernel/ptrace.c	| 8
-rw-r--r--	drivers/input/input-compat.h	| 6
-rw-r--r--	include/linux/sched.h	| 2
-rw-r--r--	include/linux/seqlock.h	| 5
-rw-r--r--	kernel/cpuset.c	| 6
-rw-r--r--	kernel/freezer.c	| 2
-rw-r--r--	kernel/irq_work.c	| 4
-rw-r--r--	kernel/mutex.c	| 8
-rw-r--r--	kernel/sched/core.c	| 13
-rw-r--r--	kernel/smp.c	| 5
-rw-r--r--	mm/memcontrol.c	| 4
-rw-r--r--	mm/page_alloc.c	| 6
-rw-r--r--	net/rds/ib_rdma.c	| 2
17 files changed, 42 insertions, 39 deletions
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index a203e973b..d7a68ab51 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -288,12 +288,12 @@ struct compat_shmid64_ds {
static inline int is_compat_task(void)
{
- return test_thread_flag(TIF_32BIT);
+ return test_thread_flag_relaxed(TIF_32BIT);
}
static inline int is_compat_thread(struct thread_info *thread)
{
- return test_ti_thread_flag(thread, TIF_32BIT);
+ return test_ti_thread_flag_relaxed(thread, TIF_32BIT);
}
#else /* !CONFIG_COMPAT */
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index a4e1758c4..3ba055440 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -138,7 +138,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
/* 1GB of VA */
#ifdef CONFIG_COMPAT
-#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+#define STACK_RND_MASK (test_thread_flag_relaxed(TIF_32BIT) ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
#else
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 34d4cb5e9..c775203d7 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -49,7 +49,7 @@
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+#define TASK_SIZE (test_thread_flag_relaxed(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d3dd246ed..ed23bb439 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -40,7 +40,7 @@
#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000
-#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
+#define STACK_TOP (test_thread_flag_relaxed(TIF_32BIT) ? \
AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP STACK_TOP_MAX
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 28d39b95a..4b242c448 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1177,10 +1177,10 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
if (secure_computing() == -1)
return RET_SKIP_SYSCALL_TRACE;
- if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (test_thread_flag_relaxed(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ if (test_thread_flag_relaxed(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->syscallno);
if (IS_SKIP_SYSCALL(regs->syscallno)) {
@@ -1211,9 +1211,9 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
audit_syscall_exit(regs);
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ if (test_thread_flag_relaxed(TIF_SYSCALL_TRACEPOINT))
trace_sys_exit(regs, regs_return_value(regs));
- if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (test_thread_flag_relaxed(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 148f66fe3..5b8c62ef6 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -22,11 +22,11 @@
#if defined(CONFIG_X86_64) || defined(CONFIG_TILE)
# define INPUT_COMPAT_TEST is_compat_task()
#elif defined(CONFIG_S390)
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT)
+# define INPUT_COMPAT_TEST test_thread_flag_relaxed(TIF_31BIT)
#elif defined(CONFIG_MIPS)
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT_ADDR)
+# define INPUT_COMPAT_TEST test_thread_flag_relaxed(TIF_32BIT_ADDR)
#else
-# define INPUT_COMPAT_TEST test_thread_flag(TIF_32BIT)
+# define INPUT_COMPAT_TEST test_thread_flag_relaxed(TIF_32BIT)
#endif
struct input_event_compat {
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 08bd577ac..aa76cc2ae 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2554,7 +2554,7 @@ static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int fl
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
- return test_ti_thread_flag(task_thread_info(tsk), flag);
+ return test_ti_thread_flag_relaxed(task_thread_info(tsk), flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 182990574..d4cf5229c 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <asm/processor.h>
+#include <asm/relaxed.h>
/*
* Version using sequence counter only.
@@ -61,9 +62,9 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s)
unsigned ret;
repeat:
- ret = ACCESS_ONCE(s->sequence);
+ ret = cpu_relaxed_read((volatile u32 *)&s->sequence);
if (unlikely(ret & 1)) {
- cpu_relax();
+ cpu_read_relax();
goto repeat;
}
return ret;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 66a980fcf..d384b9d21 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -973,7 +973,7 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
* Allow tasks that have access to memory reserves because they have
* been OOM killed to get memory anywhere.
*/
- if (unlikely(test_thread_flag(TIF_MEMDIE)))
+ if (unlikely(test_thread_flag_relaxed(TIF_MEMDIE)))
return;
if (current->flags & PF_EXITING) /* Let dying task have memory */
return;
@@ -2451,7 +2451,7 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
* Allow tasks that have access to memory reserves because they have
* been OOM killed to get memory anywhere.
*/
- if (unlikely(test_thread_flag(TIF_MEMDIE)))
+ if (unlikely(test_thread_flag_relaxed(TIF_MEMDIE)))
return 1;
if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
return 0;
@@ -2504,7 +2504,7 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
* Allow tasks that have access to memory reserves because they have
* been OOM killed to get memory anywhere.
*/
- if (unlikely(test_thread_flag(TIF_MEMDIE)))
+ if (unlikely(test_thread_flag_relaxed(TIF_MEMDIE)))
return 1;
return 0;
}
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 4ada72f5f..adf590772 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -42,7 +42,7 @@ bool freezing_slow_path(struct task_struct *p)
if (p->flags & PF_NOFREEZE)
return false;
- if (test_thread_flag(TIF_MEMDIE))
+ if (test_thread_flag_relaxed(TIF_MEMDIE))
return false;
if (pm_nosig_freezing || cgroup_freezing(p))
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 55fcce606..4bcbeafc6 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -91,7 +91,7 @@ bool irq_work_needs_cpu(void)
struct llist_head *this_list;
this_list = &__get_cpu_var(irq_work_list);
- if (llist_empty(this_list))
+ if (llist_empty_relaxed(this_list))
return false;
/* All work should have been flushed before going offline */
@@ -116,7 +116,7 @@ static void __irq_work_run(void)
barrier();
this_list = &__get_cpu_var(irq_work_list);
- if (llist_empty(this_list))
+ if (llist_empty_relaxed(this_list))
return;
BUG_ON(!irqs_disabled());
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d77807b1e..9d953374e 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -143,8 +143,8 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
ACCESS_ONCE(prev->next) = node;
smp_wmb();
/* Wait until the lock holder passes the lock down */
- while (!ACCESS_ONCE(node->locked))
- arch_mutex_cpu_relax();
+ while (!cpu_relaxed_read(&(node->locked)))
+ cpu_read_relax();
}
static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
@@ -158,8 +158,8 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
if (cmpxchg(lock, node, NULL) == node)
return;
/* Wait until the next pointer is set */
- while (!(next = ACCESS_ONCE(node->next)))
- arch_mutex_cpu_relax();
+ while (!(next = (struct mspin_node*)(cpu_relaxed_read_long(&(node->next)))))
+ cpu_read_relax();
}
ACCESS_ONCE(next->locked) = 1;
smp_wmb();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c6d3219b1..265f8359e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1271,9 +1271,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
- if (match_state && unlikely(p->state != match_state))
+ if (match_state && unlikely(cpu_relaxed_read_long
+ (&(p->state)) != match_state))
return 0;
- cpu_relax();
+ cpu_read_relax();
}
/*
@@ -1603,7 +1604,7 @@ enum ipi_msg_type {
};
void scheduler_ipi(void)
{
- if (llist_empty(&this_rq()->wake_list)
+ if (llist_empty_relaxed(&this_rq()->wake_list)
&& !tick_nohz_full_cpu(smp_processor_id())
&& !got_nohz_idle_kick()){
mt_trace_ISR_start(IPI_RESCHEDULE);
@@ -1753,8 +1754,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
* If the owning (remote) cpu is still in the middle of schedule() with
* this task as prev, wait until its done referencing the task.
*/
- while (p->on_cpu)
- cpu_relax();
+ while (cpu_relaxed_read(&(p->on_cpu)))
+ cpu_read_relax();
/*
* Pairs with the smp_wmb() in finish_lock_switch().
*/
@@ -4261,7 +4262,7 @@ int idle_cpu(int cpu)
return 0;
#ifdef CONFIG_SMP
- if (!llist_empty(&rq->wake_list))
+ if (!llist_empty_relaxed(&rq->wake_list))
return 0;
#endif
diff --git a/kernel/smp.c b/kernel/smp.c
index 87f82e6cc..d02575408 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -12,6 +12,7 @@
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <asm/relaxed.h>
#include "smpboot.h"
@@ -101,8 +102,8 @@ void __init call_function_init(void)
*/
static void csd_lock_wait(struct call_single_data *csd)
{
- while (csd->flags & CSD_FLAG_LOCK)
- cpu_relax();
+ while (cpu_relaxed_read_short(&csd->flags) & CSD_FLAG_LOCK)
+ cpu_read_relax();
}
static void csd_lock(struct call_single_data *csd)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36300fda5..7f9608355 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2713,7 +2713,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
* in system level. So, allow to go ahead dying process in addition to
* MEMDIE process.
*/
- if (unlikely(test_thread_flag(TIF_MEMDIE)
+ if (unlikely(test_thread_flag_relaxed(TIF_MEMDIE)
|| fatal_signal_pending(current)))
goto bypass;
@@ -4110,7 +4110,7 @@ static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
* because we want to do uncharge as soon as possible.
*/
- if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
+ if (!batch->do_batch || test_thread_flag_relaxed(TIF_MEMDIE))
goto direct_uncharge;
if (nr_pages > 1)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1581cefe0..884c13be9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2415,7 +2415,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
* of allowed nodes.
*/
if (!(gfp_mask & __GFP_NOMEMALLOC))
- if (test_thread_flag(TIF_MEMDIE) ||
+ if (test_thread_flag_relaxed(TIF_MEMDIE) ||
(current->flags & (PF_MEMALLOC | PF_EXITING)))
filter &= ~SHOW_MEM_FILTER_NODES;
if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
@@ -2762,7 +2762,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
alloc_flags |= ALLOC_NO_WATERMARKS;
else if (!in_interrupt() &&
((current->flags & PF_MEMALLOC) ||
- unlikely(test_thread_flag(TIF_MEMDIE))))
+ unlikely(test_thread_flag_relaxed(TIF_MEMDIE))))
alloc_flags |= ALLOC_NO_WATERMARKS;
}
#if !defined(CONFIG_CMA) || !defined(CONFIG_MTK_SVP) // SVP 15
@@ -2904,7 +2904,7 @@ rebalance:
goto nopage;
/* Avoid allocations with no watermarks from looping endlessly */
- if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+ if (test_thread_flag_relaxed(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
goto nopage;
/*
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a985158d9..a9724205c 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -593,7 +593,7 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
prepare_to_wait(&pool->flush_wait, &wait,
TASK_UNINTERRUPTIBLE);
- if (llist_empty(&pool->clean_list))
+ if (llist_empty_relaxed(&pool->clean_list))
schedule();
ibmr = rds_ib_reuse_fmr(pool);