about summary refs log tree commit diff
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cpuset.c18
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/power/process.c50
3 files changed, 52 insertions, 20 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f3a417c1d..ab0be4565 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2601,26 +2601,28 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
#define CPUSET_NODELIST_LEN (256)
/**
- * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
+ * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
+ * @task: pointer to task_struct of some task.
*
- * Description: Prints current's name, cpuset name, and cached copy of its
- * mems_allowed to the kernel log.
+ * Description: Prints @task's name, cpuset name, and cached copy of its
+ * mems_allowed to the kernel log. Must hold task_lock(task) to allow
+ * dereferencing task_cs(task).
*/
-void cpuset_print_current_mems_allowed(void)
+void cpuset_print_task_mems_allowed(struct task_struct *tsk)
{
/* Statically allocated to prevent using excess stack. */
static char cpuset_nodelist[CPUSET_NODELIST_LEN];
static DEFINE_SPINLOCK(cpuset_buffer_lock);
- struct cgroup *cgrp = task_cs(current)->css.cgroup;
+ struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
rcu_read_lock();
spin_lock(&cpuset_buffer_lock);
nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
- current->mems_allowed);
- pr_info("%s cpuset=%s mems_allowed=%s\n",
- current->comm, cgroup_name(cgrp), cpuset_nodelist);
+ tsk->mems_allowed);
+ printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
+ tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
spin_unlock(&cpuset_buffer_lock);
rcu_read_unlock();
diff --git a/kernel/exit.c b/kernel/exit.c
index d9153652a..dffb92e9b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -458,10 +458,8 @@ static void exit_mm(struct task_struct *tsk)
enter_lazy_tlb(mm, current);
task_unlock(tsk);
mm_update_next_owner(mm);
-
mmput(mm);
- if (test_thread_flag(TIF_MEMDIE))
- exit_oom_victim();
+ unmark_oom_victim();
}
static struct task_struct *find_alive_thread(struct task_struct *p)
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 899e07479..3bd3a1185 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -118,6 +118,30 @@ static int try_to_freeze_tasks(bool user_only)
return todo ? -EBUSY : 0;
}
+static bool __check_frozen_processes(void)
+{
+ struct task_struct *g, *p;
+
+ for_each_process_thread(g, p)
+ if (p != current && !freezer_should_skip(p) && !frozen(p))
+ return false;
+
+ return true;
+}
+
+/*
+ * Returns true if all freezable tasks (except for current) are frozen already
+ */
+static bool check_frozen_processes(void)
+{
+ bool ret;
+
+ read_lock(&tasklist_lock);
+ ret = __check_frozen_processes();
+ read_unlock(&tasklist_lock);
+ return ret;
+}
+
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
* The current thread will not be frozen. The same process that calls
@@ -128,6 +152,7 @@ static int try_to_freeze_tasks(bool user_only)
int freeze_processes(void)
{
int error;
+ int oom_kills_saved;
error = __usermodehelper_disable(UMH_FREEZING);
if (error)
@@ -142,22 +167,29 @@ int freeze_processes(void)
pm_wakeup_clear();
pr_info("Freezing user space processes ... ");
pm_freezing = true;
+ oom_kills_saved = oom_kills_count();
error = try_to_freeze_tasks(true);
if (!error) {
__usermodehelper_set_disable_depth(UMH_DISABLED);
- pr_cont("done.");
+ oom_killer_disable();
+
+ /*
+ * There might have been an OOM kill while we were
+ * freezing tasks and the killed task might be still
+ * on the way out so we have to double check for race.
+ */
+ if (oom_kills_count() != oom_kills_saved &&
+ !check_frozen_processes()) {
+ __usermodehelper_set_disable_depth(UMH_ENABLED);
+ pr_cont("OOM in progress.");
+ error = -EBUSY;
+ } else {
+ pr_cont("done.");
+ }
}
pr_cont("\n");
BUG_ON(in_atomic());
- /*
- * Now that the whole userspace is frozen we need to disbale
- * the OOM killer to disallow any further interference with
- * killable tasks.
- */
- if (!error && !oom_killer_disable())
- error = -EBUSY;
-
if (error)
thaw_processes();
return error;