Commit 96ed7532 authored by Rafael J. Wysocki's avatar Rafael J. Wysocki

Merge branch 'freezer'

* freezer:
  PM / freezer: Clean up code after recent fixes
  PM: convert do_each_thread to for_each_process_thread
  OOM, PM: OOM killed task shouldn't escape PM suspend
  freezer: remove obsolete comments in __thaw_task()
  freezer: Do not freeze tasks killed by OOM killer
parents 37c72cac 71be2114
...@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p) ...@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
extern unsigned long oom_badness(struct task_struct *p, extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask, struct mem_cgroup *memcg, const nodemask_t *nodemask,
unsigned long totalpages); unsigned long totalpages);
extern int oom_kills_count(void);
extern void note_oom_kill(void);
extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
unsigned int points, unsigned long totalpages, unsigned int points, unsigned long totalpages,
struct mem_cgroup *memcg, nodemask_t *nodemask, struct mem_cgroup *memcg, nodemask_t *nodemask,
......
...@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p) ...@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK)) if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
return false; return false;
if (test_thread_flag(TIF_MEMDIE))
return false;
if (pm_nosig_freezing || cgroup_freezing(p)) if (pm_nosig_freezing || cgroup_freezing(p))
return true; return true;
...@@ -147,12 +150,6 @@ void __thaw_task(struct task_struct *p) ...@@ -147,12 +150,6 @@ void __thaw_task(struct task_struct *p)
{ {
unsigned long flags; unsigned long flags;
/*
* Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
* be visible to @p as waking up implies wmb. Waking up inside
* freezer_lock also prevents wakeups from leaking outside
* refrigerator.
*/
spin_lock_irqsave(&freezer_lock, flags); spin_lock_irqsave(&freezer_lock, flags);
if (frozen(p)) if (frozen(p))
wake_up_process(p); wake_up_process(p);
......
...@@ -46,13 +46,13 @@ static int try_to_freeze_tasks(bool user_only) ...@@ -46,13 +46,13 @@ static int try_to_freeze_tasks(bool user_only)
while (true) { while (true) {
todo = 0; todo = 0;
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
do_each_thread(g, p) { for_each_process_thread(g, p) {
if (p == current || !freeze_task(p)) if (p == current || !freeze_task(p))
continue; continue;
if (!freezer_should_skip(p)) if (!freezer_should_skip(p))
todo++; todo++;
} while_each_thread(g, p); }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
if (!user_only) { if (!user_only) {
...@@ -93,11 +93,11 @@ static int try_to_freeze_tasks(bool user_only) ...@@ -93,11 +93,11 @@ static int try_to_freeze_tasks(bool user_only)
if (!wakeup) { if (!wakeup) {
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
do_each_thread(g, p) { for_each_process_thread(g, p) {
if (p != current && !freezer_should_skip(p) if (p != current && !freezer_should_skip(p)
&& freezing(p) && !frozen(p)) && freezing(p) && !frozen(p))
sched_show_task(p); sched_show_task(p);
} while_each_thread(g, p); }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
} }
} else { } else {
...@@ -108,6 +108,30 @@ static int try_to_freeze_tasks(bool user_only) ...@@ -108,6 +108,30 @@ static int try_to_freeze_tasks(bool user_only)
return todo ? -EBUSY : 0; return todo ? -EBUSY : 0;
} }
static bool __check_frozen_processes(void)
{
struct task_struct *g, *p;
for_each_process_thread(g, p)
if (p != current && !freezer_should_skip(p) && !frozen(p))
return false;
return true;
}
/*
 * Returns true if all freezable tasks (except for current) are frozen already.
 * Takes tasklist_lock for reading so the thread list is stable during the scan.
 */
static bool check_frozen_processes(void)
{
	bool all_frozen;

	read_lock(&tasklist_lock);
	all_frozen = __check_frozen_processes();
	read_unlock(&tasklist_lock);

	return all_frozen;
}
/** /**
* freeze_processes - Signal user space processes to enter the refrigerator. * freeze_processes - Signal user space processes to enter the refrigerator.
* The current thread will not be frozen. The same process that calls * The current thread will not be frozen. The same process that calls
...@@ -118,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only) ...@@ -118,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only)
int freeze_processes(void) int freeze_processes(void)
{ {
int error; int error;
int oom_kills_saved;
error = __usermodehelper_disable(UMH_FREEZING); error = __usermodehelper_disable(UMH_FREEZING);
if (error) if (error)
...@@ -132,11 +157,25 @@ int freeze_processes(void) ...@@ -132,11 +157,25 @@ int freeze_processes(void)
pm_wakeup_clear(); pm_wakeup_clear();
printk("Freezing user space processes ... "); printk("Freezing user space processes ... ");
pm_freezing = true; pm_freezing = true;
oom_kills_saved = oom_kills_count();
error = try_to_freeze_tasks(true); error = try_to_freeze_tasks(true);
if (!error) { if (!error) {
printk("done.");
__usermodehelper_set_disable_depth(UMH_DISABLED); __usermodehelper_set_disable_depth(UMH_DISABLED);
oom_killer_disable(); oom_killer_disable();
/*
* There might have been an OOM kill while we were
* freezing tasks and the killed task might be still
* on the way out so we have to double check for race.
*/
if (oom_kills_count() != oom_kills_saved &&
!check_frozen_processes()) {
__usermodehelper_set_disable_depth(UMH_ENABLED);
printk("OOM in progress.");
error = -EBUSY;
} else {
printk("done.");
}
} }
printk("\n"); printk("\n");
BUG_ON(in_atomic()); BUG_ON(in_atomic());
...@@ -191,11 +230,11 @@ void thaw_processes(void) ...@@ -191,11 +230,11 @@ void thaw_processes(void)
thaw_workqueues(); thaw_workqueues();
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
do_each_thread(g, p) { for_each_process_thread(g, p) {
/* No other threads should have PF_SUSPEND_TASK set */ /* No other threads should have PF_SUSPEND_TASK set */
WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
__thaw_task(p); __thaw_task(p);
} while_each_thread(g, p); }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
WARN_ON(!(curr->flags & PF_SUSPEND_TASK)); WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
...@@ -218,10 +257,10 @@ void thaw_kernel_threads(void) ...@@ -218,10 +257,10 @@ void thaw_kernel_threads(void)
thaw_workqueues(); thaw_workqueues();
read_lock(&tasklist_lock); read_lock(&tasklist_lock);
do_each_thread(g, p) { for_each_process_thread(g, p) {
if (p->flags & (PF_KTHREAD | PF_WQ_WORKER)) if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
__thaw_task(p); __thaw_task(p);
} while_each_thread(g, p); }
read_unlock(&tasklist_lock); read_unlock(&tasklist_lock);
schedule(); schedule();
......
...@@ -404,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, ...@@ -404,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
dump_tasks(memcg, nodemask); dump_tasks(memcg, nodemask);
} }
/*
 * Number of OOM killer invocations (including memcg OOM killer).
 * Primarily used by PM freezer to check for potential races with
 * OOM killed frozen task.
 */
static atomic_t oom_kills = ATOMIC_INIT(0);

/*
 * Read the current OOM-kill invocation count.  The PM freezer samples this
 * before freezing user space and compares it afterwards to detect an OOM
 * kill that raced with the freeze (see freeze_processes()).
 */
int oom_kills_count(void)
{
	return atomic_read(&oom_kills);
}

/*
 * Record that an OOM kill may be about to happen.  Called early on the
 * OOM path, so false positives (no task actually killed) are possible
 * and tolerated by the consumer.
 */
void note_oom_kill(void)
{
	atomic_inc(&oom_kills);
}
#define K(x) ((x) << (PAGE_SHIFT-10)) #define K(x) ((x) << (PAGE_SHIFT-10))
/* /*
* Must be called while holding a reference to p, which will be released upon * Must be called while holding a reference to p, which will be released upon
......
...@@ -2251,6 +2251,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, ...@@ -2251,6 +2251,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
return NULL; return NULL;
} }
/*
* PM-freezer should be notified that there might be an OOM killer on
* its way to kill and wake somebody up. This is too early and we might
* end up not killing anything but false positives are acceptable.
* See freeze_processes.
*/
note_oom_kill();
/* /*
* Go through the zonelist yet one more time, keep very high watermark * Go through the zonelist yet one more time, keep very high watermark
* here, this is only to catch a parallel oom killing, we must fail if * here, this is only to catch a parallel oom killing, we must fail if
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment