Release 4.15 kernel/power/process.c
// SPDX-License-Identifier: GPL-2.0
/*
* kernel/power/process.c - Functions for starting/stopping processes on
* suspend transitions.
*
* Originally from swsusp.
*/
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>
/*
* Timeout for stopping processes
*/
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
/*
 * try_to_freeze_tasks - freeze eligible tasks and wait for them to comply.
 * @user_only: if true, only user space tasks are frozen; if false,
 *	freezable kernel threads and freezable workqueues are frozen too.
 *
 * Walks the task list repeatedly, sending freeze requests, until every
 * eligible task has entered the refrigerator, the freeze_timeout_msecs
 * timeout expires, or a system wakeup event is detected.
 *
 * Return: 0 if all eligible tasks froze, -EBUSY on timeout or when the
 * operation was aborted by a pending wakeup event.
 */
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;		/* tasks (+ busy workqueues) not frozen yet */
	bool wq_busy = false;
	ktime_t start, end, elapsed;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;	/* backoff, starts at 1 ms */

	/*
	 * Use the boottime clock so the elapsed time reported below stays
	 * meaningful even if time is spent suspended along the way.
	 */
	start = ktime_get_boottime();
	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
	if (!user_only)
		freeze_workqueues_begin();
	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		for_each_process_thread(g, p) {
			/* Skip ourselves and tasks that are not freezable. */
			if (p == current || !freeze_task(p))
				continue;
			/*
			 * Tasks allowed to keep running for now (see
			 * freezer_should_skip()) are not counted as pending.
			 */
			if (!freezer_should_skip(p))
				todo++;
		}
		read_unlock(&tasklist_lock);
		if (!user_only) {
			/* A still-busy workqueue counts as one more item. */
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}
		if (!todo || time_after(jiffies, end_time))
			break;
		if (pm_wakeup_pending()) {
			/* A wakeup event arrived - abort the transition. */
			wakeup = true;
			break;
		}
		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator. Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}
	end = ktime_get_boottime();
	elapsed = ktime_sub(end, start);
	elapsed_msecs = ktime_to_ms(elapsed);
	if (todo) {
		pr_cont("\n");
		pr_err("Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);
		if (wq_busy)
			show_workqueue_state();
		/*
		 * Dump the stacks of the refusing tasks to aid debugging.
		 * Skipped on a wakeup abort, which is not the tasks' fault.
		 */
		if (!wakeup) {
			read_lock(&tasklist_lock);
			for_each_process_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			}
			read_unlock(&tasklist_lock);
		}
	} else {
		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
			elapsed_msecs % 1000);
	}
	return todo ? -EBUSY : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 128 | 38.32% | 11 | 42.31% |
Patrick Mochel | 56 | 16.77% | 2 | 7.69% |
Tejun Heo | 54 | 16.17% | 5 | 19.23% |
Colin Cross | 36 | 10.78% | 1 | 3.85% |
Pavel Machek | 27 | 8.08% | 1 | 3.85% |
Abhilash Jindal | 16 | 4.79% | 1 | 3.85% |
Roger Lu | 7 | 2.10% | 1 | 3.85% |
Michal Hocko | 5 | 1.50% | 2 | 7.69% |
Li Fei | 4 | 1.20% | 1 | 3.85% |
Xiaotian Feng | 1 | 0.30% | 1 | 3.85% |
Total | 334 | 100.00% | 26 | 100.00% |
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
* The current thread will not be frozen. The same process that calls
* freeze_processes must later call thaw_processes.
*
* On success, returns 0. On failure, -errno and system is fully thawed.
*/
int freeze_processes(void)
{
	int error;

	/* Stop new usermode helpers from being spawned during the freeze. */
	error = __usermodehelper_disable(UMH_FREEZING);
	if (error)
		return error;

	/* Make sure this task doesn't get frozen */
	current->flags |= PF_SUSPEND_TASK;

	/* Bump the global freezing counter only on the first activation. */
	if (!pm_freezing)
		atomic_inc(&system_freezing_cnt);

	/* Discard wakeup events registered so far; the freeze starts fresh. */
	pm_wakeup_clear(true);
	pr_info("Freezing user space processes ... ");
	pm_freezing = true;
	error = try_to_freeze_tasks(true);
	if (!error) {
		/* User space is frozen; helpers stay fully disabled now. */
		__usermodehelper_set_disable_depth(UMH_DISABLED);
		pr_cont("done.");
	}
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * Now that the whole userspace is frozen we need to disable
	 * the OOM killer to disallow any further interference with
	 * killable tasks.  There is no guarantee that OOM victims will
	 * ever go away, so we have to wait with a timeout.
	 */
	if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
		error = -EBUSY;

	/* On any failure the system is fully thawed, as documented above. */
	if (error)
		thaw_processes();
	return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 65 | 53.72% | 9 | 56.25% |
Michal Hocko | 27 | 22.31% | 4 | 25.00% |
Tejun Heo | 22 | 18.18% | 2 | 12.50% |
Colin Cross | 7 | 5.79% | 1 | 6.25% |
Total | 121 | 100.00% | 16 | 100.00% |
/**
* freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
*
* On success, returns 0. On failure, -errno and only the kernel threads are
* thawed, so as to give a chance to the caller to do additional cleanups
* (if any) before thawing the userspace tasks. So, it is the responsibility
* of the caller to thaw the userspace tasks, when the time is right.
*/
int freeze_kernel_threads(void)
{
	int ret;

	pr_info("Freezing remaining freezable tasks ... ");

	/* Tell the freezer that kernel threads must freeze too. */
	pm_nosig_freezing = true;
	ret = try_to_freeze_tasks(false);
	if (ret)
		goto out;

	pr_cont("done.");
out:
	pr_cont("\n");
	BUG_ON(in_atomic());

	/*
	 * On failure, undo only the kernel-thread freeze; thawing any
	 * previously frozen user space is left to the caller.
	 */
	if (ret)
		thaw_kernel_threads();
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 31 | 54.39% | 6 | 54.55% |
Patrick Mochel | 12 | 21.05% | 1 | 9.09% |
Tejun Heo | 10 | 17.54% | 2 | 18.18% |
Michal Hocko | 3 | 5.26% | 1 | 9.09% |
Srivatsa S. Bhat | 1 | 1.75% | 1 | 9.09% |
Total | 57 | 100.00% | 11 | 100.00% |
void thaw_processes(void)
{
	struct task_struct *g, *p;
	struct task_struct *curr = current;

	trace_suspend_resume(TPS("thaw_processes"), 0, true);

	/* Drop the global freezing counter if user space freezing was active. */
	if (pm_freezing)
		atomic_dec(&system_freezing_cnt);
	pm_freezing = false;
	pm_nosig_freezing = false;

	/* Re-enable the OOM killer, disabled by freeze_processes(). */
	oom_killer_enable();

	pr_info("Restarting tasks ... ");

	/* Allow usermode helpers again, but keep them in freezing state
	 * until usermodehelper_enable() below. */
	__usermodehelper_set_disable_depth(UMH_FREEZING);
	thaw_workqueues();

	/* Let any pending cpuset hotplug work settle before waking tasks. */
	cpuset_wait_for_hotplug();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* No other threads should have PF_SUSPEND_TASK set */
		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
		__thaw_task(p);
	}
	read_unlock(&tasklist_lock);

	/* Only the task that froze the system carries PF_SUSPEND_TASK. */
	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
	curr->flags &= ~PF_SUSPEND_TASK;

	usermodehelper_enable();

	schedule();
	pr_cont("done.\n");
	trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Colin Cross | 44 | 28.21% | 1 | 7.69% |
Patrick Mochel | 42 | 26.92% | 2 | 15.38% |
Tejun Heo | 31 | 19.87% | 3 | 23.08% |
Todd E Brandt | 24 | 15.38% | 1 | 7.69% |
Takashi Iwai | 5 | 3.21% | 1 | 7.69% |
Michal Hocko | 3 | 1.92% | 2 | 15.38% |
Rafael J. Wysocki | 3 | 1.92% | 1 | 7.69% |
Peter Zijlstra | 3 | 1.92% | 1 | 7.69% |
Nigel Cunningham | 1 | 0.64% | 1 | 7.69% |
Total | 156 | 100.00% | 13 | 100.00% |
void thaw_kernel_threads(void)
{
	struct task_struct *proc, *t;

	pm_nosig_freezing = false;

	pr_info("Restarting kernel threads ... ");

	thaw_workqueues();

	read_lock(&tasklist_lock);
	for_each_process_thread(proc, t) {
		/* Wake only kernel threads and workqueue workers;
		 * user space tasks stay frozen. */
		if (t->flags & (PF_KTHREAD | PF_WQ_WORKER))
			__thaw_task(t);
	}
	read_unlock(&tasklist_lock);

	schedule();
	pr_cont("done.\n");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 69 | 95.83% | 1 | 33.33% |
Michal Hocko | 3 | 4.17% | 2 | 66.67% |
Total | 72 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 302 | 37.94% | 19 | 36.54% |
Patrick Mochel | 123 | 15.45% | 2 | 3.85% |
Tejun Heo | 123 | 15.45% | 9 | 17.31% |
Colin Cross | 88 | 11.06% | 2 | 3.85% |
Michal Hocko | 41 | 5.15% | 5 | 9.62% |
Pavel Machek | 27 | 3.39% | 1 | 1.92% |
Todd E Brandt | 27 | 3.39% | 1 | 1.92% |
Abhilash Jindal | 16 | 2.01% | 1 | 1.92% |
Li Fei | 13 | 1.63% | 1 | 1.92% |
Roger Lu | 7 | 0.88% | 1 | 1.92% |
Peter Zijlstra | 7 | 0.88% | 1 | 1.92% |
Ingo Molnar | 6 | 0.75% | 2 | 3.85% |
Takashi Iwai | 5 | 0.63% | 1 | 1.92% |
Nigel Cunningham | 4 | 0.50% | 2 | 3.85% |
Alexey Dobriyan | 3 | 0.38% | 1 | 1.92% |
Srivatsa S. Bhat | 2 | 0.25% | 1 | 1.92% |
Xiaotian Feng | 1 | 0.13% | 1 | 1.92% |
Greg Kroah-Hartman | 1 | 0.13% | 1 | 1.92% |
Total | 796 | 100.00% | 52 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.