Release 4.10 kernel/power/process.c
/*
 * kernel/power/process.c - Functions for starting/stopping processes on
 *                          suspend transitions.
 *
 * Originally from swsusp.
 */
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/kmod.h>
#include <trace/events/power.h>
/*
* Timeout for stopping processes
*/
unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
static int try_to_freeze_tasks(bool user_only)
{
        struct task_struct *g, *p;
        unsigned long end_time;
        unsigned int todo;
        bool wq_busy = false;
        ktime_t start, end, elapsed;
        unsigned int elapsed_msecs;
        bool wakeup = false;
        int sleep_usecs = USEC_PER_MSEC;

        start = ktime_get_boottime();

        end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

        if (!user_only)
                freeze_workqueues_begin();

        while (true) {
                todo = 0;
                read_lock(&tasklist_lock);
                for_each_process_thread(g, p) {
                        if (p == current || !freeze_task(p))
                                continue;

                        if (!freezer_should_skip(p))
                                todo++;
                }
                read_unlock(&tasklist_lock);

                if (!user_only) {
                        wq_busy = freeze_workqueues_busy();
                        todo += wq_busy;
                }

                if (!todo || time_after(jiffies, end_time))
                        break;

                if (pm_wakeup_pending()) {
                        wakeup = true;
                        break;
                }

                /*
                 * We need to retry, but first give the freezing tasks some
                 * time to enter the refrigerator.  Start with an initial
                 * 1 ms sleep followed by exponential backoff until 8 ms.
                 */
                usleep_range(sleep_usecs / 2, sleep_usecs);
                if (sleep_usecs < 8 * USEC_PER_MSEC)
                        sleep_usecs *= 2;
        }

        end = ktime_get_boottime();
        elapsed = ktime_sub(end, start);
        elapsed_msecs = ktime_to_ms(elapsed);

        if (todo) {
                pr_cont("\n");
                pr_err("Freezing of tasks %s after %d.%03d seconds "
                       "(%d tasks refusing to freeze, wq_busy=%d):\n",
                       wakeup ? "aborted" : "failed",
                       elapsed_msecs / 1000, elapsed_msecs % 1000,
                       todo - wq_busy, wq_busy);

                if (wq_busy)
                        show_workqueue_state();

                if (!wakeup) {
                        read_lock(&tasklist_lock);
                        for_each_process_thread(g, p) {
                                if (p != current && !freezer_should_skip(p)
                                    && freezing(p) && !frozen(p))
                                        sched_show_task(p);
                        }
                        read_unlock(&tasklist_lock);
                }
        } else {
                pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
                        elapsed_msecs % 1000);
        }

        return todo ? -EBUSY : 0;
}
Contributors
| Person | Tokens | Prop | Commits | Commit Prop |
| --- | --- | --- | --- | --- |
| rafael j. wysocki | 128 | 38.32% | 11 | 42.31% |
| patrick mochel | 56 | 16.77% | 2 | 7.69% |
| tejun heo | 54 | 16.17% | 5 | 19.23% |
| colin cross | 36 | 10.78% | 1 | 3.85% |
| pavel machek | 27 | 8.08% | 1 | 3.85% |
| abhilash jindal | 16 | 4.79% | 1 | 3.85% |
| roger lu | 7 | 2.10% | 1 | 3.85% |
| michal hocko | 5 | 1.50% | 2 | 7.69% |
| li fei | 4 | 1.20% | 1 | 3.85% |
| xiaotian feng | 1 | 0.30% | 1 | 3.85% |
| Total | 334 | 100.00% | 26 | 100.00% |
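The loop in try_to_freeze_tasks() only counts a task as done once it has parked itself in the refrigerator, which the task does by calling try_to_freeze() from its own context. A minimal sketch of such a freezable kernel thread is shown below; the thread function name and its 100 ms work interval are invented for illustration, but set_freezable(), try_to_freeze(), kthread_should_stop() and msleep_interruptible() are the standard freezer/kthread APIs.

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/delay.h>

/* Hypothetical freezable worker; not part of this file. */
static int my_freezable_thread(void *data)
{
        set_freezable();                /* opt this kthread in to the freezer */

        while (!kthread_should_stop()) {
                /*
                 * If freezing(current) is true, this call enters the
                 * refrigerator, which is what removes the task from the
                 * "todo" count in try_to_freeze_tasks() above.
                 */
                try_to_freeze();

                /* ... one unit of (hypothetical) work ... */
                msleep_interruptible(100);
        }
        return 0;
}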
/**
 * freeze_processes - Signal user space processes to enter the refrigerator.
 * The current thread will not be frozen.  The same process that calls
 * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0.  On failure, returns -errno and the system is
 * fully thawed.
 */
int freeze_processes(void)
{
        int error;

        error = __usermodehelper_disable(UMH_FREEZING);
        if (error)
                return error;

        /* Make sure this task doesn't get frozen */
        current->flags |= PF_SUSPEND_TASK;

        if (!pm_freezing)
                atomic_inc(&system_freezing_cnt);

        pm_wakeup_clear();
        pr_info("Freezing user space processes ... ");
        pm_freezing = true;
        error = try_to_freeze_tasks(true);
        if (!error) {
                __usermodehelper_set_disable_depth(UMH_DISABLED);
                pr_cont("done.");
        }
        pr_cont("\n");
        BUG_ON(in_atomic());

        /*
         * Now that the whole userspace is frozen we need to disable
         * the OOM killer to disallow any further interference with
         * killable tasks.  There is no guarantee oom victims will
         * ever reach a point where they go away, so we have to wait
         * with a timeout.
         */
        if (!error && !oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
                error = -EBUSY;

        if (error)
                thaw_processes();
        return error;
}
Contributors
| Person | Tokens | Prop | Commits | Commit Prop |
| --- | --- | --- | --- | --- |
| rafael j. wysocki | 63 | 52.94% | 8 | 53.33% |
| michal hocko | 27 | 22.69% | 4 | 26.67% |
| tejun heo | 22 | 18.49% | 2 | 13.33% |
| colin cross | 7 | 5.88% | 1 | 6.67% |
| Total | 119 | 100.00% | 15 | 100.00% |
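As the kerneldoc above notes, freeze_processes() either succeeds or leaves the system fully thawed, and the task that froze user space (the only one carrying PF_SUSPEND_TASK) must be the one that later calls thaw_processes(). A caller sketch, loosely modeled on a suspend-style sequence, might look like the following; my_enter_suspend() and the device-quiescing step are hypothetical, only the freezer calls are real APIs.

#include <linux/freezer.h>

/* Hypothetical suspend-style caller; not part of this file. */
static int my_enter_suspend(void)
{
        int error;

        error = freeze_processes();
        if (error)
                return error;           /* system is already fully thawed on failure */

        /* ... hypothetical: quiesce devices and enter the sleep state ... */

        thaw_processes();               /* must be called by the same task that froze */
        return 0;
}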
/**
 * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
 *
 * On success, returns 0.  On failure, returns -errno and only the kernel
 * threads are thawed, giving the caller a chance to do additional cleanups
 * (if any) before thawing the userspace tasks.  It is therefore the caller's
 * responsibility to thaw the userspace tasks when the time is right.
 */
int freeze_kernel_threads(void)
{
        int error;

        pr_info("Freezing remaining freezable tasks ... ");

        pm_nosig_freezing = true;
        error = try_to_freeze_tasks(false);
        if (!error)
                pr_cont("done.");

        pr_cont("\n");
        BUG_ON(in_atomic());

        if (error)
                thaw_kernel_threads();
        return error;
}
Contributors
| Person | Tokens | Prop | Commits | Commit Prop |
| --- | --- | --- | --- | --- |
| rafael j. wysocki | 31 | 54.39% | 6 | 54.55% |
| patrick mochel | 12 | 21.05% | 1 | 9.09% |
| tejun heo | 10 | 17.54% | 2 | 18.18% |
| michal hocko | 3 | 5.26% | 1 | 9.09% |
| srivatsa s. bhat | 1 | 1.75% | 1 | 9.09% |
| Total | 57 | 100.00% | 11 | 100.00% |
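The error handling here is deliberately asymmetric: if freeze_kernel_threads() fails it has already thawed the kernel threads, but user space (frozen earlier by freeze_processes()) stays frozen until the caller thaws it. A small sketch of a two-stage freeze in the spirit of a hibernation-style caller is given below; my_freeze_everything() is hypothetical, the three freezer calls are real APIs.

#include <linux/freezer.h>

/* Hypothetical two-stage freeze; not part of this file. */
static int my_freeze_everything(void)
{
        int error;

        error = freeze_processes();             /* user space first */
        if (error)
                return error;                   /* nothing is left frozen */

        error = freeze_kernel_threads();
        if (error)
                thaw_processes();               /* kernel threads were already re-thawed */

        return error;
}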
void thaw_processes(void)
{
        struct task_struct *g, *p;
        struct task_struct *curr = current;

        trace_suspend_resume(TPS("thaw_processes"), 0, true);
        if (pm_freezing)
                atomic_dec(&system_freezing_cnt);
        pm_freezing = false;
        pm_nosig_freezing = false;

        oom_killer_enable();

        pr_info("Restarting tasks ... ");

        __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                /* No other threads should have PF_SUSPEND_TASK set */
                WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
                __thaw_task(p);
        }
        read_unlock(&tasklist_lock);

        WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
        curr->flags &= ~PF_SUSPEND_TASK;

        usermodehelper_enable();

        schedule();
        pr_cont("done.\n");
        trace_suspend_resume(TPS("thaw_processes"), 0, false);
}
Contributors
| Person | Tokens | Prop | Commits | Commit Prop |
| --- | --- | --- | --- | --- |
| colin cross | 44 | 28.76% | 1 | 8.33% |
| patrick mochel | 42 | 27.45% | 2 | 16.67% |
| tejun heo | 31 | 20.26% | 3 | 25.00% |
| todd e brandt | 24 | 15.69% | 1 | 8.33% |
| takashi iwai | 5 | 3.27% | 1 | 8.33% |
| michal hocko | 3 | 1.96% | 2 | 16.67% |
| rafael j. wysocki | 3 | 1.96% | 1 | 8.33% |
| nigel cunningham | 1 | 0.65% | 1 | 8.33% |
| Total | 153 | 100.00% | 12 | 100.00% |
void thaw_kernel_threads(void)
{
        struct task_struct *g, *p;

        pm_nosig_freezing = false;

        pr_info("Restarting kernel threads ... ");

        thaw_workqueues();

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
                        __thaw_task(p);
        }
        read_unlock(&tasklist_lock);

        schedule();
        pr_cont("done.\n");
}
Contributors
| Person | Tokens | Prop | Commits | Commit Prop |
| --- | --- | --- | --- | --- |
| rafael j. wysocki | 69 | 95.83% | 1 | 33.33% |
| michal hocko | 3 | 4.17% | 2 | 66.67% |
| Total | 72 | 100.00% | 3 | 100.00% |
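thaw_kernel_threads() is the narrower counterpart of thaw_processes(): it restarts workqueues and tasks with PF_KTHREAD or PF_WQ_WORKER set while leaving user space frozen. The hypothetical error-path sketch below shows how that can be used to unwind a failed step without disturbing frozen user space; my_prepare_image() is invented for illustration, only the freeze/thaw calls are real APIs.

#include <linux/freezer.h>

/* Hypothetical unwind path; not part of this file. */
static int my_hibernate_prepare(void)
{
        int error;

        error = freeze_kernel_threads();        /* user space is assumed frozen already */
        if (error)
                return error;

        error = my_prepare_image();             /* hypothetical failing step */
        if (error)
                thaw_kernel_threads();          /* restart kernel threads; user space stays frozen */

        return error;
}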
Overall Contributors
| Person | Tokens | Prop | Commits | Commit Prop |
| --- | --- | --- | --- | --- |
| rafael j. wysocki | 300 | 38.41% | 18 | 38.30% |
| patrick mochel | 124 | 15.88% | 2 | 4.26% |
| tejun heo | 123 | 15.75% | 9 | 19.15% |
| colin cross | 88 | 11.27% | 2 | 4.26% |
| michal hocko | 41 | 5.25% | 5 | 10.64% |
| pavel machek | 27 | 3.46% | 1 | 2.13% |
| todd e brandt | 27 | 3.46% | 1 | 2.13% |
| abhilash jindal | 16 | 2.05% | 1 | 2.13% |
| li fei | 13 | 1.66% | 1 | 2.13% |
| roger lu | 7 | 0.90% | 1 | 2.13% |
| takashi iwai | 5 | 0.64% | 1 | 2.13% |
| nigel cunningham | 4 | 0.51% | 2 | 4.26% |
| alexey dobriyan | 3 | 0.38% | 1 | 2.13% |
| srivatsa s. bhat | 2 | 0.26% | 1 | 2.13% |
| xiaotian feng | 1 | 0.13% | 1 | 2.13% |
| Total | 781 | 100.00% | 47 | 100.00% |