/* Release 4.15 — kernel/freezer.c */
/*
* kernel/freezer.c - Function to freeze a process
*
* Originally from kernel/power/process.c
*/
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
/* total number of freezing conditions in effect */
atomic_t system_freezing_cnt = ATOMIC_INIT(0);
EXPORT_SYMBOL(system_freezing_cnt);
/* indicate whether PM freezing is in effect, protected by pm_mutex */
bool pm_freezing;
bool pm_nosig_freezing;
/*
* Temporary export for the deadlock workaround in ata_scsi_hotplug().
* Remove once the hack becomes unnecessary.
*/
EXPORT_SYMBOL_GPL(pm_freezing);
/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
/**
 * freezing_slow_path - slow path for testing whether a task needs to be frozen
 * @p: task to be tested
 *
 * This function is called by freezing() if system_freezing_cnt isn't zero
 * and tests whether @p needs to enter and stay in frozen state.  Can be
 * called under any context.  The freezers are responsible for ensuring the
 * target tasks see the updated state.
 */
bool freezing_slow_path(struct task_struct *p)
{
	/* Tasks explicitly exempt from freezing are never frozen. */
	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
		return false;

	/* Don't freeze a task that is on its way out via the OOM killer. */
	if (test_tsk_thread_flag(p, TIF_MEMDIE))
		return false;

	/*
	 * Freeze when either a nosig freeze or a cgroup freeze covers @p,
	 * or when PM freezing is on and @p is a userspace task.
	 */
	return pm_nosig_freezing || cgroup_freezing(p) ||
	       (pm_freezing && !(p->flags & PF_KTHREAD));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 52 | 76.47% | 2 | 40.00% |
Américo Wang | 9 | 13.24% | 1 | 20.00% |
Colin Cross | 4 | 5.88% | 1 | 20.00% |
Michal Hocko | 3 | 4.41% | 1 | 20.00% |
Total | 68 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(freezing_slow_path);
/**
 * __refrigerator - sit in the fridge until the freezing condition clears
 * @check_kthr_stop: also leave when kthread_should_stop() becomes true
 *
 * Refrigerator is place where frozen processes are stored :-).
 *
 * Loops in TASK_UNINTERRUPTIBLE, scheduling away while %current remains
 * frozen.  Returns %true if the task actually spent time frozen (i.e. it
 * went around the loop at least once), %false otherwise.
 */
bool __refrigerator(bool check_kthr_stop)
{
	/* Hmm, should we be allowed to suspend when there are realtime
	   processes around? */
	bool was_frozen = false;
	/* remember task state on entry so it can be restored on the way out */
	long save = current->state;
	pr_debug("%s entered refrigerator\n", current->comm);
	for (;;) {
		/* must set state before testing the freezing condition */
		set_current_state(TASK_UNINTERRUPTIBLE);
		spin_lock_irq(&freezer_lock);
		current->flags |= PF_FROZEN;
		/*
		 * Undo PF_FROZEN if the freezing condition has already gone
		 * away, or if a freezable kthread is being told to stop
		 * (when the caller opted in via @check_kthr_stop).
		 */
		if (!freezing(current) ||
		    (check_kthr_stop && kthread_should_stop()))
			current->flags &= ~PF_FROZEN;
		spin_unlock_irq(&freezer_lock);
		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}
	pr_debug("%s left refrigerator\n", current->comm);
	/*
	 * Restore saved task state before returning.  The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);
	return was_frozen;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 64 | 53.78% | 5 | 83.33% |
Matt Helsley | 55 | 46.22% | 1 | 16.67% |
Total | 119 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(__refrigerator);
/*
 * Nudge a userspace task with a signal-style wakeup (without queueing an
 * actual signal) so it notices the freezing condition and enters the
 * refrigerator.
 */
static void fake_signal_wake_up(struct task_struct *p)
{
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return;

	signal_wake_up(p, 0);
	unlock_task_sighand(p, &flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Helsley | 32 | 76.19% | 1 | 33.33% |
Tejun Heo | 10 | 23.81% | 2 | 66.67% |
Total | 42 | 100.00% | 3 | 100.00% |
/**
 * freeze_task - send a freeze request to given task
 * @p: task to send the request to
 *
 * If @p is freezing, the freeze request is sent either by sending a fake
 * signal (if it's not a kernel thread) or waking it up (if it's a kernel
 * thread).
 *
 * RETURNS:
 * %false, if @p is not freezing or already frozen; %true, otherwise
 */
bool freeze_task(struct task_struct *p)
{
	unsigned long flags;
	bool woken = false;

	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task.  It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;

	spin_lock_irqsave(&freezer_lock, flags);
	if (freezing(p) && !frozen(p)) {
		/* kthreads are woken directly; userspace gets a fake signal */
		if (p->flags & PF_KTHREAD)
			wake_up_state(p, TASK_INTERRUPTIBLE);
		else
			fake_signal_wake_up(p);
		woken = true;
	}
	spin_unlock_irqrestore(&freezer_lock, flags);

	return woken;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Helsley | 43 | 45.74% | 1 | 16.67% |
Tejun Heo | 40 | 42.55% | 4 | 66.67% |
Colin Cross | 11 | 11.70% | 1 | 16.67% |
Total | 94 | 100.00% | 6 | 100.00% |
/*
 * __thaw_task - wake @p out of the refrigerator
 *
 * Under freezer_lock, a frozen task is known to be sleeping in
 * __refrigerator(); waking it lets it re-test freezing() and break out.
 * A task that isn't frozen is left alone.
 */
void __thaw_task(struct task_struct *p)
{
	unsigned long flags;
	spin_lock_irqsave(&freezer_lock, flags);
	if (frozen(p))
		wake_up_process(p);
	spin_unlock_irqrestore(&freezer_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Helsley | 27 | 64.29% | 1 | 33.33% |
Tejun Heo | 15 | 35.71% | 2 | 66.67% |
Total | 42 | 100.00% | 3 | 100.00% |
/**
 * set_freezable - make %current freezable
 *
 * Mark %current freezable and enter refrigerator if necessary.
 *
 * Return: the result of try_to_freeze(), i.e. whether %current was
 * frozen (and has since been thawed) on the way out.
 */
bool set_freezable(void)
{
	might_sleep();
	/*
	 * Modify flags while holding freezer_lock.  This ensures the
	 * freezer notices that we aren't frozen yet or the freezing
	 * condition is visible to try_to_freeze() below.
	 */
	spin_lock_irq(&freezer_lock);
	current->flags &= ~PF_NOFREEZE;
	spin_unlock_irq(&freezer_lock);
	return try_to_freeze();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 34 | 100.00% | 2 | 100.00% |
Total | 34 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(set_freezable);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tejun Heo | 261 | 55.41% | 14 | 66.67% |
Matt Helsley | 181 | 38.43% | 1 | 4.76% |
Colin Cross | 15 | 3.18% | 2 | 9.52% |
Américo Wang | 9 | 1.91% | 1 | 4.76% |
Michal Hocko | 3 | 0.64% | 1 | 4.76% |
Paul Gortmaker | 1 | 0.21% | 1 | 4.76% |
Marcos Paulo de Souza | 1 | 0.21% | 1 | 4.76% |
Total | 471 | 100.00% | 21 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.