cregit-Linux how code gets into the kernel

Release 4.13 kernel/hung_task.c

Directory: kernel
/*
 * Detect Hung Task
 *
 * kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
 *
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>

#include <trace/events/sched.h>

/*
 * The number of tasks checked:
 */

int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;

/*
 * Limit number of tasks checked in a batch.
 *
 * This value controls the preemptibility of khungtaskd since preemption
 * is disabled during the critical section. It also controls the size of
 * the RCU grace period. So it needs to be upper-bound.
 */

#define HUNG_TASK_BATCHING 1024

/*
 * Zero means infinite timeout - no checking done:
 */

unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;


int __read_mostly sysctl_hung_task_warnings = 10;


static int __read_mostly did_panic;

static bool hung_task_show_lock;


static struct task_struct *watchdog_task;

/*
 * Should we panic (and reboot, if panic_timeout= is set) when a
 * hung task is detected:
 */

unsigned int __read_mostly sysctl_hung_task_panic =
				CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;


/*
 * Parse the "hung_task_panic=" boot parameter into sysctl_hung_task_panic.
 * Returns 1 so the early-param machinery treats the option as consumed,
 * or the kstrtouint() error code if the value does not parse.
 */
static int __init hung_task_panic_setup(char *str)
{
	int err;

	err = kstrtouint(str, 0, &sysctl_hung_task_panic);
	if (err)
		return err;

	return 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines1955.88%150.00%
Fabian Frederick1544.12%150.00%
Total34100.00%2100.00%

/* Register the "hung_task_panic=" kernel boot-command-line option. */
__setup("hung_task_panic=", hung_task_panic_setup);
/*
 * Panic-notifier callback: record that the system has already panicked so
 * the detector stops reporting additional hung tasks (see did_panic checks).
 */
static int hung_task_panic(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines26100.00%1100.00%
Total26100.00%1100.00%

/* Hooked onto panic_notifier_list at init time; fires hung_task_panic(). */
static struct notifier_block panic_block = { .notifier_call = hung_task_panic, };
static void check_hung_task(struct task_struct *t, unsigned long timeout) { unsigned long switch_count = t->nvcsw + t->nivcsw; /* * Ensure the task is not frozen. * Also, skip vfork and any other user process that freezer should skip. */ if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP))) return; /* * When a freshly created task is scheduled once, changes its state to * TASK_UNINTERRUPTIBLE without having ever been switched out once, it * musn't be checked. */ if (unlikely(!switch_count)) return; if (switch_count != t->last_switch_count) { t->last_switch_count = switch_count; return; } trace_sched_process_hang(t); if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic) return; /* * Ok, the task did not get scheduled for more than 2 minutes, * complain: */ if (sysctl_hung_task_warnings) { if (sysctl_hung_task_warnings > 0) sysctl_hung_task_warnings--; pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", t->comm, t->pid, timeout); pr_err(" %s %s %.*s\n", print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" " disables this message.\n"); sched_show_task(t); hung_task_show_lock = true; } touch_nmi_watchdog(); if (sysctl_hung_task_panic) { if (hung_task_show_lock) debug_show_all_locks(); trigger_all_cpu_backtrace(); panic("hung_task: blocked tasks"); } }

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines11159.68%327.27%
Oleg Nesterov3920.97%218.18%
Tetsuo Handa168.60%218.18%
John Siddle63.23%19.09%
Frédéric Weisbecker52.69%19.09%
Sasha Levin52.69%19.09%
Aaron Tomlin42.15%19.09%
Total186100.00%11100.00%

/*
 * To avoid extending the RCU grace period for an unbounded amount of time,
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
 * to exit the grace period. For classic RCU, a reschedule is required.
 *
 * Returns true when both @g and @t are still alive after re-entering the
 * read-side critical section, i.e. the caller may continue its walk.
 */
static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool alive;

	/* Pin both tasks so they can't be freed while we're outside RCU. */
	get_task_struct(g);
	get_task_struct(t);

	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();

	alive = pid_alive(g) && pid_alive(t);

	put_task_struct(t);
	put_task_struct(g);

	return alive;
}

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines4469.84%150.00%
Oleg Nesterov1930.16%150.00%
Total63100.00%2100.00%

/* * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for * a really long time (120 seconds). If that happens, print out * a warning. */
static void check_hung_uninterruptible_tasks(unsigned long timeout) { int max_count = sysctl_hung_task_check_count; int batch_count = HUNG_TASK_BATCHING; struct task_struct *g, *t; /* * If the system crashed already then all bets are off, * do not report extra hung tasks: */ if (test_taint(TAINT_DIE) || did_panic) return; hung_task_show_lock = false; rcu_read_lock(); for_each_process_thread(g, t) { if (!max_count--) goto unlock; if (!--batch_count) { batch_count = HUNG_TASK_BATCHING; if (!rcu_lock_break(g, t)) goto unlock; } /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */ if (t->state == TASK_UNINTERRUPTIBLE) check_hung_task(t, timeout); } unlock: rcu_read_unlock(); if (hung_task_show_lock) debug_show_all_locks(); }

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines10086.21%450.00%
Tetsuo Handa119.48%112.50%
Oleg Nesterov32.59%112.50%
Anton Blanchard10.86%112.50%
Aaron Tomlin10.86%112.50%
Total116100.00%8100.00%


/*
 * Convert the remaining watchdog interval into jiffies.
 *
 * @last_checked: jiffies timestamp of the previous scan
 * @timeout:      configured interval in seconds; 0 disables the watchdog,
 *                in which case we sleep forever (MAX_SCHEDULE_TIMEOUT).
 */
static long hung_timeout_jiffies(unsigned long last_checked,
				 unsigned long timeout)
{
	if (!timeout)
		return MAX_SCHEDULE_TIMEOUT;

	return last_checked - jiffies + timeout * HZ;
}

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines1967.86%266.67%
Tetsuo Handa932.14%133.33%
Total28100.00%3100.00%

/*
 * Process updating of timeout sysctl
 *
 * On a successful write, kick khungtaskd so the new timeout takes effect
 * immediately instead of after the watchdog's current sleep expires.
 */
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	wake_up_process(watchdog_task);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines64100.00%1100.00%
Total64100.00%1100.00%

/* Set to 1 to make the watchdog skip its next scan (see watchdog()). */
static atomic_t reset_hung_task = ATOMIC_INIT(0);

/*
 * Ask the hung-task detector to skip the upcoming check cycle, e.g. after
 * an event that legitimately stalled tasks for a long time.
 */
void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}

Contributors

PersonTokensPropCommitsCommitProp
Marcelo Tosatti15100.00%1100.00%
Total15100.00%1100.00%

EXPORT_SYMBOL_GPL(reset_hung_task_detector);

/*
 * kthread which checks for tasks stuck in D state
 */
static int watchdog(void *dummy)
{
	unsigned long hung_last_checked = jiffies;

	set_user_nice(current, 0);

	for ( ; ; ) {
		unsigned long timeout = sysctl_hung_task_timeout_secs;
		long sleep = hung_timeout_jiffies(hung_last_checked, timeout);

		if (sleep > 0) {
			/* Interval not yet elapsed; sleep the remainder
			 * (a sysctl write wakes us early). */
			schedule_timeout_interruptible(sleep);
			continue;
		}

		/* Time to scan — unless a reset was requested, which
		 * consumes the flag and skips exactly one cycle. */
		if (!atomic_xchg(&reset_hung_task, 0))
			check_hung_uninterruptible_tasks(timeout);
		hung_last_checked = jiffies;
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines4250.60%360.00%
Tetsuo Handa3137.35%120.00%
Marcelo Tosatti1012.05%120.00%
Total83100.00%5100.00%


/*
 * Boot-time initialization: hook the panic notifier (so we stop reporting
 * once the system has died) and start the khungtaskd watchdog thread.
 */
static int __init hung_task_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines32100.00%1100.00%
Total32100.00%1100.00%

/* Run hung_task_init() at the subsystem init stage of boot. */
subsys_initcall(hung_task_init);

Overall Contributors

PersonTokensPropCommitsCommitProp
Mandeep Singh Baines55571.06%622.22%
Tetsuo Handa719.09%311.11%
Oleg Nesterov678.58%311.11%
Marcelo Tosatti394.99%13.70%
Fabian Frederick151.92%13.70%
Aaron Tomlin60.77%27.41%
John Siddle60.77%13.70%
Ingo Molnar60.77%27.41%
Frédéric Weisbecker50.64%13.70%
Sasha Levin50.64%13.70%
Paul Gortmaker20.26%27.41%
Jeff Mahoney10.13%13.70%
Anton Blanchard10.13%13.70%
Li Zefan10.13%13.70%
John Kacur10.13%13.70%
Total781100.00%27100.00%
Directory: kernel
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.