Linux kernel release 4.15: kernel/watchdog_hld.c
// SPDX-License-Identifier: GPL-2.0
/*
* Detect hard lockups on a system
*
* started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
*
* Note: Most of this code is borrowed heavily from the original softlockup
* detector, so thanks to Ingo for the initial implementation.
* Some chunks also taken from the old x86-specific nmi watchdog code, thanks
* to those contributors as well.
*/
#define pr_fmt(fmt) "NMI watchdog: " fmt
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
#include <asm/irq_regs.h>
#include <linux/perf_event.h>
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
static atomic_t watchdog_cpus = ATOMIC_INIT(0);
void arch_touch_nmi_watchdog(void)
{
/*
* Using __raw here because some code paths have
* preemption enabled. If preemption is enabled
* then interrupts should be enabled too, in which
* case we shouldn't have to worry about the watchdog
* going off.
*/
raw_cpu_write(watchdog_nmi_touch, true);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Babu Moger | 14 | 93.33% | 1 | 50.00% |
Nicholas Piggin | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
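For context on how this flag gets set: kernel code that legitimately keeps a CPU busy with interrupts off calls touch_nmi_watchdog() (declared in <linux/nmi.h>), which ends up in arch_touch_nmi_watchdog() above and also pets the softlockup detector. A minimal, hypothetical sketch of such a caller (the device loop and names are invented for illustration, not taken from the kernel):

#include <linux/io.h>
#include <linux/nmi.h>
#include <linux/processor.h>

/* Hypothetical driver loop that may spin for a long time with
 * interrupts disabled: pet the NMI watchdog on each iteration so
 * the hard lockup detector does not treat the wait as a lockup. */
static void example_slow_poll(void __iomem *status_reg)
{
	while (!(readl(status_reg) & 0x1)) {
		touch_nmi_watchdog();
		cpu_relax();
	}
}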
#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;
void watchdog_update_hrtimer_threshold(u64 period)
{
/*
* The hrtimer runs with a period of (watchdog_threshold * 2) / 5
*
* So it runs effectively with 2.5 times the rate of the NMI
* watchdog. That means the hrtimer should fire 2-3 times before
* the NMI watchdog expires. The NMI watchdog on x86 is based on
* unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
* might run way faster than expected and the NMI fires in a
* smaller period than the one deduced from the nominal CPU
* frequency. Depending on the Turbo-Mode factor this might be fast
* enough to get the NMI period smaller than the hrtimer watchdog
* period and trigger false positives.
*
* The sample threshold is used to check in the NMI handler whether
* the minimum time between two NMI samples has elapsed. That
* prevents false positives.
*
* Set this to 4/5 of the actual watchdog threshold period so the
* hrtimer is guaranteed to fire at least once within the real
* watchdog threshold.
*/
watchdog_hrtimer_sample_threshold = period * 2;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
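To make the arithmetic concrete: with the default watchdog_thresh of 10 seconds, the hrtimer period is 4 s and the sample threshold is 8 s, i.e. 4/5 of the threshold. A small userspace sketch (not kernel code; variable names are illustrative) reproducing the computation:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t watchdog_thresh = 10;	/* default threshold, in seconds */

	/* hrtimer period as computed by set_sample_period() in
	 * kernel/watchdog.c: 2/5 of the watchdog threshold */
	uint64_t period = watchdog_thresh * 2 * NSEC_PER_SEC / 5;

	/* the assignment above: period * 2 == 4/5 of the threshold */
	uint64_t threshold = period * 2;

	printf("hrtimer period:   %.1f s\n", period / 1e9);	/* 4.0 s */
	printf("sample threshold: %.1f s\n", threshold / 1e9);	/* 8.0 s */
	return 0;
}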
static bool watchdog_check_timestamp(void)
{
ktime_t delta, now = ktime_get_mono_fast_ns();
delta = now - __this_cpu_read(last_timestamp);
if (delta < watchdog_hrtimer_sample_threshold) {
/*
* If ktime is jiffies based, a stalled timer would prevent
* jiffies from being incremented and the filter would look
* at a stale timestamp and never trigger.
*/
if (__this_cpu_inc_return(nmi_rearmed) < 10)
return false;
}
__this_cpu_write(nmi_rearmed, 0);
__this_cpu_write(last_timestamp, now);
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 63 | 100.00% | 1 | 100.00% |
Total | 63 | 100.00% | 1 | 100.00% |
#else
static inline bool watchdog_check_timestamp(void)
{
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
#endif
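The effect of the filter is easier to see in isolation. The following standalone simulation (illustrative only; it mirrors the logic of watchdog_check_timestamp() above without the per-CPU accessors) feeds it NMIs every 0.5 s, far below the 8 s minimum, and shows that only every 10th sample gets through:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t last_timestamp;
static unsigned int nmi_rearmed;
static const uint64_t sample_threshold = 8000000000ULL;	/* 8 s in ns */

/* Reject samples that arrive too early, but let every 10th one
 * through so a stalled timebase cannot hide a real lockup. */
static bool check_timestamp(uint64_t now)
{
	if (now - last_timestamp < sample_threshold) {
		if (++nmi_rearmed < 10)
			return false;
	}
	nmi_rearmed = 0;
	last_timestamp = now;
	return true;
}

int main(void)
{
	/* Simulate Turbo-skewed NMIs arriving every 0.5 s */
	for (int i = 1; i <= 24; i++) {
		uint64_t now = i * 500000000ULL;

		if (check_timestamp(now))
			printf("sample at %4.1f s accepted\n", now / 1e9);
	}
	return 0;	/* prints acceptances at 5.0 s and 10.0 s only */
}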
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
.size = sizeof(struct perf_event_attr),
.pinned = 1,
.disabled = 1,
};
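This attr is the in-kernel counterpart of what userspace passes to perf_event_open(2): a pinned, initially disabled hardware cycle counter. For comparison, a minimal userspace sketch creating an equivalent event on the calling thread (an analogy for illustration, not the watchdog's code path; error handling abbreviated):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.size = sizeof(attr);
	attr.pinned = 1;	/* must stay on the PMU, like the watchdog */
	attr.disabled = 1;	/* start disabled, enable explicitly */

	/* monitor the calling thread on any CPU */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;	/* burn some cycles */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %lld\n", count);
	close(fd);
	return 0;
}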
/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
/* Ensure the watchdog never gets throttled */
event->hw.interrupts = 0;
if (__this_cpu_read(watchdog_nmi_touch) == true) {
__this_cpu_write(watchdog_nmi_touch, false);
return;
}
if (!watchdog_check_timestamp())
return;
/*
 * Check for a hardlockup by making sure the CPU's timer
 * interrupt is incrementing. The timer interrupt should have
 * fired multiple times before this counter overflowed; if it
 * has not, that is a good indication the CPU is stuck.
 */
if (is_hardlockup()) {
int this_cpu = smp_processor_id();
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", this_cpu);
print_modules();
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
/*
* Perform all-CPU dump only once to avoid multiple hardlockups
* generating interleaving traces
*/
if (sysctl_hardlockup_all_cpu_backtrace &&
!test_and_set_bit(0, &hardlockup_allcpu_dumped))
trigger_allbutself_cpu_backtrace();
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
__this_cpu_write(hard_watchdog_warn, true);
return;
}
__this_cpu_write(hard_watchdog_warn, false);
return;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Babu Moger | 146 | 95.42% | 1 | 50.00% |
Thomas Gleixner | 7 | 4.58% | 1 | 50.00% |
Total | 153 | 100.00% | 2 | 100.00% |
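is_hardlockup() itself lives in kernel/watchdog.c; conceptually it only asks whether the per-CPU hrtimer-interrupt count has advanced since the previous NMI sample. A standalone sketch of that idea (a simplified single-CPU model without the per-CPU accessors, not the kernel's exact code):

#include <stdio.h>
#include <stdbool.h>

static unsigned long hrtimer_interrupts;	/* bumped by the timer */
static unsigned long hrtimer_interrupts_saved;	/* snapshot at last NMI */

/* If the timer counter has not moved since the previous NMI sample,
 * timer interrupts are not being serviced: the CPU is considered stuck. */
static bool is_hardlockup(void)
{
	unsigned long hrint = hrtimer_interrupts;

	if (hrtimer_interrupts_saved == hrint)
		return true;

	hrtimer_interrupts_saved = hrint;
	return false;
}

int main(void)
{
	hrtimer_interrupts++;	/* timer fired */
	printf("NMI 1: %s\n", is_hardlockup() ? "LOCKUP" : "ok");
	hrtimer_interrupts++;	/* timer fired again */
	printf("NMI 2: %s\n", is_hardlockup() ? "LOCKUP" : "ok");
	/* timer stalls: no increment before the next NMI */
	printf("NMI 3: %s\n", is_hardlockup() ? "LOCKUP" : "ok");
	return 0;
}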
static int hardlockup_detector_event_create(void)
{
unsigned int cpu = smp_processor_id();
struct perf_event_attr *wd_attr;
struct perf_event *evt;
wd_attr = &wd_hw_attr;
wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
/* Try to register using hardware perf events */
evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
watchdog_overflow_callback, NULL);
if (IS_ERR(evt)) {
pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
PTR_ERR(evt));
return PTR_ERR(evt);
}
this_cpu_write(watchdog_ev, evt);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 92 | 100.00% | 1 | 100.00% |
Total | 92 | 100.00% | 1 | 100.00% |
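Note that perf_event_create_kernel_counter() signals failure by encoding a negative errno into the returned pointer, which is why the result is checked with IS_ERR()/PTR_ERR() rather than against NULL. A tiny freestanding sketch of that convention (simplified from the helpers in include/linux/err.h; the create_counter() function is invented for illustration):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* An error pointer occupies the top MAX_ERRNO values of the
 * address space, which no valid kernel pointer ever does. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_counter(int fail)
{
	static int dummy_event;

	if (fail)
		return ERR_PTR(-EOPNOTSUPP);	/* e.g. no usable PMU */
	return &dummy_event;
}

int main(void)
{
	void *evt = create_counter(1);

	if (IS_ERR(evt))
		printf("create failed with %ld\n", PTR_ERR(evt));
	return 0;
}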
/**
* hardlockup_detector_perf_enable - Enable the local event
*/
void hardlockup_detector_perf_enable(void)
{
if (hardlockup_detector_event_create())
return;
/* use original value for check */
if (!atomic_fetch_inc(&watchdog_cpus))
pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
perf_event_enable(this_cpu_read(watchdog_ev));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 31 | 86.11% | 2 | 66.67% |
Don Zickus | 5 | 13.89% | 1 | 33.33% |
Total | 36 | 100.00% | 3 | 100.00% |
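The atomic_fetch_inc() test is the usual "announce only once, on the first user" idiom: fetch-and-increment returns the old value, so exactly one CPU sees zero. A C11 equivalent (illustrative names):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int watchdog_cpus;

static void enable_on_cpu(int cpu)
{
	/* fetch_add returns the old value: only the first caller sees 0 */
	if (atomic_fetch_add(&watchdog_cpus, 1) == 0)
		printf("Enabled. Permanently consumes one hw-PMU counter.\n");
	printf("counter enabled on CPU %d\n", cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		enable_on_cpu(cpu);
	return 0;
}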
/**
* hardlockup_detector_perf_disable - Disable the local event
*/
void hardlockup_detector_perf_disable(void)
{
struct perf_event *event = this_cpu_read(watchdog_ev);
if (event) {
perf_event_disable(event);
this_cpu_write(watchdog_ev, NULL);
this_cpu_write(dead_event, event);
cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
atomic_dec(&watchdog_cpus);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Babu Moger | 26 | 45.61% | 1 | 20.00% |
Thomas Gleixner | 26 | 45.61% | 2 | 40.00% |
Don Zickus | 4 | 7.02% | 1 | 20.00% |
Prarit Bhargava | 1 | 1.75% | 1 | 20.00% |
Total | 57 | 100.00% | 5 | 100.00% |
/**
* hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
*
* Called from lockup_detector_cleanup(). Serialized by the caller.
*/
void hardlockup_detector_perf_cleanup(void)
{
int cpu;
for_each_cpu(cpu, &dead_events_mask) {
struct perf_event *event = per_cpu(dead_event, cpu);
/*
 * Required because for_each_cpu() unconditionally reports
 * CPU0 as set on UP kernels. Sigh.
 */
if (event)
perf_event_release_kernel(event);
per_cpu(dead_event, cpu) = NULL;
}
cpumask_clear(&dead_events_mask);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 55 | 100.00% | 3 | 100.00% |
Total | 55 | 100.00% | 3 | 100.00% |
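The disable path above deliberately does not free the event: it parks it in the per-CPU dead_event slot and marks the CPU in dead_events_mask, and this function reaps the parked events later from a context where releasing them is safe. A userspace sketch of the same park-then-reap pattern (illustrative only; free() stands in for perf_event_release_kernel()):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define NR_CPUS 4

static void *dead_event[NR_CPUS];	/* parked, not yet freed */
static bool dead_events_mask[NR_CPUS];

/* Hot path: detach the resource and park it for later destruction. */
static void disable_cpu(int cpu, void *event)
{
	dead_event[cpu] = event;
	dead_events_mask[cpu] = true;
}

/* Cold path: reap everything that was parked, then clear the mask. */
static void cleanup(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!dead_events_mask[cpu])
			continue;
		free(dead_event[cpu]);
		dead_event[cpu] = NULL;
		dead_events_mask[cpu] = false;
		printf("reaped event for CPU %d\n", cpu);
	}
}

int main(void)
{
	disable_cpu(1, malloc(16));
	disable_cpu(3, malloc(16));
	cleanup();
	return 0;
}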
/**
* hardlockup_detector_perf_stop - Globally stop watchdog events
*
* Special interface for x86 to handle the perf HT bug.
*/
void __init hardlockup_detector_perf_stop(void)
{
int cpu;
lockdep_assert_cpus_held();
for_each_online_cpu(cpu) {
struct perf_event *event = per_cpu(watchdog_ev, cpu);
if (event)
perf_event_disable(event);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
/**
* hardlockup_detector_perf_restart - Globally restart watchdog events
*
* Special interface for x86 to handle the perf HT bug.
*/
void __init hardlockup_detector_perf_restart(void)
{
int cpu;
lockdep_assert_cpus_held();
if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
return;
for_each_online_cpu(cpu) {
struct perf_event *event = per_cpu(watchdog_ev, cpu);
if (event)
perf_event_enable(event);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
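Both hooks assert that the CPU hotplug lock is held because their only caller, the x86 PMU HT-erratum fixup, runs under cpus_read_lock(). Abridged and paraphrased from fixup_ht_bug() in arch/x86/events/intel/core.c, the call pattern looks roughly like this (a sketch of the shape, not the exact code):

/* Rough shape of fixup_ht_bug() in arch/x86/events/intel/core.c */
cpus_read_lock();			/* satisfies lockdep_assert_cpus_held() */
hardlockup_detector_perf_stop();
/* ... rewrite the PMU state affected by the HT erratum ... */
hardlockup_detector_perf_restart();
cpus_read_unlock();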
/**
* hardlockup_detector_perf_init - Probe whether NMI event is available at all
*/
int __init hardlockup_detector_perf_init(void)
{
int ret = hardlockup_detector_event_create();
if (ret) {
pr_info("Perf NMI watchdog permanently disabled\n");
} else {
perf_event_release_kernel(this_cpu_read(watchdog_ev));
this_cpu_write(watchdog_ev, NULL);
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 45 | 97.83% | 1 | 50.00% |
Colin Ian King | 1 | 2.17% | 1 | 50.00% |
Total | 46 | 100.00% | 2 | 100.00% |
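The init probe uses a create-then-release pattern: build the counter once to learn whether the hardware supports it at all, then immediately release it and keep only the verdict; the per-CPU events are created for real later via hardlockup_detector_perf_enable(). A userspace analog of the same probe (illustrative only):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Probe-then-release: open the counter once to learn whether the
 * hardware supports it, close it immediately, keep only the verdict. */
static int probe_hw_cycles(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.size = sizeof(attr);

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;	/* no usable PMU event */
	close(fd);		/* we only wanted the answer */
	return 0;
}

int main(void)
{
	printf("hardware cycle counter %savailable\n",
	       probe_hw_cycles() ? "not " : "");
	return 0;
}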
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 396 | 49.87% | 8 | 50.00% |
Babu Moger | 278 | 35.01% | 1 | 6.25% |
Peter Zijlstra | 94 | 11.84% | 1 | 6.25% |
Don Zickus | 18 | 2.27% | 1 | 6.25% |
Ingo Molnar | 3 | 0.38% | 1 | 6.25% |
Nicholas Piggin | 2 | 0.25% | 1 | 6.25% |
Greg Kroah-Hartman | 1 | 0.13% | 1 | 6.25% |
Prarit Bhargava | 1 | 0.13% | 1 | 6.25% |
Colin Ian King | 1 | 0.13% | 1 | 6.25% |
Total | 794 | 100.00% | 16 | 100.00% |