Contributors: 33

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Ingo Molnar | 143 | 17.06% | 4 | 5.06% |
| Thomas Gleixner | 85 | 10.14% | 8 | 10.13% |
| Doug Anderson | 74 | 8.83% | 10 | 12.66% |
| Chris Metcalf | 68 | 8.11% | 2 | 2.53% |
| Peter Zijlstra | 58 | 6.92% | 4 | 5.06% |
| Paul E. McKenney | 44 | 5.25% | 1 | 1.27% |
| Ulrich Obergfell | 43 | 5.13% | 5 | 6.33% |
| Russell King | 39 | 4.65% | 3 | 3.80% |
| Nicholas Piggin | 37 | 4.42% | 2 | 2.53% |
| Don Zickus | 32 | 3.82% | 5 | 6.33% |
| Linus Torvalds (pre-git) | 32 | 3.82% | 6 | 7.59% |
| Aaron Tomlin | 29 | 3.46% | 2 | 2.53% |
| Babu Moger | 23 | 2.74% | 1 | 1.27% |
| Song Liu | 22 | 2.63% | 1 | 1.27% |
| Petr Mladek | 16 | 1.91% | 4 | 5.06% |
| Guenter Roeck | 15 | 1.79% | 1 | 1.27% |
| Linus Torvalds | 10 | 1.19% | 1 | 1.27% |
| Laurent Dufour | 10 | 1.19% | 1 | 1.27% |
| Jiri Kosina | 8 | 0.95% | 1 | 1.27% |
| Andi Kleen | 7 | 0.84% | 2 | 2.53% |
| Lecopzer Chen | 7 | 0.84% | 2 | 2.53% |
| Andrew Morton | 6 | 0.72% | 1 | 1.27% |
| Mandeep Singh Baines | 6 | 0.72% | 2 | 2.53% |
| Tomasz Nowicki | 5 | 0.60% | 1 | 1.27% |
| Marcelo Tosatti | 4 | 0.48% | 1 | 1.27% |
| Jason Wessel | 3 | 0.36% | 1 | 1.27% |
| Tejun Heo | 3 | 0.36% | 1 | 1.27% |
| Michal Schmidt | 3 | 0.36% | 1 | 1.27% |
| Wim Van Sebroeck | 2 | 0.24% | 1 | 1.27% |
| Frédéric Weisbecker | 1 | 0.12% | 1 | 1.27% |
| Stéphane Eranian | 1 | 0.12% | 1 | 1.27% |
| Mathieu Malaterre | 1 | 0.12% | 1 | 1.27% |
| Greg Kroah-Hartman | 1 | 0.12% | 1 | 1.27% |
| Total | 838 | | 79 | |
 
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/include/linux/nmi.h
 */
#ifndef LINUX_NMI_H
#define LINUX_NMI_H
#include <linux/sched.h>
#include <asm/irq.h>
/* Arch specific watchdogs might need to share extra watchdog-related APIs. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR_ARCH) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
#include <asm/nmi.h>
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_retry_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern struct cpumask watchdog_cpumask;
extern unsigned long *watchdog_cpumask_bits;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#define sysctl_hardlockup_all_cpu_backtrace 0
#endif /* !CONFIG_SMP */
#else /* CONFIG_LOCKUP_DETECTOR */
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_retry_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif /* !CONFIG_LOCKUP_DETECTOR */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
extern void touch_softlockup_watchdog_sched(void);
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int  softlockup_panic;
extern int lockup_detector_online_cpu(unsigned int cpu);
extern int lockup_detector_offline_cpu(unsigned int cpu);
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
#define lockup_detector_online_cpu	NULL
#define lockup_detector_offline_cpu	NULL
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void) { }
#endif
/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit -
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'watchdog_hardlockup_user_enabled' and
 * 'watchdog_softlockup_user_enabled' are variables that are only used as an
 * 'interface' between the parameters in /proc/sys/kernel and the internal
 * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is
 * handled differently because its value is not boolean, and the lockup
 * detectors are 'suspended' while 'watchdog_thresh' is equal to zero.
 */
#define WATCHDOG_HARDLOCKUP_ENABLED_BIT  0
#define WATCHDOG_SOFTOCKUP_ENABLED_BIT   1
#define WATCHDOG_HARDLOCKUP_ENABLED     (1 << WATCHDOG_HARDLOCKUP_ENABLED_BIT)
#define WATCHDOG_SOFTOCKUP_ENABLED      (1 << WATCHDOG_SOFTOCKUP_ENABLED_BIT)
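/*
 * Illustrative sketch (not part of this header): the watchdog core tests
 * these bits when (re)configuring the detectors, roughly as follows; the
 * enable_*() helper names are hypothetical stand-ins for the real
 * reconfiguration paths:
 *
 *	if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED)
 *		enable_hardlockup_detector();
 *	if (watchdog_enabled & WATCHDOG_SOFTOCKUP_ENABLED)
 *		enable_softlockup_detector();
 */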
#if defined(CONFIG_HARDLOCKUP_DETECTOR)
extern void hardlockup_detector_disable(void);
extern unsigned int hardlockup_panic;
#else
static inline void hardlockup_detector_disable(void) {}
#endif
/* Sparc64 has a special implementation that is always enabled. */
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HARDLOCKUP_DETECTOR_SPARC64)
void arch_touch_nmi_watchdog(void);
#else
static inline void arch_touch_nmi_watchdog(void) { }
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER)
void watchdog_hardlockup_touch_cpu(unsigned int cpu);
void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
#endif
#if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_cleanup(void);
extern void hardlockup_config_perf_event(const char *str);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
static inline void hardlockup_config_perf_event(const char *str) { }
#endif
void watchdog_hardlockup_stop(void);
void watchdog_hardlockup_start(void);
int watchdog_hardlockup_probe(void);
void watchdog_hardlockup_enable(unsigned int cpu);
void watchdog_hardlockup_disable(unsigned int cpu);
void lockup_detector_reconfigure(void);
#ifdef CONFIG_HARDLOCKUP_DETECTOR_BUDDY
void watchdog_buddy_check_hardlockup(int hrtimer_interrupts);
#else
static inline void watchdog_buddy_check_hardlockup(int hrtimer_interrupts) {}
#endif
/**
 * touch_nmi_watchdog - manually reset the hardlockup watchdog timeout.
 *
 * If we support detecting hardlockups, touch_nmi_watchdog() may be
 * used to pet the watchdog (reset the timeout) - for code which
 * intentionally disables interrupts for a long time. This call is stateless.
 *
 * Though this function has "nmi" in the name, the hardlockup watchdog might
 * not be backed by NMIs. This function will likely be renamed to
 * touch_hardlockup_watchdog() in the future.
 */
static inline void touch_nmi_watchdog(void)
{
	/*
	 * Pass on to the hardlockup detector selected via CONFIG_. Note that
	 * the hardlockup detector may not be arch-specific nor using NMIs
	 * and the arch_touch_nmi_watchdog() function will likely be renamed
	 * in the future.
	 */
	arch_touch_nmi_watchdog();
	touch_softlockup_watchdog();
}
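/*
 * Example usage (illustrative, not part of this header): code that spins
 * with interrupts disabled for longer than the watchdog threshold can pet
 * the watchdogs from inside its loop; poll_hw_ready() is a hypothetical
 * helper:
 *
 *	while (!poll_hw_ready(dev)) {
 *		touch_nmi_watchdog();
 *		udelay(100);
 *	}
 */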
/*
 * Create trigger_all_cpu_backtrace() out of the arch-provided
 * base function. Return whether such support was available,
 * to allow calling code to fall back to some other mechanism
 * (an illustrative fallback sketch follows this block):
 */
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
	return true;
}
static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
	return true;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	arch_trigger_cpumask_backtrace(mask, -1);
	return true;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
	return true;
}
/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   int exclude_cpu,
				   void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
	return false;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
	return false;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
	return false;
}
#endif
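/*
 * Example usage (illustrative): because the trigger_*() helpers report
 * whether arch backtrace support was available, callers typically fall
 * back to a weaker mechanism when they return false:
 *
 *	if (!trigger_all_cpu_backtrace())
 *		dump_stack();
 */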
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh);
bool arch_perf_nmi_is_available(void);
#endif
#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
    defined(CONFIG_HARDLOCKUP_DETECTOR_PERF)
void watchdog_update_hrtimer_threshold(u64 period);
#else
static inline void watchdog_update_hrtimer_threshold(u64 period) { }
#endif
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#include <asm/nmi.h>
#endif
#ifdef CONFIG_NMI_CHECK_CPU
void nmi_backtrace_stall_snap(const struct cpumask *btp);
void nmi_backtrace_stall_check(const struct cpumask *btp);
#else
static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {}
static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {}
#endif
#endif