cregit-Linux: how code gets into the kernel

Release 4.14 arch/x86/kernel/cpu/mcheck/mce_intel.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Support for Intel Correct Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
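
The ownership scheme described above boils down to a test-and-set race: whichever CPU claims a shared bank first becomes its owner, and the other CPUs leave that bank to the owner's handler. The following user-space sketch is illustrative only (all names are made up); in the real driver the arbitration point is the MCI_CTL2_CMCI_EN bit tested in cmci_discover() below, mirrored in the per-CPU mce_banks_owned bitmap.

#include <stdatomic.h>
#include <stdio.h>

#define NR_BANKS 8

/* One claim flag per shared bank; 0 = unclaimed, 1 = claimed. */
static atomic_int bank_claimed[NR_BANKS];

/* Try to become the owner of @bank; returns 1 on success, 0 if already taken. */
static int claim_bank(int bank)
{
        return atomic_exchange(&bank_claimed[bank], 1) == 0;
}

int main(void)
{
        unsigned long owned = 0;        /* local analogue of mce_banks_owned */
        int bank;

        for (bank = 0; bank < NR_BANKS; bank++) {
                if (claim_bank(bank))
                        owned |= 1UL << bank;   /* we own it, so we handle its errors */
        }
        printf("owned banks bitmap: 0x%lx\n", owned);
        return 0;
}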

/*
 * CMCI storm detection backoff counter
 *
 * During storm, we reset this counter to INITIAL_CHECK_INTERVAL in case we've
 * encountered an error. If not, we decrement it by one. We signal the end of
 * the CMCI storm when it reaches 0.
 */
static DEFINE_PER_CPU(int, cmci_backoff_cnt);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);


#define CMCI_THRESHOLD          1
#define CMCI_POLL_INTERVAL      (30 * HZ)
#define CMCI_STORM_INTERVAL     (HZ)
#define CMCI_STORM_THRESHOLD    15

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
        CMCI_STORM_NONE,
        CMCI_STORM_ACTIVE,
        CMCI_STORM_SUBSIDED,
};


static atomic_t cmci_storm_on_cpus;


static int cmci_supported(int *banks)
{
        u64 cap;

        if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
                return 0;

        /*
         * Vendor check is not strictly needed, but the initial
         * initialization is vendor keyed and this
         * makes sure none of the backdoors are entered otherwise.
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return 0;
        if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6)
                return 0;
        rdmsrl(MSR_IA32_MCG_CAP, cap);
        *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
        return !!(cap & MCG_CMCI_P);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            67       79.76%    1         25.00%
Borislav Petkov       10       11.90%    2         50.00%
Hidetoshi Seto        7        8.33%     1         25.00%
Total                 84       100.00%   4         100.00%


static bool lmce_supported(void)
{
        u64 tmp;

        if (mca_cfg.lmce_disabled)
                return false;

        rdmsrl(MSR_IA32_MCG_CAP, tmp);

        /*
         * LMCE depends on recovery support in the processor. Hence both
         * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
         */
        if ((tmp & (MCG_SER_P | MCG_LMCE_P)) !=
                   (MCG_SER_P | MCG_LMCE_P))
                return false;

        /*
         * BIOS should indicate support for LMCE by setting bit 20 in
         * IA32_FEATURE_CONTROL without which touching MCG_EXT_CTL will
         * generate a #GP fault.
         */
        rdmsrl(MSR_IA32_FEATURE_CONTROL, tmp);
        if ((tmp & (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE)) ==
                   (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE))
                return true;

        return false;
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Ashok Raj             81       100.00%   1         100.00%
Total                 81       100.00%   1         100.00%


bool mce_intel_cmci_poll(void)
{
        if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
                return false;

        /*
         * Reset the counter if we've logged an error in the last poll
         * during the storm.
         */
        if (machine_check_poll(0, this_cpu_ptr(&mce_banks_owned)))
                this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);
        else
                this_cpu_dec(cmci_backoff_cnt);

        return true;
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Borislav Petkov       24       48.98%    2         50.00%
Chen Gong             23       46.94%    1         25.00%
Christoph Lameter     2        4.08%     1         25.00%
Total                 49       100.00%   4         100.00%


void mce_intel_hcpu_update(unsigned long cpu)
{
        if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
                atomic_dec(&cmci_storm_on_cpus);

        per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Chen Gong             35       100.00%   1         100.00%
Total                 35       100.00%   1         100.00%


static void cmci_toggle_interrupt_mode(bool on)
{
        unsigned long flags, *owned;
        int bank;
        u64 val;

        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
        owned = this_cpu_ptr(mce_banks_owned);

        for_each_set_bit(bank, owned, MAX_NR_BANKS) {
                rdmsrl(MSR_IA32_MCx_CTL2(bank), val);

                if (on)
                        val |= MCI_CTL2_CMCI_EN;
                else
                        val &= ~MCI_CTL2_CMCI_EN;

                wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
        }
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Xie XiuQi             89       100.00%   1         100.00%
Total                 89       100.00%   1         100.00%


unsigned long cmci_intel_adjust_timer(unsigned long interval)
{
        if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
            (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) {
                mce_notify_irq();
                return CMCI_STORM_INTERVAL;
        }

        switch (__this_cpu_read(cmci_storm_state)) {
        case CMCI_STORM_ACTIVE:

                /*
                 * We switch back to interrupt mode once the poll timer has
                 * silenced itself. That means no events recorded and the timer
                 * interval is back to our poll interval.
                 */
                __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
                if (!atomic_sub_return(1, &cmci_storm_on_cpus))
                        pr_notice("CMCI storm subsided: switching to interrupt mode\n");

                /* FALLTHROUGH */

        case CMCI_STORM_SUBSIDED:
                /*
                 * We wait for all CPUs to go back to SUBSIDED state. When that
                 * happens we switch back to interrupt mode.
                 */
                if (!atomic_read(&cmci_storm_on_cpus)) {
                        __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
                        cmci_toggle_interrupt_mode(true);
                        cmci_recheck();
                }
                return CMCI_POLL_INTERVAL;
        default:

                /* We have shiny weather. Let the poll do whatever it thinks. */
                return interval;
        }
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Chen Gong             79       69.91%    1         33.33%
Borislav Petkov       30       26.55%    1         33.33%
Xie XiuQi             4        3.54%     1         33.33%
Total                 113      100.00%   3         100.00%


static bool cmci_storm_detect(void)
{
        unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
        unsigned long ts = __this_cpu_read(cmci_time_stamp);
        unsigned long now = jiffies;
        int r;

        if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
                return true;

        if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
                cnt++;
        } else {
                cnt = 1;
                __this_cpu_write(cmci_time_stamp, now);
        }
        __this_cpu_write(cmci_storm_cnt, cnt);

        if (cnt <= CMCI_STORM_THRESHOLD)
                return false;

        cmci_toggle_interrupt_mode(false);
        __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
        r = atomic_add_return(1, &cmci_storm_on_cpus);
        mce_timer_kick(CMCI_STORM_INTERVAL);
        this_cpu_write(cmci_backoff_cnt, INITIAL_CHECK_INTERVAL);

        if (r == 1)
                pr_notice("CMCI storm detected: switching to poll mode\n");
        return true;
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Chen Gong             129      91.49%    1         33.33%
Borislav Petkov       8        5.67%     1         33.33%
Xie XiuQi             4        2.84%     1         33.33%
Total                 141      100.00%   3         100.00%
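
cmci_storm_detect() is a simple per-CPU rate limiter: events are counted inside a CMCI_STORM_INTERVAL (one second) window, and once more than CMCI_STORM_THRESHOLD (15) arrive within a single window the CPU falls back to poll mode. The following standalone sketch shows the same windowed-count idea outside the kernel; the names and numbers are illustrative analogues, not the driver's code.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define STORM_INTERVAL_SEC      1       /* analogue of CMCI_STORM_INTERVAL */
#define STORM_THRESHOLD         15      /* analogue of CMCI_STORM_THRESHOLD */

static unsigned int storm_cnt;          /* events seen in the current window */
static time_t window_start;             /* analogue of cmci_time_stamp */

static bool storm_detect(time_t now)
{
        if (now <= window_start + STORM_INTERVAL_SEC) {
                storm_cnt++;            /* still inside the current window */
        } else {
                storm_cnt = 1;          /* window expired: restart the count */
                window_start = now;
        }
        return storm_cnt > STORM_THRESHOLD;
}

int main(void)
{
        time_t now = time(NULL);
        int i;

        window_start = now;
        for (i = 0; i < 20; i++) {      /* 20 back-to-back events */
                if (storm_detect(now)) {
                        printf("storm detected after %d events\n", i + 1);
                        break;
                }
        }
        return 0;
}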

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
        if (cmci_storm_detect())
                return;

        machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            17       68.00%    1         33.33%
Chen Gong             6        24.00%    1         33.33%
Christoph Lameter     2        8.00%     1         33.33%
Total                 25       100.00%   3         100.00%

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
        unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
        unsigned long flags;
        int i;
        int bios_wrong_thresh = 0;

        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
        for (i = 0; i < banks; i++) {
                u64 val;
                int bios_zero_thresh = 0;

                if (test_bit(i, owned))
                        continue;

                /* Skip banks in firmware first mode */
                if (test_bit(i, mce_banks_ce_disabled))
                        continue;

                rdmsrl(MSR_IA32_MCx_CTL2(i), val);

                /* Already owned by someone else? */
                if (val & MCI_CTL2_CMCI_EN) {
                        clear_bit(i, owned);
                        __clear_bit(i, this_cpu_ptr(mce_poll_banks));
                        continue;
                }

                if (!mca_cfg.bios_cmci_threshold) {
                        val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
                        val |= CMCI_THRESHOLD;
                } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
                        /*
                         * If bios_cmci_threshold boot option was specified
                         * but the threshold is zero, we'll try to initialize
                         * it to 1.
                         */
                        bios_zero_thresh = 1;
                        val |= CMCI_THRESHOLD;
                }

                val |= MCI_CTL2_CMCI_EN;
                wrmsrl(MSR_IA32_MCx_CTL2(i), val);
                rdmsrl(MSR_IA32_MCx_CTL2(i), val);

                /* Did the enable bit stick? -- the bank supports CMCI */
                if (val & MCI_CTL2_CMCI_EN) {
                        set_bit(i, owned);
                        __clear_bit(i, this_cpu_ptr(mce_poll_banks));
                        /*
                         * We are able to set thresholds for some banks that
                         * had a threshold of 0. This means the BIOS has not
                         * set the thresholds properly or does not work with
                         * this boot option. Note down now and report later.
                         */
                        if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
                                        (val & MCI_CTL2_CMCI_THRESHOLD_MASK))
                                bios_wrong_thresh = 1;
                } else {
                        WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
                }
        }
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
        if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
                pr_info_once(
                        "bios_cmci_threshold: Some banks do not have valid thresholds set\n");
                pr_info_once(
                        "bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
        }
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            160      57.55%    2         18.18%
Naveen N. Rao         85       30.58%    2         18.18%
Borislav Petkov       9        3.24%     1         9.09%
Hidetoshi Seto        8        2.88%     1         9.09%
Huang Ying            7        2.52%     2         18.18%
Christoph Lameter     5        1.80%     1         9.09%
Thomas Gleixner       2        0.72%     1         9.09%
Tony Luck             2        0.72%     1         9.09%
Total                 278      100.00%   11        100.00%

/*
 * Just in case we missed an event during initialization check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
        unsigned long flags;
        int banks;

        if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
                return;

        local_irq_save(flags);
        machine_check_poll(0, this_cpu_ptr(&mce_banks_owned));
        local_irq_restore(flags);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            48       87.27%    1         25.00%
Tejun Heo             3        5.45%     1         25.00%
Christoph Lameter     3        5.45%     1         25.00%
Borislav Petkov       1        1.82%     1         25.00%
Total                 55       100.00%   4         100.00%

/* Caller must hold the lock on cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
        u64 val;

        if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
                return;
        rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
        val &= ~MCI_CTL2_CMCI_EN;
        wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
        __clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Naveen N. Rao         59       96.72%    1         50.00%
Christoph Lameter     2        3.28%     1         50.00%
Total                 61       100.00%   2         100.00%

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
        unsigned long flags;
        int i;
        int banks;

        if (!cmci_supported(&banks))
                return;
        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
        for (i = 0; i < banks; i++)
                __cmci_disable_bank(i);
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            50       81.97%    1         25.00%
Hidetoshi Seto        8        13.11%    1         25.00%
Thomas Gleixner       2        3.28%     1         25.00%
Naveen N. Rao         1        1.64%     1         25.00%
Total                 61       100.00%   4         100.00%


static void cmci_rediscover_work_func(void *arg)
{
        int banks;

        /* Recheck banks in case CPUs don't all have the same */
        if (cmci_supported(&banks))
                cmci_discover(banks);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Tang Chen             26       96.30%    1         50.00%
Srivatsa S. Bhat      1        3.70%     1         50.00%
Total                 27       100.00%   2         100.00%

/* After a CPU went down cycle through all the others and rediscover */
void cmci_rediscover(void)
{
        int banks;

        if (!cmci_supported(&banks))
                return;

        on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            21       72.41%    1         33.33%
Tang Chen             4        13.79%    1         33.33%
Srivatsa S. Bhat      4        13.79%    1         33.33%
Total                 29       100.00%   3         100.00%

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
        int banks;

        if (cmci_supported(&banks))
                cmci_discover(banks);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            23       100.00%   1         100.00%
Total                 23       100.00%   1         100.00%


void cmci_disable_bank(int bank)
{
        int banks;
        unsigned long flags;

        if (!cmci_supported(&banks))
                return;

        raw_spin_lock_irqsave(&cmci_discover_lock, flags);
        __cmci_disable_bank(bank);
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Naveen N. Rao         44       95.65%    1         50.00%
Thomas Gleixner       2        4.35%     1         50.00%
Total                 46       100.00%   2         100.00%


static void intel_init_cmci(void)
{
        int banks;

        if (!cmci_supported(&banks))
                return;

        mce_threshold_vector = intel_threshold_interrupt;
        cmci_discover(banks);
        /*
         * For CPU #0 this runs with still disabled APIC, but that's
         * ok because only the vector is set up. We still do another
         * check for the banks later for CPU #0 just to make sure
         * to not miss any events.
         */
        apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
        cmci_recheck();
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            43       100.00%   1         100.00%
Total                 43       100.00%   1         100.00%


static void intel_init_lmce(void)
{
        u64 val;

        if (!lmce_supported())
                return;

        rdmsrl(MSR_IA32_MCG_EXT_CTL, val);

        if (!(val & MCG_EXT_CTL_LMCE_EN))
                wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Ashok Raj             43       100.00%   2         100.00%
Total                 43       100.00%   2         100.00%


static void intel_clear_lmce(void)
{
        u64 val;

        if (!lmce_supported())
                return;

        rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
        val &= ~MCG_EXT_CTL_LMCE_EN;
        wrmsrl(MSR_IA32_MCG_EXT_CTL, val);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Ashok Raj             37       100.00%   1         100.00%
Total                 37       100.00%   1         100.00%


static void intel_ppin_init(struct cpuinfo_x86 *c)
{
        unsigned long long val;

        /*
         * Even if testing the presence of the MSR would be enough, we don't
         * want to risk the situation where other models reuse this MSR for
         * other purposes.
         */
        switch (c->x86_model) {
        case INTEL_FAM6_IVYBRIDGE_X:
        case INTEL_FAM6_HASWELL_X:
        case INTEL_FAM6_BROADWELL_XEON_D:
        case INTEL_FAM6_BROADWELL_X:
        case INTEL_FAM6_SKYLAKE_X:
        case INTEL_FAM6_XEON_PHI_KNL:
        case INTEL_FAM6_XEON_PHI_KNM:

                if (rdmsrl_safe(MSR_PPIN_CTL, &val))
                        return;

                if ((val & 3UL) == 1UL) {
                        /* PPIN available but disabled: */
                        return;
                }

                /* If PPIN is disabled, but not locked, try to enable: */
                if (!(val & 3UL)) {
                        wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
                        rdmsrl_safe(MSR_PPIN_CTL, &val);
                }

                if ((val & 3UL) == 2UL)
                        set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
        }
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Tony Luck             111      94.87%    1         50.00%
Piotr Luc             6        5.13%     1         50.00%
Total                 117      100.00%   2         100.00%
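
The val & 3UL checks in intel_ppin_init() read the low two bits of MSR_PPIN_CTL, which the code treats as a lock bit (bit 0) and a PPIN enable bit (bit 1). Below is a small, purely illustrative decoder of those states under that interpretation; the function and message strings are made up for this sketch.

#include <stdio.h>

/* Interpret PPIN_CTL[1:0]: bit 0 = lock, bit 1 = enable (illustrative only). */
static const char *ppin_ctl_state(unsigned long long val)
{
        switch (val & 3ULL) {
        case 0: return "disabled, not locked: kernel tries to set the enable bit";
        case 1: return "disabled and locked: kernel gives up";
        case 2: return "enabled: kernel sets X86_FEATURE_INTEL_PPIN";
        default: return "both bits set: kernel leaves the feature unadvertised";
        }
}

int main(void)
{
        unsigned long long v;

        for (v = 0; v < 4; v++)
                printf("PPIN_CTL[1:0] = %llu -> %s\n", v, ppin_ctl_state(v));
        return 0;
}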


void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
        intel_init_thermal(c);
        intel_init_cmci();
        intel_init_lmce();
        intel_ppin_init(c);
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Zwane Mwaikambo       15       57.69%    1         25.00%
Tony Luck             5        19.23%    1         25.00%
Andi Kleen            3        11.54%    1         25.00%
Ashok Raj             3        11.54%    1         25.00%
Total                 26       100.00%   4         100.00%


void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
        intel_clear_lmce();
}

Contributors

Person                Tokens   Prop      Commits   CommitProp
Ashok Raj             13       100.00%   1         100.00%
Total                 13       100.00%   1         100.00%


Overall Contributors

Person                Tokens   Prop      Commits   CommitProp
Andi Kleen            457      28.56%    2         6.06%
Chen Gong             326      20.38%    2         6.06%
Naveen N. Rao         191      11.94%    3         9.09%
Ashok Raj             177      11.06%    3         9.09%
Tony Luck             124      7.75%     2         6.06%
Xie XiuQi             97       6.06%     1         3.03%
Borislav Petkov       92       5.75%     5         15.15%
Zwane Mwaikambo       30       1.88%     1         3.03%
Tang Chen             30       1.88%     1         3.03%
Hidetoshi Seto        23       1.44%     2         6.06%
Christoph Lameter     14       0.88%     1         3.03%
Huang Ying            7        0.44%     2         6.06%
Thomas Gleixner       7        0.44%     1         3.03%
Srivatsa S. Bhat      6        0.38%     1         3.03%
Tejun Heo             6        0.38%     2         6.06%
Piotr Luc             6        0.38%     1         3.03%
Alexey Dobriyan       3        0.19%     1         3.03%
H. Peter Anvin        3        0.19%     1         3.03%
Greg Kroah-Hartman    1        0.06%     1         3.03%
Total                 1600     100.00%   33        100.00%
Created with cregit.