Contributors: 47
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Len Brown |
113 |
14.85% |
11 |
12.94% |
Juergen Gross |
78 |
10.25% |
4 |
4.71% |
Alexey Y. Starikovskiy |
64 |
8.41% |
2 |
2.35% |
Alexander Chiang |
49 |
6.44% |
2 |
2.35% |
Josh Boyer |
39 |
5.12% |
1 |
1.18% |
Michal Wilczynski |
32 |
4.20% |
5 |
5.88% |
Thomas Gleixner |
31 |
4.07% |
4 |
4.71% |
Venkatesh Pallipadi |
27 |
3.55% |
3 |
3.53% |
Smita Koralahalli |
25 |
3.29% |
1 |
1.18% |
Jonathan (Zhixiong) Zhang |
25 |
3.29% |
1 |
1.18% |
Andi Kleen |
24 |
3.15% |
3 |
3.53% |
Kirill A. Shutemov |
20 |
2.63% |
2 |
2.35% |
Andy Grover |
17 |
2.23% |
3 |
3.53% |
Andy Shevchenko |
16 |
2.10% |
2 |
2.35% |
Graeme Gregory |
14 |
1.84% |
1 |
1.18% |
Roger Pau Monné |
14 |
1.84% |
1 |
1.18% |
Yakui Zhao |
13 |
1.71% |
3 |
3.53% |
Linus Torvalds |
13 |
1.71% |
1 |
1.18% |
Harvey Harrison |
12 |
1.58% |
1 |
1.18% |
Jiang Liu |
11 |
1.45% |
1 |
1.18% |
Hans de Goede |
10 |
1.31% |
1 |
1.18% |
Yinghai Lu |
9 |
1.18% |
3 |
3.53% |
FUJITA Tomonori |
8 |
1.05% |
1 |
1.18% |
Naveen N. Rao |
8 |
1.05% |
1 |
1.18% |
Yazen Ghannam |
7 |
0.92% |
1 |
1.18% |
Sean Christopherson |
7 |
0.92% |
2 |
2.35% |
Jeremy Fitzhardinge |
7 |
0.92% |
2 |
2.35% |
Harald Welte |
6 |
0.79% |
1 |
1.18% |
Ingo Molnar |
6 |
0.79% |
2 |
2.35% |
Patrick Mochel |
5 |
0.66% |
1 |
1.18% |
Liu Jinsong |
5 |
0.66% |
1 |
1.18% |
Alok N Kataria |
5 |
0.66% |
1 |
1.18% |
Andreas Herrmann |
4 |
0.53% |
1 |
1.18% |
H. Peter Anvin |
4 |
0.53% |
2 |
2.35% |
Aleksey Makarov |
4 |
0.53% |
1 |
1.18% |
Mike Travis |
4 |
0.53% |
1 |
1.18% |
Borislav Petkov |
4 |
0.53% |
1 |
1.18% |
Shaohua Li |
4 |
0.53% |
1 |
1.18% |
David Rientjes |
3 |
0.39% |
1 |
1.18% |
Konrad Rzeszutek Wilk |
3 |
0.39% |
1 |
1.18% |
David Mosberger-Tang |
2 |
0.26% |
1 |
1.18% |
Keith Busch |
2 |
0.26% |
1 |
1.18% |
Rafael J. Wysocki |
2 |
0.26% |
1 |
1.18% |
Tom Lendacky |
2 |
0.26% |
1 |
1.18% |
Tejun Heo |
1 |
0.13% |
1 |
1.18% |
jia zhang |
1 |
0.13% |
1 |
1.18% |
Erich Focht |
1 |
0.13% |
1 |
1.18% |
Total |
761 |
|
85 |
|
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_X86_ACPI_H
#define _ASM_X86_ACPI_H
/*
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
*/
#include <acpi/proc_cap_intel.h>
#include <asm/numa.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mpspec.h>
#include <asm/x86_init.h>
#include <asm/cpufeature.h>
#include <asm/irq_vectors.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#ifdef CONFIG_ACPI_APEI
# include <asm/pgtable_types.h>
#endif
#ifdef CONFIG_ACPI
extern int acpi_lapic;
extern int acpi_ioapic;
extern int acpi_noirq;
extern int acpi_strict;
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern int acpi_disable_cmcff;
extern bool acpi_int_src_ovr[NR_IRQS_LEGACY];
extern u8 acpi_sci_flags;
extern u32 acpi_sci_override_gsi;
void acpi_pic_sci_set_trigger(unsigned int, u16);
struct device;
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity);
extern void (*__acpi_unregister_gsi)(u32 gsi);
/*
 * Turn off ACPI support entirely: the interpreter, ACPI-based PCI
 * configuration and ACPI IRQ routing are all disabled.
 */
static inline void disable_acpi(void)
{
	acpi_noirq = 1;
	acpi_pci_disabled = 1;
	acpi_disabled = 1;
}
extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
extern int acpi_blacklisted(void);
static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
/*
 * Disable use of ACPI for PCI configuration; this also implies that
 * ACPI must not be used for interrupt routing.
 */
static inline void acpi_disable_pci(void)
{
	acpi_noirq_set();
	acpi_pci_disabled = 1;
}
/* Low-level suspend routine. */
extern int (*acpi_suspend_lowlevel)(void);
/* Physical address to resume after wakeup */
unsigned long acpi_get_wakeup_address(void);
/*
 * Whether programming the firmware waking vector should be skipped.
 * Xen PV guests do not resume through the firmware path, so skip it
 * when running on Xen PV.
 */
static inline bool acpi_skip_set_wakeup_address(void)
{
	return cpu_feature_enabled(X86_FEATURE_XENPV);
}

/* Tell the generic ACPI code this arch supplies its own implementation. */
#define acpi_skip_set_wakeup_address acpi_skip_set_wakeup_address
union acpi_subtable_headers;
int __init acpi_parse_mp_wake(union acpi_subtable_headers *header,
const unsigned long end);
void asm_acpi_mp_play_dead(u64 reset_vector, u64 pgd_pa);
/*
 * Check if the CPU can handle C2 and deeper; returns the deepest
 * usable C-state: 1 (C1 only) on affected AMD parts, otherwise the
 * caller-supplied max_cstate.
 */
static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
{
	/*
	 * Early models (<=5) of family 0x0F AMD Opterons are not supposed
	 * to go into C2 state.
	 *
	 * Steppings 0x0A and later are good.
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 == 0x0F &&
	    boot_cpu_data.x86_model <= 0x05 &&
	    boot_cpu_data.x86_stepping < 0x0A)
		return 1;

	if (boot_cpu_has(X86_BUG_AMD_APIC_C1E))
		return 1;

	return max_cstate;
}
static inline bool arch_has_acpi_pdc(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
return (c->x86_vendor == X86_VENDOR_INTEL ||
c->x86_vendor == X86_VENDOR_CENTAUR);
}
/*
 * Fill in the ACPI processor capabilities word (*cap) advertised to the
 * firmware, based on the features of the boot CPU (cpu 0).
 */
static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	/* SMP C-state coordination is always advertised. */
	*cap |= ACPI_PROC_CAP_C_CAPABILITY_SMP;

	/* Enable coordination with firmware's _TSD info */
	*cap |= ACPI_PROC_CAP_SMP_T_SWCOORD;

	/* Enhanced SpeedStep -> software-coordinated SMP P-states. */
	if (cpu_has(c, X86_FEATURE_EST))
		*cap |= ACPI_PROC_CAP_EST_CAPABILITY_SWSMP;

	/* FFH (native) throttling control. */
	if (cpu_has(c, X86_FEATURE_ACPI))
		*cap |= ACPI_PROC_CAP_T_FFH;

	/* HWP implies collaborative processor performance control. */
	if (cpu_has(c, X86_FEATURE_HWP))
		*cap |= ACPI_PROC_CAP_COLLAB_PROC_PERF;

	/*
	 * If mwait/monitor is unsupported, C_C1_FFH and
	 * C2/C3_FFH will be disabled.
	 */
	if (!cpu_has(c, X86_FEATURE_MWAIT) ||
	    boot_option_idle_override == IDLE_NOMWAIT)
		*cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);

	if (xen_initial_domain()) {
		/*
		 * When Linux is running as Xen dom0, the hypervisor is the
		 * entity in charge of the processor power management, and so
		 * Xen needs to check the OS capabilities reported in the
		 * processor capabilities buffer matches what the hypervisor
		 * driver supports.
		 */
		xen_sanitize_proc_cap_bits(cap);
	}
}
/* True when MADT parsing found local APIC entries (acpi_lapic was set). */
static inline bool acpi_has_cpu_in_madt(void)
{
	return acpi_lapic != 0;
}
/* Arch provides its own RSDP setter; generic ACPI code uses it. */
#define ACPI_HAVE_ARCH_SET_ROOT_POINTER
/* Store the RSDP physical address via the x86_init platform hook. */
static inline void acpi_arch_set_root_pointer(u64 addr)
{
	x86_init.acpi.set_root_pointer(addr);
}
/* Arch provides its own RSDP getter; generic ACPI code uses it. */
#define ACPI_HAVE_ARCH_GET_ROOT_POINTER
/* Retrieve the RSDP physical address via the x86_init platform hook. */
static inline u64 acpi_arch_get_root_pointer(void)
{
	return x86_init.acpi.get_root_pointer();
}
void acpi_generic_reduced_hw_init(void);
void x86_default_set_root_pointer(u64 addr);
u64 x86_default_get_root_pointer(void);
#ifdef CONFIG_XEN_PV
/* A Xen PV domain needs a special acpi_os_ioremap() handling. */
extern void __iomem * (*acpi_os_ioremap)(acpi_physical_address phys,
acpi_size size);
void __iomem *x86_acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap
#endif
#else /* !CONFIG_ACPI */
/* Stubs for kernels built without CONFIG_ACPI: ACPI is treated as absent. */
#define acpi_lapic 0
#define acpi_ioapic 0
#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }
static inline void acpi_generic_reduced_hw_init(void) { }
/* No RSDP without ACPI: setting is a no-op, getting reports address 0. */
static inline void x86_default_set_root_pointer(u64 addr) { }
static inline u64 x86_default_get_root_pointer(void)
{
	return 0;
}
#endif /* !CONFIG_ACPI */
#define ARCH_HAS_POWER_INIT 1
#ifdef CONFIG_ACPI_NUMA
extern int x86_acpi_numa_init(void);
#endif /* CONFIG_ACPI_NUMA */
struct cper_ia_proc_ctx;
#ifdef CONFIG_ACPI_APEI
/*
 * Page protection to use when APEI maps error-record memory; the
 * address argument is deliberately ignored — see below.
 */
static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr)
{
	/*
	 * We currently have no way to look up the EFI memory map
	 * attributes for a region in a consistent way, because the
	 * memmap is discarded after efi_free_boot_services(). So if
	 * you call efi_mem_attributes() during boot and at runtime,
	 * you could theoretically see different attributes.
	 *
	 * We are yet to see any x86 platforms that require anything
	 * other than PAGE_KERNEL (some ARM64 platforms require the
	 * equivalent of PAGE_KERNEL_NOCACHE). Additionally, if SME
	 * is active, the ACPI information will not be encrypted,
	 * so return PAGE_KERNEL_NOENC until we know differently.
	 */
	return PAGE_KERNEL_NOENC;
}
int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
u64 lapic_id);
#else
/* Without CONFIG_ACPI_APEI, reporting a CPER error context always fails. */
static inline int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id)
{
	return -EINVAL;
}
#endif
#define ACPI_TABLE_UPGRADE_MAX_PHYS (max_low_pfn_mapped << PAGE_SHIFT)
#endif /* _ASM_X86_ACPI_H */