Contributors: 29

Author                      Tokens  Token %  Commits  Commit %
Andi Kleen                     174   25.59%        4     7.14%
Pavel Machek                   148   21.76%        4     7.14%
H. Peter Anvin                  72   10.59%        7    12.50%
David Woodhouse                 60    8.82%        3     5.36%
Rafael J. Wysocki               37    5.44%        6    10.71%
Kees Cook                       36    5.29%        1     1.79%
Sean Christopherson             17    2.50%        2     3.57%
Kristen Carlson Accardi         16    2.35%        1     1.79%
Lv Zheng                        14    2.06%        1     1.79%
Shaohua Li                      14    2.06%        2     3.57%
Jarkko Sakkinen                 12    1.76%        3     5.36%
Brian Gerst                     11    1.62%        1     1.79%
Todd E Brandt                   11    1.62%        1     1.79%
Marcin Ślusarz                  10    1.47%        1     1.79%
Glauber de Oliveira Costa        6    0.88%        1     1.79%
Ingo Molnar                      6    0.88%        2     3.57%
Borislav Petkov                  5    0.74%        1     1.79%
Matt Mackall                     5    0.74%        2     3.57%
Alexander Chiang                 5    0.74%        1     1.79%
Jaswinder Singh Rajput           4    0.59%        2     3.57%
Yinghai Lu                       3    0.44%        1     1.79%
Konrad Rzeszutek Wilk            3    0.44%        2     3.57%
Mike Rapoport                    3    0.44%        1     1.79%
Toshi Kani                       2    0.29%        1     1.79%
Björn Mork                       2    0.29%        1     1.79%
Alexander Duyck                  1    0.15%        1     1.79%
Greg Kroah-Hartman               1    0.15%        1     1.79%
Andrew Lutomirski                1    0.15%        1     1.79%
Alexey Dobriyan                  1    0.15%        1     1.79%
Total                          680                56


// SPDX-License-Identifier: GPL-2.0
/*
 * sleep.c - x86-specific ACPI sleep support.
 *
 *  Copyright (C) 2001-2003 Patrick Mochel
 *  Copyright (C) 2001-2003 Pavel Machek <pavel@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/dmi.h>
#include <linux/cpumask.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/desc.h>
#include <asm/cacheflush.h>
#include <asm/realmode.h>
#include <asm/hypervisor.h>
#include <asm/smp.h>

#include <linux/ftrace.h>
#include "../../realmode/rm/wakeup.h"
#include "sleep.h"

/*
 * Flags handed to the real-mode wakeup code via header->realmode_flags;
 * set with the "acpi_sleep=" boot parameter below (bit 0: s3_bios,
 * bit 1: s3_mode, bit 2: s3_beep).
 */
unsigned long acpi_realmode_flags;

#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
/*
 * Temporary stack used by the resuming CPU at wakeup until its saved
 * context, including the real %rsp, is restored by do_suspend_lowlevel().
 */
static char temp_stack[4096];
#endif

/**
 * acpi_get_wakeup_address - provide physical address for S3 wakeup
 *
 * Returns the physical address where the kernel should be resumed after the
 * system awakes from S3, e.g. for programming into the firmware waking vector.
 */
unsigned long acpi_get_wakeup_address(void)
{
	return ((unsigned long)(real_mode_header->wakeup_start));
}
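
/*
 * For illustration only (not part of this file's logic): the generic ACPI
 * suspend path is expected to program this address into the FACS firmware
 * waking vector before entering S3.  A minimal sketch, assuming the ACPICA
 * acpi_set_firmware_waking_vector() interface; the call site and error
 * handling shown here are not taken from this file:
 *
 *	acpi_status status;
 *
 *	status = acpi_set_firmware_waking_vector(acpi_get_wakeup_address(), 0);
 *	if (ACPI_FAILURE(status))
 *		pr_err("could not set the firmware waking vector\n");
 */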

/**
 * x86_acpi_enter_sleep_state - enter sleep state
 * @state: Sleep state to enter.
 *
 * Wrapper around acpi_enter_sleep_state() to be called by assembly.
 */
asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state)
{
	return acpi_enter_sleep_state(state);
}
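
/*
 * For reference, a hedged sketch (not copied from the wakeup assembly):
 * the expected caller is do_suspend_lowlevel() in the wakeup assembly,
 * roughly along the lines of
 *
 *	movl	$3, %edi
 *	call	x86_acpi_enter_sleep_state
 *
 * on 64-bit, with the requested sleep state passed in the first argument
 * register.
 */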

/**
 * x86_acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity-mapped page table, copy the wakeup routine to low
 * memory and hand control to do_suspend_lowlevel(), which saves the
 * processor state and enters the sleep state.
 */
int x86_acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header =
		(struct wakeup_header *) __va(real_mode_header->wakeup_header);

	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

	header->pmode_behavior = 0;

#ifndef CONFIG_64BIT
	native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

	/*
	 * We have to check that we can write back the value, and not
	 * just read it.  At least on 90 nm Pentium M (Family 6, Model
	 * 13), reading an invalid MSR is not guaranteed to trap, see
	 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
	 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
	 * nm process with 512-KB L2 Cache Specification Update".
	 */
	if (!rdmsr_safe(MSR_EFER,
			&header->pmode_efer_low,
			&header->pmode_efer_high) &&
	    !wrmsr_safe(MSR_EFER,
			header->pmode_efer_low,
			header->pmode_efer_high))
		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

	header->pmode_cr0 = read_cr0();
	if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
		header->pmode_cr4 = __read_cr4();
		header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
	}
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high) &&
	    !wrmsr_safe(MSR_IA32_MISC_ENABLE,
			header->pmode_misc_en_low,
			header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
	/*
	 * As each CPU starts up, it will find its own stack pointer
	 * from its current_task->thread.sp. Typically that will be
	 * the idle thread for a newly-started AP, or even the boot
	 * CPU which will find it set to &init_task in the static
	 * per-cpu data.
	 *
	 * Make the resuming CPU use the temporary stack at startup
	 * by setting current->thread.sp to point to that. The true
	 * %rsp will be restored with the rest of the CPU context,
	 * by do_suspend_lowlevel(). And unwinders don't care about
	 * the abuse of ->thread.sp because it's a dead variable
	 * while the thread is running on the CPU anyway; the true
	 * value is in the actual %rsp register.
	 */
	current->thread.sp = (unsigned long)temp_stack + sizeof(temp_stack);
	/*
	 * Ensure the CPU knows which one it is when it comes back, unless
	 * it is starting in parallel mode and is expected to work that out
	 * for itself.
	 */
	if (!(smpboot_control & STARTUP_PARALLEL_MASK))
		smpboot_control = smp_processor_id();
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	/*
	 * Pause/unpause graph tracing around do_suspend_lowlevel as it has
	 * inconsistent call/return info after it jumps to the wakeup vector.
	 */
	pause_graph_tracing();
	do_suspend_lowlevel();
	unpause_graph_tracing();
	return 0;
}
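
/*
 * A sketch of how this function is reached: the generic ACPI code calls it
 * through the acpi_suspend_lowlevel function pointer, which x86 setup code
 * points at this implementation, e.g.
 *
 *	acpi_suspend_lowlevel = x86_acpi_suspend_lowlevel;
 *
 * The exact location of that assignment is an assumption and is not part
 * of this file.
 */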

static int __init acpi_sleep_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "s3_bios", 7) == 0)
			acpi_realmode_flags |= 1;
		if (strncmp(str, "s3_mode", 7) == 0)
			acpi_realmode_flags |= 2;
		if (strncmp(str, "s3_beep", 7) == 0)
			acpi_realmode_flags |= 4;
#ifdef CONFIG_HIBERNATION
		if (strncmp(str, "s4_hwsig", 8) == 0)
			acpi_check_s4_hw_signature = 1;
		if (strncmp(str, "s4_nohwsig", 10) == 0)
			acpi_check_s4_hw_signature = 0;
#endif
		if (strncmp(str, "nonvs", 5) == 0)
			acpi_nvs_nosave();
		if (strncmp(str, "nonvs_s3", 8) == 0)
			acpi_nvs_nosave_s3();
		if (strncmp(str, "old_ordering", 12) == 0)
			acpi_old_suspend_ordering();
		if (strncmp(str, "nobl", 4) == 0)
			acpi_sleep_no_blacklist();
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("acpi_sleep=", acpi_sleep_setup);

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HYPERVISOR_GUEST)
static int __init init_s4_sigcheck(void)
{
	/*
	 * If running on a hypervisor, honour the ACPI specification
	 * by default and trigger a clean reboot when the hardware
	 * signature in FACS is changed after hibernation.
	 */
	if (acpi_check_s4_hw_signature == -1 &&
	    !hypervisor_is_type(X86_HYPER_NATIVE))
		acpi_check_s4_hw_signature = 1;

	return 0;
}
/* This must happen before acpi_init(), which is a subsys initcall. */
arch_initcall(init_s4_sigcheck);
#endif