Contributors: 11

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Joerg Roedel | 96 | 44.86% | 3 | 18.75% |
| Ard Biesheuvel | 84 | 39.25% | 3 | 18.75% |
| Uros Bizjak | 14 | 6.54% | 1 | 6.25% |
| Andi Kleen | 8 | 3.74% | 2 | 12.50% |
| Suresh B. Siddha | 2 | 0.93% | 1 | 6.25% |
| Mike Rapoport | 2 | 0.93% | 1 | 6.25% |
| Vivek Goyal | 2 | 0.93% | 1 | 6.25% |
| Kuppuswamy Sathyanarayanan | 2 | 0.93% | 1 | 6.25% |
| Brian Gerst | 2 | 0.93% | 1 | 6.25% |
| Greg Kroah-Hartman | 1 | 0.47% | 1 | 6.25% |
| Brijesh Singh | 1 | 0.47% | 1 | 6.25% |
| Total | 214 | | 16 | |
// SPDX-License-Identifier: GPL-2.0

#include <linux/linkage.h>
#include <linux/types.h>

#include <asm/desc.h>
#include <asm/init.h>
#include <asm/setup.h>
#include <asm/sev.h>
#include <asm/trapnr.h>

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * the switch happens in functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and may be instrumented by tracing or KASAN, neither of which works
 * during early CPU bringup. The idt_table also has the runtime vectors
 * configured, which require certain CPU state (like the TSS) to be set up
 * already; that hasn't happened yet at this point in the bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

/* This may run while still in the direct mapping */
void __head startup_64_load_idt(void *vc_handler)
{
	struct desc_ptr desc = {
		.address = (unsigned long)rip_rel_ptr(bringup_idt_table),
		.size    = sizeof(bringup_idt_table) - 1,
	};
	struct idt_data data;
	gate_desc idt_desc;

	/* @vc_handler is set only for a VMM Communication Exception */
	if (vc_handler) {
		init_idt_data(&data, X86_TRAP_VC, vc_handler);
		idt_init_desc(&idt_desc, &data);
		native_write_idt_entry((gate_desc *)desc.address, X86_TRAP_VC, &idt_desc);
	}

	native_load_idt(&desc);
}
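For readers less familiar with the descriptor tables: `native_write_idt_entry()` above copies a 16-byte interrupt gate into the table. Below is a minimal, self-contained sketch of that gate layout as the x86-64 architecture defines it; the names `idt_gate64` and `fill_gate` are made up for illustration and are not kernel identifiers.

```c
#include <stdint.h>

/* 16-byte x86-64 interrupt gate, as the CPU reads it from the IDT. */
struct idt_gate64 {
	uint16_t offset_low;	/* handler address, bits 0..15  */
	uint16_t selector;	/* code segment selector        */
	uint8_t  ist;		/* bits 0..2: IST stack index   */
	uint8_t  type_attr;	/* present bit, DPL, gate type  */
	uint16_t offset_mid;	/* handler address, bits 16..31 */
	uint32_t offset_high;	/* handler address, bits 32..63 */
	uint32_t reserved;
};

/* Scatter a handler address across the three offset fields. */
static void fill_gate(struct idt_gate64 *g, uint64_t handler, uint16_t cs)
{
	g->offset_low  = handler & 0xffff;
	g->selector    = cs;
	g->ist         = 0;
	g->type_attr   = 0x8e;	/* present, DPL 0, 64-bit interrupt gate */
	g->offset_mid  = (handler >> 16) & 0xffff;
	g->offset_high = (uint32_t)(handler >> 32);
	g->reserved    = 0;
}
```

In the bringup table only the #VC vector (X86_TRAP_VC) is ever populated, and only when a handler is passed in; every other entry stays zeroed, i.e. marked not present.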
/*
 * Set up boot CPU state needed before the kernel switches to virtual
 * addresses.
 */
void __head startup_64_setup_gdt_idt(void)
{
	struct gdt_page *gp = rip_rel_ptr((void *)(__force unsigned long)&gdt_page);
	void *handler = NULL;

	struct desc_ptr startup_gdt_descr = {
		.address = (unsigned long)gp->gdt,
		.size    = GDT_SIZE - 1,
	};

	/* Load GDT */
	native_load_gdt(&startup_gdt_descr);

	/* New GDT is live - reload data segment registers */
	asm volatile("movl %%eax, %%ds\n"
		     "movl %%eax, %%ss\n"
		     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
		handler = rip_rel_ptr(vc_no_ghcb);

	startup_64_load_idt(handler);
}
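Both `native_load_gdt()` and `native_load_idt()` take a `struct desc_ptr`, which mirrors the pseudo-descriptor operand of the LGDT/LIDT instructions: a 16-bit limit immediately followed by the base address, with no padding between them. A minimal sketch of that shape, assuming the GCC/Clang packed-struct and inline-asm extensions (`pseudo_desc` and `load_idt_sketch` are illustrative names, not kernel API):

```c
#include <stdint.h>

/* Pseudo-descriptor consumed by LGDT/LIDT: limit, then base, packed. */
struct pseudo_desc {
	uint16_t size;		/* sizeof(table) - 1 */
	uint64_t address;	/* linear address of the table */
} __attribute__((packed));

static inline void load_idt_sketch(const struct pseudo_desc *d)
{
	asm volatile("lidt %0" : : "m" (*d));
}
```

Note the `- 1` in both `.size` initializers above: the hardware limit field is the offset of the last valid byte of the table, not its length, which is why the code uses `GDT_SIZE - 1` and `sizeof(bringup_idt_table) - 1`.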