Contributors: 21
Author | Tokens | Token Proportion | Commits | Commit Proportion
Rick Edgecombe | 376 | 66.90% | 2 | 6.06%
Peter Zijlstra | 66 | 11.74% | 1 | 3.03%
Eric W. Biedermann | 23 | 4.09% | 2 | 6.06%
Linus Torvalds (pre-git) | 19 | 3.38% | 5 | 15.15%
Gleb Natapov | 14 | 2.49% | 1 | 3.03%
Oleg Nesterov | 11 | 1.96% | 3 | 9.09%
Andrew Lutomirski | 10 | 1.78% | 2 | 6.06%
Alexander van Heukelum | 9 | 1.60% | 3 | 9.09%
Frédéric Weisbecker | 8 | 1.42% | 1 | 3.03%
Masoud Asgharifard Sharbiani | 7 | 1.25% | 1 | 3.03%
Hiroshi Shimamoto | 4 | 0.71% | 1 | 3.03%
Ingo Molnar | 2 | 0.36% | 2 | 6.06%
Vincent Hanquez | 2 | 0.36% | 1 | 3.03%
Al Viro | 2 | 0.36% | 1 | 3.03%
Jan Beulich | 2 | 0.36% | 1 | 3.03%
Linus Torvalds | 2 | 0.36% | 1 | 3.03%
Josh Poimboeuf | 1 | 0.18% | 1 | 3.03%
Srikar Dronamraju | 1 | 0.18% | 1 | 3.03%
Alexander Kuleshov | 1 | 0.18% | 1 | 3.03%
David S. Miller | 1 | 0.18% | 1 | 3.03%
Alexey Dobriyan | 1 | 0.18% | 1 | 3.03%
Total | 562 | | 33 |
// SPDX-License-Identifier: GPL-2.0
#include <linux/ptrace.h>
#include <asm/bugs.h>
#include <asm/traps.h>

enum cp_error_code {
	/* Bits 14:0 of the #CP error code identify the cause of the fault. */
	CP_EC        = (1 << 15) - 1,

	CP_RET       = 1,
	CP_IRET      = 2,
	CP_ENDBR     = 3,
	CP_RSTRORSSP = 4,
	CP_SETSSBSY  = 5,

	/* Bit 15 is set when the fault was raised inside an SGX enclave. */
	CP_ENCL      = 1 << 15,
};
static const char cp_err[][10] = {
	[0] = "unknown",
	[1] = "near ret",
	[2] = "far/iret",
	[3] = "endbranch",
	[4] = "rstorssp",
	[5] = "setssbsy",
};
/* Map the cause bits of the #CP error code to a human-readable string. */
static const char *cp_err_string(unsigned long error_code)
{
	unsigned int cpec = error_code & CP_EC;

	if (cpec >= ARRAY_SIZE(cp_err))
		cpec = 0;
	return cp_err[cpec];
}
/* Warn (once) about a #CP that no enabled feature should have generated. */
static void do_unexpected_cp(struct pt_regs *regs, unsigned long error_code)
{
	WARN_ONCE(1, "Unexpected %s #CP, error_code: %s\n",
		  user_mode(regs) ? "user mode" : "kernel mode",
		  cp_err_string(error_code));
}
static DEFINE_RATELIMIT_STATE(cpf_rate, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);

static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	unsigned long ssp;

	/*
	 * An exception was just taken from userspace. Since interrupts are disabled
	 * here, no scheduling should have messed with the registers yet and they
	 * will be whatever is live in userspace. So read the SSP before enabling
	 * interrupts so locking the fpregs to do it later is not required.
	 */
	rdmsrl(MSR_IA32_PL3_SSP, ssp);

	cond_local_irq_enable(regs);

	tsk = current;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_CP;

	/* Ratelimit to prevent log spamming. */
	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    __ratelimit(&cpf_rate)) {
		pr_emerg("%s[%d] control protection ip:%lx sp:%lx ssp:%lx error:%lx(%s)%s",
			 tsk->comm, task_pid_nr(tsk),
			 regs->ip, regs->sp, ssp, error_code,
			 cp_err_string(error_code),
			 error_code & CP_ENCL ? " in enclave" : "");
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_fault(SIGSEGV, SEGV_CPERR, (void __user *)0);
	cond_local_irq_disable(regs);
}
static __ro_after_init bool ibt_fatal = true;

static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
{
	/* Kernel IBT only ever generates missing-ENDBR #CPs. */
	if ((error_code & CP_EC) != CP_ENDBR) {
		do_unexpected_cp(regs, error_code);
		return;
	}

	/*
	 * The kernel IBT selftest deliberately triggers a #CP at this
	 * address; clear %rax to signal that the expected fault fired
	 * and let execution continue.
	 */
	if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
		regs->ax = 0;
		return;
	}

	pr_err("Missing ENDBR: %pS\n", (void *)instruction_pointer(regs));
	if (!ibt_fatal) {
		printk(KERN_DEFAULT CUT_HERE);
		__warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
		return;
	}
	BUG();
}
/* Parse the "ibt=" kernel command line parameter: "off" or "warn". */
static int __init ibt_setup(char *str)
{
	if (!strcmp(str, "off"))
		setup_clear_cpu_cap(X86_FEATURE_IBT);

	if (!strcmp(str, "warn"))
		ibt_fatal = false;

	return 1;
}

__setup("ibt=", ibt_setup);
/* #CP entry point: dispatch on the mode the exception was taken from. */
DEFINE_IDTENTRY_ERRORCODE(exc_control_protection)
{
	if (user_mode(regs)) {
		if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK))
			do_user_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	} else {
		if (cpu_feature_enabled(X86_FEATURE_IBT))
			do_kernel_cp_fault(regs, error_code);
		else
			do_unexpected_cp(regs, error_code);
	}
}
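
For reference, a user-mode shadow-stack violation handled by do_user_cp_fault() above is delivered to the process as SIGSEGV with si_code SEGV_CPERR. A minimal userspace sketch of observing that signal follows; the handler name and messages are illustrative only, and the SEGV_CPERR fallback definition assumes the uapi value (10) when libc headers predate it.

#define _GNU_SOURCE
#include <signal.h>
#include <unistd.h>

#ifndef SEGV_CPERR
#define SEGV_CPERR 10	/* control-protection fault; assumed uapi value on older headers */
#endif

/* Illustrative handler: report a control-protection fault, then exit. */
static void cp_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;

	if (info->si_code == SEGV_CPERR)
		write(2, "control-protection fault\n", 25);
	_exit(1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = cp_handler;
	sigaction(SIGSEGV, &sa, NULL);

	/* A shadow-stack mismatch (e.g. a corrupted return address) would fault here. */
	return 0;
}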