Release 4.14 arch/x86/entry/vsyscall/vsyscall_64.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *     Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *     Copyright 2003 Andi Kleen, SuSE Labs.
 *
 * Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
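For a sense of the ABI being emulated, here is a minimal userspace sketch of a legacy vsyscall call (illustrative only, not part of this file). The fixed constant is the long-standing x86-64 vsyscall page base, with gettimeofday as the entry at offset 0; on a vsyscall=none kernel the call faults with SIGSEGV instead of succeeding.

#include <stdio.h>
#include <sys/time.h>

typedef int (*vgtod_fn)(struct timeval *tv, struct timezone *tz);

int main(void)
{
        /* Legacy ABI: gettimeofday sits at a fixed, ASLR-defeating address. */
        vgtod_fn vgtod = (vgtod_fn)0xffffffffff600000UL;
        struct timeval tv;

        if (vgtod(&tv, NULL) == 0)
                printf("tv_sec=%ld\n", (long)tv.tv_sec);
        return 0;
}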
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>
#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"

static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
        NATIVE;
#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
        NONE;
#else
        EMULATE;
#endif

static int __init vsyscall_setup(char *str)
{
        if (str) {
                if (!strcmp("emulate", str))
                        vsyscall_mode = EMULATE;
                else if (!strcmp("native", str))
                        vsyscall_mode = NATIVE;
                else if (!strcmp("none", str))
                        vsyscall_mode = NONE;
                else
                        return -EINVAL;

                return 0;
        }

        return -EINVAL;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 73 | 100.00% | 1 | 100.00% |
Total | 73 | 100.00% | 1 | 100.00% |
early_param("vsyscall", vsyscall_setup);
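Wired up through early_param(), the mode can therefore be chosen per boot from the kernel command line: vsyscall=emulate (the default here), vsyscall=native, or vsyscall=none; any other value is rejected with -EINVAL.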
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
                              const char *message)
{
        if (!show_unhandled_signals)
                return;

        printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
                           level, current->comm, task_pid_nr(current),
                           message, regs->ip, regs->cs,
                           regs->sp, regs->ax, regs->si, regs->di);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 58 | 84.06% | 4 | 66.67% |
Andi Kleen | 9 | 13.04% | 1 | 16.67% |
Joe Perches | 2 | 2.90% | 1 | 16.67% |
Total | 69 | 100.00% | 6 | 100.00% |
static int addr_to_vsyscall_nr(unsigned long addr)
{
        int nr;

        if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
                return -EINVAL;

        nr = (addr & 0xC00UL) >> 10;
        if (nr >= 3)
                return -EINVAL;

        return nr;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 51 | 100.00% | 2 | 100.00% |
Total | 51 | 100.00% | 2 | 100.00% |
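The decode relies on the three vsyscall entry points sitting 1024 bytes apart inside one page, so bits 10-11 of the faulting address select the call. A standalone sketch of the same arithmetic (illustrative only; the base constant is the x86-64 VSYSCALL_ADDR value, hard-coded here because this is not kernel code):

#include <stdio.h>

#define VSYSCALL_BASE 0xffffffffff600000UL /* x86-64 VSYSCALL_ADDR */

static int decode(unsigned long addr)
{
        int nr;

        if ((addr & ~0xC00UL) != VSYSCALL_BASE)
                return -1;               /* misaligned or outside the page */
        nr = (addr & 0xC00UL) >> 10;
        return nr < 3 ? nr : -1;         /* only slots 0..2 exist */
}

int main(void)
{
        printf("%d\n", decode(0xffffffffff600000UL)); /* 0: gettimeofday */
        printf("%d\n", decode(0xffffffffff600400UL)); /* 1: time */
        printf("%d\n", decode(0xffffffffff600800UL)); /* 2: getcpu */
        printf("%d\n", decode(0xffffffffff600c00UL)); /* -1: slot 3 rejected */
        printf("%d\n", decode(0xffffffffff600123UL)); /* -1: misaligned */
        return 0;
}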
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
        /*
         * XXX: if access_ok, get_user, and put_user handled
         * sig_on_uaccess_err, this could go away.
         */
        if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
                siginfo_t info;
                struct thread_struct *thread = &current->thread;

                thread->error_code = 6; /* user fault, no page, write */
                thread->cr2 = ptr;
                thread->trap_nr = X86_TRAP_PF;

                memset(&info, 0, sizeof(info));
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = (void __user *)ptr;

                force_sig_info(SIGSEGV, &info, current);
                return false;
        } else {
                return true;
        }
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 123 | 97.62% | 1 | 33.33% |
Srikar Dronamraju | 2 | 1.59% | 1 | 33.33% |
Ingo Molnar | 1 | 0.79% | 1 | 33.33% |
Total | 126 | 100.00% | 3 | 100.00% |
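The SIGSEGV-instead-of-EFAULT behavior is visible from userspace: hand the emulated time() entry a kernel-space pointer and access_ok() fails, so write_ok_or_segv() raises the signal. A hedged sketch (not part of this file; the fixed address is the conventional time() slot at offset 0x400 of the vsyscall page):

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

typedef time_t (*vtime_fn)(time_t *t);

static void on_segv(int sig)
{
        static const char msg[] = "SIGSEGV, as the emulation intends\n";

        /* write() and _exit() are async-signal-safe; printf() is not. */
        write(STDOUT_FILENO, msg, sizeof(msg) - 1);
        _exit(0);
}

int main(void)
{
        vtime_fn vtime = (vtime_fn)0xffffffffff600400UL;

        signal(SIGSEGV, on_segv);
        vtime((time_t *)0xffff800000000000UL); /* kernel address: fails access_ok() */
        printf("unexpected: the call returned\n");
        return 0;
}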
bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
        struct task_struct *tsk;
        unsigned long caller;
        int vsyscall_nr, syscall_nr, tmp;
        int prev_sig_on_uaccess_err;
        long ret;

        /*
         * No point in checking CS -- the only way to get here is a user mode
         * trap to a high address, which means that we're in 64-bit user code.
         */
        WARN_ON_ONCE(address != regs->ip);

        if (vsyscall_mode == NONE) {
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall attempted with vsyscall=none");
                return false;
        }

        vsyscall_nr = addr_to_vsyscall_nr(address);

        trace_emulate_vsyscall(vsyscall_nr);

        if (vsyscall_nr < 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
                goto sigsegv;
        }

        if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "vsyscall with bad stack (exploit attempt?)");
                goto sigsegv;
        }

        tsk = current;

        /*
         * Check for access_ok violations and find the syscall nr.
         *
         * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
         * 64-bit, so we don't need to special-case it here.  For all the
         * vsyscalls, NULL means "don't write anything" not "write it at
         * address 0".
         */
        switch (vsyscall_nr) {
        case 0:
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_gettimeofday;
                break;

        case 1:
                if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_time;
                break;

        case 2:
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_getcpu;
                break;
        }

        /*
         * Handle seccomp.  regs->ip must be the original value.
         * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
         *
         * We could optimize the seccomp disabled case, but performance
         * here doesn't matter.
         */
        regs->orig_ax = syscall_nr;
        regs->ax = -ENOSYS;
        tmp = secure_computing(NULL);
        if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
                warn_bad_vsyscall(KERN_DEBUG, regs,
                                  "seccomp tried to change syscall nr or ip");
                do_exit(SIGSYS);
        }
        regs->orig_ax = -1;
        if (tmp)
                goto do_ret; /* skip requested */

        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
         */
        prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
        current->thread.sig_on_uaccess_err = 1;

        ret = -EFAULT;
        switch (vsyscall_nr) {
        case 0:
                ret = sys_gettimeofday(
                        (struct timeval __user *)regs->di,
                        (struct timezone __user *)regs->si);
                break;

        case 1:
                ret = sys_time((time_t __user *)regs->di);
                break;

        case 2:
                ret = sys_getcpu((unsigned __user *)regs->di,
                                 (unsigned __user *)regs->si,
                                 NULL);
                break;
        }

        current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;

check_fault:
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall fault (exploit attempt?)");

                /*
                 * If we failed to generate a signal for any reason,
                 * generate one here.  (This should be impossible.)
                 */
                if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
                                 !sigismember(&tsk->pending.signal, SIGSEGV)))
                        goto sigsegv;

                return true; /* Don't emulate the ret. */
        }

        regs->ax = ret;

do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
        return true;

sigsegv:
        force_sig(SIGSEGV, current);
        return true;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 481 | 87.77% | 11 | 55.00% |
Vojtech Pavlik | 22 | 4.01% | 1 | 5.00% |
John Stultz | 19 | 3.47% | 1 | 5.00% |
Andi Kleen | 10 | 1.82% | 2 | 10.00% |
Ingo Molnar | 6 | 1.09% | 1 | 5.00% |
Eric Dumazet | 5 | 0.91% | 2 | 10.00% |
Will Drewry | 4 | 0.73% | 1 | 5.00% |
Emil Goode | 1 | 0.18% | 1 | 5.00% |
Total | 548 | 100.00% | 20 | 100.00% |
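The do_ret fixup deserves a worked example. Userspace reached the page with a call instruction, which pushed a return address and then trapped, so the emulation pops that address by hand:

        before the fault:  rsp -> [caller]   rip = vsyscall address
        after the fixup:   rsp += 8          rip = caller

This is exactly the register state a real ret at the end of the vsyscall stub would have produced.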
/*
* A pseudo VMA to allow ptrace access for the vsyscall page. This only
* covers the 64bit vsyscall page now. 32bit has a real VMA now and does
* not need special handling anymore:
*/
static const char *gate_vma_name(struct vm_area_struct *vma)
{
        return "[vsyscall]";
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
static const struct vm_operations_struct gate_vma_ops = {
        .name = gate_vma_name,
};

static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_ADDR,
        .vm_end         = VSYSCALL_ADDR + PAGE_SIZE,
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC,
        .vm_ops         = &gate_vma_ops,
};
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
        if (!mm || mm->context.ia32_compat)
                return NULL;
#endif
        if (vsyscall_mode == NONE)
                return NULL;
        return &gate_vma;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 43 | 97.73% | 2 | 66.67% |
Brian Gerst | 1 | 2.27% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
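This pseudo VMA is also what makes the page visible in /proc/<pid>/maps, where gate_vma_name() supplies the "[vsyscall]" label; with vsyscall=none, get_gate_vma() returns NULL and the line disappears. A quick userspace check (illustrative only, not part of this file):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;
        /* Prints e.g. "ffffffffff600000-ffffffffff601000 r-xp ... [vsyscall]". */
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "[vsyscall]"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}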
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(mm);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 49 | 100.00% | 1 | 100.00% |
Total | 49 | 100.00% | 1 | 100.00% |
/*
* Use this when you have no reliable mm, typically from interrupt
* context. It is less reliable than using a task's mm and may give
* false positives.
*/
int in_gate_area_no_mm(unsigned long addr)
{
        return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 22 | 100.00% | 2 | 100.00% |
Total | 22 | 100.00% | 2 | 100.00% |
void __init map_vsyscall(void)
{
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

        if (vsyscall_mode != NONE)
                __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
                             vsyscall_mode == NATIVE
                             ? PAGE_KERNEL_VSYSCALL
                             : PAGE_KERNEL_VVAR);

        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 37 | 60.66% | 4 | 66.67% |
Andi Kleen | 24 | 39.34% | 2 | 33.33% |
Total | 61 | 100.00% | 6 | 100.00% |
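The page protection chosen here is what implements the two live modes: NATIVE maps the page with PAGE_KERNEL_VSYSCALL so the stub executes directly, while EMULATE maps it with PAGE_KERNEL_VVAR, a readable but non-executable mapping, so any attempt to execute it page-faults and lands in emulate_vsyscall() above.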
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Lutomirski | 1037 | 87.22% | 17 | 48.57% |
Andi Kleen | 57 | 4.79% | 2 | 5.71% |
John Stultz | 22 | 1.85% | 1 | 2.86% |
Vojtech Pavlik | 22 | 1.85% | 1 | 2.86% |
Ingo Molnar | 13 | 1.09% | 3 | 8.57% |
Kees Cook | 13 | 1.09% | 1 | 2.86% |
Borislav Petkov | 8 | 0.67% | 1 | 2.86% |
Eric Dumazet | 5 | 0.42% | 2 | 5.71% |
Will Drewry | 4 | 0.34% | 1 | 2.86% |
Joe Perches | 2 | 0.17% | 1 | 2.86% |
Srikar Dronamraju | 2 | 0.17% | 1 | 2.86% |
Kirill A. Shutemov | 1 | 0.08% | 1 | 2.86% |
Greg Kroah-Hartman | 1 | 0.08% | 1 | 2.86% |
Emil Goode | 1 | 0.08% | 1 | 2.86% |
Brian Gerst | 1 | 0.08% | 1 | 2.86% |
Total | 1189 | 100.00% | 35 | 100.00% |