Linux 4.14: arch/x86/entry/vsyscall/vsyscall_64.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
 *
 * Based on the original implementation which is:
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright 2003 Andi Kleen, SuSE Labs.
 *
 *  Parts of the original code have been moved to arch/x86/vdso/vma.c
 *
 * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
 * Userspace can request certain kernel services by calling fixed
 * addresses.  This concept is problematic:
 *
 * - It interferes with ASLR.
 * - It's awkward to write code that lives in kernel addresses but is
 *   callable by userspace at fixed addresses.
 * - The whole concept is impossible for 32-bit compat userspace.
 * - UML cannot easily virtualize a vsyscall.
 *
 * As of mid-2014, I believe that there is no new userspace code that
 * will use a vsyscall if the vDSO is present.  I hope that there will
 * soon be no new userspace code that will ever use a vsyscall.
 *
 * The code in this file emulates vsyscalls when notified of a page
 * fault to a vsyscall address.
 */
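For context, here is a minimal userspace sketch (not part of this file, and only valid on kernels where the vsyscall page exists) of what a legacy caller looks like. The fixed address 0xffffffffff600000 is VSYSCALL_ADDR, and vsyscall number 0 is gettimeofday; in EMULATE mode, executing at that address faults and control lands in emulate_vsyscall() below.

#include <stddef.h>
#include <sys/time.h>

typedef int (*vgtod_t)(struct timeval *tv, struct timezone *tz);

int legacy_gettimeofday(struct timeval *tv)
{
        /* Call the fixed vsyscall address; the kernel emulates it. */
        vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;

        return vgtod(tv, NULL);
}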

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>
#include <linux/mm_types.h>
#include <linux/syscalls.h>
#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/unistd.h>
#include <asm/fixmap.h>
#include <asm/traps.h>


#define CREATE_TRACE_POINTS
#include "vsyscall_trace.h"





static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
	NATIVE;
#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
	NONE;
#else
	EMULATE;
#endif


static int __init vsyscall_setup(char *str)
{
        if (str) {
                if (!strcmp("emulate", str))
                        vsyscall_mode = EMULATE;
                else if (!strcmp("native", str))
                        vsyscall_mode = NATIVE;
                else if (!strcmp("none", str))
                        vsyscall_mode = NONE;
                else
                        return -EINVAL;

                return 0;
        }

        return -EINVAL;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         73  100.00%        1      100.00%
Total                     73  100.00%        1      100.00%

early_param("vsyscall", vsyscall_setup);
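The mode can also be selected at boot time. For example, appending the following to the kernel command line disables the vsyscall page entirely, so get_gate_vma() below returns NULL and any call attempt raises SIGSEGV:

        vsyscall=none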
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
                              const char *message)
{
        if (!show_unhandled_signals)
                return;

        printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
                           level, current->comm, task_pid_nr(current),
                           message, regs->ip, regs->cs,
                           regs->sp, regs->ax, regs->si, regs->di);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         58   84.06%        4       66.67%
Andi Kleen                 9   13.04%        1       16.67%
Joe Perches                2    2.90%        1       16.67%
Total                     69  100.00%        6      100.00%


static int addr_to_vsyscall_nr(unsigned long addr)
{
        int nr;

        if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
                return -EINVAL;

        nr = (addr & 0xC00UL) >> 10;
        if (nr >= 3)
                return -EINVAL;

        return nr;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         51  100.00%        2      100.00%
Total                     51  100.00%        2      100.00%


static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
        /*
         * XXX: if access_ok, get_user, and put_user handled
         * sig_on_uaccess_err, this could go away.
         */
        if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
                siginfo_t info;
                struct thread_struct *thread = &current->thread;

                thread->error_code = 6;  /* user fault, no page, write */
                thread->cr2        = ptr;
                thread->trap_nr    = X86_TRAP_PF;

                memset(&info, 0, sizeof(info));
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code  = SEGV_MAPERR;
                info.si_addr  = (void __user *)ptr;

                force_sig_info(SIGSEGV, &info, current);
                return false;
        } else {
                return true;
        }
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski        123   97.62%        1       33.33%
Srikar Dronamraju          2    1.59%        1       33.33%
Ingo Molnar                1    0.79%        1       33.33%
Total                    126  100.00%        3      100.00%
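A hypothetical userspace probe of this behavior (a sketch, not part of the kernel tree): passing a kernel-range pointer to the time vsyscall fails access_ok(), so write_ok_or_segv() delivers SIGSEGV instead of letting the call return -EFAULT, matching what a real vsyscall would have done.

#include <signal.h>
#include <time.h>
#include <unistd.h>

typedef time_t (*vtime_t)(time_t *t);

static void on_segv(int sig)
{
        _exit(0);       /* expected: SIGSEGV raised by write_ok_or_segv() */
}

int main(void)
{
        vtime_t vtime = (vtime_t)0xffffffffff600400UL;  /* vsyscall nr 1 */

        signal(SIGSEGV, on_segv);
        vtime((time_t *)0xffffffff00000000UL);  /* kernel-range pointer */
        return 1;                               /* not reached on success */
}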


bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
        struct task_struct *tsk;
        unsigned long caller;
        int vsyscall_nr, syscall_nr, tmp;
        int prev_sig_on_uaccess_err;
        long ret;

        /*
         * No point in checking CS -- the only way to get here is a user mode
         * trap to a high address, which means that we're in 64-bit user code.
         */
        WARN_ON_ONCE(address != regs->ip);

        if (vsyscall_mode == NONE) {
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall attempted with vsyscall=none");
                return false;
        }

        vsyscall_nr = addr_to_vsyscall_nr(address);

        trace_emulate_vsyscall(vsyscall_nr);

        if (vsyscall_nr < 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
                goto sigsegv;
        }

        if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
                warn_bad_vsyscall(KERN_WARNING, regs,
                                  "vsyscall with bad stack (exploit attempt?)");
                goto sigsegv;
        }

        tsk = current;

        /*
         * Check for access_ok violations and find the syscall nr.
         *
         * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
         * 64-bit, so we don't need to special-case it here.  For all the
         * vsyscalls, NULL means "don't write anything" not "write it at
         * address 0".
         */
        switch (vsyscall_nr) {
        case 0:
                if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
                    !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_gettimeofday;
                break;

        case 1:
                if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_time;
                break;

        case 2:
                if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
                    !write_ok_or_segv(regs->si, sizeof(unsigned))) {
                        ret = -EFAULT;
                        goto check_fault;
                }

                syscall_nr = __NR_getcpu;
                break;
        }

        /*
         * Handle seccomp.  regs->ip must be the original value.
         * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
         *
         * We could optimize the seccomp disabled case, but performance
         * here doesn't matter.
         */
        regs->orig_ax = syscall_nr;
        regs->ax = -ENOSYS;
        tmp = secure_computing(NULL);
        if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
                warn_bad_vsyscall(KERN_DEBUG, regs,
                                  "seccomp tried to change syscall nr or ip");
                do_exit(SIGSYS);
        }
        regs->orig_ax = -1;
        if (tmp)
                goto do_ret;  /* skip requested */

        /*
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
         */
        prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
        current->thread.sig_on_uaccess_err = 1;

        ret = -EFAULT;
        switch (vsyscall_nr) {
        case 0:
                ret = sys_gettimeofday(
                        (struct timeval __user *)regs->di,
                        (struct timezone __user *)regs->si);
                break;

        case 1:
                ret = sys_time((time_t __user *)regs->di);
                break;

        case 2:
                ret = sys_getcpu((unsigned __user *)regs->di,
                                 (unsigned __user *)regs->si,
                                 NULL);
                break;
        }

        current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;

check_fault:
        if (ret == -EFAULT) {
                /* Bad news -- userspace fed a bad pointer to a vsyscall. */
                warn_bad_vsyscall(KERN_INFO, regs,
                                  "vsyscall fault (exploit attempt?)");

                /*
                 * If we failed to generate a signal for any reason,
                 * generate one here.  (This should be impossible.)
                 */
                if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
                                 !sigismember(&tsk->pending.signal, SIGSEGV)))
                        goto sigsegv;

                return true;  /* Don't emulate the ret. */
        }

        regs->ax = ret;

do_ret:
        /* Emulate a ret instruction. */
        regs->ip = caller;
        regs->sp += 8;
        return true;

sigsegv:
        force_sig(SIGSEGV, current);
        return true;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski        481   87.77%       11       55.00%
Vojtech Pavlik            22    4.01%        1        5.00%
John Stultz               19    3.47%        1        5.00%
Andi Kleen                10    1.82%        2       10.00%
Ingo Molnar                6    1.09%        1        5.00%
Eric Dumazet               5    0.91%        2       10.00%
Will Drewry                4    0.73%        1        5.00%
Emil Goode                 1    0.18%        1        5.00%
Total                    548  100.00%       20      100.00%
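One detail worth spelling out: the do_ret tail replicates, purely in the saved register state, the single RET instruction a real vsyscall would have ended with. The return address was fetched from the user stack into caller via get_user() at the top of the function, which is also why a corrupt stack is caught before any emulation starts.

/*
 *   regs->ip = caller;    like popping the return address into RIP
 *   regs->sp += 8;        like RET's 8-byte stack adjustment
 */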

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
 * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
 * not need special handling anymore:
 */
static const char *gate_vma_name(struct vm_area_struct *vma)
{
        return "[vsyscall]";
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         16  100.00%        1      100.00%
Total                     16  100.00%        1      100.00%

static const struct vm_operations_struct gate_vma_ops = {
        .name = gate_vma_name,
};

static struct vm_area_struct gate_vma = {
        .vm_start     = VSYSCALL_ADDR,
        .vm_end       = VSYSCALL_ADDR + PAGE_SIZE,
        .vm_page_prot = PAGE_READONLY_EXEC,
        .vm_flags     = VM_READ | VM_EXEC,
        .vm_ops       = &gate_vma_ops,
};
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_COMPAT
        if (!mm || mm->context.ia32_compat)
                return NULL;
#endif
        if (vsyscall_mode == NONE)
                return NULL;
        return &gate_vma;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         43   97.73%        2       66.67%
Brian Gerst                1    2.27%        1       33.33%
Total                     44  100.00%        3      100.00%


int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(mm);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         49  100.00%        1      100.00%
Total                     49  100.00%        1      100.00%

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context.  It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
        return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         22  100.00%        2      100.00%
Total                     22  100.00%        2      100.00%
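The practical effect of the gate VMA is visible from userspace: the vsyscall page is listed in /proc/PID/maps (unless the kernel was booted with vsyscall=none), which is what lets ptrace and debuggers reason about it. A minimal sketch, assuming only standard /proc semantics:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
                return 1;
        /* Typically prints: ffffffffff600000-ffffffffff601000 r-xp ... [vsyscall] */
        while (fgets(line, sizeof(line), f))
                if (strstr(line, "[vsyscall]"))
                        fputs(line, stdout);
        fclose(f);
        return 0;
}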


void __init map_vsyscall(void)
{
        extern char __vsyscall_page;
        unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);

        if (vsyscall_mode != NONE)
                __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
                             vsyscall_mode == NATIVE
                             ? PAGE_KERNEL_VSYSCALL
                             : PAGE_KERNEL_VVAR);

        BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
                     (unsigned long)VSYSCALL_ADDR);
}

Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski         37   60.66%        4       66.67%
Andi Kleen                24   39.34%        2       33.33%
Total                     61  100.00%        6      100.00%
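As an explanatory aside (not kernel code), the fixmap protection chosen in map_vsyscall() is what distinguishes the modes:

/*
 *   NATIVE  -> PAGE_KERNEL_VSYSCALL: the page is executable, so the real
 *              vsyscall code in __vsyscall_page runs directly.
 *   EMULATE -> PAGE_KERNEL_VVAR: the page is readable but not executable,
 *              so fetching instructions from it faults and control lands
 *              in emulate_vsyscall() above.
 *   NONE    -> the fixmap is never established at all.
 */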


Overall Contributors

Person                Tokens     Prop  Commits  Commit Prop
Andrew Lutomirski       1037   87.22%       17       48.57%
Andi Kleen                57    4.79%        2        5.71%
John Stultz               22    1.85%        1        2.86%
Vojtech Pavlik            22    1.85%        1        2.86%
Ingo Molnar               13    1.09%        3        8.57%
Kees Cook                 13    1.09%        1        2.86%
Borislav Petkov            8    0.67%        1        2.86%
Eric Dumazet               5    0.42%        2        5.71%
Will Drewry                4    0.34%        1        2.86%
Joe Perches                2    0.17%        1        2.86%
Srikar Dronamraju          2    0.17%        1        2.86%
Kirill A. Shutemov         1    0.08%        1        2.86%
Greg Kroah-Hartman         1    0.08%        1        2.86%
Emil Goode                 1    0.08%        1        2.86%
Brian Gerst                1    0.08%        1        2.86%
Total                   1189  100.00%       35      100.00%