Linux release 4.16: arch/x86/include/asm/nospec-branch.h

/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version — two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 * The final 'add' rewinds the stack pointer past the return addresses
 * pushed by the calls, since none of them is ever consumed by a real
 * 'ret'.
 */

#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov     $(nr/2), reg;                   \
771:                                            \
        call    772f;                           \
773:	/* speculation trap */			\
        pause;                                  \
        lfence;                                 \
        jmp     773b;                           \
772:                                            \
        call    774f;                           \
775:	/* speculation trap */			\
        pause;                                  \
        lfence;                                 \
        jmp     775b;                           \
774:                                            \
        dec     reg;                            \
        jnz     771b;                           \
        add     $(BITS_PER_LONG/8) * nr, sp;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative.  It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm
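
For illustration, a hedged .S usage sketch (the target register is arbitrary and
this call site is hypothetical): an indirect jump that has been audited as safe
for retpoline builds is annotated immediately before the jump:

	/* Illustrative only: vouch for a known-safe indirect jump */
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rdi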

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro: the trailing 'call'
 * pushes that address, RETPOLINE_JMP dispatches to \reg, and the
 * callee's eventual real 'ret' pops it.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP \reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm
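
As a minimal usage sketch (register choices illustrative, not taken from any
particular call site), assembly code replaces the bare indirect forms with
these macros:

	/* Illustrative only: instead of 'call *%rax', write: */
	CALL_NOSPEC %rax

	/* Illustrative only: instead of 'jmp *%rcx', write: */
	JMP_NOSPEC %rcx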

 /*
  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
  * monstrosity above, manually.
  */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
#endif
.endm
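
For illustration, a context-switch call site would look roughly like this;
%rbx as the scratch register and X86_FEATURE_RSB_CTXSW as the gating feature
bit follow the 4.16 entry code, but treat the exact operands as an assumption:

	/* Illustrative only: clobber %rbx and refill the RSB */
	FILL_RETURN_BUFFER %rbx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW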

#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"                                              \
        ".pushsection .discard.nospec\n\t"                      \
        ".long 999b - .\n\t"                                    \
        ".popsection\n\t"

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"                                              \
        ".pushsection .discard.retpoline_safe\n\t"              \
        _ASM_PTR " 999b\n\t"                                    \
        ".popsection\n\t"

#if defined(CONFIG_X86_64) && defined(RETPOLINE)

/*
 * Since the inline asm uses the %V operand modifier, which only newer
 * GCC versions support, the 64-bit variant is gated on RETPOLINE (set
 * by the build only when the compiler supports retpolines) rather than
 * on CONFIG_RETPOLINE.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE                             \
        ALTERNATIVE(                                            \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "call __x86_indirect_thunk_%V[thunk_target]\n",         \
        X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE(                                            \
        ANNOTATE_RETPOLINE_SAFE                                 \
        "call *%[thunk_target]\n",                              \
        "       jmp    904f;\n"                                 \
        "       .align 16\n"                                    \
        "901:   call   903f;\n"                                 \
        "902:   pause;\n"                                       \
        "       lfence;\n"                                      \
        "       jmp    902b;\n"                                 \
        "       .align 16\n"                                    \
        "903:   addl   $4, %%esp;\n"                            \
        "       pushl  %[thunk_target];\n"                      \
        "       ret;\n"                                         \
        "       .align 16\n"                                    \
        "904:   call   901b;\n",                                \
        X86_FEATURE_RETPOLINE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
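
A minimal C-side sketch, assuming a hypothetical helper; real call sites
(e.g. hypercall wrappers) must also mark the caller-saved registers as
clobbered:

/*
 * Illustrative only: dispatch through a function pointer via
 * CALL_NOSPEC.  nospec_dispatch() is hypothetical, and a real user
 * would list the full call-clobbered register set.
 */
static inline unsigned long nospec_dispatch(unsigned long (*target)(void))
{
	unsigned long ret;

	asm volatile (CALL_NOSPEC
		      : "=a" (ret), ASM_CALL_CONSTRAINT
		      : THUNK_TARGET(target)
		      : "memory");
	return ret;
}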

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_MINIMAL,
	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */

static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE("jmp 910f",
				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
				  X86_FEATURE_RETPOLINE)
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
#endif
}
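
As a hedged sketch of the intended call site (the function below is
hypothetical, not the actual KVM code):

/* Illustrative only: stuff the RSB on the VMEXIT path before any
 * host 'ret' can consume guest-trained entries. */
static void handle_guest_exit(void)
{
	vmexit_fill_RSB();
	/* ... host-side exit handling ... */
}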


#define alternative_msr_write(_msr, _val, _feature)		\
	asm volatile(ALTERNATIVE("",				\
				 "movl %[msr], %%ecx\n\t"	\
				 "movl %[val], %%eax\n\t"	\
				 "movl $0, %%edx\n\t"		\
				 "wrmsr",			\
				 _feature)			\
		     : : [msr] "i" (_msr), [val] "i" (_val)	\
		     : "eax", "ecx", "edx", "memory")
static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,
			      X86_FEATURE_USE_IBPB);
}
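
A hedged sketch of where the barrier fits (the switch helper and its check
are illustrative, not the actual 4.16 call site):

/* Illustrative only: flush indirect branch predictions when the next
 * task must not inherit the previous task's training. */
static void example_switch_mm(struct mm_struct *prev, struct mm_struct *next)
{
	if (next != prev)
		indirect_branch_prediction_barrier();
	/* ... rest of the mm switch ... */
}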


/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, 0,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp)
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax
 */
#ifdef CONFIG_RETPOLINE
# define RETPOLINE_RAX_BPF_JIT_SIZE	17
# define RETPOLINE_RAX_BPF_JIT()				\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);	 /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);	 /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);		 /* retq */
#else
# define RETPOLINE_RAX_BPF_JIT_SIZE	2
# define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);	 /* jmp *%rax */
#endif

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
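
For reference, a minimal sketch of how the firmware_restrict_branch_speculation_*()
pair above is meant to bracket a firmware call; do_efi_call() is a hypothetical
stand-in for the real EFI call machinery:

extern unsigned long do_efi_call(void);	/* hypothetical */

/* Illustrative only: IBRS-protect a firmware call. */
static inline unsigned long efi_call_guarded(void)
{
	unsigned long status;

	firmware_restrict_branch_speculation_start();
	status = do_efi_call();
	firmware_restrict_branch_speculation_end();
	return status;
}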
