
Release 4.14 arch/x86/include/asm/fpu/internal.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif

/*
 * FPU related CPU feature flag helper routines:
 */

static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Linus Torvalds    15      93.75%   1        50.00%
Borislav Petkov   1       6.25%    1        50.00%
Total             16      100.00%  2        100.00%


static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Linus Torvalds    15      93.75%   1        50.00%
Borislav Petkov   1       6.25%    1        50.00%
Total             16      100.00%  2        100.00%


static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Linus Torvalds    15      93.75%   1        50.00%
Borislav Petkov   1       6.25%    1        50.00%
Total             16      100.00%  2        100.00%
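
These helpers compile down to a patched static branch keyed off CPU feature bits the kernel collected at boot, so they cost nothing at runtime. As a rough userspace analogue (not part of the header), the same FXSR/XSAVE bits can be read directly with CPUID; the sketch below assumes a GCC/Clang toolchain providing <cpuid.h>, and the bit positions are architectural (CPUID.1:EDX[24] = FXSR, CPUID.1:ECX[26] = XSAVE, CPUID.1:ECX[27] = OSXSAVE).

/* Userspace sketch only -- not kernel code. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        printf("FXSR:    %u\n", (edx >> 24) & 1);  /* use_fxsr()  analogue */
        printf("XSAVE:   %u\n", (ecx >> 26) & 1);  /* use_xsave() analogue */
        printf("OSXSAVE: %u\n", (ecx >> 27) & 1);  /* OS enabled XSAVE     */
        return 0;
}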

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Ingo Molnar  11      100.00%  1        100.00%
Total        11      100.00%  1        100.00%

#endif
static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
        /*
         * XRSTORS requires these bits set in xcomp_bv, or it will
         * trigger #GP:
         */
        xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Yu-cheng Yu  23      100.00%  1        100.00%
Total        23      100.00%  1        100.00%
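
fpstate_init_xstate() ORs the compacted-format flag into the feature bitmap: XCOMP_BV_COMPACTED_FORMAT is bit 63 of xcomp_bv, and the low bits name the xfeatures laid out in the buffer. A minimal stand-alone illustration of the value being composed (the xfeatures_mask value here is made up for the example):

/* Illustrative sketch; the mask value is hypothetical. */
#include <stdio.h>
#include <stdint.h>

#define XCOMP_BV_COMPACTED_FORMAT	(1ULL << 63)

int main(void)
{
        uint64_t xfeatures_mask = 0x7;	/* e.g. x87 | SSE | AVX */
        uint64_t xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;

        printf("xcomp_bv = %#llx\n", (unsigned long long)xcomp_bv);
        return 0;
}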


static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
        fx->cwd = 0x37f;
        fx->mxcsr = MXCSR_DEFAULT;
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Ingo Molnar  24      100.00%  1        100.00%
Total        24      100.00%  1        100.00%
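
The 0x37f written into fx->cwd is the x87 power-on/FNINIT default control word (all exceptions masked, extended precision, round-to-nearest). That can be checked from userspace on any x86 machine with a short test program:

/* Userspace check (x86 only): FNINIT loads FCW = 0x37f. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t cwd;

        asm volatile("fninit; fnstcw %0" : "=m" (cwd));
        printf("default FCW = %#x\n", cwd);	/* prints 0x37f */
        return 0;
}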

extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)

static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
        return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Linus Torvalds    12      46.15%   1        20.00%
Suresh B. Siddha  11      42.31%   1        20.00%
Ingo Molnar       2       7.69%    2        40.00%
H. Peter Anvin    1       3.85%    1        20.00%
Total             26      100.00%  5        100.00%


static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
        else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
                return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

        /* See comment in copy_fxregs_to_kernel() below. */
        return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Suresh B. Siddha  67      62.62%   1        16.67%
Linus Torvalds    32      29.91%   1        16.67%
Ingo Molnar       3       2.80%    2        33.33%
H. Peter Anvin    3       2.80%    1        16.67%
Masahiro Yamada   2       1.87%    1        16.67%
Total             107     100.00%  6        100.00%


static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
        if (IS_ENABLED(CONFIG_X86_32)) {
                kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        } else {
                if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
                        kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
                } else {
                        /* See comment in copy_fxregs_to_kernel() below. */
                        kernel_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
                }
        }
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Suresh B. Siddha  84      71.19%   1        12.50%
Ingo Molnar       17      14.41%   4        50.00%
Linus Torvalds    12      10.17%   1        12.50%
Eric Biggers      3       2.54%    1        12.50%
Masahiro Yamada   2       1.69%    1        12.50%
Total             118     100.00%  8        100.00%


static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
        if (IS_ENABLED(CONFIG_X86_32))
                return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
        else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
                return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

        /* See comment in copy_fxregs_to_kernel() below. */
        return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
H. Peter Anvin   108     95.58%   1        25.00%
Ingo Molnar      3       2.65%    2        50.00%
Masahiro Yamada  2       1.77%    1        25.00%
Total            113     100.00%  4        100.00%


static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
        kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Linus Torvalds    16      44.44%   1        12.50%
Suresh B. Siddha  13      36.11%   1        12.50%
Ingo Molnar       4       11.11%   4        50.00%
H. Peter Anvin    2       5.56%    1        12.50%
Eric Biggers      1       2.78%    1        12.50%
Total             36      100.00%  8        100.00%


static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
        return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
H. Peter Anvin  33      86.84%   1        25.00%
Linus Torvalds  3       7.89%    1        25.00%
Ingo Molnar     2       5.26%    2        50.00%
Total           38      100.00%  4        100.00%


static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
        if (IS_ENABLED(CONFIG_X86_32))
                asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
        else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
                asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
        else {
                /* Using "rex64; fxsave %0" is broken because, if the memory
                 * operand uses any extended registers for addressing, a second
                 * REX prefix will be generated (to the assembler, rex64
                 * followed by semicolon is a separate instruction), and hence
                 * the 64-bitness is lost.
                 *
                 * Using "fxsaveq %0" would be the ideal choice, but is only
                 * supported starting with gas 2.16.
                 *
                 * Using, as a workaround, the properly prefixed form below
                 * isn't accepted by any binutils version so far released,
                 * complaining that the same type of prefix is used twice if
                 * an extended register is needed for addressing (fix submitted
                 * to mainline 2005-11-21).
                 *
                 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
                 *
                 * This, however, we can work around by forcing the compiler to
                 * select an addressing mode that doesn't require extended
                 * registers.
                 */
                asm volatile( "rex64/fxsave (%[fx])"
                             : "=m" (fpu->state.fxsave)
                             : [fx] "R" (&fpu->state.fxsave));
        }
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Suresh B. Siddha  17      42.50%   1        20.00%
Linus Torvalds    16      40.00%   1        20.00%
Ingo Molnar       5       12.50%   2        40.00%
Masahiro Yamada   2       5.00%    1        20.00%
Total             40      100.00%  5        100.00%
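
Since FXSAVE is not a privileged instruction, the same save can be tried from userspace. The sketch below (not kernel code; x86-64 only, and it assumes an assembler that knows fxsaveq) dumps two fields of the 512-byte fxsave image; the offsets (FCW at byte 0, MXCSR at byte 24) are architectural.

/* Userspace sketch only; x86-64 with fxsaveq assembler support assumed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* FXSAVE demands a 16-byte-aligned, 512-byte area. */
        static uint8_t fxarea[512] __attribute__((aligned(16)));

        asm volatile("fxsaveq %0" : "=m" (fxarea));

        printf("FCW   = %#x\n", *(uint16_t *)&fxarea[0]);
        printf("MXCSR = %#x\n", *(uint32_t *)&fxarea[24]);
        return 0;
}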

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction where we might get an exception at.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
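
The .byte encodings above hard-code the operands: the destination pointer goes in %rdi (hence the "D" constraint) and the 64-bit request mask travels split across %edx:%eax. The same calling convention can be exercised from 64-bit userspace; the sketch below (not kernel code) issues the XSAVE encoding with a mask of 0x3, x87 and SSE only, so a small buffer suffices, and it assumes the CPU and OS have XSAVE enabled, otherwise the instruction faults.

/* Userspace sketch; assumes XSAVE/OSXSAVE are enabled, x86-64 only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 512-byte legacy area + 64-byte header; 64-byte alignment required. */
        static uint8_t xarea[576] __attribute__((aligned(64)));
        uint64_t mask  = 0x3;		/* x87 | SSE only */
        uint32_t lmask = mask;
        uint32_t hmask = mask >> 32;

        asm volatile(".byte 0x48,0x0f,0xae,0x27"	/* xsave64 (%rdi) */
                     : : "D" (xarea), "a" (lmask), "d" (hmask)
                     : "memory");

        /* xstate_bv, the first header field, starts at byte 512. */
        printf("xstate_bv = %#llx\n",
               (unsigned long long)*(uint64_t *)&xarea[512]);
        return 0;
}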
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      53      66.25%   3        60.00%
Borislav Petkov  27      33.75%   2        40.00%
Total            80      100.00%  5        100.00%
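
Every caller in this family performs the same split of the 64-bit feature mask into the %edx:%eax pair, because that is how the hardware takes the operand. In isolation (plain ISO C, nothing kernel-specific):

/* Stand-alone illustration of the lmask/hmask split used above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask  = -1;		/* request all xfeatures */
        uint32_t lmask = mask;		/* low 32 bits  -> %eax  */
        uint32_t hmask = mask >> 32;	/* high 32 bits -> %edx  */

        printf("lmask = %#x, hmask = %#x\n", lmask, hmask);
        return 0;
}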

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternative can not be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON(system_state != SYSTEM_BOOTING);

        if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

        /*
         * We should never fault when copying from a kernel buffer, and the FPU
         * state we set at boot time should be valid.
         */
        WARN_ON_FPU(err);
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      52      65.00%   4        57.14%
Borislav Petkov  27      33.75%   2        28.57%
Eric Biggers     1       1.25%    1        14.29%
Total            80      100.00%  7        100.00%

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
        u64 mask = -1;
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        WARN_ON_FPU(!alternatives_patched);

        XSTATE_XSAVE(xstate, lmask, hmask, err);

        /* We should never fault when copying to a kernel buffer: */
        WARN_ON_FPU(err);
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      52      92.86%   3        60.00%
Borislav Petkov  3       5.36%    1        20.00%
Andi Kleen       1       1.79%    1        20.00%
Total            56      100.00%  5        100.00%

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
        u32 lmask = mask;
        u32 hmask = mask >> 32;

        XSTATE_XRESTORE(xstate, lmask, hmask);
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      35      97.22%   3        75.00%
Borislav Petkov  1       2.78%    1        25.00%
Total            36      100.00%  4        100.00%

/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted xsave area format, for backward compatibility
 * with old applications which don't understand it.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
        int err;

        /*
         * Clear the xsave header first, so that reserved fields are
         * initialized to zero.
         */
        err = __clear_user(&buf->header, sizeof(buf->header));
        if (unlikely(err))
                return -EFAULT;

        stac();
        XSTATE_OP(XSAVE, buf, -1, -1, err);
        clac();

        return err;
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      48      69.57%   1        50.00%
Borislav Petkov  21      30.43%   1        50.00%
Total            69      100.00%  2        100.00%

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
        struct xregs_state *xstate = ((__force struct xregs_state *)buf);
        u32 lmask = mask;
        u32 hmask = mask >> 32;
        int err;

        stac();
        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
        clac();

        return err;
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      49      72.06%   2        66.67%
Borislav Petkov  19      27.94%   1        33.33%
Total            68      100.00%  3        100.00%

/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction cleared all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
        if (likely(use_xsave())) {
                copy_xregs_to_kernel(&fpu->state.xsave);
                return 1;
        }

        if (likely(use_fxsr())) {
                copy_fxregs_to_kernel(fpu);
                return 1;
        }

        /*
         * Legacy FPU register saving, FNSAVE always clears FPU registers,
         * so we have to mark them inactive:
         */
        asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

        return 0;
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Linus Torvalds  35      58.33%   1        14.29%
Ingo Molnar     25      41.67%   6        85.71%
Total           60      100.00%  7        100.00%


static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
        if (use_xsave()) {
                copy_kernel_to_xregs(&fpstate->xsave, mask);
        } else {
                if (use_fxsr())
                        copy_kernel_to_fxregs(&fpstate->fxsave);
                else
                        copy_kernel_to_fregs(&fpstate->fsave);
        }
}

Contributors

Person            Tokens  Prop     Commits  CommitProp
Ingo Molnar       25      43.86%   6        66.67%
Linus Torvalds    18      31.58%   1        11.11%
Suresh B. Siddha  10      17.54%   1        11.11%
Paolo Bonzini     4       7.02%    1        11.11%
Total             57      100.00%  9        100.00%


static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
        /*
         * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
         * pending. Clear the x87 state here by setting it to fixed values.
         * "m" is a random variable that should be in L1.
         */
        if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
                asm volatile(
                        "fnclex\n\t"
                        "emms\n\t"
                        "fildl %P[addr]"	/* set F?P to defined value */
                        : : [addr] "m" (fpstate));
        }

        __copy_kernel_to_fpregs(fpstate, -1);
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      34      89.47%   4        66.67%
Paolo Bonzini    3       7.89%    1        16.67%
Borislav Petkov  1       2.63%    1        16.67%
Total            38      100.00%  6        100.00%

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * matches the FPU.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: CPU if using the CPU for something else
 * (with preemption disabled), FPU for the current task, or a task that
 * is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
        __this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Ingo Molnar     11      68.75%   1        25.00%
Rik Van Riel    4       25.00%   2        50.00%
Linus Torvalds  1       6.25%    1        25.00%
Total           16      100.00%  4        100.00%


static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
        fpu->last_cpu = -1;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
Rik Van Riel  19      100.00%  1        100.00%
Total         19      100.00%  1        100.00%


static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
        return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) &&
               cpu == fpu->last_cpu;
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Ingo Molnar     16      53.33%   2        50.00%
Linus Torvalds  13      43.33%   1        25.00%
Rik Van Riel    1       3.33%    1        25.00%
Total           30      100.00%  4        100.00%

/*
 * These generally need preemption protection to work,
 * do try to avoid using these on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
        trace_x86_fpu_regs_deactivated(fpu);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Linus Torvalds  14      58.33%   1        16.67%
Dave Hansen     5       20.83%   1        16.67%
Ingo Molnar     3       12.50%   2        33.33%
Rik Van Riel    1       4.17%    1        16.67%
Alex Shi        1       4.17%    1        16.67%
Total           24      100.00%  6        100.00%


static inline void fpregs_activate(struct fpu *fpu)
{
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
        trace_x86_fpu_regs_activated(fpu);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Linus Torvalds  13      54.17%   1        16.67%
Dave Hansen     5       20.83%   1        16.67%
Ingo Molnar     4       16.67%   2        33.33%
Rik Van Riel    1       4.17%    1        16.67%
Alex Shi        1       4.17%    1        16.67%
Total           24      100.00%  6        100.00%

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
        if (old_fpu->initialized) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
                        old_fpu->last_cpu = cpu;

                /* But leave fpu_fpregs_owner_ctx! */
                trace_x86_fpu_regs_deactivated(old_fpu);
        } else
                old_fpu->last_cpu = -1;
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Linus Torvalds  31      52.54%   1        9.09%
Ingo Molnar     21      35.59%   7        63.64%
Dave Hansen     5       8.47%    1        9.09%
Rik Van Riel    2       3.39%    2        18.18%
Total           59      100.00%  11       100.00%

/*
 * Misc helper functions:
 */

/*
 * Set up the userspace FPU context for the new task, if the task
 * has used the FPU.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
        bool preload = static_cpu_has(X86_FEATURE_FPU) &&
                       new_fpu->initialized;

        if (preload) {
                if (!fpregs_state_valid(new_fpu, cpu))
                        copy_kernel_to_fpregs(&new_fpu->state);
                fpregs_activate(new_fpu);
        }
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Rik Van Riel    30      53.57%   1        14.29%
Linus Torvalds  18      32.14%   1        14.29%
Ingo Molnar     8       14.29%   5        71.43%
Total           56      100.00%  7        100.00%

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception,
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
        struct fpu *fpu = &current->thread.fpu;

        preempt_disable();
        fpregs_activate(fpu);
        preempt_enable();
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Linus Torvalds  18      56.25%   1        33.33%
Ingo Molnar     14      43.75%   2        66.67%
Total           32      100.00%  3        100.00%

/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000
static inline u64 xgetbv(u32 index)
{
        u32 eax, edx;

        asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
                     : "=a" (eax), "=d" (edx)
                     : "c" (index));
        return eax + ((u64)edx << 32);
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Ingo Molnar  32      100.00%  1        100.00%
Total        32      100.00%  1        100.00%
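
xgetbv/xsetbv are encoded as raw bytes because old assemblers don't know the mnemonics. XGETBV with index 0 (XCR_XFEATURE_ENABLED_MASK) is executable from userspace whenever OSXSAVE is set, so the kernel's routine can be mirrored one-for-one in a small test program (not kernel code):

/* Userspace mirror of xgetbv(); assumes an XSAVE-enabled OS (OSXSAVE=1). */
#include <stdio.h>
#include <stdint.h>

static uint64_t xgetbv_user(uint32_t index)
{
        uint32_t eax, edx;

        asm volatile(".byte 0x0f,0x01,0xd0"	/* xgetbv */
                     : "=a" (eax), "=d" (edx) : "c" (index));
        return eax + ((uint64_t)edx << 32);
}

int main(void)
{
        uint64_t xcr0 = xgetbv_user(0);

        /* Bit 0 = x87, bit 1 = SSE, bit 2 = AVX, ... */
        printf("XCR0 = %#llx\n", (unsigned long long)xcr0);
        return 0;
}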


static inline void xsetbv(u32 index, u64 value)
{
        u32 eax = value;
        u32 edx = value >> 32;

        asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
                     : : "a" (eax), "d" (edx), "c" (index));
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Ingo Molnar  30      100.00%  1        100.00%
Total        30      100.00%  1        100.00%

#endif /* _ASM_X86_FPU_INTERNAL_H */

Overall Contributors

Person              Tokens  Prop     Commits  CommitProp
Ingo Molnar         894     47.73%   48       66.67%
Linus Torvalds      321     17.14%   1        1.39%
Suresh B. Siddha    216     11.53%   2        2.78%
H. Peter Anvin      160     8.54%    2        2.78%
Borislav Petkov     144     7.69%    4        5.56%
Rik Van Riel        62      3.31%    5        6.94%
Yu-cheng Yu         30      1.60%    2        2.78%
Dave Hansen         18      0.96%    1        1.39%
Eric Biggers        8       0.43%    1        1.39%
Masahiro Yamada     8       0.43%    1        1.39%
Paolo Bonzini       7       0.37%    1        1.39%
Alex Shi            2       0.11%    1        1.39%
Andi Kleen          1       0.05%    1        1.39%
Greg Kroah-Hartman  1       0.05%    1        1.39%
Oleg Nesterov       1       0.05%    1        1.39%
Total               1873    100.00%  72       100.00%