cregit-Linux: how code gets into the kernel

Release 4.14: arch/powerpc/kernel/signal_32.c

/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64

#define sys_rt_sigreturn	compat_sys_rt_sigreturn

#define sys_swapcontext	compat_sys_swapcontext

#define sys_sigreturn	compat_sys_sigreturn


#define old_sigaction	old_sigaction32

#define sigcontext	sigcontext32

#define mcontext	mcontext32

#define ucontext	ucontext32


#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */

#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
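
The macro above trims the VSX save area off the full ucontext size so that a
context handed in by older, pre-VSX userspace can still be recognised as valid.
The standalone sketch below is illustrative only: the mock_ucontext layout, the
field sizes, and the ctx_has_vsx_region() helper are invented stand-ins, not
the kernel's types, but they show the same size arithmetic and the kind of
check a caller might perform.

#include <stdio.h>
#include <stddef.h>

/* Invented stand-ins for the real kernel types (illustration only). */
struct mock_vsx_tail {
        unsigned int vsr_low[32];               /* "VSX added at the end" */
};

struct mock_ucontext {
        unsigned char fixed_part[1024];         /* everything before the VSX block */
        struct mock_vsx_tail vsx;               /* absent in frames from old userspace */
};

#define MOCK_UCONTEXT_SIZE_WITHOUT_VSX \
        (sizeof(struct mock_ucontext) - sizeof(struct mock_vsx_tail))

/* Returns 1 if the caller supplied a full (VSX-capable) context, 0 if it
 * supplied an old-style context without the VSX tail, -1 if it is too small. */
static int ctx_has_vsx_region(size_t user_ctx_size)
{
        if (user_ctx_size < MOCK_UCONTEXT_SIZE_WITHOUT_VSX)
                return -1;
        return user_ctx_size >= sizeof(struct mock_ucontext);
}

int main(void)
{
        printf("full size   : %zu\n", sizeof(struct mock_ucontext));
        printf("without VSX : %zu\n", (size_t)MOCK_UCONTEXT_SIZE_WITHOUT_VSX);
        printf("old frame   : %d\n", ctx_has_vsx_region(MOCK_UCONTEXT_SIZE_WITHOUT_VSX));
        printf("new frame   : %d\n", ctx_has_vsx_region(sizeof(struct mock_ucontext)));
        return 0;
}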

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */


#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))

#undef __SIGNAL_FRAMESIZE

#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32

#undef ELF_NVRREG

#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */

static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
        compat_sigset_t cset;

        switch (_NSIG_WORDS) {
        case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
                cset.sig[7] = set->sig[3] >> 32;
        case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
                cset.sig[5] = set->sig[2] >> 32;
        case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
                cset.sig[3] = set->sig[1] >> 32;
        case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
                cset.sig[1] = set->sig[0] >> 32;
        }
        return copy_to_user(uset, &cset, sizeof(*uset));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Andrew Morton         106   58.89%        1      14.29%
Stephen Rothwell       38   21.11%        1      14.29%
Peter Bergner          23   12.78%        1      14.29%
Anton Blanchard        12    6.67%        3      42.86%
Will Deacon             1    0.56%        1      14.29%
Total                 180  100.00%        7     100.00%


static inline int get_sigset_t(sigset_t *set,
                               const compat_sigset_t __user *uset)
{
        compat_sigset_t s32;

        if (copy_from_user(&s32, uset, sizeof(*uset)))
                return -EFAULT;

        /*
         * Swap the 2 words of the 64-bit sigset_t (they are stored
         * in the "wrong" endian in 32-bit user storage).
         */
        switch (_NSIG_WORDS) {
        case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
        case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
        case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
        case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
        }
        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Andrew Morton          89   48.90%        1      16.67%
Stephen Rothwell       59   32.42%        1      16.67%
Anton Blanchard        33   18.13%        3      50.00%
Paul Mackerras          1    0.55%        1      16.67%
Total                 182  100.00%        6     100.00%
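
put_sigset_t() and get_sigset_t() above exist because each 64-bit sigset_t
word is exposed to 32-bit userspace as two 32-bit compat words, low half
first. The standalone program below (illustrative only; the sample value is
arbitrary) demonstrates the split and reassembly for a single word.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t kernel_word = 0x1122334455667788ULL;   /* one sigset_t word (arbitrary) */
        uint32_t compat[2];
        uint64_t back;

        /* put_sigset_t() direction: split into low/high 32-bit compat words */
        compat[0] = (uint32_t)(kernel_word & 0xffffffffull);
        compat[1] = (uint32_t)(kernel_word >> 32);

        /* get_sigset_t() direction: reassemble the 64-bit word */
        back = compat[0] | ((uint64_t)compat[1] << 32);

        printf("compat[0]=0x%08x compat[1]=0x%08x back=0x%016llx\n",
               compat[0], compat[1], (unsigned long long)back);
        return 0;
}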

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
                struct mcontext __user *frame)
{
        elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
        int i;

        WARN_ON(!FULL_REGS(regs));

        for (i = 0; i <= PT_RESULT; i++) {
                if (i == 14 && !FULL_REGS(regs))
                        i = 32;
                if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
                        return -EFAULT;
        }
        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Andrew Morton          48   48.00%        1      14.29%
David Woodhouse        25   25.00%        1      14.29%
Anton Blanchard        17   17.00%        2      28.57%
Stephen Rothwell        7    7.00%        1      14.29%
Paul Mackerras          2    2.00%        1      14.29%
Linus Torvalds          1    1.00%        1      14.29%
Total                 100  100.00%        7     100.00%


static inline int restore_general_regs(struct pt_regs *regs,
                struct mcontext __user *sr)
{
        elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
        int i;

        for (i = 0; i <= PT_RESULT; i++) {
                if ((i == PT_MSR) || (i == PT_SOFTE))
                        continue;
                if (__get_user(gregs[i], &sr->mc_gregs[i]))
                        return -EFAULT;
        }
        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Anton Blanchard        38   44.19%        4      66.67%
Andrew Morton          35   40.70%        1      16.67%
Stephen Rothwell       13   15.12%        1      16.67%
Total                  86  100.00%        6     100.00%

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
        return copy_to_user(uset, set, sizeof(*uset));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Stephen Rothwell       30  100.00%        1     100.00%
Total                  30  100.00%        1     100.00%


static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
        return copy_from_user(set, uset, sizeof(*uset));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Stephen Rothwell       30   96.77%        1      50.00%
Paul Mackerras          1    3.23%        1      50.00%
Total                  31  100.00%        2     100.00%

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
                struct mcontext __user *frame)
{
        WARN_ON(!FULL_REGS(regs));
        return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Stephen Rothwell       31   77.50%        1      33.33%
David Woodhouse         7   17.50%        1      33.33%
Paul Mackerras          2    5.00%        1      33.33%
Total                  40  100.00%        3     100.00%


static inline int restore_general_regs(struct pt_regs *regs,
                struct mcontext __user *sr)
{
        /* copy up to but not including MSR */
        if (__copy_from_user(regs, &sr->mc_gregs,
                                PT_MSR * sizeof(elf_greg_t)))
                return -EFAULT;
        /* copy from orig_r3 (the word after the MSR) up to the end */
        if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
                                GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
                return -EFAULT;
        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Stephen Rothwell       77  100.00%        1     100.00%
Total                  77  100.00%        1     100.00%

#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
        struct sigcontext sctx;		/* the sigcontext */
        struct mcontext mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct sigcontext sctx_transact;
        struct mcontext mctx_transact;
#endif
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
         */
        int abigap[56];
};

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 * (the +16 is to get the siginfo and ucontext in the same
 * positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
        compat_siginfo_t info;
#else
        struct siginfo info;
#endif
        struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct ucontext uc_transact;
#endif
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
         */
        int abigap[56];
};

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
                               struct task_struct *task)
{
        u64 buf[ELF_NFPREG];
        int i;

        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_FPR(i);
        buf[i] = task->thread.fp_state.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        81   95.29%        1      50.00%
Paul Mackerras          4    4.71%        1      50.00%
Total                  85  100.00%        2     100.00%


unsigned long copy_fpr_from_user(struct task_struct *task,
                                 void __user *from)
{
        u64 buf[ELF_NFPREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_FPR(i) = buf[i];
        task->thread.fp_state.fpscr = buf[i];

        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        87   95.60%        1      50.00%
Paul Mackerras          4    4.40%        1      50.00%
Total                  91  100.00%        2     100.00%


unsigned long copy_vsx_to_user(void __user *to,
                               struct task_struct *task)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
                buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        70   95.89%        1      50.00%
Paul Mackerras          3    4.11%        1      50.00%
Total                  73  100.00%        2     100.00%


unsigned long copy_vsx_from_user(struct task_struct *task,
                                 void __user *from)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
                task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        76   96.20%        1      50.00%
Paul Mackerras          3    3.80%        1      50.00%
Total                  79  100.00%        2     100.00%
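
The four copy_*_to/from_user() helpers above all follow the same pattern:
marshal the registers into a flat local buffer, then perform a single
user-space copy. The standalone sketch below mimics that packing; the
mock_thread layout and the values in it are invented for illustration and are
not the kernel's thread_struct, though the counts mirror ELF_NFPREG (32 FPRs
plus FPSCR) and ELF_NVSRHALFREG (32 VSR low halves).

#include <stdio.h>
#include <stdint.h>

#define NFPREG          33      /* 32 FPRs + FPSCR, mirroring ELF_NFPREG */
#define NVSRHALF        32      /* low doublewords of VSR0-31, mirroring ELF_NVSRHALFREG */

struct mock_thread {
        uint64_t fpr[32][2];    /* [reg][0] = FP value, [reg][1] = VSR low half */
        uint64_t fpscr;
};

int main(void)
{
        struct mock_thread t = { .fpscr = 0xfff80000ULL };
        uint64_t fpbuf[NFPREG], vsxbuf[NVSRHALF];
        int i;

        for (i = 0; i < 32; i++) {
                t.fpr[i][0] = 0x1000 + i;       /* pretend FP contents */
                t.fpr[i][1] = 0x2000 + i;       /* pretend VSR low halves */
        }

        /* copy_fpr_to_user() pattern: FPR0-31, then FPSCR in the final slot */
        for (i = 0; i < NFPREG - 1; i++)
                fpbuf[i] = t.fpr[i][0];
        fpbuf[i] = t.fpscr;

        /* copy_vsx_to_user() pattern: only the low doubleword of each VSR */
        for (i = 0; i < NVSRHALF; i++)
                vsxbuf[i] = t.fpr[i][1];

        printf("fpbuf[31]=0x%llx fpscr slot=0x%llx vsxbuf[0]=0x%llx\n",
               (unsigned long long)fpbuf[31],
               (unsigned long long)fpbuf[NFPREG - 1],
               (unsigned long long)vsxbuf[0]);
        return 0;
}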

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_ckfpr_to_user(void __user *to,
                                 struct task_struct *task)
{
        u64 buf[ELF_NFPREG];
        int i;

        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_CKFPR(i);
        buf[i] = task->thread.ckfp_state.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        78   91.76%        2      50.00%
Paul Mackerras          4    4.71%        1      25.00%
Cyril Bur               3    3.53%        1      25.00%
Total                  85  100.00%        4     100.00%


unsigned long copy_ckfpr_from_user(struct task_struct *task,
                                   void __user *from)
{
        u64 buf[ELF_NFPREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_CKFPR(i) = buf[i];
        task->thread.ckfp_state.fpscr = buf[i];

        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        68   74.73%        2      28.57%
Stephen Rothwell       10   10.99%        1      14.29%
Andrew Morton           5    5.49%        1      14.29%
Paul Mackerras          4    4.40%        1      14.29%
Cyril Bur               3    3.30%        1      14.29%
Anton Blanchard         1    1.10%        1      14.29%
Total                  91  100.00%        7     100.00%


unsigned long copy_ckvsx_to_user(void __user *to,
                                 struct task_struct *task)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
                buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        68   93.15%        1      33.33%
Paul Mackerras          3    4.11%        1      33.33%
Cyril Bur               2    2.74%        1      33.33%
Total                  73  100.00%        3     100.00%


unsigned long copy_ckvsx_from_user(struct task_struct *task,
                                   void __user *from)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
                task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        74   93.67%        1      33.33%
Paul Mackerras          3    3.80%        1      33.33%
Cyril Bur               2    2.53%        1      33.33%
Total                  79  100.00%        3     100.00%

#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else

inline unsigned long copy_fpr_to_user(void __user *to,
                                      struct task_struct *task)
{
        return __copy_to_user(to, task->thread.fp_state.fpr,
                              ELF_NFPREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        36   94.74%        1      50.00%
Paul Mackerras          2    5.26%        1      50.00%
Total                  38  100.00%        2     100.00%


inline unsigned long copy_fpr_from_user(struct task_struct *task,
                                        void __user *from)
{
        return __copy_from_user(task->thread.fp_state.fpr, from,
                                ELF_NFPREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        36   94.74%        1      50.00%
Paul Mackerras          2    5.26%        1      50.00%
Total                  38  100.00%        2     100.00%

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_ckfpr_to_user(void __user *to,
                                        struct task_struct *task)
{
        return __copy_to_user(to, task->thread.ckfp_state.fpr,
                              ELF_NFPREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        34   89.47%        1      33.33%
Cyril Bur               2    5.26%        1      33.33%
Paul Mackerras          2    5.26%        1      33.33%
Total                  38  100.00%        3     100.00%


inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
                                          void __user *from)
{
        return __copy_from_user(task->thread.ckfp_state.fpr, from,
                                ELF_NFPREG * sizeof(double));
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling        34   89.47%        1      33.33%
Paul Mackerras          2    5.26%        1      33.33%
Cyril Bur               2    5.26%        1      33.33%
Total                  38  100.00%        3     100.00%

#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
                          struct mcontext __user *tm_frame, int sigret,
                          int ctx_has_vsx_region)
{
        unsigned long msr = regs->msr;

        /* Make sure floating point registers are stored in regs */
        flush_fp_to_thread(current);

        /* save general registers */
        if (save_general_regs(regs, frame))
                return 1;

#ifdef CONFIG_ALTIVEC
        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                /* set MSR_VEC in the saved MSR value to indicate that
                   frame->mc_vregs contains valid data */
                msr |= MSR_VEC;
        }
        /* else assert((regs->msr & MSR_VEC) == 0) */

        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec. Since VSCR only contains 32 bits saved in the least
         * significant bits of a vector, we "cheat" and stuff VRSAVE in the
         * most significant bits of that same vector. --BenH
         * Note that the current VRSAVE value is in the SPR at this point.
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                current->thread.vrsave = mfspr(SPRN_VRSAVE);
        if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
                return 1;
#endif /* CONFIG_ALTIVEC */
        if (copy_fpr_to_user(&frame->mc_fregs, current))
                return 1;

        /*
         * Clear the MSR VSX bit to indicate there is no valid state attached
         * to this context, except in the specific case below where we set it.
         */
        msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
        /*
         * Copy VSR 0-31 upper half from thread_struct to local
         * buffer, then write that to userspace.  Also set MSR_VSX in
         * the saved MSR value to indicate that frame->mc_vregs
         * contains valid data
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
                flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* save spe registers */
        if (current->thread.used_spe) {
                flush_spe_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
                                   ELF_NEVRREG * sizeof(u32)))
                        return 1;
                /* set MSR_SPE in the saved MSR value to indicate that
                   frame->mc_vregs contains valid data */
                msr |= MSR_SPE;
        }
        /* else assert((regs->msr & MSR_SPE) == 0) */

        /* We always copy to/from spefscr */
        if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
                return 1;
        /* We need to write 0 the MSR top 32 bits in the tm frame so that we
         * can check it on the restore to see if TM is active
         */
        if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
                return 1;

        if (sigret) {
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
                    || __put_user(0x44000002UL, &frame->tramp[1]))
                        return 1;
                flush_icache_range((unsigned long) &frame->tramp[0],
                                   (unsigned long) &frame->tramp[2]);
        }

        return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Michael Neuling       389   94.65%        3      50.00%
Paul Mackerras         21    5.11%        2      33.33%
Anton Blanchard         1    0.24%        1      16.67%
Total                 411  100.00%        6     100.00%
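
save_user_regs() above writes a two-instruction sigreturn trampoline onto the
user stack: "li r0,sigret; sc". The standalone sketch below (illustrative
only; the sigret value is an arbitrary stand-in for a syscall number) shows
how the constants 0x38000000 and 0x44000002 used there decompose into the
addi (primary opcode 14) and sc (primary opcode 17, bit 1 set) encodings.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int sigret = 0x77;             /* stand-in for a sigreturn syscall number */

        uint32_t li_r0 = (14u << 26) | sigret;  /* li r0,sigret == addi r0,0,sigret */
        uint32_t sc    = (17u << 26) | 2u;      /* sc */

        printf("li r0,%u -> 0x%08x (0x38000000 + sigret)\n", sigret, li_r0);
        printf("sc        -> 0x%08x\n", sc);
        return 0;
}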

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
                             struct mcontext __user *frame,
                             struct mcontext __user *tm_frame, int sigret)
{
        unsigned long msr = regs->msr;

        /* Remove TM bits from thread's MSR.  The MSR in the sigcontext
         * just indicates to userland that we were doing a transaction, but we
         * don't want to return in transactional state.  This also ensures
         * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
         */
        regs->msr &= ~MSR_TS_MASK;

        /* Save both sets of general registers */
        if (save_general_regs(&current->thread.ckpt_regs, frame)
            || save_general_regs(regs, tm_frame))
                return 1;

        /* Stash the top half of the 64bit MSR into the 32bit MSR word
         * of the transactional mcontext.  This way we have a backward-compatible
         * MSR in the 'normal' (checkpointed) mcontext and additionally one can
         * also look at what type of transaction (T or S) was active at the
         * time of the signal.
         */
        if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
                return 1;

#ifdef CONFIG_ALTIVEC
        /* save altivec registers */
        if (current->thread.used_vr) {
                if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                if (msr & MSR_VEC) {
                        if (__copy_to_user(&tm_frame->mc_vregs,
                                           &current->thread.vr_state,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                } else {
                        if (__copy_to_user(&tm_frame->mc_vregs,
                                           &current->thread.ckvr_state,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                }

                /* set MSR_VEC in the saved MSR value to indicate that
                 * frame->mc_vregs contains valid data
                 */
                msr |= MSR_VEC;
        }

        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec. Since VSCR only contains 32 bits saved in the least
         * significant bits of a vector, we "cheat" and stuff VRSAVE in the
         * most significant bits of that same vector. --BenH
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
        if (__put_user(current->thread.ckvrsave,
                       (u32 __user *)&frame->mc_vregs[32]))
                return 1;
        if (msr & MSR_VEC) {
                if (__put_user(current->thread.vrsave,
                               (u32 __user *)&tm_frame->mc_vregs[32]))
                        return 1;
        } else {
                if (__put_user(current->thread.ckvrsave,
                               (u32 __user *)&tm_frame->mc_vregs[32]))
                        return 1;
        }
#endif /* CONFIG_ALTIVEC */

        if (copy_ckfpr_to_user(&frame->mc_fregs, current))
                return 1;
        if (msr & MSR_FP) {
                if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
                        return 1;
        } else {
                if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
                        return 1;
        }

#ifdef CONFIG_VSX
        /*
         * Copy VSR 0-31 upper half from thread_struct to local
         * buffer, then write that to userspace.  Also set MSR_VSX in
         * the saved MSR value to indicate that frame->mc_vregs
         * contains valid data
         */
        if (current->thread.used_vsr) {
                if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                if (msr & MSR_VSX) {
                        if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
                                return 1;
                } else {
                        if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
                                return 1;
                }

                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* SPE regs are not checkpointed with TM, so this section is
         * simply the same as in save_user_regs().
         */
        if (current->thread.used_spe) {
                flush_spe_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
                                   ELF_NEVRREG * sizeof(u32)))
                        return 1;
                /* set MSR_SPE in the saved MSR value to indicate that
                 * frame->mc_vregs contains valid data */
                msr |= MSR_SPE;
        }

        /* We always copy to/from spefscr */
        if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
                return 1;
        if (sigret) {
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
                    || __put_user(0x44000002UL, &frame->tramp[1]))
                        return 1;
                flush_icache_range((unsigned long) &frame->