/* Release 4.14 arch/powerpc/lib/sstep.c */
/*
* Single-step support.
*
* Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/prefetch.h>
#include <asm/sstep.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
extern char system_call_common[];
#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
#define MSR_MASK 0xffffffff87c0ffffUL
#else
#define MSR_MASK 0x87c0ffff
#endif
/* Bits in XER */
#define XER_SO 0x80000000U
#define XER_OV 0x40000000U
#define XER_CA 0x20000000U
#ifdef CONFIG_PPC_FPU
/*
* Functions in ldstfp.S
*/
extern void get_fpr(int rn, double *p);
extern void put_fpr(int rn, const double *p);
extern void get_vr(int rn, __vector128 *p);
extern void put_vr(int rn, __vector128 *p);
extern void load_vsrn(int vsr, const void *p);
extern void store_vsrn(int vsr, void *p);
extern void conv_sp_to_dp(const float *sp, double *dp);
extern void conv_dp_to_sp(const double *dp, float *sp);
#endif
#ifdef __powerpc64__
/*
* Functions in quad.S
*/
extern int do_lq(unsigned long ea, unsigned long *regs);
extern int do_stq(unsigned long ea, unsigned long val0, unsigned long val1);
extern int do_lqarx(unsigned long ea, unsigned long *regs);
extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
unsigned int *crp);
#endif
#ifdef __LITTLE_ENDIAN__
#define IS_LE 1
#define IS_BE 0
#else
#define IS_LE 0
#define IS_BE 1
#endif
/*
 * Emulate the truncation of 64 bit values in 32-bit mode.
 */
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
						       unsigned long val)
{
#ifdef __powerpc64__
	/* In 32-bit mode the top 32 bits of every result are discarded. */
	if (!(msr & MSR_64BIT))
		return val & 0xffffffffUL;
#endif
	return val;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Michael Ellerman | 37 | 97.37% | 1 | 50.00% |
 * Naveen N. Rao | 1 | 2.63% | 1 | 50.00% |
 * Total | 38 | 100.00% | 2 | 100.00% |
 */
/*
 * Determine whether a conditional branch instruction would branch,
 * given the current CTR and CR values.  Records a CTR decrement in
 * op->type when the BO field calls for one.  Returns 1 if the branch
 * is taken, 0 otherwise.
 */
static nokprobe_inline int branch_taken(unsigned int instr,
					const struct pt_regs *regs,
					struct instruction_op *op)
{
	unsigned int bo = (instr >> 21) & 0x1f;
	unsigned int bi;

	if ((bo & 4) == 0) {
		/* decrement counter */
		op->type |= DECCTR;
		/*
		 * bo bit 1 selects "branch if decremented CTR == 0";
		 * ctr == 1 now means ctr == 0 after the decrement.
		 */
		if (((bo >> 1) & 1) ^ (regs->ctr == 1))
			return 0;
	}
	if ((bo & 0x10) == 0) {
		/* check bit from CR */
		bi = (instr >> 16) & 0x1f;
		/* bo bit 3 gives the CR bit value required for the branch */
		if (((regs->ccr >> (31 - bi)) & 1) != ((bo >> 3) & 1))
			return 0;
	}
	return 1;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 136 | 99.27% | 2 | 66.67% |
 * Naveen N. Rao | 1 | 0.73% | 1 | 33.33% |
 * Total | 137 | 100.00% | 3 | 100.00% |
 */
/*
 * Check whether the emulated access [ea, ea+nb) is permitted.
 * Kernel-mode accesses are allowed outright; user-mode accesses go
 * through __access_ok().  On failure, set regs->dar to the faulting
 * address (or to the end of the user region when the access merely
 * overruns it) and return 0.
 */
static nokprobe_inline long address_ok(struct pt_regs *regs,
				       unsigned long ea, int nb)
{
	if (!user_mode(regs))
		return 1;
	if (__access_ok(ea, nb, USER_DS))
		return 1;
	if (__access_ok(ea, 1, USER_DS))
		/* Access overlaps the end of the user region */
		regs->dar = USER_DS.seg;
	else
		regs->dar = ea;
	return 0;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 73 | 98.65% | 3 | 75.00% |
 * Naveen N. Rao | 1 | 1.35% | 1 | 25.00% |
 * Total | 74 | 100.00% | 4 | 100.00% |
 */
/*
 * Calculate effective address for a D-form instruction:
 * (RA|0) + sign-extended 16-bit displacement.
 */
static nokprobe_inline unsigned long dform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra = (instr >> 16) & 0x1f;
	/* D field is the low 16 bits, sign-extended */
	unsigned long ea = (signed short) instr;

	if (ra)
		ea += regs->gpr[ra];
	return ea;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 58 | 96.67% | 3 | 60.00% |
 * Michael Ellerman | 1 | 1.67% | 1 | 20.00% |
 * Naveen N. Rao | 1 | 1.67% | 1 | 20.00% |
 * Total | 60 | 100.00% | 5 | 100.00% |
 */
#ifdef __powerpc64__
/*
 * Calculate effective address for a DS-form instruction:
 * (RA|0) + sign-extended displacement with the low 2 bits cleared.
 */
static nokprobe_inline unsigned long dsform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra = (instr >> 16) & 0x1f;
	/* DS field: low 16 bits, bottom 2 bits masked off, sign-extended */
	unsigned long ea = (signed short) (instr & ~3);

	if (ra)
		ea += regs->gpr[ra];
	return ea;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 63 | 96.92% | 3 | 60.00% |
 * Naveen N. Rao | 1 | 1.54% | 1 | 20.00% |
 * Michael Ellerman | 1 | 1.54% | 1 | 20.00% |
 * Total | 65 | 100.00% | 5 | 100.00% |
 */
/*
 * Calculate effective address for a DQ-form instruction:
 * (RA|0) + sign-extended displacement with the low 4 bits cleared.
 */
static nokprobe_inline unsigned long dqform_ea(unsigned int instr,
					       const struct pt_regs *regs)
{
	int ra = (instr >> 16) & 0x1f;
	/* DQ field: low 16 bits, bottom 4 bits masked off, sign-extended */
	unsigned long ea = (signed short) (instr & ~0xf);

	if (ra)
		ea += regs->gpr[ra];
	return ea;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 65 | 100.00% | 1 | 100.00% |
 * Total | 65 | 100.00% | 1 | 100.00% |
 */
#endif /* __powerpc64 */
/*
 * Calculate effective address for an X-form instruction:
 * (RA|0) + (RB).
 */
static nokprobe_inline unsigned long xform_ea(unsigned int instr,
					      const struct pt_regs *regs)
{
	int ra = (instr >> 16) & 0x1f;
	int rb = (instr >> 11) & 0x1f;
	unsigned long ea = regs->gpr[rb];

	if (ra)
		ea += regs->gpr[ra];
	return ea;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 70 | 97.22% | 3 | 60.00% |
 * Naveen N. Rao | 1 | 1.39% | 1 | 20.00% |
 * Michael Ellerman | 1 | 1.39% | 1 | 20.00% |
 * Total | 72 | 100.00% | 5 | 100.00% |
 */
/*
 * Return the largest power of 2, not greater than sizeof(unsigned long),
 * such that x is a multiple of it.
 */
static nokprobe_inline unsigned long max_align(unsigned long x)
{
	/* cap the answer at sizeof(unsigned long) by forcing that bit on */
	unsigned long v = x | sizeof(unsigned long);

	return v & -v;		/* isolate the lowest set bit */
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 26 | 96.30% | 1 | 50.00% |
 * Naveen N. Rao | 1 | 3.70% | 1 | 50.00% |
 * Total | 27 | 100.00% | 2 | 100.00% |
 */
/* Byte-swap the low-order 2 bytes of x. */
static nokprobe_inline unsigned long byterev_2(unsigned long x)
{
	return ((x & 0xff) << 8) | ((x >> 8) & 0xff);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 32 | 96.97% | 1 | 50.00% |
 * Naveen N. Rao | 1 | 3.03% | 1 | 50.00% |
 * Total | 33 | 100.00% | 2 | 100.00% |
 */
/* Byte-swap the low-order 4 bytes of x. */
static nokprobe_inline unsigned long byterev_4(unsigned long x)
{
	return ((x & 0xff) << 24) | ((x & 0xff00) << 8) |
		((x >> 8) & 0xff00) | ((x >> 24) & 0xff);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 52 | 98.11% | 1 | 50.00% |
 * Naveen N. Rao | 1 | 1.89% | 1 | 50.00% |
 * Total | 53 | 100.00% | 2 | 100.00% |
 */
#ifdef __powerpc64__
/* Byte-swap all 8 bytes of x: swap the halves, reversing each. */
static nokprobe_inline unsigned long byterev_8(unsigned long x)
{
	return byterev_4(x >> 32) | (byterev_4(x) << 32);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 28 | 96.55% | 1 | 50.00% |
 * Naveen N. Rao | 1 | 3.45% | 1 | 50.00% |
 * Total | 29 | 100.00% | 2 | 100.00% |
 */
#endif
/*
 * Byte-reverse an nb-byte quantity in place.  nb must be 2, 4 or,
 * on 64-bit, 8 or 16; the 16-byte case swaps the two 8-byte halves
 * as well as the bytes within each, i.e. a full 128-bit reversal.
 * Anything else triggers a one-time warning and leaves *ptr alone.
 */
static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
{
	switch (nb) {
	case 2:
		*(u16 *)ptr = byterev_2(*(u16 *)ptr);
		break;
	case 4:
		*(u32 *)ptr = byterev_4(*(u32 *)ptr);
		break;
#ifdef __powerpc64__
	case 8:
		*(unsigned long *)ptr = byterev_8(*(unsigned long *)ptr);
		break;
	case 16: {
		/* reverse each half, then exchange the halves */
		unsigned long *up = (unsigned long *)ptr;
		unsigned long tmp;

		tmp = byterev_8(up[0]);
		up[0] = byterev_8(up[1]);
		up[1] = tmp;
		break;
	}
#endif
	default:
		WARN_ON_ONCE(1);
	}
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 148 | 100.00% | 1 | 100.00% |
 * Total | 148 | 100.00% | 1 | 100.00% |
 */
/*
 * Read a naturally aligned nb-byte quantity (nb = 1, 2, 4 or, on
 * 64-bit, 8) at ea into *dest, zero-extended.  On a fault, record
 * ea in regs->dar and return the __get_user() error code.
 */
static nokprobe_inline int read_mem_aligned(unsigned long *dest,
					    unsigned long ea, int nb,
					    struct pt_regs *regs)
{
	int err = 0;
	unsigned long x = 0;

	switch (nb) {
	case 1:
		err = __get_user(x, (unsigned char __user *) ea);
		break;
	case 2:
		err = __get_user(x, (unsigned short __user *) ea);
		break;
	case 4:
		err = __get_user(x, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __get_user(x, (unsigned long __user *) ea);
		break;
#endif
	}
	if (!err)
		*dest = x;
	else
		regs->dar = ea;
	return err;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 141 | 99.30% | 2 | 66.67% |
 * Naveen N. Rao | 1 | 0.70% | 1 | 33.33% |
 * Total | 142 | 100.00% | 3 | 100.00% |
 */
/*
 * Copy from userspace to a buffer, using the largest possible
 * aligned accesses, up to sizeof(long).  On a fault, record the
 * faulting address in regs->dar and return the __get_user() error.
 */
static int nokprobe_inline copy_mem_in(u8 *dest, unsigned long ea, int nb,
				       struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		/* largest chunk that keeps ea aligned and fits in nb */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __get_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __get_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __get_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __get_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 162 | 83.51% | 3 | 75.00% |
 * Tom Musta | 32 | 16.49% | 1 | 25.00% |
 * Total | 194 | 100.00% | 4 | 100.00% |
 */
/*
 * Read nb bytes at an unaligned address into *dest via a bounce
 * union, so that the result is right-justified in *dest on
 * big-endian systems and occupies the low-order bytes on
 * little-endian ones.
 */
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
					      unsigned long ea, int nb,
					      struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int i;
	int err;

	u.ul = 0;
	/* on BE, the value's bytes live at the high end of the buffer */
	i = IS_BE ? sizeof(unsigned long) - nb : 0;
	err = copy_mem_in(&u.b[i], ea, nb, regs);
	if (!err)
		*dest = u.ul;
	return err;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 103 | 100.00% | 3 | 100.00% |
 * Total | 103 | 100.00% | 3 | 100.00% |
 */
/*
 * Read memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
 * If nb < sizeof(long), the result is right-justified on BE systems.
 */
static int read_mem(unsigned long *dest, unsigned long ea, int nb,
		    struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	/* misaligned accesses are split into naturally aligned pieces */
	if (ea & (nb - 1))
		return read_mem_unaligned(dest, ea, nb, regs);
	return read_mem_aligned(dest, ea, nb, regs);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 77 | 100.00% | 2 | 100.00% |
 * Total | 77 | 100.00% | 2 | 100.00% |
 */
NOKPROBE_SYMBOL(read_mem);
/*
 * Store the low nb bytes of val (nb = 1, 2, 4 or, on 64-bit, 8) to
 * the naturally aligned address ea.  On a fault, record ea in
 * regs->dar and return the __put_user() error code.
 */
static nokprobe_inline int write_mem_aligned(unsigned long val,
					     unsigned long ea, int nb,
					     struct pt_regs *regs)
{
	int err = 0;

	switch (nb) {
	case 1:
		err = __put_user(val, (unsigned char __user *) ea);
		break;
	case 2:
		err = __put_user(val, (unsigned short __user *) ea);
		break;
	case 4:
		err = __put_user(val, (unsigned int __user *) ea);
		break;
#ifdef __powerpc64__
	case 8:
		err = __put_user(val, (unsigned long __user *) ea);
		break;
#endif
	}
	if (err)
		regs->dar = ea;
	return err;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 127 | 99.22% | 2 | 66.67% |
 * Naveen N. Rao | 1 | 0.78% | 1 | 33.33% |
 * Total | 128 | 100.00% | 3 | 100.00% |
 */
/*
 * Copy from a buffer to userspace, using the largest possible
 * aligned accesses, up to sizeof(long).  On a fault, record the
 * faulting address in regs->dar and return the __put_user() error.
 */
static int nokprobe_inline copy_mem_out(u8 *dest, unsigned long ea, int nb,
					struct pt_regs *regs)
{
	int err = 0;
	int c;

	for (; nb > 0; nb -= c) {
		/* largest chunk that keeps ea aligned and fits in nb */
		c = max_align(ea);
		if (c > nb)
			c = max_align(nb);
		switch (c) {
		case 1:
			err = __put_user(*dest, (unsigned char __user *) ea);
			break;
		case 2:
			err = __put_user(*(u16 *)dest,
					 (unsigned short __user *) ea);
			break;
		case 4:
			err = __put_user(*(u32 *)dest,
					 (unsigned int __user *) ea);
			break;
#ifdef __powerpc64__
		case 8:
			err = __put_user(*(unsigned long *)dest,
					 (unsigned long __user *) ea);
			break;
#endif
		}
		if (err) {
			regs->dar = ea;
			return err;
		}
		dest += c;
		ea += c;
	}
	return 0;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 192 | 98.97% | 3 | 75.00% |
 * Tom Musta | 2 | 1.03% | 1 | 25.00% |
 * Total | 194 | 100.00% | 4 | 100.00% |
 */
/*
 * Store the low nb bytes of val to an unaligned address via a
 * bounce union; on big-endian the value's bytes sit at the high
 * end of the buffer, on little-endian at the low end.
 */
static nokprobe_inline int write_mem_unaligned(unsigned long val,
					       unsigned long ea, int nb,
					       struct pt_regs *regs)
{
	union {
		unsigned long ul;
		u8 b[sizeof(unsigned long)];
	} u;
	int off = IS_BE ? sizeof(unsigned long) - nb : 0;

	u.ul = val;
	return copy_mem_out(&u.b[off], ea, nb, regs);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 83 | 100.00% | 2 | 100.00% |
 * Total | 83 | 100.00% | 2 | 100.00% |
 */
/*
 * Write memory at address ea for nb bytes, return 0 for success
 * or -EFAULT if an error occurred. N.B. nb must be 1, 2, 4 or 8.
 */
static int write_mem(unsigned long val, unsigned long ea, int nb,
		     struct pt_regs *regs)
{
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	/* misaligned accesses are split into naturally aligned pieces */
	if (ea & (nb - 1))
		return write_mem_unaligned(val, ea, nb, regs);
	return write_mem_aligned(val, ea, nb, regs);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 76 | 100.00% | 2 | 100.00% |
 * Total | 76 | 100.00% | 2 | 100.00% |
 */
NOKPROBE_SYMBOL(write_mem);
#ifdef CONFIG_PPC_FPU
/*
 * These access either the real FP register or the image in the
 * thread_struct, depending on regs->msr & MSR_FP.
 */
/*
 * Emulate a floating-point load into FP register op->reg.
 * The size (from op->type) is 4 (with optional single->double
 * conversion via FPCONV, or integer sign-extension via SIGNEXT),
 * 8, or 16 (lfdp, which fills an even/odd register pair).
 * Returns 0 on success, -EFAULT or a copy error on failure.
 */
static int do_fp_load(struct instruction_op *op, unsigned long ea,
		      struct pt_regs *regs, bool cross_endian)
{
	int err, rn, nb;
	union {
		int i;
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	err = copy_mem_in(u.b, ea, nb, regs);
	if (err)
		return err;
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte half separately, not the full 16 */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	/* keep the MSR_FP test and the register update on the same CPU */
	preempt_disable();
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_sp_to_dp(&u.f, &u.d[0]);
		else if (op->type & SIGNEXT)
			u.l[0] = u.i;
		else
			u.l[0] = u.u;
	}
	/* write the live FPR if the FP unit is enabled, else the image */
	if (regs->msr & MSR_FP)
		put_fpr(rn, &u.d[0]);
	else
		current->thread.TS_FPR(rn) = u.l[0];
	if (nb == 16) {
		/* lfdp */
		rn |= 1;
		if (regs->msr & MSR_FP)
			put_fpr(rn, &u.d[1]);
		else
			current->thread.TS_FPR(rn) = u.l[1];
	}
	preempt_enable();
	return 0;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 326 | 100.00% | 7 | 100.00% |
 * Total | 326 | 100.00% | 7 | 100.00% |
 */
NOKPROBE_SYMBOL(do_fp_load);
/*
 * Emulate a floating-point store from FP register op->reg.
 * The size (from op->type) is 4 (with optional double->single
 * conversion via FPCONV), 8, or 16 (stfdp, which stores an
 * even/odd register pair).  Returns 0 on success, -EFAULT or a
 * copy error on failure.
 */
static int do_fp_store(struct instruction_op *op, unsigned long ea,
		       struct pt_regs *regs, bool cross_endian)
{
	int rn, nb;
	union {
		unsigned int u;
		float f;
		double d[2];
		unsigned long l[2];
		u8 b[2 * sizeof(double)];
	} u;

	nb = GETSIZE(op->type);
	if (!address_ok(regs, ea, nb))
		return -EFAULT;
	rn = op->reg;
	/* keep the MSR_FP test and the register read on the same CPU */
	preempt_disable();
	/* read the live FPR if the FP unit is enabled, else the image */
	if (regs->msr & MSR_FP)
		get_fpr(rn, &u.d[0]);
	else
		u.l[0] = current->thread.TS_FPR(rn);
	if (nb == 4) {
		if (op->type & FPCONV)
			conv_dp_to_sp(&u.d[0], &u.f);
		else
			u.u = u.l[0];
	}
	if (nb == 16) {
		/* stfdp: the odd register of the pair supplies bytes 8-15 */
		rn |= 1;
		if (regs->msr & MSR_FP)
			get_fpr(rn, &u.d[1]);
		else
			u.l[1] = current->thread.TS_FPR(rn);
	}
	preempt_enable();
	if (unlikely(cross_endian)) {
		/* reverse each 8-byte half separately, not the full 16 */
		do_byte_reverse(u.b, min(nb, 8));
		if (nb == 16)
			do_byte_reverse(&u.b[8], 8);
	}
	return copy_mem_out(u.b, ea, nb, regs);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Paul Mackerras | 289 | 100.00% | 7 | 100.00% |
 * Total | 289 | 100.00% | 7 | 100.00% |
 */
NOKPROBE_SYMBOL(do_fp_store);
#endif
#ifdef CONFIG_ALTIVEC
/* For Altivec/VMX, no need to worry about alignment */
static nokprobe_inline int do_vec_load(int rn, unsigned long ea,
int size, struct pt_regs *regs,
bool cross_endian)
{
int err;
union {
__vector128 v;
u8 b[sizeof(__vector128)];
} u = {};
if (!address_ok(regs, ea & ~0xfUL, 16))
return -EFAULT;
/* align to multiple of size */
ea &= ~(size - 1);
err = copy_mem_in(&u.b[ea & 0xf], ea, size, regs);
if (err)
return err;
if (unlikely(cross_endian))
do_byte_reverse(&u.b[ea & 0xf], size);
preempt_disable();
if (regs->msr & MSR_VEC)
put_vr(rn, &u.v);
else
current->thread.vr_state.vr[rn] = u.v;
preempt_enable