Release 4.14 arch/mips/kernel/process.c
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
* Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
play_dead();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 5 | 50.00% | 1 | 50.00% |
Thomas Gleixner | 5 | 50.00% | 1 | 50.00% |
Total | 10 | 100.00% | 2 | 100.00% |
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
unsigned long status;
/* New thread loses kernel privileges. */
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
status |= KU_USER;
regs->cp0_status = status;
lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
clear_used_math();
atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ralf Bächle | 56 | 60.22% | 2 | 22.22% |
Paul Burton | 22 | 23.66% | 3 | 33.33% |
Linus Torvalds (pre-git) | 7 | 7.53% | 2 | 22.22% |
James Hogan | 6 | 6.45% | 1 | 11.11% |
Chris Dearman | 2 | 2.15% | 1 | 11.11% |
Total | 93 | 100.00% | 9 | 100.00% |
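Note: start_thread() is the exec-path hand-off. The status tweak above drops kernel privileges, the FPU/MSA/DSP state is reset, and the saved EPC and $29 are pointed at the new program. A minimal sketch of the call site, assuming the generic load_elf_binary() flow in fs/binfmt_elf.c (hedged, not a verbatim excerpt):
/*
 * Illustrative sketch: how an exec loader hands control to the freshly
 * built user context. elf_entry and bprm->p come from the generic
 * binfmt_elf code; treat this as a sketch, not a quoted excerpt.
 */
struct pt_regs *regs = current_pt_regs();

start_thread(regs, elf_entry, bprm->p);	/* EPC = entry point, $29 = user sp */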
void exit_thread(struct task_struct *tsk)
{
/*
* User threads may have allocated a delay slot emulation frame.
* If so, clean up that allocation.
*/
if (!(current->flags & PF_KTHREAD))
dsemul_thread_cleanup(tsk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Burton | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/*
* Save any process state which is live in hardware registers to the
* parent context prior to duplication. This prevents the new child
* state becoming stale if the parent is preempted before copy_thread()
* gets a chance to save the parent's live hardware registers to the
* child context.
*/
preempt_disable();
if (is_msa_enabled())
save_msa(current);
else if (is_fpu_owner())
_save_fp(current);
save_dsp(current);
preempt_enable();
*dst = *src;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Hogan | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
/*
* Copy architecture-specific thread state
*/
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
ti->addr_limit = KERNEL_DS;
p->thread.reg16 = usp; /* fn */
p->thread.reg17 = kthread_arg;
p->thread.reg29 = childksp;
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
((status & (ST0_KUC | ST0_IEC)) << 2);
#else
status |= ST0_EXL;
#endif
childregs->cp0_status = status;
return 0;
}
/* user thread */
*childregs = *regs;
childregs->regs[7] = 0; /* Clear error flag */
childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
ti->addr_limit = USER_DS;
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
/*
* New tasks lose permission to use the fpu. This accelerates context
* switching for most programs since they don't use the fpu.
*/
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDMSA);
clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
#ifdef CONFIG_MIPS_MT_FPAFF
clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
if (clone_flags & CLONE_SETTLS)
ti->tp_value = tls;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 167 | 43.60% | 4 | 17.39% |
Linus Torvalds (pre-git) | 128 | 33.42% | 8 | 34.78% |
Ralf Bächle | 41 | 10.70% | 5 | 21.74% |
Paul Burton | 26 | 6.79% | 2 | 8.70% |
David Daney | 10 | 2.61% | 1 | 4.35% |
James Cowgill | 6 | 1.57% | 1 | 4.35% |
Alex Dowad | 4 | 1.04% | 1 | 4.35% |
Steven Cole | 1 | 0.26% | 1 | 4.35% |
Total | 383 | 100.00% | 23 | 100.00% |
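Note: in the PF_KTHREAD branch above, reg16 and reg17 carry the thread function and its argument in the callee-saved s0/s1 slots, and reg31 makes the first context switch "return" into ret_from_kernel_thread. That stub is assembler in arch/mips/kernel/entry.S; what follows is a conceptual C sketch of its control flow, not the implementation:
/*
 * Conceptual C equivalent of ret_from_kernel_thread; the real code is
 * assembler, so this only illustrates the effect of the register
 * convention set up in copy_thread_tls() above.
 */
int (*fn)(void *) = (int (*)(void *))p->thread.reg16;	/* s0 */
void *arg = (void *)p->thread.reg17;			/* s1 */

schedule_tail(prev);	/* finish switching away from the parent */
fn(arg);		/* run the kernel thread body */
/* falls through to the syscall-exit path if fn() ever returns */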
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
struct mips_frame_info {
void *func;
unsigned long func_size;
int frame_size;
int pc_offset;
};
#define J_TARGET(pc,target) \
(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
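Note: J_TARGET() rebuilds an absolute address from a j instruction's 26-bit target field; the shifted target supplies the low 28 bits and the jump's own PC supplies the top 4. A worked example with illustrative values:
/*
 * Worked example (illustrative values): a j instruction at
 * pc = 0x801230a0 whose target field is 0x048d2c jumps to
 *	(0x801230a0 & 0xf0000000) | (0x048d2c << 2) = 0x801234b0
 */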
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* swsp ra,offset
* swm16 reglist,offset(sp)
* swm32 reglist,offset(sp)
* sw32 ra,offset(sp)
* jraddiusp - NOT SUPPORTED
*
* microMIPS is way more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
switch (ip->mm16_r5_format.opcode) {
case mm_swsp16_op:
if (ip->mm16_r5_format.rt != 31)
return 0;
*poff = ip->mm16_r5_format.imm;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
case mm_pool16c_op:
switch (ip->mm16_m_format.func) {
case mm_swm16_op:
*poff = ip->mm16_m_format.imm;
*poff += 1 + ip->mm16_m_format.rlist;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
default:
return 0;
}
default:
return 0;
}
}
switch (ip->i_format.opcode) {
case mm_sw32_op:
if (ip->i_format.rs != 29)
return 0;
if (ip->i_format.rt != 31)
return 0;
*poff = ip->i_format.simmediate / sizeof(ulong);
return 1;
case mm_pool32b_op:
switch (ip->mm_m_format.func) {
case mm_swm32_func:
if (ip->mm_m_format.rd < 0x10)
return 0;
if (ip->mm_m_format.base != 29)
return 0;
*poff = ip->mm_m_format.simmediate;
*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
*poff /= sizeof(ulong);
return 1;
default:
return 0;
}
default:
return 0;
}
#else
/* sw / sd $ra, offset($sp) */
if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
ip->i_format.rs == 29 && ip->i_format.rt == 31) {
*poff = ip->i_format.simmediate / sizeof(ulong);
return 1;
}
return 0;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Burton | 252 | 70.99% | 2 | 25.00% |
Leonid Yegoshin | 52 | 14.65% | 1 | 12.50% |
Franck Bui-Huu | 36 | 10.14% | 1 | 12.50% |
Ralf Bächle | 10 | 2.82% | 1 | 12.50% |
Matt Redfearn | 4 | 1.13% | 2 | 25.00% |
Linus Torvalds (pre-git) | 1 | 0.28% | 1 | 12.50% |
Total | 355 | 100.00% | 8 | 100.00% |
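Note: in the classic (non-microMIPS) path the helper matches exactly one instruction shape. A worked decode, following the arithmetic in the code above:
/*
 * Worked example: for the common prologue store
 *	sw	$ra, 28($sp)
 * the opcode is sw_op, rs == 29 ($sp) and rt == 31 ($ra), so
 * *poff = 28 / sizeof(ulong) = 7 on a 32-bit kernel: the saved return
 * address sits in the eighth word of the frame.
 */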
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* jr16,jrc,jalr16,jalrs16
* jal
* jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
* jraddiusp - NOT SUPPORTED
*
* microMIPS is kind of more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
(ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
return 1;
return 0;
}
if (ip->j_format.opcode == mm_j32_op)
return 1;
if (ip->j_format.opcode == mm_jal32_op)
return 1;
if (ip->r_format.opcode != mm_pool32a_op ||
ip->r_format.func != mm_pool32axf_op)
return 0;
return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
if (ip->j_format.opcode == j_op)
return 1;
if (ip->j_format.opcode == jal_op)
return 1;
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Leonid Yegoshin | 76 | 41.53% | 1 | 11.11% |
Paul Burton | 36 | 19.67% | 3 | 33.33% |
Ralf Bächle | 29 | 15.85% | 1 | 11.11% |
Franck Bui-Huu | 22 | 12.02% | 1 | 11.11% |
Tony Wu | 14 | 7.65% | 1 | 11.11% |
Linus Torvalds (pre-git) | 3 | 1.64% | 1 | 11.11% |
Matt Redfearn | 3 | 1.64% | 1 | 11.11% |
Total | 183 | 100.00% | 9 | 100.00% |
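Note: a matching worked decode for the non-microMIPS path of is_jump_ins():
/*
 * Worked example: the instruction
 *	jr	$ra
 * has opcode == spec_op and func == jr_op, so is_jump_ins() returns 1
 * and the prologue scanner knows the frame setup is over.
 */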
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
unsigned short tmp;
/*
* addiusp -imm
* addius5 sp,-imm
* addiu32 sp,sp,-imm
* jraddiusp - NOT SUPPORTED
*
* microMIPS is not more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
ip->mm16_r3_format.simmediate & mm_addiusp_func) {
tmp = ip->mm_b0_format.simmediate >> 1;
tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
tmp ^= 0x100;
*frame_size = -(signed short)(tmp << 2);
return 1;
}
if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
ip->mm16_r5_format.rt == 29) {
tmp = ip->mm16_r5_format.imm >> 1;
*frame_size = -(signed short)(tmp & 0xf);
return 1;
}
return 0;
}
if (ip->mm_i_format.opcode == mm_addiu32_op &&
ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
*frame_size = -ip->i_format.simmediate;
return 1;
}
#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
return 0;
if (ip->i_format.opcode == addiu_op ||
ip->i_format.opcode == daddiu_op) {
*frame_size = -ip->i_format.simmediate;
return 1;
}
#endif
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matt Redfearn | 133 | 50.00% | 3 | 30.00% |
Leonid Yegoshin | 69 | 25.94% | 1 | 10.00% |
Franck Bui-Huu | 37 | 13.91% | 1 | 10.00% |
Ralf Bächle | 13 | 4.89% | 2 | 20.00% |
Atsushi Nemoto | 7 | 2.63% | 1 | 10.00% |
Paul Burton | 6 | 2.26% | 1 | 10.00% |
Linus Torvalds (pre-git) | 1 | 0.38% | 1 | 10.00% |
Total | 266 | 100.00% | 10 | 100.00% |
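Note: and the same for is_sp_move_ins():
/*
 * Worked example: the prologue instruction
 *	addiu	$sp, $sp, -32
 * has rs == rt == 29 and simmediate == -32, so *frame_size becomes 32:
 * the function reserves a 32-byte stack frame.
 */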
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
const unsigned int max_insns = 128;
unsigned int last_insn_size = 0;
unsigned int i;
bool saw_jump = false;
info->pc_offset = -1;
info->frame_size = 0;
ip = (void *)msk_isa16_mode((ulong)info->func);
if (!ip)
goto err;
ip_end = (void *)ip + info->func_size;
for (i = 0; i < max_insns && ip < ip_end; i++) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.word = ip->halfword[0] << 16;
last_insn_size = 2;
} else if (is_mmips) {
insn.word = ip->halfword[0] << 16 | ip->halfword[1];
last_insn_size = 4;
} else {
insn.word = ip->word;
last_insn_size = 4;
}
if (!info->frame_size) {
is_sp_move_ins(&insn, &info->frame_size);
continue;
} else if (!saw_jump && is_jump_ins(ip)) {
/*
* If we see a jump instruction, we are finished
* with the frame save.
*
* Some functions can have a shortcut return at
* the beginning of the function, so don't start
* looking for jump instruction until we see the
* frame setup.
*
* The RA save instruction can get put into the
* delay slot of the jump instruction, so look
* at the next instruction, too.
*/
saw_jump = true;
continue;
}
if (info->pc_offset == -1 &&
is_ra_save_ins(&insn, &info->pc_offset))
break;
if (saw_jump)
break;
}
if (info->frame_size && info->pc_offset >= 0) /* nested */
return 0;
if (info->pc_offset < 0) /* leaf */
return 1;
/* prologue seems bogus... */
err:
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Burton | 110 | 35.60% | 4 | 23.53% |
Franck Bui-Huu | 72 | 23.30% | 3 | 17.65% |
Matt Redfearn | 38 | 12.30% | 3 | 17.65% |
Corey Minyard | 29 | 9.39% | 1 | 5.88% |
Ralf Bächle | 22 | 7.12% | 1 | 5.88% |
Atsushi Nemoto | 22 | 7.12% | 2 | 11.76% |
Leonid Yegoshin | 11 | 3.56% | 1 | 5.88% |
Linus Torvalds (pre-git) | 4 | 1.29% | 1 | 5.88% |
Andrea Gelmini | 1 | 0.32% | 1 | 5.88% |
Total | 309 | 100.00% | 17 | 100.00% |
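Note: callers read the result as 0 for a normal nested frame (frame_size and pc_offset are valid), 1 for a leaf function that never saves $ra, and -1 when the prologue cannot be analyzed. An illustrative caller, mirroring the consumers further down this file (func, func_size, sp, ra and pc are hypothetical stand-ins):
struct mips_frame_info info = {
	.func      = func,		/* hypothetical symbol address */
	.func_size = func_size,		/* hypothetical symbol size */
};
int leaf = get_frame_info(&info);

if (leaf < 0)
	return 0;			/* prologue analysis failed */
if (leaf)
	pc = ra;			/* leaf: $ra was never spilled */
else
	pc = ((unsigned long *)sp)[info.pc_offset];
sp += info.frame_size;			/* pop the analyzed frame */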
static struct mips_frame_info schedule_mfi __read_mostly;
#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
return kallsyms_lookup_name("__schedule");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tony Wu | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
#else
static unsigned long get___schedule_addr(void)
{
union mips_instruction *ip = (void *)schedule;
int max_insns = 8;
int i;
for (i = 0; i < max_insns; i++, ip++) {
if (ip->j_format.opcode == j_op)
return J_TARGET(ip, ip->j_format.target);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tony Wu | 71 | 100.00% | 1 | 100.00% |
Total | 71 | 100.00% | 1 | 100.00% |
#endif
static int __init frame_info_init(void)
{
unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
unsigned long ofs;
#endif
unsigned long addr;
addr = get___schedule_addr();
if (!addr)
addr = (unsigned long)schedule;
#ifdef CONFIG_KALLSYMS
kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
schedule_mfi.func = (void *)addr;
schedule_mfi.func_size = size;
get_frame_info(&schedule_mfi);
/*
* Without schedule() frame info, result given by
* thread_saved_pc() and get_wchan() are not reliable.
*/
if (schedule_mfi.pc_offset < 0)
printk("Can't analyze schedule() prologue at %p\n", schedule);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Atsushi Nemoto | 34 | 33.01% | 2 | 20.00% |
Tony Wu | 30 | 29.13% | 1 | 10.00% |
Franck Bui-Huu | 25 | 24.27% | 2 | 20.00% |
Ralf Bächle | 6 | 5.83% | 1 | 10.00% |
Andrew Morton | 5 | 4.85% | 1 | 10.00% |
Linus Torvalds (pre-git) | 2 | 1.94% | 2 | 20.00% |
Thiemo Seufer | 1 | 0.97% | 1 | 10.00% |
Total | 103 | 100.00% | 10 | 100.00% |
arch_initcall(frame_info_init);
/*
* Return saved PC of a blocked thread.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
/* Newborn processes are a special case */
if (t->reg31 == (unsigned long) ret_from_fork)
return t->reg31;
if (schedule_mfi.pc_offset < 0)
return 0;
return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ralf Bächle | 55 | 82.09% | 2 | 33.33% |
Linus Torvalds (pre-git) | 8 | 11.94% | 2 | 33.33% |
Franck Bui-Huu | 3 | 4.48% | 1 | 16.67% |
Linus Torvalds | 1 | 1.49% | 1 | 16.67% |
Total | 67 | 100.00% | 6 | 100.00% |
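Note: a worked reading with illustrative numbers:
/*
 * Worked example (illustrative numbers): if frame_info_init() found
 * schedule_mfi.pc_offset == 14 and the blocked thread suspended with
 * reg29 (its saved $sp) == 0x8f0bdd20, the saved PC is fetched from
 * ((unsigned long *)0x8f0bdd20)[14], i.e. the $ra slot spilled by the
 * scheduler's prologue.
 */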
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long *sp,
unsigned long pc,
unsigned long *ra)
{
unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
struct pt_regs *regs;
int leaf;
if (!stack_page)
return 0;
/*
* IRQ stacks start at IRQ_STACK_START
* task stacks at THREAD_SIZE - 32
*/
low = stack_page;
if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
high = stack_page + IRQ_STACK_START;
irq_stack_high = high;
} else {
high = stack_page + THREAD_SIZE - 32;
irq_stack_high = 0;
}
/*
* If we reached the top of the interrupt stack, start unwinding
* the interrupted task stack.
*/
if (unlikely(*sp == irq_stack_high)) {
unsigned long task_sp = *(unsigned long *)*sp;
/*
* Check that the pointer saved in the IRQ stack head points to
* something within the stack of the current task
*/
if (!object_is_on_stack((void *)task_sp))
return 0;
/*
* Follow the pointer to the task's kernel stack frame where the
* interrupted state was saved.
*/
regs = (struct pt_regs *)task_sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
return 0;
}
if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
return 0;
/*
* Return ra if an exception occurred at the first instruction
*/
if (unlikely(ofs == 0)) {
pc = *ra;
*ra = 0;
return pc;
}
info.func = (void *)(pc - ofs);
info.func_size = ofs; /* analyze from start to ofs */
leaf = get_frame_info(&info);
if (leaf < 0)
return 0;
if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)
/*
* In some extreme cases, get_frame_info() can
* wrongly consider a nested function to be a
* leaf one. In such cases, avoid always
* returning the same value.
*/
pc = pc != *ra ? *ra : 0;
else
pc = ((unsigned long *)(*sp))[info.pc_offset];
*sp += info.frame_size;
*ra = 0;
return __kernel_text_address(pc) ? pc : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Atsushi Nemoto | 212 | 58.40% | 2 | 22.22% |
Matt Redfearn | 89 | 24.52% | 1 | 11.11% |
Franck Bui-Huu | 50 | 13.77% | 3 | 33.33% |
James Hogan | 6 | 1.65% | 1 | 11.11% |
Daniel Kalmar | 5 | 1.38% | 1 | 11.11% |
Lucas De Marchi | 1 | 0.28% | 1 | 11.11% |
Total | 363 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(unwind_stack_by_address);
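Note: the exported unwinder is meant to be called in a loop, feeding each returned PC back in until it reaches the stack bottom. A minimal sketch modeled on the show_backtrace() loop in arch/mips/kernel/traps.c (hedged, not a verbatim excerpt):
/*
 * Minimal backtrace loop over unwind_stack_by_address(); the IRQ-stack
 * selection done by unwind_stack() below is omitted for brevity.
 */
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
unsigned long stack_page = (unsigned long)task_stack_page(current);

do {
	printk(" [<%lx>] %pS\n", pc, (void *)pc);
	pc = unwind_stack_by_address(stack_page, &sp, pc, &ra);
} while (pc);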
/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra)
{
unsigned long stack_page = 0;
int cpu;
for_each_possible_cpu(cpu) {
if (on_irq_stack(cpu, *sp)) {
stack_page = (unsigned long)irq_stack[cpu];
break;
}
}
if (!stack_page)
stack_page = (unsigned long)task_stack_page(task);
return unwind_stack_by_address(stack_page, sp, pc, ra);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Kalmar | 49 | 53.26% | 1 | 50.00% |
Matt Redfearn | 43 | 46.74% | 1 | 50.00% |
Total | 92 | 100.00% | 2 | 100.00% |
#endif
/*
* get_wchan - a maintenance nightmare^W^Wpain in the ass ...
*/
unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
unsigned long sp;
unsigned long ra = 0;
#endif
if (!task || task == current || task->state == TASK_RUNNING)
goto out;
if (!task_stack_page(task))
goto out;
pc = thread_saved_pc(task);
#ifdef CONFIG_KALLSYMS
sp = task->thread.reg29 + schedule_mfi.frame_size;
while (in_sched_functions(pc))
pc = unwind_stack(task, &sp, pc, &ra);
#endif
out:
return pc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Franck Bui-Huu | 104 | 92.86% | 1 | 50.00% |
Atsushi Nemoto | 8 | 7.14% | 1 | 50.00% |
Total | 112 | 100.00% | 2 | 100.00% |
/*
* Don't forget that the stack pointer must be aligned on an 8-byte
* boundary for 32-bit ABIs and a 16-byte boundary for 64-bit ABIs.
*/
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_int() & ~PAGE_MASK;
return sp & ALMASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Franck Bui-Huu | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
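Note: a worked example of the arithmetic:
/*
 * Worked example (illustrative numbers): with sp = 0x7fff8000 and a
 * random sub-page offset of 0xa37, sp drops to 0x7fff75c9; ALMASK
 * (~7 on 32-bit, ~15 on 64-bit ABIs) then rounds down to 0x7fff75c8
 * or 0x7fff75c0, restoring the required alignment.
 */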
static void arch_dump_stack(void *info)
{
struct pt_regs *regs;
regs = get_irq_regs();
if (regs)
show_regs(regs);
dump_stack();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eunbong Song 송은봉 | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
long this_cpu = get_cpu();
if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
dump_stack();
smp_call_function_many(mask, arch_dump_stack, NULL, 1);
put_cpu();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Chris Metcalf | 34 | 70.83% | 1 | 50.00% |
Eunbong Song 송은봉 | 14 | 29.17% | 1 | 50.00% |
Total | 48 | 100.00% | 2 | 100.00% |
int mips_get_process_fp_mode(struct task_struct *task)
{
int value = 0;
if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
value |= PR_FP_MODE_FR;
if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
value |= PR_FP_MODE_FRE;
return value;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Burton | 45 | 100.00% | 1 | 100.00% |
Total | 45 | 100.00% | 1 | 100.00% |
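Note: this is the kernel side of prctl(PR_GET_FP_MODE). A hedged userspace sketch that reads the same bits back, assuming a toolchain whose headers expose PR_GET_FP_MODE and the PR_FP_MODE_* flags:
#include <stdio.h>
#include <sys/prctl.h>

/* Illustrative only: query the thread's FP mode from userspace. */
int main(void)
{
	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

	printf("FR=%d FRE=%d\n",
	       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));
	return 0;
}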
static void prepare_for_fp_mode_switch(void *info)
{
struct mm_struct *mm = info;
if (current->mm == mm)
lose_fpu(1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Burton | 30 | 100.00% | 1 | 100.00% |
Total | 30 | 100.00% | 1 | 100.00% |
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
struct task_struct *t;
int max_users;
/* Check the value is valid */
if (value & ~known_bits)
return -EOPNOTSUPP;
/* Avoid inadvertently triggering emulation */
if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
!(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
return -EOPNOTSUPP;
if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
return -EOPNOTSUPP;
/* FR = 0 not supported in MIPS R6 */
if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
/* Proceed with the mode switch */
preempt_disable();
/* Save FP & vector context, then disable FPU & MSA */
if (task->signal == current->signal)
lose_fpu(1);
/* Prevent any threads from obtaining live FP context */
atomic_set(&task->mm->context.fp_mode_switching, 1);
smp_mb__after_atomic();
/*
* If there are multiple online CPUs then force any which are running
* threads in this process to lose their FPU context, which they can't
* regain until fp_mode_switching is cleared later.
*/
if (num_online_cpus() > 1) {
/* No need to send an IPI for the local CPU */
max_users = (task->mm == current->mm) ? 1 : 0;
if (atomic_read(&current->mm->mm_users) > max_users)
smp_call_function(prepare_for_fp_mode_switch,
(void *)current->mm, 1);
}
/*
* There are now no threads of the process with live FP context, so it
* is safe to proceed with the FP mode switch.
*/
for_each_thread(task, t) {
/* Update desired FP register width */
if (value & PR_FP_MODE_FR) {
clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
} else {
set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
}
/* Update desired FP single layout */
if (value & PR_FP_MODE_FRE)
set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
else
clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
}
/* Allow threads to use FP again */
atomic_set(&task->mm->context.fp_mode_switching, 0);
preempt_enable();
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Burton | 260 | 92.53% | 3 | 60.00% |
Markos Chandras | 17 | 6.05% | 1 | 20.00% |
Marcin Nowakowski | 4 | 1.42% | 1 | 20.00% |
Total | 281 | 100.00% | 5 | 100.00% |
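Note: the setter is reached via prctl(PR_SET_FP_MODE); the -EOPNOTSUPP returns above surface in userspace as a failed prctl(2). A hedged companion sketch to the one after mips_get_process_fp_mode():
#include <stdio.h>
#include <sys/prctl.h>

/* Illustrative only: request 64-bit FP registers (FR=1) without FRE. */
int main(void)
{
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) != 0)
		perror("PR_SET_FP_MODE");	/* e.g. EOPNOTSUPP if the CPU can't run this mode */
	return 0;
}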
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
unsigned int i;
for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
/* k0/k1 are copied as zero. */
if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
uregs[i] = 0;
else
uregs[i] = regs->regs[i - MIPS32_EF_R0];
}
uregs[MIPS32_EF_LO] = regs->lo;
uregs[MIPS32_EF_HI] = regs->hi;
uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcin Nowakowski | 121 | 100.00% | 1 | 100.00% |
Total | 121 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
unsigned int i;
for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
/* k0/k1 are copied as zero. */
if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
uregs[i] = 0;
else
uregs[i] = regs->regs[i - MIPS64_EF_R0];
}
uregs[MIPS64_EF_LO] = regs->lo;
uregs[MIPS64_EF_HI] = regs->hi;
uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcin Nowakowski | 121 | 100.00% | 1 | 100.00% |
Total | 121 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_64BIT */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Burton | 823 | 23.80% | 12 | 14.12% |
Franck Bui-Huu | 399 | 11.54% | 9 | 10.59% |
Matt Redfearn | 313 | 9.05% | 7 | 8.24% |
Atsushi Nemoto | 298 | 8.62% | 4 | 4.71% |
Marcin Nowakowski | 266 | 7.69% | 2 | 2.35% |
Ralf Bächle | 264 | 7.63% | 7 | 8.24% |
Linus Torvalds (pre-git) | 215 | 6.22% | 10 | 11.76% |
Leonid Yegoshin | 209 | 6.04% | 1 | 1.18% |
Al Viro | 174 | 5.03% | 4 | 4.71% |
Tony Wu | 147 | 4.25% | 2 | 2.35% |
James Hogan | 69 | 2.00% | 3 | 3.53% |
Daniel Kalmar | 61 | 1.76% | 1 | 1.18% |
Eunbong Song 송은봉 | 49 | 1.42% | 1 | 1.18% |
Chris Metcalf | 34 | 0.98% | 1 | 1.18% |
Corey Minyard | 29 | 0.84% | 1 | 1.18% |
Gregory Fong | 18 | 0.52% | 1 | 1.18% |
Markos Chandras | 17 | 0.49% | 1 | 1.18% |
Andrew Morton | 10 | 0.29% | 1 | 1.18% |
Thomas Gleixner | 10 | 0.29% | 1 | 1.18% |
David Daney | 10 | 0.29% | 1 | 1.18% |
Ingo Molnar | 9 | 0.26% | 3 | 3.53% |
Thiemo Seufer | 7 | 0.20% | 1 | 1.18% |
James Cowgill | 6 | 0.17% | 1 | 1.18% |
Linus Torvalds | 5 | 0.14% | 3 | 3.53% |
Alex Dowad | 5 | 0.14% | 1 | 1.18% |
Paul Gortmaker | 3 | 0.09% | 1 | 1.18% |
Alex Smith | 3 | 0.09% | 1 | 1.18% |
Chris Dearman | 2 | 0.06% | 1 | 1.18% |
Steven Cole | 1 | 0.03% | 1 | 1.18% |
Lucas De Marchi | 1 | 0.03% | 1 | 1.18% |
Andrea Gelmini | 1 | 0.03% | 1 | 1.18% |
Total | 3458 | 100.00% | 85 | 100.00% |