Release 4.10 arch/sh/kernel/process.c
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/stackprotector.h>
#include <asm/fpu.h>
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;
#ifdef CONFIG_CC_STACKPROTECTOR
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
*/
/*
 * Duplicate @src's task_struct into @dst for fork(): flush any lazy FPU
 * state to memory first, then deep-copy the extended-state buffer so the
 * child gets its own allocation instead of aliasing the parent's.
 *
 * Returns 0 on success, -ENOMEM if the xstate buffer cannot be allocated
 * (in which case dst->thread.xstate is left NULL).
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
	/* Store live lazy FPU contents before the structure copy below. */
	unlazy_fpu(src, task_pt_regs(src));
#endif
	*dst = *src;

	/* No extended state attached — the shallow copy above suffices. */
	if (!src->thread.xstate)
		return 0;

	dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!dst->thread.xstate)
		return -ENOMEM;

	memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Paul Mundt | 77 | 83.70% | 1 | 50.00% |
| Suresh Siddha | 15 | 16.30% | 1 | 50.00% |
| Total | 92 | 100.00% | 2 | 100.00% |
/*
 * Release @tsk's extended (FPU) state buffer back to the slab cache,
 * if one was ever allocated, and clear the stale pointer so a repeat
 * call is a harmless no-op.
 */
void free_thread_xstate(struct task_struct *tsk)
{
	void *xstate = tsk->thread.xstate;

	if (!xstate)
		return;

	tsk->thread.xstate = NULL;
	kmem_cache_free(task_xstate_cachep, xstate);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Paul Mundt | 39 | 100.00% | 1 | 100.00% |
| Total | 39 | 100.00% | 1 | 100.00% |
/*
 * Arch hook invoked when a task_struct is being released: drop the
 * per-task extended (FPU) state buffer, if any, via free_thread_xstate().
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	free_thread_xstate(tsk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Paul Mundt | 13 | 86.67% | 2 | 66.67% |
| Thomas Gleixner | 2 | 13.33% | 1 | 33.33% |
| Total | 15 | 100.00% | 3 | 100.00% |
/*
 * Set up the slab cache used for per-task extended (FPU) state.
 * When xstate_size is zero this CPU carries no extended state, so no
 * cache is created and task_xstate_cachep stays NULL.
 */
void arch_task_cache_init(void)
{
	if (xstate_size)
		task_xstate_cachep =
			kmem_cache_create("task_xstate", xstate_size,
					  __alignof__(union thread_xstate),
					  SLAB_PANIC | SLAB_NOTRACK, NULL);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Paul Mundt | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif

/*
 * Determine how much per-task extended state this CPU needs: the
 * hardware FPU save area when an FPU is present, the soft-float
 * emulation area when only the emulator is built in, or nothing.
 */
void init_thread_xstate(void)
{
	/* Default: no extended state to carry around. */
	xstate_size = 0;

	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Paul Mundt | 41 | 100.00% | 1 | 100.00% |
| Total | 41 | 100.00% | 1 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Paul Mundt | 242 | 85.21% | 3 | 42.86% |
| Filippo Arcidiacono | 21 | 7.39% | 1 | 14.29% |
| Suresh Siddha | 16 | 5.63% | 1 | 14.29% |
| Tejun Heo | 3 | 1.06% | 1 | 14.29% |
| Thomas Gleixner | 2 | 0.70% | 1 | 14.29% |
| Total | 284 | 100.00% | 7 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.