/* arch/x86/kernel/process.c — as of Linux release 4.11 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
* so they are allowed to end up in the .data..cacheline_aligned
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
.x86_tss = {
/* .sp0 is the stack pointer the CPU loads on a ring transition. */
.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
/* 32-bit only: ss0/ss1 are the stack segments for ring 0/1 entries. */
.ss0 = __KERNEL_DS,
.ss1 = __KERNEL_CS,
/* Point past the TSS limit so IO port access faults by default. */
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
},
#ifdef CONFIG_X86_32
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
#ifdef CONFIG_X86_32
/* Canary guarding the SYSENTER stack against overflow. */
.SYSENTER_stack_canary = STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
EXPORT_PER_CPU_SYMBOL(cpu_tss);
/*
 * Per-CPU flag: true when this CPU's cached TSS limit is stale.
 * NOTE(review): presumably consumed by refresh_tss_limit() (see
 * __switch_to_xtra below) — confirm against asm/desc.h.
 */
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/* Copy the whole (arch-sized) task_struct, FPU state area included. */
memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
/* vm86 state is per-task and must not be shared with the child. */
dst->thread.vm86 = NULL;
#endif
/* Duplicate the FPU context; returns 0 or a negative errno. */
return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Suresh B. Siddha | 30 | 54.55% | 1 | 20.00% |
Andrew Lutomirski | 13 | 23.64% | 1 | 20.00% |
Dave Hansen | 6 | 10.91% | 1 | 20.00% |
Avi Kivity | 5 | 9.09% | 1 | 20.00% |
Ingo Molnar | 1 | 1.82% | 1 | 20.00% |
Total | 55 | 100.00% | 5 | 100.00% |
/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
unsigned long *bp = t->io_bitmap_ptr;
struct fpu *fpu = &t->fpu;
if (bp) {
/*
 * get_cpu() disables preemption so the per-CPU TSS we scrub
 * stays ours until put_cpu().
 */
struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
/*
 * Careful, clear this in the TSS too:
 */
memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
t->io_bitmap_max = 0;
put_cpu();
/* The thread's private bitmap copy can go now. */
kfree(bp);
}
/* Release vm86 state (no-op when CONFIG_VM86 is off). */
free_vm86(t);
/* Drop the FPU context so it is no longer considered live. */
fpu__drop(fpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 68 | 62.96% | 1 | 12.50% |
Thomas Gleixner | 15 | 13.89% | 1 | 12.50% |
Ingo Molnar | 12 | 11.11% | 2 | 25.00% |
Brian Gerst | 5 | 4.63% | 1 | 12.50% |
Jiri Slaby | 4 | 3.70% | 1 | 12.50% |
Suresh B. Siddha | 3 | 2.78% | 1 | 12.50% |
Andrew Lutomirski | 1 | 0.93% | 1 | 12.50% |
Total | 108 | 100.00% | 8 | 100.00% |
/*
 * Reset per-thread CPU state for the current task.
 * NOTE(review): presumably invoked on exec when a new program image
 * replaces the old one — confirm against the caller in fs/exec.c.
 */
void flush_thread(void)
{
struct task_struct *tsk = current;
/* Remove any ptrace hardware breakpoints left behind. */
flush_ptrace_hw_breakpoint(tsk);
/* Invalidate all per-thread TLS descriptors. */
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
/* Start the new image with pristine FPU state. */
fpu__clear(&tsk->thread.fpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 35 | 71.43% | 1 | 14.29% |
Ingo Molnar | 6 | 12.24% | 2 | 28.57% |
Bobby Powers | 4 | 8.16% | 1 | 14.29% |
K.Prasad | 2 | 4.08% | 1 | 14.29% |
Frédéric Weisbecker | 1 | 2.04% | 1 | 14.29% |
Oleg Nesterov | 1 | 2.04% | 1 | 14.29% |
Total | 49 | 100.00% | 7 | 100.00% |
/* Set CR4.TSD so userspace RDTSC/RDTSCP faults on this CPU. */
static void hard_disable_TSC(void)
{
cr4_set_bits(X86_CR4_TSD);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 12 | 92.31% | 1 | 50.00% |
Andrew Lutomirski | 1 | 7.69% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
/*
 * Mark the current task as not allowed to read the TSC and apply the
 * CR4 change on the CPU it is running on.
 */
void disable_TSC(void)
{
/* No migration between flag update and the CR4 write. */
preempt_disable();
if (!test_and_set_thread_flag(TIF_NOTSC))
/*
 * Must flip the CPU state synchronously with
 * TIF_NOTSC in the current running context.
 */
hard_disable_TSC();
preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
/* Clear CR4.TSD so userspace RDTSC/RDTSCP works again on this CPU. */
static void hard_enable_TSC(void)
{
cr4_clear_bits(X86_CR4_TSD);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 12 | 92.31% | 1 | 50.00% |
Andrew Lutomirski | 1 | 7.69% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
/*
 * Re-allow TSC reads for the current task, mirroring disable_TSC().
 */
static void enable_TSC(void)
{
/* No migration between flag update and the CR4 write. */
preempt_disable();
if (test_and_clear_thread_flag(TIF_NOTSC))
/*
 * Must flip the CPU state synchronously with
 * TIF_NOTSC in the current running context.
 */
hard_enable_TSC();
preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
/*
 * PR_GET_TSC: report the current task's TSC access mode.
 * @adr: userspace address receiving PR_TSC_SIGSEGV or PR_TSC_ENABLE.
 * Returns 0 on success or -EFAULT from put_user().
 */
int get_tsc_mode(unsigned long adr)
{
	unsigned int mode = test_thread_flag(TIF_NOTSC) ? PR_TSC_SIGSEGV
							: PR_TSC_ENABLE;

	return put_user(mode, (unsigned int __user *)adr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
/*
 * PR_SET_TSC: set the current task's TSC access mode.
 * @val: PR_TSC_SIGSEGV (trap userspace RDTSC) or PR_TSC_ENABLE.
 * Returns 0 on success, -EINVAL for any other mode.
 */
int set_tsc_mode(unsigned int val)
{
	switch (val) {
	case PR_TSC_SIGSEGV:
		disable_TSC();
		return 0;
	case PR_TSC_ENABLE:
		enable_TSC();
		return 0;
	default:
		return -EINVAL;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
/*
 * Handle the "extra" (slow-path) per-task CPU state on context switch:
 * BTF debug-control bit, CR4.TSD (TSC disable), the IO permission
 * bitmap in the per-CPU TSS, and user-return notifiers.
 */
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
{
struct thread_struct *prev, *next;
prev = &prev_p->thread;
next = &next_p->thread;
/* Only touch DEBUGCTL MSR when the BTF setting actually changes. */
if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
unsigned long debugctl = get_debugctlmsr();
debugctl &= ~DEBUGCTLMSR_BTF;
if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
debugctl |= DEBUGCTLMSR_BTF;
update_debugctlmsr(debugctl);
}
if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
test_tsk_thread_flag(next_p, TIF_NOTSC)) {
/* prev and next are different */
if (test_tsk_thread_flag(next_p, TIF_NOTSC))
hard_disable_TSC();
else
hard_enable_TSC();
}
if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
/*
 * Copy the relevant range of the IO bitmap.
 * Normally this is 128 bytes or less:
 */
/* max() so stale bits from prev's (possibly larger) bitmap are overwritten. */
memcpy(tss->io_bitmap, next->io_bitmap_ptr,
max(prev->io_bitmap_max, next->io_bitmap_max));
/*
 * Make sure that the TSS limit is correct for the CPU
 * to notice the IO bitmap.
 */
refresh_tss_limit();
} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
/*
 * Clear any possible leftover bits:
 */
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
}
propagate_user_return_notify(prev_p, next_p);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 157 | 80.10% | 1 | 20.00% |
Peter Zijlstra | 28 | 14.29% | 1 | 20.00% |
Avi Kivity | 7 | 3.57% | 1 | 20.00% |
Andrew Lutomirski | 4 | 2.04% | 2 | 40.00% |
Total | 196 | 100.00% | 5 | 100.00% |
/*
* Idle related variables and functions
*/
/* Idle-routine override chosen by the "idle=" boot parameter, if any. */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
/* The active idle routine; assigned in select_idle_routine()/idle_setup(). */
static void (*x86_idle)(void);
#ifndef CONFIG_SMP
/* UP stub: without SMP a CPU can never be offlined, so getting here is a bug. */
static inline void play_dead(void)
{
BUG();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Richard Weinberger | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
#endif
/* Arch hook run by the generic idle loop before entering idle. */
void arch_cpu_idle_enter(void)
{
/* Verify TSC_ADJUST has not been tampered with while busy. */
tsc_verify_tsc_adjust(false);
/* Keep the NMI watchdog from firing over a long idle period. */
local_touch_nmi();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 11 | 73.33% | 3 | 75.00% |
Richard Weinberger | 4 | 26.67% | 1 | 25.00% |
Total | 15 | 100.00% | 4 | 100.00% |
/* Arch hook for a CPU that has been offlined: park it for good. */
void arch_cpu_idle_dead(void)
{
play_dead();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 8 | 80.00% | 1 | 50.00% |
Richard Weinberger | 2 | 20.00% | 1 | 50.00% |
Total | 10 | 100.00% | 2 | 100.00% |
/*
* Called from the generic idle code.
*/
/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
/* Dispatch to whatever routine select_idle_routine() picked. */
x86_idle();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 6 | 60.00% | 1 | 33.33% |
Richard Weinberger | 3 | 30.00% | 1 | 33.33% |
Len Brown | 1 | 10.00% | 1 | 33.33% |
Total | 10 | 100.00% | 3 | 100.00% |
/*
* We use this if we don't have any better idle routine..
*/
void __cpuidle default_idle(void)
{
trace_cpu_idle_rcuidle(1, smp_processor_id());
/* STI;HLT — wake on the next interrupt. */
safe_halt();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Renninger | 14 | 51.85% | 2 | 40.00% |
Thomas Gleixner | 10 | 37.04% | 1 | 20.00% |
Steven Rostedt | 2 | 7.41% | 1 | 20.00% |
Chris Metcalf | 1 | 3.70% | 1 | 20.00% |
Total | 27 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
bool ret = !!x86_idle;
x86_idle = default_idle;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 18 | 85.71% | 1 | 33.33% |
Len Brown | 3 | 14.29% | 2 | 66.67% |
Total | 21 | 100.00% | 3 | 100.00% |
#endif
/*
 * Take this CPU out of service permanently (IPI callback; @dummy unused).
 */
void stop_this_cpu(void *dummy)
{
local_irq_disable();
/*
 * Remove this CPU:
 */
set_cpu_online(smp_processor_id(), false);
/* No more interrupt delivery to this CPU. */
disable_local_APIC();
/* Quiesce machine-check reporting before parking. */
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
/* Spin in HLT forever; interrupts are off, so nothing wakes us. */
for (;;)
halt();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ivan Vecera | 30 | 73.17% | 1 | 33.33% |
Ashok Raj | 9 | 21.95% | 1 | 33.33% |
Rusty Russell | 2 | 4.88% | 1 | 33.33% |
Total | 41 | 100.00% | 3 | 100.00% |
/*
* AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
* states (local apic timer and TSC stop).
*/
static void amd_e400_idle(void)
{
/*
 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
 * gets set after static_cpu_has() places have been converted via
 * alternatives.
 */
if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
default_idle();
return;
}
/* LAPIC timer stops in C1E: hand timekeeping to the broadcast device. */
tick_broadcast_enter();
default_idle();
/*
 * The switch back from broadcast mode needs to be called with
 * interrupts disabled.
 */
local_irq_disable();
tick_broadcast_exit();
local_irq_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 32 | 82.05% | 3 | 60.00% |
Borislav Petkov | 6 | 15.38% | 1 | 20.00% |
Len Brown | 1 | 2.56% | 1 | 20.00% |
Total | 39 | 100.00% | 5 | 100.00% |
/*
* Intel Core2 and older machines prefer MWAIT over HALT for C1.
* We can't rely on cpuidle installing MWAIT, because it will not load
* on systems that support only C1 -- so the boot default must be MWAIT.
*
* Some AMD machines are the opposite, they depend on using HALT.
*
* So for default C1, which is used during boot until cpuidle loads,
* use MWAIT-C1 on Intel HW that has it, else use HALT.
*/
/*
 * Decide whether MWAIT-C1 should be the boot-default C1 state.
 * Returns 1 only on Intel CPUs that advertise MWAIT and do not carry
 * the X86_BUG_MONITOR erratum; 0 otherwise (fall back to HALT).
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	return c->x86_vendor == X86_VENDOR_INTEL &&
	       cpu_has(c, X86_FEATURE_MWAIT) &&
	       !static_cpu_has_bug(X86_BUG_MONITOR);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Len Brown | 39 | 88.64% | 1 | 50.00% |
Peter Zijlstra | 5 | 11.36% | 1 | 50.00% |
Total | 44 | 100.00% | 2 | 100.00% |
/*
* MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
* with interrupts enabled and no flags, which is backwards compatible with the
* original MWAIT implementation.
*/
/*
 * MWAIT-based C1 idle. Arms MONITOR on the thread-info flags word so a
 * remote need_resched() write (TIF_NEED_RESCHED) wakes the CPU without
 * an IPI.
 *
 * Fix: the source text contained "¤t_thread_info()" — an HTML-entity
 * mangling ("&curren;") of "&current_thread_info()" — which does not
 * compile; the address-of expressions are restored below.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			/* Erratum workaround: flush the monitored line around MONITOR. */
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			/* MWAIT with interrupts enabled, no hints (plain C1). */
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		/* A wakeup raced with us; just re-enable interrupts. */
		local_irq_enable();
	}
	__current_clr_polling();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Len Brown | 73 | 68.87% | 1 | 20.00% |
JiSheng Zhang | 16 | 15.09% | 1 | 20.00% |
Mike Galbraith | 14 | 13.21% | 1 | 20.00% |
Michael S. Tsirkin | 2 | 1.89% | 1 | 20.00% |
Chris Metcalf | 1 | 0.94% | 1 | 20.00% |
Total | 106 | 100.00% | 5 | 100.00% |
/*
 * Pick the boot-default idle routine for this machine, unless one was
 * already installed or forced via the "idle=" boot parameter.
 */
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
/* Polling idle burns the sibling thread's execution resources. */
if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
if (x86_idle || boot_option_idle_override == IDLE_POLL)
return;
if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
pr_info("using AMD E400 aware idle routine\n");
x86_idle = amd_e400_idle;
} else if (prefer_mwait_c1_over_halt(c)) {
pr_info("using mwait in idle threads\n");
x86_idle = mwait_idle;
} else
x86_idle = default_idle;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 27 | 32.93% | 1 | 9.09% |
Len Brown | 24 | 29.27% | 3 | 27.27% |
Thomas Gleixner | 19 | 23.17% | 4 | 36.36% |
Rusty Russell | 8 | 9.76% | 1 | 9.09% |
Joe Perches | 3 | 3.66% | 1 | 9.09% |
Ingo Molnar | 1 | 1.22% | 1 | 9.09% |
Total | 82 | 100.00% | 11 | 100.00% |
/*
 * On CPUs with the AMD C1E erratum, force the tick into broadcast mode
 * since the local APIC timer stops in C1E.
 */
void amd_e400_c1e_apic_setup(void)
{
if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
local_irq_disable();
tick_broadcast_force();
local_irq_enable();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Borislav Petkov | 20 | 60.61% | 1 | 25.00% |
Rusty Russell | 12 | 36.36% | 2 | 50.00% |
Peter Zijlstra | 1 | 3.03% | 1 | 25.00% |
Total | 33 | 100.00% | 4 | 100.00% |
/*
 * Post-ACPI boot hook: detect whether the AMD E400 erratum is actually
 * active on this system and flag the C1E bug accordingly.
 */
void __init arch_post_acpi_subsys_init(void)
{
u32 lo, hi;
if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
return;
/*
 * AMD E400 detection needs to happen after ACPI has been enabled. If
 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
 * MSR_K8_INT_PENDING_MSG.
 */
rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
return;
boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
/* In C1E the TSC may halt too, unless the CPU guarantees otherwise. */
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halt in AMD C1E");
pr_info("System has AMD C1E enabled\n");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 65 | 100.00% | 1 | 100.00% |
Total | 65 | 100.00% | 1 | 100.00% |
/*
 * Parse the "idle=" early boot parameter: poll, halt or nomwait.
 * Returns 0 on success, -EINVAL for a missing argument, -1 for an
 * unrecognized value.
 */
static int __init idle_setup(char *str)
{
if (!str)
return -EINVAL;
if (!strcmp(str, "poll")) {
pr_info("using polling idle threads\n");
boot_option_idle_override = IDLE_POLL;
cpu_idle_poll_ctrl(true);
} else if (!strcmp(str, "halt")) {
/*
 * When the boot option of idle=halt is added, halt is
 * forced to be used for CPU idle. In such case CPU C2/C3
 * won't be used again.
 * To continue to load the CPU idle driver, don't touch
 * the boot_option_idle_override.
 */
x86_idle = default_idle;
boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) {
/*
 * If the boot option of "idle=nomwait" is added,
 * it means that mwait will be disabled for CPU C2/C3
 * states. In such case it won't touch the variable
 * of boot_option_idle_override.
 */
boot_option_idle_override = IDLE_NOMWAIT;
} else
return -1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 42 | 44.68% | 1 | 12.50% |
Yakui Zhao | 27 | 28.72% | 2 | 25.00% |
Cyrill V. Gorcunov | 9 | 9.57% | 1 | 12.50% |
Thomas Renninger | 8 | 8.51% | 1 | 12.50% |
Thomas Gleixner | 5 | 5.32% | 1 | 12.50% |
Joe Perches | 2 | 2.13% | 1 | 12.50% |
Len Brown | 1 | 1.06% | 1 | 12.50% |
Total | 94 | 100.00% | 8 | 100.00% |
early_param("idle", idle_setup);
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_int() % 8192;
return sp & ~0xf;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
/* Pick a randomized heap start: a random page within 32MB above brk. */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
return randomize_page(mm->brk, 0x02000000);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 19 | 90.48% | 1 | 50.00% |
Jason Cooper | 2 | 9.52% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
/*
* Return saved PC of a blocked thread.
* What is this good for? it will be always the scheduler or ret_from_fork.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
/* thread.sp points at the inactive_task_frame saved by __switch_to. */
struct inactive_task_frame *frame =
(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
/* NOCHECK: the task may be running, so KASAN must not trip here. */
return READ_ONCE_NOCHECK(frame->ret_addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Brian Gerst | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
/*
* Called from fs/proc with a reference on @p to find the function
* which called into schedule(). This needs to be done carefully
* because the task might wake up and we might look at a stack
* changing under us.
*/
unsigned long get_wchan(struct task_struct *p)
{
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
/* Only meaningful for a blocked task other than ourselves. */
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/* Pin the stack so it cannot be freed while we walk it. */
if (!try_get_task_stack(p))
return 0;
start = (unsigned long)task_stack_page(p);
if (!start)
goto out;
/*
 * Layout of the stack page:
 *
 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
 * PADDING
 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
 * stack
 * ----------- bottom = start
 *
 * The tasks stack pointer points at the location where the
 * framepointer is stored. The data on the stack is:
 * ... IP FP ... IP FP
 *
 * We need to read FP and IP, so we need to adjust the upper
 * bound by another unsigned long.
 */
top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
top -= 2 * sizeof(unsigned long);
bottom = start;
sp = READ_ONCE(p->thread.sp);
/* Saved SP out of range: the task may have just woken up. */
if (sp < bottom || sp > top)
goto out;
fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
do {
/* Bail out on any frame pointer that leaves the stack page. */
if (fp < bottom || fp > top)
goto out;
ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
/* First return address outside the scheduler is the wchan. */
if (!in_sched_functions(ip)) {
ret = ip;
goto out;
}
fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
/* Bounded walk; abort if the task starts running under us. */
} while (count++ < 16 && p->state != TASK_RUNNING);
out:
put_task_stack(p);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 186 | 78.81% | 1 | 20.00% |
Andrew Lutomirski | 41 | 17.37% | 2 | 40.00% |
Brian Gerst | 6 | 2.54% | 1 | 20.00% |
Andrey Ryabinin | 3 | 1.27% | 1 | 20.00% |
Total | 236 | 100.00% | 5 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 420 | 23.93% | 1 | 1.06% |
Thomas Gleixner | 393 | 22.39% | 13 | 13.83% |
Len Brown | 153 | 8.72% | 4 | 4.26% |
Andrew Lutomirski | 143 | 8.15% | 11 | 11.70% |
Peter Zijlstra | 115 | 6.55% | 3 | 3.19% |
Américo Wang | 58 | 3.30% | 1 | 1.06% |
Brian Gerst | 54 | 3.08% | 3 | 3.19% |
Suresh B. Siddha | 51 | 2.91% | 3 | 3.19% |
Richard Weinberger | 38 | 2.17% | 1 | 1.06% |
Ingo Molnar | 33 | 1.88% | 11 | 11.70% |
Ivan Vecera | 31 | 1.77% | 1 | 1.06% |
Borislav Petkov | 30 | 1.71% | 2 | 2.13% |
Yakui Zhao | 29 | 1.65% | 2 | 2.13% |
Thomas Renninger | 23 | 1.31% | 3 | 3.19% |
Rusty Russell | 22 | 1.25% | 3 | 3.19% |
Konrad Rzeszutek Wilk | 18 | 1.03% | 1 | 1.06% |
JiSheng Zhang | 16 | 0.91% | 1 | 1.06% |
Avi Kivity | 15 | 0.85% | 2 | 2.13% |
Mike Galbraith | 14 | 0.80% | 1 | 1.06% |
Joe Perches | 12 | 0.68% | 1 | 1.06% |
Ashok Raj | 12 | 0.68% | 1 | 1.06% |
Cyrill V. Gorcunov | 9 | 0.51% | 1 | 1.06% |
Andy Isaacson | 6 | 0.34% | 1 | 1.06% |
Andi Kleen | 6 | 0.34% | 1 | 1.06% |
Dave Hansen | 6 | 0.34% | 1 | 1.06% |
Linus Torvalds | 5 | 0.28% | 3 | 3.19% |
Jaswinder Singh Rajput | 5 | 0.28% | 2 | 2.13% |
Paul Gortmaker | 4 | 0.23% | 1 | 1.06% |
Bobby Powers | 4 | 0.23% | 1 | 1.06% |
Jiri Slaby | 4 | 0.23% | 1 | 1.06% |
Frédéric Weisbecker | 4 | 0.23% | 1 | 1.06% |
Arjan van de Ven | 3 | 0.17% | 2 | 2.13% |
K.Prasad | 3 | 0.17% | 1 | 1.06% |
Andrey Ryabinin | 3 | 0.17% | 1 | 1.06% |
Chris Metcalf | 2 | 0.11% | 1 | 1.06% |
Steven Rostedt | 2 | 0.11% | 1 | 1.06% |
Michael S. Tsirkin | 2 | 0.11% | 1 | 1.06% |
Jason Cooper | 2 | 0.11% | 1 | 1.06% |
Andy Whitcroft | 2 | 0.11% | 1 | 1.06% |
Oleg Nesterov | 1 | 0.06% | 1 | 1.06% |
Marc Dionne | 1 | 0.06% | 1 | 1.06% |
Huang Rui | 1 | 0.06% | 1 | 1.06% |
Total | 1755 | 100.00% | 94 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.