Release 4.15: kernel/up.c
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 42 | 76.36% | 1 | 33.33% |
David Daney | 12 | 21.82% | 1 | 33.33% |
Ingo Molnar | 1 | 1.82% | 1 | 33.33% |
Total | 55 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(smp_call_function_single);
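On a uniprocessor kernel the only valid target is CPU 0, so the callback simply runs locally with interrupts disabled and the wait argument is effectively moot. A minimal usage sketch, assuming a hypothetical report_cpu() callback and run_on_cpu0() caller that are not part of kernel/up.c:

#include <linux/smp.h>
#include <linux/printk.h>

/* Hypothetical callback: executed with interrupts disabled on the UP build. */
static void report_cpu(void *info)
{
	int *counter = info;

	(*counter)++;
	pr_info("callback ran on CPU %d\n", smp_processor_id());
}

/* Hypothetical caller: always synchronous on UP, regardless of wait. */
static int run_on_cpu0(void)
{
	int counter = 0;

	return smp_call_function_single(0, report_cpu, &counter, 1);
}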
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);

	return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Christoph Hellwig | 32 | 84.21% | 1 | 25.00% |
Jan Kara | 4 | 10.53% | 1 | 25.00% |
Huang Ying | 1 | 2.63% | 1 | 25.00% |
Frédéric Weisbecker | 1 | 2.63% | 1 | 25.00% |
Total | 38 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(smp_call_function_single_async);
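The async variant expects the caller to have filled in csd->func and csd->info; on UP there is no remote queueing, so the callback has already run by the time the call returns. A sketch under that assumption, using a hypothetical mark_done() callback and done flag:

#include <linux/smp.h>

static int done;			/* hypothetical completion flag */

/* Hypothetical callback stored in the caller-owned csd. */
static void mark_done(void *info)
{
	*(int *)info = 1;
}

/* Hypothetical caller: on UP the callback runs before this returns. */
static int kick_async(void)
{
	static call_single_data_t csd = {
		.func	= mark_done,
		.info	= &done,
	};

	return smp_call_function_single_async(0, &csd);
}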
int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David Daney | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(on_each_cpu);
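On UP, on_each_cpu() degenerates to a single local call with interrupts disabled, which matches what the SMP version would do for the one online CPU. A sketch assuming a hypothetical bump_stat() callback and hits counter:

#include <linux/atomic.h>
#include <linux/smp.h>

static atomic_t hits = ATOMIC_INIT(0);	/* hypothetical counter */

/* Hypothetical callback meant to run once on every online CPU. */
static void bump_stat(void *info)
{
	atomic_inc((atomic_t *)info);
}

static void touch_all_cpus(void)
{
	/* wait=1: return only after every CPU (here, just CPU 0) has run it. */
	on_each_cpu(bump_stat, &hits, 1);
}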
/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David Daney | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(on_each_cpu_mask);
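As the comment above notes, the mask still has to be tested: generic callers may legitimately hand over a mask that does not include the local CPU, in which case nothing runs. A sketch assuming a hypothetical flush_local() callback and flush_selected() wrapper:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Hypothetical per-CPU callback; body elided. */
static void flush_local(void *info)
{
}

/* Hypothetical wrapper: on UP, flush_local() runs only if CPU 0 is in targets. */
static void flush_selected(const struct cpumask *targets)
{
	on_each_cpu_mask(targets, flush_local, NULL, true);
}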
/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David Daney | 68 | 100.00% | 1 | 100.00% |
Total | 68 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(on_each_cpu_cond);
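The predicate is evaluated with preemption disabled so that a UP kernel sees the same calling conditions as SMP; gfp_flags only matter to the SMP implementation, which may allocate a cpumask. A sketch assuming a hypothetical needs_flush() predicate, do_flush() callback, and pending bitmap:

#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/smp.h>

/* Hypothetical predicate: should func run on this CPU? */
static bool needs_flush(int cpu, void *info)
{
	return test_bit(cpu, (unsigned long *)info);
}

/* Hypothetical callback; body elided. */
static void do_flush(void *info)
{
}

/* Hypothetical caller: func runs only where the predicate returns true. */
static void flush_if_needed(unsigned long *pending)
{
	on_each_cpu_cond(needs_flush, do_flush, pending, true, GFP_KERNEL);
}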
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Juergen Gross | 68 | 100.00% | 1 | 100.00% |
Total | 68 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
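Unlike the helpers above, smp_call_on_cpu() rejects any CPU other than 0 with -ENXIO and, when phys is true, asks the hypervisor to pin the vCPU to the physical CPU for the duration of the call. A sketch assuming a hypothetical read_platform_reg() function and query_cpu0() wrapper:

#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical work that must execute on one particular (physical) CPU. */
static int read_platform_reg(void *arg)
{
	u32 *out = arg;

	*out = 0;	/* placeholder for a real register/firmware read */
	return 0;
}

/* Hypothetical wrapper: on UP only CPU 0 is valid; phys=true pins the vCPU. */
static int query_cpu0(u32 *val)
{
	return smp_call_on_cpu(0, read_platform_reg, val, true);
}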
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
David Daney | 185 | 50.68% | 3 | 23.08% |
Juergen Gross | 76 | 20.82% | 2 | 15.38% |
Andrew Morton | 56 | 15.34% | 1 | 7.69% |
Christoph Hellwig | 36 | 9.86% | 1 | 7.69% |
Ingo Molnar | 4 | 1.10% | 2 | 15.38% |
Jan Kara | 4 | 1.10% | 1 | 7.69% |
Frédéric Weisbecker | 2 | 0.55% | 1 | 7.69% |
Huang Ying | 1 | 0.27% | 1 | 7.69% |
Paul Gortmaker | 1 | 0.27% | 1 | 7.69% |
Total | 365 | 100.00% | 13 | 100.00% |