Release 4.7: include/linux/stop_machine.h
#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/list.h>
/*
 * stop_cpu[s]() is a simplistic per-cpu maximum-priority cpu
 * monopolization mechanism. The caller can specify a non-sleeping
 * function to be executed on one or more cpus, preempting all
 * other processes and monopolizing those cpus until it finishes.
 *
 * Resources for this mechanism are preallocated when a cpu is brought
 * up and requests are guaranteed to be served as long as the target
 * cpus are online.
 */
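As a rough illustration of the comment above, the sketch below shows how a caller might run a non-sleeping callback on one cpu via stop_one_cpu() (declared further down in this header). The callback, its output slot, and the wrapper function are hypothetical, introduced only for illustration; treat this as a usage sketch, not code from the tree.

#include <linux/stop_machine.h>
#include <linux/smp.h>

/* Hypothetical non-sleeping callback: it runs with the target cpu fully
 * monopolized by the stopper task, so nothing else executes there until
 * it returns. */
static int read_cpu_id(void *arg)
{
        unsigned int *out = arg;        /* hypothetical output slot */

        *out = smp_processor_id();
        return 0;
}

static int run_on_cpu_example(unsigned int cpu)
{
        unsigned int id = 0;

        /* Blocks until read_cpu_id() has run on @cpu; the return value is
         * what the callback returned, or -ENOENT if @cpu was not available
         * to run it. */
        return stop_one_cpu(cpu, read_cpu_id, &id);
}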
typedef int (*cpu_stop_fn_t)(void *arg);
#ifdef CONFIG_SMP
struct cpu_stop_work {
        struct list_head list;          /* cpu_stopper->works */
        cpu_stop_fn_t fn;
        void *arg;
        struct cpu_stop_done *done;
};
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                         struct cpu_stop_work *work_buf);
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
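The _nowait variant above queues the callback and returns immediately, so the caller must supply storage for the struct cpu_stop_work that stays valid until the callback has run. One possible pattern is sketched below; kick_cpu_fn, the per-cpu work buffer, and the wrapper are hypothetical names used only for illustration.

#include <linux/stop_machine.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct cpu_stop_work, kick_work);

/* Hypothetical callback, run on the target cpu by its stopper task. */
static int kick_cpu_fn(void *arg)
{
        return 0;
}

static void kick_cpu_example(unsigned int cpu)
{
        /* Queues kick_cpu_fn() on @cpu and returns without waiting; the
         * per-cpu buffer keeps the work item valid until it has run. */
        stop_one_cpu_nowait(cpu, kick_cpu_fn, NULL, per_cpu_ptr(&kick_work, cpu));
}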
#else /* CONFIG_SMP */
#include <linux/workqueue.h>
struct cpu_stop_work {
        struct work_struct work;
        cpu_stop_fn_t fn;
        void *arg;
};
static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
        int ret = -ENOENT;

        preempt_disable();
        if (cpu == smp_processor_id())
                ret = fn(arg);
        preempt_enable();
        return ret;
}
Contributors
| Person    | Tokens | Prop    | Commits | CommitProp |
| tejun heo | 47     | 100.00% | 1       | 100.00%    |
| Total     | 47     | 100.00% | 1       | 100.00%    |
static void stop_one_cpu_nowait_workfn(struct work_struct *work)
{
        struct cpu_stop_work *stwork =
                container_of(work, struct cpu_stop_work, work);

        preempt_disable();
        stwork->fn(stwork->arg);
        preempt_enable();
}
Contributors
| Person    | Tokens | Prop    | Commits | CommitProp |
| tejun heo | 41     | 100.00% | 1       | 100.00%    |
| Total     | 41     | 100.00% | 1       | 100.00%    |
static inline bool stop_one_cpu_nowait(unsigned int cpu,
                                       cpu_stop_fn_t fn, void *arg,
                                       struct cpu_stop_work *work_buf)
{
        if (cpu == smp_processor_id()) {
                INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
                work_buf->fn = fn;
                work_buf->arg = arg;
                schedule_work(&work_buf->work);
                return true;
        }

        return false;
}
Contributors
| Person        | Tokens | Prop    | Commits | CommitProp |
| tejun heo     | 61     | 89.71%  | 1       | 50.00%     |
| oleg nesterov | 7      | 10.29%  | 1       | 50.00%     |
| Total         | 68     | 100.00% | 2       | 100.00%    |
static inline int stop_cpus(const struct cpumask *cpumask,
                            cpu_stop_fn_t fn, void *arg)
{
        if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
                return stop_one_cpu(raw_smp_processor_id(), fn, arg);
        return -ENOENT;
}
Contributors
| Person    | Tokens | Prop    | Commits | CommitProp |
| tejun heo | 45     | 100.00% | 1       | 100.00%    |
| Total     | 45     | 100.00% | 1       | 100.00%    |
static inline int try_stop_cpus(const struct cpumask *cpumask,
                                cpu_stop_fn_t fn, void *arg)
{
        return stop_cpus(cpumask, fn, arg);
}
Contributors
| Person    | Tokens | Prop    | Commits | CommitProp |
| tejun heo | 30     | 100.00% | 1       | 100.00%    |
| Total     | 30     | 100.00% | 1       | 100.00%    |
#endif /* CONFIG_SMP */
/*
 * stop_machine "Bogolock": stop the entire machine, disable
 * interrupts. This is a very heavy lock, which is equivalent to
 * grabbing every spinlock (and more). So the "read" side to such a
 * lock is anything which disables preemption.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
/**
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This causes a thread to be scheduled on every cpu,
 * each of which disables interrupts. The result is that no one is
 * holding a spinlock or inside any other preempt-disabled region when
 * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel.
 */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
                                   const struct cpumask *cpus);
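By way of illustration, the sketch below shows the usual shape of a stop_machine() call: a short, non-sleeping callback that patches global state while every online cpu sits in its stopper thread with interrupts off, so no read-side (preempt-disabled) critical section can observe the update in progress. The setting being patched and both function names are hypothetical, not part of this header.

#include <linux/stop_machine.h>

static int example_setting;             /* hypothetical global being patched */

static int apply_setting(void *data)
{
        /* Runs on one cpu with interrupts disabled while all other online
         * cpus spin in their stopper threads, also with interrupts off. */
        example_setting = *(int *)data;
        return 0;
}

static int update_setting_example(int new_value)
{
        /* NULL cpumask: the core picks an online cpu to run apply_setting(). */
        return stop_machine(apply_setting, &new_value, NULL);
}

Note that stop_machine() itself may sleep while setting this up, so the caller has to be in a context that can block; only the callback must be non-sleeping.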
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
static inline int stop_machine(cpu_stop_fn_t fn, void *data,
                               const struct cpumask *cpus)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = fn(data);
        local_irq_restore(flags);
        return ret;
}
Contributors
| Person        | Tokens | Prop    | Commits | CommitProp |
| rusty russell | 33     | 70.21%  | 3       | 50.00%     |
| tejun heo     | 12     | 25.53%  | 1       | 16.67%     |
| oleg nesterov | 2      | 4.26%   | 2       | 33.33%     |
| Total         | 47     | 100.00% | 6       | 100.00%    |
static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
                                                 const struct cpumask *cpus)
{
        return stop_machine(fn, data, cpus);
}
Contributors
| Person        | Tokens | Prop    | Commits | CommitProp |
| tejun heo     | 28     | 93.33%  | 1       | 33.33%     |
| oleg nesterov | 2      | 6.67%   | 2       | 66.67%     |
| Total         | 30     | 100.00% | 3       | 100.00%    |
#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_STOP_MACHINE */
Overall Contributors
| Person              | Tokens | Prop    | Commits | CommitProp |
| tejun heo           | 420    | 76.78%  | 3       | 18.75%     |
| rusty russell       | 70     | 12.80%  | 3       | 18.75%     |
| oleg nesterov       | 28     | 5.12%   | 5       | 31.25%     |
| peter zijlstra      | 19     | 3.47%   | 1       | 6.25%      |
| chris wilson        | 5      | 0.91%   | 1       | 6.25%      |
| paul gortmaker      | 3      | 0.55%   | 1       | 6.25%      |
| lucas de marchi     | 1      | 0.18%   | 1       | 6.25%      |
| jonathan neuschafer | 1      | 0.18%   | 1       | 6.25%      |
| Total               | 547    | 100.00% | 16      | 100.00%    |