Release 4.15 kernel/cpu.c
/* CPU control.
* (C) 2001, 2002, 2003, 2004 Rusty Russell
*
* This code is licensed under the GPL.
*/
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>
#include "smpboot.h"
/**
* cpuhp_cpu_state - Per cpu hotplug state storage
* @state: The current cpu state
* @target: The target state
* @fail: Inject a callback failure at this state for testing; CPUHP_INVALID means no failure is injected
* @thread: Pointer to the hotplug thread
* @should_run: Thread should execute
* @rollback: Perform a rollback
* @single: Single callback invocation
* @bringup: Single callback bringup or teardown selector
* @node: For multi-instance, the instance node for a single entry callback (install/remove)
* @last: For multi-instance rollback, remember how far we got
* @cb_state: The state for a single callback (install/uninstall)
* @result: Result of the operation
* @done_up: Signal completion to the issuer of the task for cpu-up
* @done_down: Signal completion to the issuer of the task for cpu-down
*/
struct cpuhp_cpu_state {
enum cpuhp_state state;
enum cpuhp_state target;
enum cpuhp_state fail;
#ifdef CONFIG_SMP
struct task_struct *thread;
bool should_run;
bool rollback;
bool single;
bool bringup;
struct hlist_node *node;
struct hlist_node *last;
enum cpuhp_state cb_state;
int result;
struct completion done_up;
struct completion done_down;
#endif
};
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
.fail = CPUHP_INVALID,
};
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
static inline void cpuhp_lock_acquire(bool bringup)
{
lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 20 | 95.24% | 1 | 50.00% |
Mathieu Malaterre | 1 | 4.76% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
static inline void cpuhp_lock_release(bool bringup)
{
lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 20 | 95.24% | 1 | 50.00% |
Mathieu Malaterre | 1 | 4.76% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
#else
static inline void cpuhp_lock_acquire(bool bringup) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 8 | 88.89% | 1 | 50.00% |
Mathieu Malaterre | 1 | 11.11% | 1 | 50.00% |
Total | 9 | 100.00% | 2 | 100.00% |
static inline void cpuhp_lock_release(bool bringup) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 8 | 88.89% | 1 | 50.00% |
Mathieu Malaterre | 1 | 11.11% | 1 | 50.00% |
Total | 9 | 100.00% | 2 | 100.00% |
#endif
/**
* cpuhp_step - Hotplug state machine step
* @name: Name of the step
* @startup: Startup function of the step
* @teardown: Teardown function of the step
* @skip_onerr: Do not invoke the functions on error rollback
* Will go away once the notifiers are gone
* @cant_stop: Bringup/teardown can't be stopped at this step
* @multi_instance: State has multiple instances which get added afterwards
*/
struct cpuhp_step {
const char *name;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} startup;
union {
int (*single)(unsigned int cpu);
int (*multi)(unsigned int cpu,
struct hlist_node *node);
} teardown;
struct hlist_head list;
bool skip_onerr;
bool cant_stop;
bool multi_instance;
};
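These steps are normally filled in through the registration API declared in <linux/cpuhotplug.h>, not by writing the struct directly. Below is a minimal, hedged sketch of a client registering an online-section state; the mysub_* names are hypothetical and not part of kernel/cpu.c.

/* Example (hypothetical client, not part of kernel/cpu.c). */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int mysub_cpu_online(unsigned int cpu)
{
	/* Set up per-CPU resources; a failure here rolls the bringup back. */
	return 0;
}

static int mysub_cpu_offline(unsigned int cpu)
{
	/* Release the per-CPU resources again. */
	return 0;
}

static int __init mysub_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN requests a dynamically allocated state in the
	 * online (AP) section; the startup callback is also invoked on all
	 * CPUs that are already online at registration time.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mysub:online",
				mysub_cpu_online, mysub_cpu_offline);
	return ret < 0 ? ret : 0;
}

On success with CPUHP_AP_ONLINE_DYN, the positive return value is the allocated state, which a matching cpuhp_remove_state() call would take at module exit.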
static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
/*
* The extra check for CPUHP_TEARDOWN_CPU is only for documentation
* purposes as that state is handled explicitly in cpu_down.
*/
return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
struct cpuhp_step *sp;
sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
return sp + state;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
/**
* cpuhp_invoke_callback - Invoke the callbacks for a given state
* @cpu: The cpu for which the callback should be invoked
* @state: The state to do callbacks for
* @bringup: True if the bringup callback should be invoked
* @node: For multi-instance, do a single entry callback for install/remove
* @lastp: For multi-instance rollback, remember how far we got
*
* Called from cpu hotplug and from the state register machinery.
*/
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
bool bringup, struct hlist_node *node,
struct hlist_node **lastp)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct cpuhp_step *step = cpuhp_get_step(state);
int (*cbm)(unsigned int cpu, struct hlist_node *node);
int (*cb)(unsigned int cpu);
int ret, cnt;
if (st->fail == state) {
st->fail = CPUHP_INVALID;
if (!(bringup ? step->startup.single : step->teardown.single))
return 0;
return -EAGAIN;
}
if (!step->multi_instance) {
WARN_ON_ONCE(lastp && *lastp);
cb = bringup ? step->startup.single : step->teardown.single;
if (!cb)
return 0;
trace_cpuhp_enter(cpu, st->target, state, cb);
ret = cb(cpu);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
cbm = bringup ? step->startup.multi : step->teardown.multi;
if (!cbm)
return 0;
/* Single invocation for instance add/remove */
if (node) {
WARN_ON_ONCE(lastp && *lastp);
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
return ret;
}
/* State transition. Invoke on all instances */
cnt = 0;
hlist_for_each(node, &step->list) {
if (lastp && node == *lastp)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
if (ret) {
if (!lastp)
goto err;
*lastp = node;
return ret;
}
cnt++;
}
if (lastp)
*lastp = NULL;
return 0;
err:
/* Rollback the instances if one failed */
cbm = !bringup ? step->startup.multi : step->teardown.multi;
if (!cbm)
return ret;
hlist_for_each(node, &step->list) {
if (!cnt--)
break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret);
/*
* Rollback must not fail.
*/
WARN_ON_ONCE(ret);
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 333 | 71.31% | 4 | 57.14% |
Peter Zijlstra | 134 | 28.69% | 3 | 42.86% |
Total | 467 | 100.00% | 7 | 100.00% |
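The multi-instance branch above (step->multi_instance) is driven by drivers that register one state and then attach per-device instances to its list. A hedged sketch of that pattern, with hypothetical mydrv_* names that are not part of kernel/cpu.c:

/* Example (hypothetical driver, not part of kernel/cpu.c). */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/cpuhotplug.h>

struct mydrv_device {
	struct hlist_node node;		/* linked into cpuhp_step::list */
	/* per-device data ... */
};

static enum cpuhp_state mydrv_online_state;

static int mydrv_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	/* hlist_entry() recovers the containing instance from @node. */
	struct mydrv_device *dev __maybe_unused =
		hlist_entry(node, struct mydrv_device, node);

	/* Bring this one instance up on @cpu. */
	return 0;
}

static int mydrv_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	/* Tear this one instance down on @cpu. */
	return 0;
}

static int __init mydrv_init(void)
{
	int ret;

	/* One-time state registration; no instances exist yet. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				      mydrv_cpu_online, mydrv_cpu_offline);
	if (ret < 0)
		return ret;
	mydrv_online_state = ret;
	return 0;
}

static int mydrv_probe_one(struct mydrv_device *dev)
{
	/* Runs mydrv_cpu_online(cpu, &dev->node) on each online CPU. */
	return cpuhp_state_add_instance(mydrv_online_state, &dev->node);
}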
#ifdef CONFIG_SMP
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
wait_for_completion(done);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
struct completion *done = bringup ? &st->done_up : &st->done_down;
complete(done);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
/*
* The former STARTING/DYING states run with IRQs disabled and must not fail.
*/
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
* The following two APIs (cpu_maps_update_begin/done) must be used when
* attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
*/
void cpu_maps_update_begin(void)
{
mutex_lock(&cpu_add_remove_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lai Jiangshan | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
void cpu_maps_update_done(void)
{
mutex_unlock(&cpu_add_remove_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lai Jiangshan | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
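A hedged sketch of the intended usage pattern for this pair; the helper name is hypothetical:

/* Example (hypothetical arch helper, not part of kernel/cpu.c). */
#include <linux/cpu.h>
#include <linux/cpumask.h>

static void my_arch_register_cpu(unsigned int cpu)
{
	cpu_maps_update_begin();
	set_cpu_present(cpu, true);	/* serialized against other mask writers */
	cpu_maps_update_done();
}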
/*
* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
* Should always be manipulated under cpu_add_remove_lock
*/
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
void cpus_read_lock(void)
{
percpu_down_read(&cpu_hotplug_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gautham R. Shenoy | 6 | 46.15% | 1 | 14.29% |
Thomas Gleixner | 3 | 23.08% | 2 | 28.57% |
Ashok Raj | 1 | 7.69% | 1 | 14.29% |
David Hildenbrand | 1 | 7.69% | 1 | 14.29% |
Linus Torvalds | 1 | 7.69% | 1 | 14.29% |
Paul E. McKenney | 1 | 7.69% | 1 | 14.29% |
Total | 13 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(cpus_read_lock);
void cpus_read_unlock(void)
{
percpu_up_read(&cpu_hotplug_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ashok Raj | 10 | 76.92% | 1 | 33.33% |
Thomas Gleixner | 3 | 23.08% | 2 | 66.67% |
Total | 13 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(cpus_read_unlock);
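A typical reader pins hotplug across a walk of the online mask; a minimal sketch with a hypothetical function name:

/* Example (hypothetical, not part of kernel/cpu.c). */
#include <linux/cpu.h>
#include <linux/printk.h>

static void my_count_online_cpus(void)
{
	unsigned int cpu, cnt = 0;

	cpus_read_lock();
	for_each_online_cpu(cpu)	/* no CPU can come or go under us */
		cnt++;
	cpus_read_unlock();

	pr_info("%u CPUs online\n", cnt);
}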
void cpus_write_lock(void)
{
percpu_down_write(&cpu_hotplug_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gautham R. Shenoy | 6 | 46.15% | 1 | 20.00% |
Thomas Gleixner | 3 | 23.08% | 2 | 40.00% |
Paul E. McKenney | 2 | 15.38% | 1 | 20.00% |
David Hildenbrand | 2 | 15.38% | 1 | 20.00% |
Total | 13 | 100.00% | 5 | 100.00% |
void cpus_write_unlock(void)
{
percpu_up_write(&cpu_hotplug_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 8 | 61.54% | 1 | 33.33% |
David Hildenbrand | 4 | 30.77% | 1 | 33.33% |
Gautham R. Shenoy | 1 | 7.69% | 1 | 33.33% |
Total | 13 | 100.00% | 3 | 100.00% |
void lockdep_assert_cpus_held(void)
{
percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gautham R. Shenoy | 10 | 76.92% | 1 | 50.00% |
Thomas Gleixner | 3 | 23.08% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
/*
* Wait for currently running CPU hotplug operations to complete (if any) and
* disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
* the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
* hotplug path before performing hotplug operations. So acquiring that lock
* guarantees mutual exclusion from any currently running hotplug operations.
*/
void cpu_hotplug_disable(void)
{
cpu_maps_update_begin();
cpu_hotplug_disabled++;
cpu_maps_update_done();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srivatsa S. Bhat | 15 | 93.75% | 1 | 50.00% |
Vitaly Kuznetsov | 1 | 6.25% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
static void __cpu_hotplug_enable(void)
{
if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
return;
cpu_hotplug_disabled--;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Lianwei Wang | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
void cpu_hotplug_enable(void)
{
cpu_maps_update_begin();
__cpu_hotplug_enable();
cpu_maps_update_done();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Srivatsa S. Bhat | 14 | 87.50% | 1 | 50.00% |
Lianwei Wang | 2 | 12.50% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
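Unlike the percpu rwsem, this pair does not block hotplug writers; it makes cpu_up()/cpu_down() fail with -EBUSY instead. A hedged sketch with a hypothetical caller:

/* Example (hypothetical, not part of kernel/cpu.c). */
#include <linux/cpu.h>

static int my_fragile_sequence(void)
{
	cpu_hotplug_disable();	/* cpu_up()/cpu_down() now fail with -EBUSY */
	/* ... work that must not race with CPU hotplug ... */
	cpu_hotplug_enable();
	return 0;
}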
#endif /* CONFIG_HOTPLUG_CPU */
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
st->rollback = false;
st->last = NULL;
st->target = target;
st->single = false;
st->bringup = st->state < target;
return prev_state;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 55 | 88.71% | 1 | 50.00% |
Thomas Gleixner | 7 | 11.29% | 1 | 50.00% |
Total | 62 | 100.00% | 2 | 100.00% |
static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
st->rollback = true;
/*
* If we have st->last we need to undo partial multi_instance of this
* state first. Otherwise start undo at the previous state.
*/
if (!st->last) {
if (st->bringup)
st->state--;
else
st->state++;
}
st->target = prev_state;
st->bringup = !st->bringup;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 32 | 50.00% | 1 | 16.67% |
Thomas Gleixner | 31 | 48.44% | 4 | 66.67% |
Boris Ostrovsky | 1 | 1.56% | 1 | 16.67% |
Total | 64 | 100.00% | 6 | 100.00% |
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
if (!st->single && st->state == st->target)
return;
st->result = 0;
/*
* Make sure the above stores are visible before should_run becomes
* true. Paired with the smp_mb() in cpuhp_thread_fun().
*/
smp_mb();
st->should_run = true;
wake_up_process(st->thread);
wait_for_ap_thread(st, st->bringup);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 30 | 50.85% | 2 | 50.00% |
Thomas Gleixner | 29 | 49.15% | 2 | 50.00% |
Total | 59 | 100.00% | 4 | 100.00% |
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
enum cpuhp_state prev_state;
int ret;
prev_state = cpuhp_set_state(st, target);
__cpuhp_kick_ap(st);
if ((ret = st->result)) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 37 | 58.73% | 1 | 50.00% |
Thomas Gleixner | 26 | 41.27% | 1 | 50.00% |
Total | 63 | 100.00% | 2 | 100.00% |
static int bringup_wait_for_ap(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
wait_for_ap_thread(st, true);
if (WARN_ON_ONCE((!cpu_online(cpu))))
return -ECANCELED;
/* Unpark the stopper thread and the hotplug thread of the target cpu */
stop_machine_unpark(cpu);
kthread_unpark(st->thread);
if (st->target <= CPUHP_AP_ONLINE_IDLE)
return 0;
return cpuhp_kick_ap(st, st->target);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 71 | 86.59% | 2 | 66.67% |
Thomas Gleixner | 11 | 13.41% | 1 | 33.33% |
Total | 82 | 100.00% | 3 | 100.00% |
static int bringup_cpu(unsigned int cpu)
{
struct task_struct *idle = idle_thread_get(cpu);
int ret;
/*
* Some architectures have to walk the irq descriptors to
* setup the vector space for the cpu which comes online.
* Prevent irq alloc/free across the bringup.
*/
irq_lock_sparse();
/* Arch-specific enabling code. */
ret = __cpu_up(cpu, idle);
irq_unlock_sparse();
if (ret)
return ret;
return bringup_wait_for_ap(cpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 43 | 81.13% | 1 | 50.00% |
Thomas Gleixner | 10 | 18.87% | 1 | 50.00% |
Total | 53 | 100.00% | 2 | 100.00% |
/*
* Hotplug state machine related functions
*/
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
for (st->state--; st->state > st->target; st->state--) {
struct cpuhp_step *step = cpuhp_get_step(st->state);
if (!step->skip_onerr)
cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 69 | 97.18% | 4 | 80.00% |
Peter Zijlstra | 2 | 2.82% | 1 | 20.00% |
Total | 71 | 100.00% | 5 | 100.00% |
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
enum cpuhp_state target)
{
enum cpuhp_state prev_state = st->state;
int ret = 0;
while (st->state < target) {
st->state++;
ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
if (ret) {
st->target = prev_state;
undo_cpu_up(cpu, st);
break;
}
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 85 | 97.70% | 3 | 75.00% |
Peter Zijlstra | 2 | 2.30% | 1 | 25.00% |
Total | 87 | 100.00% | 4 | 100.00% |
/*
* The cpu hotplug threads manage the bringup and teardown of the cpus
*/
static void cpuhp_create(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
init_completion(&st->done_up);
init_completion(&st->done_down);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 30 | 76.92% | 1 | 50.00% |
Peter Zijlstra | 9 | 23.08% | 1 | 50.00% |
Total | 39 | 100.00% | 2 | 100.00% |
static int cpuhp_should_run(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
return st->should_run;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
/*
* Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
* callbacks when a state gets [un]installed at runtime.
*
* Each invocation of this function by the smpboot thread does a single AP
* state callback.
*
* It has 3 modes of operation:
* - single: runs st->cb_state
* - up: runs ++st->state, while st->state < st->target
* - down: runs st->state--, while st->state > st->target
*
* When complete or on error, should_run is cleared and the completion is fired.
*/
static void cpuhp_thread_fun(unsigned int cpu)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
bool bringup = st->bringup;
enum cpuhp_state state;
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();
if (WARN_ON_ONCE(!st->should_run))
return;
cpuhp_lock_acquire(bringup);
if (st->single) {
state = st->cb_state;
st->should_run = false;
} else {
if (bringup) {
st->state++;
state = st->state;
st->should_run = (st->state < st->target);
WARN_ON_ONCE(st->state > st->target);
} else {
state = st->state;
st->state--;
st->should_run = (st->state > st->target);
WARN_ON_ONCE(st->state < st->target);
}
}
WARN_ON_ONCE(!cpuhp_is_ap_state(state));
if (st->rollback) {
struct cpuhp_step *step = cpuhp_get_step(state);
if (step->skip_onerr)
goto next;
}
if (cpuhp_is_atomic_state(state)) {
local_irq_disable();
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
local_irq_enable();
/*
* STARTING/DYING must not fail!
*/
WARN_ON_ONCE(st->result);
} else {
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
}
if (st->result) {
/*
* If we fail on a rollback, we're up a creek without a
* paddle, no way forward, no way back. We lose, thanks for
* playing.
*/
WARN_ON_ONCE(st->rollback);
st->should_run = false;
}
next:
cpuhp_lock_release(bringup);
if (!st->should_run)
complete_ap_thread(st, bringup);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 185 | 60.66% | 3 | 33.33% |
Thomas Gleixner | 101 | 33.11% | 5 | 55.56% |
Sebastian Andrzej Siewior | 19 | 6.23% | 1 | 11.11% |
Total | 305 | 100.00% | 9 | 100.00% |
/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
struct hlist_node *node)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int ret;
if (!cpu_online(cpu))
return 0;
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
/*
* If we are up and running, use the hotplug thread. For early calls
* we invoke the thread function directly.
*/
if (!st->thread)
return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
st->rollback = false;
st->last = NULL;
st->node = node;
st->bringup = bringup;
st->cb_state = state;
st->single = true;
__cpuhp_kick_ap(st);
/*
* If we failed and did a partial, do a rollback.
*/
if ((ret = st->result) && st->last) {
st->rollback = true;
st->bringup = !bringup;
__cpuhp_kick_ap(st);
}
/*
* Clean up the leftovers so the next hotplug operation won't use stale
* data.
*/
st->node = st->last = NULL;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 126 | 70.00% | 8 | 72.73% |
Peter Zijlstra | 54 | 30.00% | 3 | 27.27% |
Total | 180 | 100.00% | 11 | 100.00% |
static int cpuhp_kick_ap_work(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
enum cpuhp_state prev_state = st->state;
int ret;
cpuhp_lock_acquire(false);
cpuhp_lock_release(false);
cpuhp_lock_acquire(true);
cpuhp_lock_release(true);
trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
ret = cpuhp_kick_ap(st, st->target);
trace_cpuhp_exit(cpu, st->state, prev_state, ret);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 58 | 61.70% | 3 | 60.00% |
Peter Zijlstra | 36 | 38.30% | 2 | 40.00% |
Total | 94 | 100.00% | 5 | 100.00% |
static struct smp_hotplug_thread cpuhp_threads = {
.store = &cpuhp_state.thread,
.create = &cpuhp_create,
.thread_should_run = cpuhp_should_run,
.thread_fn = cpuhp_thread_fun,
.thread_comm = "cpuhp/%u",
.selfparking = true,
};
void __init cpuhp_threads_init(void)
{
BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_HOTPLUG_CPU
/**
* clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
* @cpu: a CPU id
*
* This function walks all processes, finds a valid mm struct for each one and
* then clears a corresponding bit in mm's cpumask. While this all sounds
* trivial, there are various non-obvious corner cases, which this function
* tries to solve in a safe manner.
*
* Also note that the function uses a somewhat relaxed locking scheme, so it may
* be called only for an already offlined CPU.
*/
void clear_tasks_mm_cpumask(int cpu)
{
struct task_struct *p;
/*
* This function is called after the cpu is taken down and marked
* offline, so it's not like new tasks will ever get this cpu set in
* their mm mask. -- Peter Zijlstra
* Thus, we may use rcu_read_lock() here, instead of grabbing
* full-fledged tasklist_lock.
*/
WARN_ON(cpu_online(cpu));
rcu_read_lock();
for_each_process(p) {
struct task_struct *t;
/*
* Main thread might exit, but other threads may still have
* a valid mm. Find one.
*/
t = find_lock_task_mm(p);
if (!t)
continue;
cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
task_unlock(t);
}
rcu_read_unlock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Anton Vorontsov | 70 | 100.00% | 2 | 100.00% |
Total | 70 | 100.00% | 2 | 100.00% |
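Architectures invoke this from their CPU-death path once the CPU is marked offline; a hedged sketch of where the call sits, with a hypothetical arch function:

/* Example (hypothetical arch CPU-death path, not part of kernel/cpu.c). */
#include <linux/cpu.h>

static void my_arch_cpu_die(unsigned int cpu)
{
	/* The CPU is already offline here, as the function requires. */
	clear_tasks_mm_cpumask(cpu);
	/* Arch-specific cache maintenance and power-down would follow. */
}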
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
int err, cpu = smp_processor_id();
int ret;
/* Ensure this CPU doesn't handle any more interrupts. */
err = __cpu_disable();
if (err < 0)
return err;
/*
* We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
* do this step again.
*/
WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
st->state--;
/* Invoke the former CPU_DYING callbacks */
for (; st->state > target; st->state--) {
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
/*
* DYING must not fail!
*/
WARN_ON_ONCE(ret);
}
/* Give up timekeeping duties */
tick_handover_do_timer();
/* Park the stopper thread */
stop_machine_park(cpu);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Gleixner | 116 | 88.55% | 4 | 66.67% |
Peter Zijlstra | 15 | 11.45% | 2 | 33.33% |
Total | 131 | 100.00% | 6 | 100.00% |
static int takedown_cpu(unsigned int cpu)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
/* Park the smpboot threads */
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
smpboot_park_threads(cpu);
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
* interrupt affinities.
*/
irq_lock_sparse();