cregit-Linux: how code gets into the kernel

Release 4.15, file kernel/cpu.c

Directory: kernel
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * struct cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	The state at which the next callback invocation should fail
 *		(CPUHP_INVALID when no failure is injected)
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Instance node for a single multi-instance callback invocation
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

/* Per-cpu hotplug state; fail starts as CPUHP_INVALID, i.e. no injected failure */
static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)

/*
 * Lockdep maps used to annotate the implicit exclusion between the control
 * side and the AP hotplug thread for the up and down directions.
 */
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);

static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

/* Acquire the lockdep map matching the direction (bringup vs. teardown) */
static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra2095.24%150.00%
Mathieu Malaterre14.76%150.00%
Total21100.00%2100.00%


/* Release the lockdep map matching the direction (bringup vs. teardown) */
static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra2095.24%150.00%
Mathieu Malaterre14.76%150.00%
Total21100.00%2100.00%

#else
/* No-op stub when lockdep annotations are not available */
static inline void cpuhp_lock_acquire(bool bringup) { }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra888.89%150.00%
Mathieu Malaterre111.11%150.00%
Total9100.00%2100.00%


/* No-op stub when lockdep annotations are not available */
static inline void cpuhp_lock_release(bool bringup) { }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra888.89%150.00%
Mathieu Malaterre111.11%150.00%
Total9100.00%2100.00%

#endif

/**
 * struct cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @list:	List head of registered multi-instance nodes
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances, use the @multi callbacks
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

/* Protects updates to the cpuhp step tables */
static DEFINE_MUTEX(cpuhp_state_mutex);
/* Step tables: one for the control (BP) side, one for the AP side */
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
static bool cpuhp_is_ap_state(enum cpuhp_state state) { /* * The extra check for CPUHP_TEARDOWN_CPU is only for documentation * purposes as that state is handled explicitly in cpu_down. */ return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner20100.00%1100.00%
Total20100.00%1100.00%


/* Return the step descriptor for @state from the matching (AP or BP) table */
static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *table;

	if (cpuhp_is_ap_state(state))
		table = cpuhp_ap_states;
	else
		table = cpuhp_bp_states;

	return table + state;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner33100.00%1100.00%
Total33100.00%1100.00%

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	/* Injected failure (via st->fail): consume it and report -EAGAIN */
	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	/* Plain single-callback state: invoke it directly, if present */
	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		/* Resume a previous partial run where it left off */
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			/* Remember how far we got for a later rollback */
			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	/* Undo the opposite direction on the cnt instances already done */
	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail,
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner33371.31%457.14%
Peter Zijlstra13428.69%342.86%
Total467100.00%7100.00%

#ifdef CONFIG_SMP
/* Wait for the AP hotplug thread to signal completion for the given direction */
static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	if (bringup)
		wait_for_completion(&st->done_up);
	else
		wait_for_completion(&st->done_down);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra37100.00%1100.00%
Total37100.00%1100.00%


/* Signal completion to the issuer waiting on the given direction */
static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	if (bringup)
		complete(&st->done_up);
	else
		complete(&st->done_down);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra37100.00%1100.00%
Total37100.00%1100.00%

/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return state >= CPUHP_AP_IDLE_DEAD && state < CPUHP_AP_ONLINE;
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra19100.00%1100.00%
Total19100.00%1100.00%

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/* NOTE(review): presumably set while tasks are frozen (suspend/resume path) — verify against callers */
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
/* Begin a cpu_online_mask/cpu_present_mask update section */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Lai Jiangshan13100.00%1100.00%
Total13100.00%1100.00%


/* End a cpu_online_mask/cpu_present_mask update section */
void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Lai Jiangshan13100.00%1100.00%
Total13100.00%1100.00%

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

/* Percpu rwsem serializing hotplug writers against cpus_read_lock() readers */
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
/* Take the hotplug lock for reading; excludes concurrent hotplug operations */
void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Gautham R. Shenoy646.15%114.29%
Thomas Gleixner323.08%228.57%
Ashok Raj17.69%114.29%
David Hildenbrand17.69%114.29%
Linus Torvalds17.69%114.29%
Paul E. McKenney17.69%114.29%
Total13100.00%7100.00%

EXPORT_SYMBOL_GPL(cpus_read_lock);
/* Drop the read side of the hotplug lock */
void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Ashok Raj1076.92%133.33%
Thomas Gleixner323.08%266.67%
Total13100.00%3100.00%

EXPORT_SYMBOL_GPL(cpus_read_unlock);
/* Take the hotplug lock for writing; used by the hotplug machinery itself */
void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Gautham R. Shenoy646.15%120.00%
Thomas Gleixner323.08%240.00%
Paul E. McKenney215.38%120.00%
David Hildenbrand215.38%120.00%
Total13100.00%5100.00%


/* Drop the write side of the hotplug lock */
void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner861.54%133.33%
David Hildenbrand430.77%133.33%
Gautham R. Shenoy17.69%133.33%
Total13100.00%3100.00%


/* Lockdep assertion that the hotplug lock is held (read or write) */
void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Gautham R. Shenoy1076.92%150.00%
Thomas Gleixner323.08%150.00%
Total13100.00%2100.00%

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}

Contributors

PersonTokensPropCommitsCommitProp
Srivatsa S. Bhat1593.75%150.00%
Vitaly Kuznetsov16.25%150.00%
Total16100.00%2100.00%

EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
/* Decrement the disable count; warn (and bail) on an unbalanced enable */
static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

Contributors

PersonTokensPropCommitsCommitProp
Lianwei Wang22100.00%1100.00%
Total22100.00%1100.00%


/* Re-enable CPU hotplug; pairs with cpu_hotplug_disable() */
void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}

Contributors

PersonTokensPropCommitsCommitProp
Srivatsa S. Bhat1487.50%150.00%
Lianwei Wang212.50%150.00%
Total16100.00%2100.00%

EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */
/*
 * Arm a regular (non-single) transition towards @target and return the
 * previous state so a failure can be rolled back to it.
 */
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->target = target;
	st->single = false;
	st->rollback = false;
	st->last = NULL;
	/* Direction: going up when the current state is below the target */
	st->bringup = prev_state < target;

	return prev_state;
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra5588.71%150.00%
Thomas Gleixner711.29%150.00%
Total62100.00%2100.00%


/* Turn the current transition around so it undoes itself back to @prev_state */
static inline void cpuhp_reset_state(struct cpuhp_cpu_state *st,
				     enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra3250.00%116.67%
Thomas Gleixner3148.44%466.67%
Boris Ostrovsky11.56%116.67%
Total64100.00%6100.00%

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	/* Nothing to do if already at the target (and not a single invocation) */
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	/* Block until the AP thread signals completion for this direction */
	wait_for_ap_thread(st, st->bringup);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra3050.85%250.00%
Thomas Gleixner2949.15%250.00%
Total59100.00%4100.00%


static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target) { enum cpuhp_state prev_state; int ret; prev_state = cpuhp_set_state(st, target); __cpuhp_kick_ap(st); if ((ret = st->result)) { cpuhp_reset_state(st, prev_state); __cpuhp_kick_ap(st); } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra3758.73%150.00%
Thomas Gleixner2641.27%150.00%
Total63100.00%2100.00%


/* Wait for the freshly brought-up CPU and finish its hotplug transition */
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Nothing more to do if the target does not go beyond ONLINE_IDLE */
	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra7186.59%266.67%
Thomas Gleixner1113.41%133.33%
Total82100.00%3100.00%


/* CPUHP_BRINGUP_CPU step: arch bringup, then wait for the AP side */
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra4381.13%150.00%
Thomas Gleixner1018.87%150.00%
Total53100.00%2100.00%

/*
 * Hotplug state machine related functions
 */

/* Walk the states back down to st->target, invoking teardown callbacks */
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner6997.18%480.00%
Peter Zijlstra22.82%120.00%
Total71100.00%5100.00%


/* Run the bringup callbacks from st->state up to @target, undoing on failure */
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			/* Roll back to where we started from */
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner8597.70%375.00%
Peter Zijlstra22.30%125.00%
Total87100.00%4100.00%

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */

/* smpboot create callback: set up the per-cpu completion objects */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_down);
	init_completion(&st->done_up);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner3076.92%150.00%
Peter Zijlstra923.08%150.00%
Total39100.00%2100.00%


/* smpboot predicate: run the thread function while should_run is set */
static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner26100.00%1100.00%
Total26100.00%1100.00%

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	if (WARN_ON_ONCE(!st->should_run))
		return;

	cpuhp_lock_acquire(bringup);

	if (st->single) {
		/* Single-callback mode: run exactly st->cb_state once */
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	/* Honor skip_onerr while rolling back after a failure */
	if (st->rollback) {
		struct cpuhp_step *step = cpuhp_get_step(state);
		if (step->skip_onerr)
			goto next;
	}

	if (cpuhp_is_atomic_state(state)) {
		/* Former STARTING/DYING states run with interrupts off */
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without no
		 * paddle, no way forward, no way back. We loose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

next:
	cpuhp_lock_release(bringup);

	/* Done (or failed): wake whoever kicked us */
	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra18560.66%333.33%
Thomas Gleixner10133.11%555.56%
Sebastian Andrzej Siewior196.23%111.11%
Total305100.00%9100.00%

/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
				    struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	/* Lockdep only: record both map orderings without holding anything */
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	/* Arm a single-callback invocation on the remote hotplug thread */
	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation wont use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner12670.00%872.73%
Peter Zijlstra5430.00%327.27%
Total180100.00%11100.00%


/* Kick the AP hotplug thread of @cpu towards its current target state */
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	/* Lockdep only: record both map orderings without holding anything */
	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner5861.70%360.00%
Peter Zijlstra3638.30%240.00%
Total94100.00%5100.00%

/* smpboot descriptor for the per-cpu "cpuhp/%u" hotplug threads */
static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};
/* Register the hotplug threads and unpark the one for the boot CPU */
void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner27100.00%1100.00%
Total27100.00%1100.00%

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so its not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

Contributors

PersonTokensPropCommitsCommitProp
Anton Vorontsov70100.00%2100.00%
Total70100.00%2100.00%

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner11688.55%466.67%
Peter Zijlstra1511.45%233.33%
Total131100.00%6100.00%


static int takedown_cpu(unsigned int cpu) { struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int err; /* Park the smpboot threads */ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); smpboot_park_threads(cpu); /* * Prevent irq alloc/free while the dying cpu reorganizes the * interrupt affinities. */ irq_lock_sparse();