cregit-Linux: how code gets into the kernel

Release 4.18: kernel/smp.c (directory: kernel)
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"


enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};


struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);


int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Aaron Lu | 38 | 33.33% | 1 | 12.50%
Peter Zijlstra | 37 | 32.46% | 1 | 12.50%
Shaohua Li | 25 | 21.93% | 1 | 12.50%
Richard Weinberger | 7 | 6.14% | 1 | 12.50%
Chen Gang S | 3 | 2.63% | 1 | 12.50%
Akinobu Mita | 2 | 1.75% | 1 | 12.50%
Yinghai Lu | 1 | 0.88% | 1 | 12.50%
Huang Ying | 1 | 0.88% | 1 | 12.50%
Total | 114 | 100.00% | 8 | 100.00%


int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Richard Weinberger | 25 | 54.35% | 1 | 25.00%
Aaron Lu | 7 | 15.22% | 1 | 25.00%
Peter Zijlstra | 7 | 15.22% | 1 | 25.00%
Shaohua Li | 7 | 15.22% | 1 | 25.00%
Total | 46 | 100.00% | 4 | 100.00%


int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Richard Weinberger | 10 | 55.56% | 1 | 33.33%
Srivatsa S. Bhat | 5 | 27.78% | 1 | 33.33%
Peter Zijlstra | 3 | 16.67% | 1 | 33.33%
Total | 18 | 100.00% | 3 | 100.00%


void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Jens Axboe | 21 | 65.62% | 1 | 20.00%
Takao Indoh | 3 | 9.38% | 1 | 20.00%
Peter Zijlstra | 3 | 9.38% | 1 | 20.00%
Richard Weinberger | 3 | 9.38% | 1 | 20.00%
Christoph Hellwig | 2 | 6.25% | 1 | 20.00%
Total | 32 | 100.00% | 5 | 100.00%

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Peter Zijlstra | 17 | 65.38% | 3 | 33.33%
Davidlohr Bueso A | 3 | 11.54% | 2 | 22.22%
Andrew Morton | 2 | 7.69% | 1 | 11.11%
Jens Axboe | 2 | 7.69% | 1 | 11.11%
Huang Ying | 1 | 3.85% | 1 | 11.11%
Linus Torvalds | 1 | 3.85% | 1 | 11.11%
Total | 26 | 100.00% | 9 | 100.00%


static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Peter Zijlstra | 18 | 69.23% | 2 | 28.57%
Andrew Morton | 3 | 11.54% | 1 | 14.29%
Huang Ying | 2 | 7.69% | 1 | 14.29%
Linus Torvalds | 1 | 3.85% | 1 | 14.29%
liguang | 1 | 3.85% | 1 | 14.29%
Davidlohr Bueso A | 1 | 3.85% | 1 | 14.29%
Total | 26 | 100.00% | 7 | 100.00%


static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Peter Zijlstra | 21 | 61.76% | 1 | 14.29%
Linus Torvalds | 6 | 17.65% | 1 | 14.29%
Andrew Morton | 3 | 8.82% | 1 | 14.29%
Huang Ying | 1 | 2.94% | 1 | 14.29%
Jens Axboe | 1 | 2.94% | 1 | 14.29%
Ingo Molnar | 1 | 2.94% | 1 | 14.29%
Davidlohr Bueso A | 1 | 2.94% | 1 | 14.29%
Total | 34 | 100.00% | 7 | 100.00%

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Frédéric Weisbecker | 68 | 55.74% | 1 | 11.11%
Jens Axboe | 23 | 18.85% | 1 | 11.11%
Linus Torvalds | 17 | 13.93% | 2 | 22.22%
Christoph Hellwig | 10 | 8.20% | 2 | 22.22%
Andrew Morton | 2 | 1.64% | 1 | 11.11%
Peter Zijlstra | 1 | 0.82% | 1 | 11.11%
Huang Ying | 1 | 0.82% | 1 | 11.11%
Total | 122 | 100.00% | 9 | 100.00%

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Srivatsa S. Bhat | 6 | 50.00% | 1 | 50.00%
Jens Axboe | 6 | 50.00% | 1 | 50.00%
Total | 12 | 100.00% | 2 | 100.00%

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Srivatsa S. Bhat | 85 | 47.75% | 2 | 16.67%
Linus Torvalds | 36 | 20.22% | 1 | 8.33%
Jens Axboe | 24 | 13.48% | 1 | 8.33%
Jan Kara | 11 | 6.18% | 1 | 8.33%
Christoph Hellwig | 10 | 5.62% | 1 | 8.33%
Frédéric Weisbecker | 5 | 2.81% | 2 | 16.67%
Andrew Morton | 3 | 1.69% | 1 | 8.33%
Christoph Lameter | 2 | 1.12% | 1 | 8.33%
Huang Ying | 1 | 0.56% | 1 | 8.33%
Peter Zijlstra | 1 | 0.56% | 1 | 8.33%
Total | 178 | 100.00% | 12 | 100.00%

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Linus Torvalds | 47 | 41.23% | 1 | 10.00%
Jens Axboe | 36 | 31.58% | 1 | 10.00%
Ingo Molnar | 11 | 9.65% | 2 | 20.00%
Suresh B. Siddha | 6 | 5.26% | 1 | 10.00%
Frédéric Weisbecker | 5 | 4.39% | 1 | 10.00%
H. Peter Anvin | 4 | 3.51% | 1 | 10.00%
Huang Ying | 3 | 2.63% | 1 | 10.00%
Peter Zijlstra | 1 | 0.88% | 1 | 10.00%
David Howells | 1 | 0.88% | 1 | 10.00%
Total | 114 | 100.00% | 10 | 100.00%
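
A minimal usage sketch for smp_call_function_single() (hypothetical caller code, not part of smp.c; the callback name and CPU number are invented for illustration):

#include <linux/smp.h>
#include <linux/printk.h>

/* Runs on the target CPU in IPI context: must be fast and non-blocking. */
static void report_cpu(void *info)
{
	pr_info("hello from CPU %d\n", smp_processor_id());
}

static void example_single_call(void)
{
	int err;

	/* wait=1: return only after report_cpu() has finished on CPU 1. */
	err = smp_call_function_single(1, report_cpu, NULL, 1);
	if (err)
		pr_warn("smp_call_function_single() failed: %d\n", err);
}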

EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Frédéric Weisbecker | 42 | 60.87% | 3 | 60.00%
Linus Torvalds | 26 | 37.68% | 1 | 20.00%
Huang Ying | 1 | 1.45% | 1 | 20.00%
Total | 69 | 100.00% | 5 | 100.00%
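
A sketch of the asynchronous variant with a caller-owned call_single_data_t (names are hypothetical). As the comment above notes, the caller must not reuse the csd until the previous call has completed:

#include <linux/smp.h>

/* Runs on the target CPU in interrupt context. */
static void async_work(void *info)
{
}

/* Caller-owned csd; it must stay valid until async_work() has run. */
static call_single_data_t my_csd = {
	.func = async_work,
	.info = NULL,
};

static void example_async_call(int target_cpu)
{
	/* Returns immediately; may be called with interrupts disabled. */
	smp_call_function_single_async(target_cpu, &my_csd);
}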

EXPORT_SYMBOL_GPL(smp_call_function_single_async);

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Rusty Russell | 128 | 96.97% | 1 | 33.33%
David John | 3 | 2.27% | 1 | 33.33%
David Howells | 1 | 0.76% | 1 | 33.33%
Total | 132 | 100.00% | 3 | 100.00%
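
A sketch of smp_call_function_any() used to reach per-node state cheaply (cpumask_of_node() is a real helper; the callback and wrapper are hypothetical):

#include <linux/smp.h>
#include <linux/topology.h>

static void touch_node_state(void *info)
{
}

static int example_call_on_node(int node)
{
	/*
	 * Runs on the current CPU if it belongs to @node, otherwise on
	 * some online CPU of @node's cpumask; wait=1 blocks until done.
	 */
	return smp_call_function_any(cpumask_of_node(node),
				     touch_node_state, NULL, 1);
}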

EXPORT_SYMBOL_GPL(smp_call_function_any);

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Jens Axboe | 89 | 29.08% | 1 | 5.00%
Rusty Russell | 62 | 20.26% | 2 | 10.00%
Shaohua Li | 56 | 18.30% | 1 | 5.00%
Aaron Lu | 18 | 5.88% | 1 | 5.00%
Milton D. Miller II | 15 | 4.90% | 2 | 10.00%
Andrew Morton | 14 | 4.58% | 1 | 5.00%
Linus Torvalds | 10 | 3.27% | 1 | 5.00%
Peter Zijlstra | 10 | 3.27% | 2 | 10.00%
Ingo Molnar | 8 | 2.61% | 2 | 10.00%
Christoph Hellwig | 6 | 1.96% | 1 | 5.00%
Suresh B. Siddha | 6 | 1.96% | 1 | 5.00%
Nicholas Piggin | 4 | 1.31% | 1 | 5.00%
Tejun Heo | 3 | 0.98% | 1 | 5.00%
Huang Ying | 2 | 0.65% | 1 | 5.00%
Christoph Lameter | 2 | 0.65% | 1 | 5.00%
David Howells | 1 | 0.33% | 1 | 5.00%
Total | 306 | 100.00% | 20 | 100.00%
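
A sketch of the calling convention documented above (callback and wrapper are hypothetical): preemption disabled, interrupts enabled, and the local CPU handled separately, since smp_call_function_many() only targets other CPUs:

#include <linux/smp.h>
#include <linux/cpumask.h>

static void drain_local_state(void *info)
{
}

static void example_call_many(const struct cpumask *targets)
{
	preempt_disable();
	/* Runs the callback on every online CPU in @targets except this one. */
	smp_call_function_many(targets, drain_local_state, NULL, true);
	/* The local CPU is skipped by design; cover it here if needed. */
	if (cpumask_test_cpu(smp_processor_id(), targets))
		drain_local_state(NULL);
	preempt_enable();
}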

EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Jens Axboe | 31 | 88.57% | 1 | 33.33%
Rusty Russell | 3 | 8.57% | 1 | 33.33%
David Howells | 1 | 2.86% | 1 | 33.33%
Total | 35 | 100.00% | 3 | 100.00%

EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Américo Wang | 7 | 100.00% | 1 | 100.00%
Total | 7 | 100.00% | 1 | 100.00%


static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Américo Wang | 21 | 100.00% | 1 | 100.00%
Total | 21 | 100.00% | 1 | 100.00%

early_param("nosmp", nosmp); /* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Américo Wang | 40 | 100.00% | 1 | 100.00%
Total | 40 | 100.00% | 1 | 100.00%

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Américo Wang | 32 | 100.00% | 1 | 100.00%
Total | 32 | 100.00% | 1 | 100.00%

early_param("maxcpus", maxcpus); /* Setup number of possible processor ids */ unsigned int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Américo Wang | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Michael Ellerman | 48 | 47.52% | 2 | 40.00%
Américo Wang | 47 | 46.53% | 1 | 20.00%
Thomas Gleixner | 3 | 2.97% | 1 | 20.00%
Suresh B. Siddha | 3 | 2.97% | 1 | 20.00%
Total | 101 | 100.00% | 5 | 100.00%

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Américo Wang | 55 | 82.09% | 1 | 50.00%
Tejun Heo | 12 | 17.91% | 1 | 50.00%
Total | 67 | 100.00% | 2 | 100.00%
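
A small sketch of on_each_cpu() (hypothetical counter example): the callback runs on every online CPU, including the local one, and wait=1 makes the result safe to read afterwards:

#include <linux/smp.h>
#include <linux/atomic.h>

static void bump(void *info)
{
	atomic_inc((atomic_t *)info);
}

static int example_count_online_cpus(void)
{
	atomic_t n = ATOMIC_INIT(0);

	/* wait=1: &n may live on the stack because we wait for completion. */
	on_each_cpu(bump, &n, 1);
	return atomic_read(&n);
}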

EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Gilad Ben-Yossef | 59 | 83.10% | 1 | 50.00%
David Daney | 12 | 16.90% | 1 | 50.00%
Total | 71 | 100.00% | 2 | 100.00%
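
A sketch of on_each_cpu_mask() for a per-node operation (the callback name is invented); unlike smp_call_function_many(), the local CPU is handled automatically when it is in the mask:

#include <linux/smp.h>
#include <linux/topology.h>

static void flush_node_state(void *info)
{
}

static void example_flush_node(int node)
{
	on_each_cpu_mask(cpumask_of_node(node), flush_node_state, NULL, true);
}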

EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Gilad Ben-Yossef | 149 | 98.03% | 1 | 50.00%
Mel Gorman | 3 | 1.97% | 1 | 50.00%
Total | 152 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(on_each_cpu_cond);
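
A sketch of on_each_cpu_cond() with a hypothetical predicate: only CPUs for which the predicate returns true receive the IPI, and GFP_KERNEL means the internal cpumask allocation may sleep, so this must run in process context:

#include <linux/smp.h>
#include <linux/gfp.h>

static bool is_even_cpu(int cpu, void *info)
{
	return (cpu & 1) == 0;
}

static void poke(void *info)
{
}

static void example_poke_even_cpus(void)
{
	on_each_cpu_cond(is_even_cpu, poke, NULL, true, GFP_KERNEL);
}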
static void do_nothing(void *unused) { }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Thomas Gleixner | 9 | 100.00% | 1 | 100.00%
Total | 9 | 100.00% | 1 | 100.00%

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Thomas Gleixner | 20 | 100.00% | 1 | 100.00%
Total | 20 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break every cpu that is in an idle state,
 * including cpus that are idle-polling, out of idle; cpus that are not
 * idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Chuansheng Liu | 35 | 100.00% | 1 | 100.00%
Total | 35 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};
static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Juergen Gross | 78 | 100.00% | 1 | 100.00%
Total | 78 | 100.00% | 1 | 100.00%


int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
Juergen Gross | 103 | 91.15% | 1 | 50.00%
Peter Zijlstra | 10 | 8.85% | 1 | 50.00%
Total | 113 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL_GPL(smp_call_on_cpu);
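
A sketch of smp_call_on_cpu() (hypothetical callback): the function runs in process context on the chosen CPU via the system workqueue, so it may sleep, unlike the IPI-based calls above; passing phys=true would additionally pin the vCPU in a virtualized guest:

#include <linux/smp.h>

/* Runs in process context on the target CPU; sleeping is allowed here. */
static int read_slow_platform_state(void *data)
{
	return 0;
}

static int example_read_on_cpu(unsigned int cpu)
{
	return smp_call_on_cpu(cpu, read_slow_platform_state, NULL, false);
}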

Overall Contributors

Person | Tokens | Prop | Commits | Commit Prop
Américo Wang | 278 | 12.11% | 2 | 2.86%
Jens Axboe | 272 | 11.85% | 2 | 2.86%
Juergen Gross | 223 | 9.71% | 2 | 2.86%
Gilad Ben-Yossef | 219 | 9.54% | 2 | 2.86%
Rusty Russell | 200 | 8.71% | 3 | 4.29%
Linus Torvalds | 147 | 6.40% | 3 | 4.29%
Peter Zijlstra | 141 | 6.14% | 5 | 7.14%
Frédéric Weisbecker | 136 | 5.92% | 6 | 8.57%
Srivatsa S. Bhat | 106 | 4.62% | 2 | 2.86%
Shaohua Li | 90 | 3.92% | 1 | 1.43%
Aaron Lu | 66 | 2.87% | 1 | 1.43%
Michael Ellerman | 55 | 2.40% | 3 | 4.29%
Richard Weinberger | 45 | 1.96% | 1 | 1.43%
Chuansheng Liu | 44 | 1.92% | 1 | 1.43%
Thomas Gleixner | 38 | 1.66% | 2 | 2.86%
Ingo Molnar | 32 | 1.39% | 3 | 4.29%
Christoph Hellwig | 29 | 1.26% | 2 | 2.86%
Andrew Morton | 27 | 1.18% | 1 | 1.43%
Milton D. Miller II | 26 | 1.13% | 3 | 4.29%
Tejun Heo | 19 | 0.83% | 2 | 2.86%
Suresh B. Siddha | 18 | 0.78% | 2 | 2.86%
Huang Ying | 16 | 0.70% | 1 | 1.43%
David Daney | 13 | 0.57% | 1 | 1.43%
Jan Kara | 11 | 0.48% | 1 | 1.43%
Davidlohr Bueso A | 5 | 0.22% | 2 | 2.86%
David Howells | 4 | 0.17% | 1 | 1.43%
H. Peter Anvin | 4 | 0.17% | 1 | 1.43%
Christoph Lameter | 4 | 0.17% | 1 | 1.43%
Nicholas Piggin | 4 | 0.17% | 1 | 1.43%
Takao Indoh | 3 | 0.13% | 1 | 1.43%
David John | 3 | 0.13% | 1 | 1.43%
Chen Gang S | 3 | 0.13% | 1 | 1.43%
Mel Gorman | 3 | 0.13% | 1 | 1.43%
Sheng Yang | 3 | 0.13% | 1 | 1.43%
Akinobu Mita | 2 | 0.09% | 1 | 1.43%
Steven Rostedt | 2 | 0.09% | 1 | 1.43%
Alexey Dobriyan | 1 | 0.04% | 1 | 1.43%
Yinghai Lu | 1 | 0.04% | 1 | 1.43%
Paul Gortmaker | 1 | 0.04% | 1 | 1.43%
liguang | 1 | 0.04% | 1 | 1.43%
Xie XiuQi | 1 | 0.04% | 1 | 1.43%
Total | 2296 | 100.00% | 70 | 100.00%
Created with cregit.