cregit-Linux how code gets into the kernel

Release 4.13 kernel/smp.c

Directory: kernel
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};


struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);


int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(struct call_single_data);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Aaron Lu                 38      33.04%   1        16.67%
Peter Zijlstra           37      32.17%   1        16.67%
Shaohua Li               30      26.09%   1        16.67%
Richard Weinberger       7       6.09%    1        16.67%
Akinobu Mita             2       1.74%    1        16.67%
Yinghai Lu               1       0.87%    1        16.67%
Total                    115     100.00%  6        100.00%


int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Richard Weinberger       25      54.35%   1        25.00%
Shaohua Li               7       15.22%   1        25.00%
Aaron Lu                 7       15.22%   1        25.00%
Peter Zijlstra           7       15.22%   1        25.00%
Total                    46      100.00%  4        100.00%


int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Richard Weinberger       10      55.56%   1        33.33%
Srivatsa S. Bhat         5       27.78%   1        33.33%
Peter Zijlstra           3       16.67%   1        33.33%
Total                    18      100.00%  3        100.00%


void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Jens Axboe               21      65.62%   1        20.00%
Peter Zijlstra           3       9.38%    1        20.00%
Takao Indoh              3       9.38%    1        20.00%
Richard Weinberger       3       9.38%    1        20.00%
Christoph Hellwig        2       6.25%    1        20.00%
Total                    32      100.00%  5        100.00%

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Peter Zijlstra           19      70.37%   3        37.50%
Davidlohr Bueso A        3       11.11%   2        25.00%
Andrew Morton            2       7.41%    1        12.50%
Jens Axboe               2       7.41%    1        12.50%
Linus Torvalds           1       3.70%    1        12.50%
Total                    27      100.00%  8        100.00%


static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Peter Zijlstra           20      74.07%   2        28.57%
Andrew Morton            3       11.11%   1        14.29%
Linus Torvalds           1       3.70%    1        14.29%
Ingo Molnar              1       3.70%    1        14.29%
Davidlohr Bueso A        1       3.70%    1        14.29%
liguang                  1       3.70%    1        14.29%
Total                    27      100.00%  7        100.00%


static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Peter Zijlstra           23      65.71%   1        16.67%
Linus Torvalds           6       17.14%   1        16.67%
Andrew Morton            3       8.57%    1        16.67%
Davidlohr Bueso A        1       2.86%    1        16.67%
Jens Axboe               1       2.86%    1        16.67%
Ingo Molnar              1       2.86%    1        16.67%
Total                    35      100.00%  6        100.00%

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Frédéric Weisbecker      68      55.28%   1        12.50%
Jens Axboe               25      20.33%   1        12.50%
Linus Torvalds           17      13.82%   2        25.00%
Christoph Hellwig        10      8.13%    2        25.00%
Andrew Morton            2       1.63%    1        12.50%
Peter Zijlstra           1       0.81%    1        12.50%
Total                    123     100.00%  8        100.00%

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Srivatsa S. Bhat         6       50.00%   1        50.00%
Jens Axboe               6       50.00%   1        50.00%
Total                    12      100.00%  2        100.00%

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Srivatsa S. Bhat         89      48.63%   2        20.00%
Linus Torvalds           36      19.67%   1        10.00%
Jens Axboe               25      13.66%   1        10.00%
Jan Kara                 13      7.10%    1        10.00%
Christoph Hellwig        10      5.46%    1        10.00%
Frédéric Weisbecker      4       2.19%    1        10.00%
Andrew Morton            3       1.64%    1        10.00%
Christoph Lameter        2       1.09%    1        10.00%
Peter Zijlstra           1       0.55%    1        10.00%
Total                    183     100.00%  10       100.00%

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Linus Torvalds           52      44.83%   1        11.11%
Jens Axboe               36      31.03%   1        11.11%
Ingo Molnar              11      9.48%    2        22.22%
Suresh B. Siddha         6       5.17%    1        11.11%
Frédéric Weisbecker      5       4.31%    1        11.11%
H. Peter Anvin           4       3.45%    1        11.11%
David Howells            1       0.86%    1        11.11%
Peter Zijlstra           1       0.86%    1        11.11%
Total                    116     100.00%  9        100.00%
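
As a usage illustration (not part of kernel/smp.c), a caller might run a fast, non-sleeping helper on one particular CPU and wait for it to finish; the helper and wrapper names below are invented:

#include <linux/smp.h>

/* Runs on the target CPU in IPI context: must be fast and must not sleep. */
static void record_cpu_id(void *info)
{
	*(int *)info = smp_processor_id();
}

static int query_cpu(int cpu)
{
	int id = -1;
	int err;

	/* wait=1: return only after record_cpu_id() has finished on @cpu. */
	err = smp_call_function_single(cpu, record_cpu_id, &id, 1);
	return err ? err : id;
}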

EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Frédéric Weisbecker      44      62.86%   3        75.00%
Linus Torvalds           26      37.14%   1        25.00%
Total                    70      100.00%  4        100.00%
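
A hedged sketch of the embedded-csd pattern the kernel-doc above describes; the structure and function names are illustrative only:

#include <linux/printk.h>
#include <linux/smp.h>

struct my_remote_work {
	struct call_single_data csd;	/* pre-allocated, embedded in the object */
	int payload;
};

static void my_remote_func(void *info)
{
	struct my_remote_work *w = info;

	pr_info("payload %d handled on CPU %d\n", w->payload, smp_processor_id());
}

static int kick_remote(struct my_remote_work *w, int cpu)
{
	w->csd.func = my_remote_func;
	w->csd.info = w;

	/* The caller must guarantee the previous IPI on this csd has completed. */
	return smp_call_function_single_async(cpu, &w->csd);
}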

EXPORT_SYMBOL_GPL(smp_call_function_single_async);

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Rusty Russell            128     96.97%   1        33.33%
David John               3       2.27%    1        33.33%
David Howells            1       0.76%    1        33.33%
Total                    132     100.00%  3        100.00%
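
For illustration only (names invented), the selection preference above makes this a cheap way to sample something from a given NUMA node without caring which CPU of that node does the work:

#include <linux/smp.h>
#include <linux/topology.h>

static void read_node_sample(void *info)
{
	*(int *)info = smp_processor_id();	/* stand-in for reading node-local state */
}

static int sample_node(int node)
{
	int cpu = -1;
	int err;

	/* Prefers the current CPU, then a CPU on the current node, then any online CPU in the mask. */
	err = smp_call_function_any(cpumask_of_node(node), read_node_sample, &cpu, 1);
	return err ? err : cpu;
}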

EXPORT_SYMBOL_GPL(smp_call_function_any);

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus? We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Jens Axboe               89      28.90%   1        5.26%
Rusty Russell            62      20.13%   2        10.53%
Shaohua Li               60      19.48%   1        5.26%
Aaron Lu                 18      5.84%    1        5.26%
Milton D. Miller II      15      4.87%    2        10.53%
Andrew Morton            14      4.55%    1        5.26%
Peter Zijlstra           10      3.25%    2        10.53%
Linus Torvalds           10      3.25%    1        5.26%
Ingo Molnar              8       2.60%    2        10.53%
Christoph Hellwig        6       1.95%    1        5.26%
Suresh B. Siddha         6       1.95%    1        5.26%
Nicholas Piggin          4       1.30%    1        5.26%
Tejun Heo                3       0.97%    1        5.26%
Christoph Lameter        2       0.65%    1        5.26%
David Howells            1       0.32%    1        5.26%
Total                    308     100.00%  19       100.00%
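
A minimal usage sketch with invented helper names; note that smp_call_function_many() deliberately skips the calling CPU, and preemption must be disabled around the call as the kernel-doc above requires:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* Per-CPU work; runs with interrupts disabled on every targeted remote CPU. */
static void drain_local_state(void *unused)
{
}

static void drain_remote_cpus(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, drain_local_state, NULL, true);
	preempt_enable();
}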

EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Jens Axboe               31      88.57%   1        33.33%
Rusty Russell            3       8.57%    1        33.33%
David Howells            1       2.86%    1        33.33%
Total                    35      100.00%  3        100.00%

EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Américo Wang             7       100.00%  1        100.00%
Total                    7       100.00%  1        100.00%


static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Américo Wang             21      100.00%  1        100.00%
Total                    21      100.00%  1        100.00%

early_param("nosmp", nosmp); /* this is hard limit */
static int __init nrcpus(char *str) { int nr_cpus; get_option(&str, &nr_cpus); if (nr_cpus > 0 && nr_cpus < nr_cpu_ids) nr_cpu_ids = nr_cpus; return 0; }

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Américo Wang             40      100.00%  1        100.00%
Total                    40      100.00%  1        100.00%

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Américo Wang             32      100.00%  1        100.00%
Total                    32      100.00%  1        100.00%

early_param("maxcpus", maxcpus); /* Setup number of possible processor ids */ int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void) { nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1; }

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Américo Wang             22      100.00%  1        100.00%
Total                    22      100.00%  1        100.00%

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Michael Ellerman         48      47.52%   2        40.00%
Américo Wang             47      46.53%   1        20.00%
Thomas Gleixner          3       2.97%    1        20.00%
Suresh B. Siddha         3       2.97%    1        20.00%
Total                    101     100.00%  5        100.00%

/*
 * Call a function on all processors. May be used during early boot while
 * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Américo Wang             55      82.09%   1        50.00%
Tejun Heo                12      17.91%   1        50.00%
Total                    67      100.00%  2        100.00%
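
A usage sketch under invented names: bump a per-CPU generation counter on every online CPU, including the local one, and wait for completion:

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, my_cache_gen);

/* Runs on every online CPU; the local invocation also runs with interrupts disabled. */
static void bump_cache_gen(void *unused)
{
	this_cpu_inc(my_cache_gen);
}

static void invalidate_all_caches(void)
{
	/* wait=1: do not return until every CPU has bumped its counter. */
	on_each_cpu(bump_cache_gen, NULL, 1);
}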

EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Gilad Ben-Yossef         59      83.10%   1        50.00%
David Daney              12      16.90%   1        50.00%
Total                    71      100.00%  2        100.00%

EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Gilad Ben-Yossef         149     98.03%   1        50.00%
Mel Gorman               3       1.97%    1        50.00%
Total                    152     100.00%  2        100.00%
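
A sketch modeled on the kernel-doc above, with an invented per-CPU flag: only CPUs whose flag is set receive an IPI at all:

#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, my_pending);

/* Called with preemption disabled; decides whether @cpu gets an IPI. */
static bool cpu_has_pending(int cpu, void *info)
{
	return per_cpu(my_pending, cpu) != 0;
}

static void drain_pending(void *info)
{
	this_cpu_write(my_pending, 0);
}

static void drain_all_pending(void)
{
	on_each_cpu_cond(cpu_has_pending, drain_pending, NULL, true, GFP_KERNEL);
}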

EXPORT_SYMBOL(on_each_cpu_cond);
static void do_nothing(void *unused)
{
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Thomas Gleixner          9       100.00%  1        100.00%
Total                    9       100.00%  1        100.00%

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Thomas Gleixner          20      100.00%  1        100.00%
Total                    20      100.00%  1        100.00%
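
The pattern the comment above describes, sketched with an invented hook pointer: publish the new pointer, then force every CPU through a dummy IPI so none can still be executing the old one:

#include <linux/smp.h>

static void (*my_idle_hook)(void);

static void set_idle_hook(void (*new_hook)(void))
{
	my_idle_hook = new_hook;

	/* Issues smp_mb() plus a no-op IPI to all other CPUs and waits for them. */
	kick_all_cpus_sync();
}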

EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break every cpu out of the idle state, including cpus that are
 * idle-polling; cpus that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Chuansheng Liu           35      100.00%  1        100.00%
Total                    35      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Juergen Gross            78      100.00%  1        100.00%
Total                    78      100.00%  1        100.00%


int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Juergen Gross            103     91.15%   1        50.00%
Peter Zijlstra           10      8.85%    1        50.00%
Total                    113     100.00%  2        100.00%

EXPORT_SYMBOL_GPL(smp_call_on_cpu);
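
A hedged usage sketch with invented names: unlike the IPI-based calls above, the function runs from a workqueue on the target CPU and may therefore sleep; phys=true additionally asks the hypervisor (if any) to pin the vCPU to the matching physical CPU:

#include <linux/smp.h>
#include <linux/types.h>

static int read_platform_state(void *data)
{
	*(u32 *)data = 0;	/* placeholder for a real, possibly sleeping, read */
	return 0;
}

static int read_state_on_boot_cpu(u32 *val)
{
	return smp_call_on_cpu(0, read_platform_state, val, true);
}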

Overall Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Américo Wang             278     12.02%   2        3.03%
Jens Axboe               277     11.98%   2        3.03%
Juergen Gross            223     9.65%    2        3.03%
Gilad Ben-Yossef         219     9.47%    2        3.03%
Rusty Russell            200     8.65%    3        4.55%
Linus Torvalds           152     6.57%    3        4.55%
Peter Zijlstra           147     6.36%    5        7.58%
Frédéric Weisbecker      139     6.01%    5        7.58%
Srivatsa S. Bhat         110     4.76%    2        3.03%
Shaohua Li               99      4.28%    1        1.52%
Aaron Lu                 66      2.85%    1        1.52%
Michael Ellerman         55      2.38%    3        4.55%
Richard Weinberger       45      1.95%    1        1.52%
Chuansheng Liu           44      1.90%    1        1.52%
Thomas Gleixner          38      1.64%    2        3.03%
Ingo Molnar              34      1.47%    3        4.55%
Christoph Hellwig        29      1.25%    2        3.03%
Andrew Morton            27      1.17%    1        1.52%
Milton D. Miller II      26      1.12%    3        4.55%
Tejun Heo                19      0.82%    2        3.03%
Suresh B. Siddha         18      0.78%    2        3.03%
Jan Kara                 13      0.56%    1        1.52%
David Daney              13      0.56%    1        1.52%
Davidlohr Bueso A        5       0.22%    2        3.03%
David Howells            4       0.17%    1        1.52%
Christoph Lameter        4       0.17%    1        1.52%
H. Peter Anvin           4       0.17%    1        1.52%
Nicholas Piggin          4       0.17%    1        1.52%
David John               3       0.13%    1        1.52%
Mel Gorman               3       0.13%    1        1.52%
Takao Indoh              3       0.13%    1        1.52%
Sheng Yang               3       0.13%    1        1.52%
Akinobu Mita             2       0.09%    1        1.52%
Steven Rostedt           2       0.09%    1        1.52%
Paul Gortmaker           1       0.04%    1        1.52%
liguang                  1       0.04%    1        1.52%
Xie XiuQi                1       0.04%    1        1.52%
Yinghai Lu               1       0.04%    1        1.52%
Total                    2312    100.00%  66       100.00%
Created with cregit.