cregit-Linux how code gets into the kernel

Release 4.7 kernel/smp.c

Directory: kernel
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);


static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		/* Fall-through to the CPU_DEAD[_FROZEN] case. */

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_percpu(cfd->csd);
		break;

	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * The IPIs for the smp-call-function callbacks queued by other
		 * CPUs might arrive late, either due to hardware latencies or
		 * because this CPU disabled interrupts (inside stop-machine)
		 * before the IPIs were sent. So flush out any pending callbacks
		 * explicitly (without waiting for the IPIs to arrive), to
		 * ensure that the outgoing CPU doesn't go offline with work
		 * still pending.
		 */
		flush_smp_call_function_queue(false);
		break;
#endif
	};

	return NOTIFY_OK;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
peter zijlstra | 100 | 62.11% | 1 | 16.67%
shaohua li | 40 | 24.84% | 1 | 16.67%
srivatsa s. bhat | 14 | 8.70% | 1 | 16.67%
akinobu mita | 5 | 3.11% | 1 | 16.67%
yinghai lu | 1 | 0.62% | 1 | 16.67%
xiao guangrong | 1 | 0.62% | 1 | 16.67%
Total | 161 | 100.00% | 6 | 100.00%

static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};
void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
peter zijlstra | 30 | 53.57% | 1 | 25.00%
jens axboe | 21 | 37.50% | 1 | 25.00%
takao indoh | 3 | 5.36% | 1 | 25.00%
christoph hellwig | 2 | 3.57% | 1 | 25.00%
Total | 56 | 100.00% | 4 | 100.00%

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls its even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
peter zijlstra | 13 | 54.17% | 2 | 28.57%
davidlohr bueso | 6 | 25.00% | 2 | 28.57%
jens axboe | 2 | 8.33% | 1 | 14.29%
andrew morton | 2 | 8.33% | 1 | 14.29%
linus torvalds | 1 | 4.17% | 1 | 14.29%
Total | 24 | 100.00% | 7 | 100.00%


static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
peter zijlstra | 20 | 74.07% | 2 | 28.57%
andrew morton | 3 | 11.11% | 1 | 14.29%
ingo molnar | 1 | 3.70% | 1 | 14.29%
liguang | 1 | 3.70% | 1 | 14.29%
linus torvalds | 1 | 3.70% | 1 | 14.29%
davidlohr bueso | 1 | 3.70% | 1 | 14.29%
Total | 27 | 100.00% | 7 | 100.00%


static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
peter zijlstra | 23 | 65.71% | 1 | 16.67%
linus torvalds | 6 | 17.14% | 1 | 16.67%
andrew morton | 3 | 8.57% | 1 | 16.67%
davidlohr bueso | 1 | 2.86% | 1 | 16.67%
jens axboe | 1 | 2.86% | 1 | 16.67%
ingo molnar | 1 | 2.86% | 1 | 16.67%
Total | 35 | 100.00% | 6 | 100.00%

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
frederic weisbecker | 68 | 55.28% | 1 | 12.50%
jens axboe | 25 | 20.33% | 1 | 12.50%
linus torvalds | 17 | 13.82% | 2 | 25.00%
christoph hellwig | 10 | 8.13% | 2 | 25.00%
andrew morton | 2 | 1.63% | 1 | 12.50%
peter zijlstra | 1 | 0.81% | 1 | 12.50%
Total | 123 | 100.00% | 8 | 100.00%

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
srivatsa s. bhat | 6 | 50.00% | 1 | 50.00%
jens axboe | 6 | 50.00% | 1 | 50.00%
Total | 12 | 100.00% | 2 | 100.00%

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
srivatsa s. bhat | 89 | 48.63% | 2 | 20.00%
linus torvalds | 36 | 19.67% | 1 | 10.00%
jens axboe | 25 | 13.66% | 1 | 10.00%
jan kara | 13 | 7.10% | 1 | 10.00%
christoph hellwig | 10 | 5.46% | 1 | 10.00%
frederic weisbecker | 4 | 2.19% | 1 | 10.00%
andrew morton | 3 | 1.64% | 1 | 10.00%
christoph lameter | 2 | 1.09% | 1 | 10.00%
peter zijlstra | 1 | 0.55% | 1 | 10.00%
Total | 183 | 100.00% | 10 | 100.00%

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 52 | 44.83% | 1 | 11.11%
jens axboe | 36 | 31.03% | 1 | 11.11%
ingo molnar | 11 | 9.48% | 2 | 22.22%
suresh siddha | 6 | 5.17% | 1 | 11.11%
frederic weisbecker | 5 | 4.31% | 1 | 11.11%
h. peter anvin | 4 | 3.45% | 1 | 11.11%
peter zijlstra | 1 | 0.86% | 1 | 11.11%
david howells | 1 | 0.86% | 1 | 11.11%
Total | 116 | 100.00% | 9 | 100.00%

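As an illustration (not part of kernel/smp.c): a minimal sketch of a synchronous caller. The names read_remote_value() and example_query_cpu() are invented for the example; the pattern simply passes a result buffer through @info and sets @wait so the buffer is filled before the call returns.

#include <linux/smp.h>
#include <linux/printk.h>

/* Runs on the target CPU, in IPI context with interrupts disabled. */
static void read_remote_value(void *info)
{
	*(unsigned int *)info = smp_processor_id();
}

static int example_query_cpu(int cpu)
{
	unsigned int val = 0;
	int err;

	/* wait != 0: only return after read_remote_value() ran on @cpu. */
	err = smp_call_function_single(cpu, read_remote_value, &val, 1);
	if (err)
		return err;	/* e.g. -ENXIO if @cpu is not online */

	pr_info("cpu%d reported %u\n", cpu, val);
	return 0;
}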
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes his own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
frederic weisbecker | 44 | 62.86% | 3 | 75.00%
linus torvalds | 26 | 37.14% | 1 | 25.00%
Total | 70 | 100.00% | 4 | 100.00%

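As an illustration (not part of kernel/smp.c): a sketch of the pre-allocated, embedded-csd pattern the comment above describes. struct example_dev and its helpers are invented names; the point is that the call_single_data lives in the caller's own object and the caller serializes its re-use.

#include <linux/smp.h>

struct example_dev {
	struct call_single_data csd;	/* must stay valid until the callback ran */
	int target_cpu;
};

/* Runs on dev->target_cpu in IPI context. */
static void example_remote_kick(void *info)
{
	struct example_dev *dev = info;

	/* ... operate on per-cpu state owned by dev ... */
	(void)dev;
}

static void example_init(struct example_dev *dev, int cpu)
{
	dev->target_cpu = cpu;
	dev->csd.func = example_remote_kick;
	dev->csd.info = dev;
}

/* May be called with interrupts disabled; at most one outstanding IPI per csd. */
static void example_poke(struct example_dev *dev)
{
	smp_call_function_single_async(dev->target_cpu, &dev->csd);
}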
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
rusty russell | 128 | 96.97% | 1 | 33.33%
david john | 3 | 2.27% | 1 | 33.33%
david howells | 1 | 0.76% | 1 | 33.33%
Total | 132 | 100.00% | 3 | 100.00%

EXPORT_SYMBOL_GPL(smp_call_function_any);

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
jens axboe | 89 | 30.69% | 1 | 5.56%
rusty russell | 63 | 21.72% | 2 | 11.11%
shaohua li | 60 | 20.69% | 1 | 5.56%
milton d. miller | 15 | 5.17% | 2 | 11.11%
andrew morton | 14 | 4.83% | 1 | 5.56%
linus torvalds | 10 | 3.45% | 1 | 5.56%
peter zijlstra | 8 | 2.76% | 1 | 5.56%
ingo molnar | 8 | 2.76% | 2 | 11.11%
christoph hellwig | 6 | 2.07% | 1 | 5.56%
suresh siddha | 6 | 2.07% | 1 | 5.56%
nick piggin | 4 | 1.38% | 1 | 5.56%
tejun heo | 3 | 1.03% | 1 | 5.56%
christoph lameter | 2 | 0.69% | 1 | 5.56%
david howells | 1 | 0.34% | 1 | 5.56%
roman gushchin | 1 | 0.34% | 1 | 5.56%
Total | 290 | 100.00% | 18 | 100.00%

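As an illustration (not part of kernel/smp.c): a sketch of a multi-CPU caller, with an invented example_flush() and an arbitrary @mask. Preemption must be disabled around the call, and since smp_call_function_many() never runs @func on the calling CPU, the caller handles that case itself; this is essentially what on_each_cpu_mask() further below wraps up.

#include <linux/smp.h>
#include <linux/cpumask.h>

/* Fast, non-blocking; runs in IPI context on each selected remote CPU. */
static void example_flush(void *info)
{
	/* ... invalidate some per-cpu cached state ... */
}

static void example_flush_mask(const struct cpumask *mask)
{
	preempt_disable();
	smp_call_function_many(mask, example_flush, NULL, true);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		example_flush(NULL);	/* @mask may include this CPU */
	preempt_enable();
}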
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
jens axboe | 31 | 88.57% | 1 | 33.33%
rusty russell | 3 | 8.57% | 1 | 33.33%
david howells | 1 | 2.86% | 1 | 33.33%
Total | 35 | 100.00% | 3 | 100.00%

EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 7 | 100.00% | 1 | 100.00%
Total | 7 | 100.00% | 1 | 100.00%


static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 21 | 100.00% | 1 | 100.00%
Total | 21 | 100.00% | 1 | 100.00%

early_param("nosmp", nosmp); /* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 40 | 100.00% | 1 | 100.00%
Total | 40 | 100.00% | 1 | 100.00%

early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 32 | 100.00% | 1 | 100.00%
Total | 32 | 100.00% | 1 | 100.00%

early_param("maxcpus", maxcpus); /* Setup number of possible processor ids */ int nr_cpu_ids __read_mostly = NR_CPUS; EXPORT_SYMBOL(nr_cpu_ids); /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%


void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
borislav petkov | 17 | 100.00% | 1 | 100.00%
Total | 17 | 100.00% | 1 | 100.00%

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 48 | 87.27% | 1 | 25.00%
suresh siddha | 3 | 5.45% | 1 | 25.00%
thomas gleixner | 3 | 5.45% | 1 | 25.00%
borislav petkov | 1 | 1.82% | 1 | 25.00%
Total | 55 | 100.00% | 4 | 100.00%

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 55 | 82.09% | 1 | 50.00%
tejun heo | 12 | 17.91% | 1 | 50.00%
Total | 67 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
gilad ben-yossef | 59 | 83.10% | 1 | 50.00%
david daney | 12 | 16.90% | 1 | 50.00%
Total | 71 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicates a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
gilad ben-yossef | 149 | 98.03% | 1 | 50.00%
mel gorman | 3 | 1.97% | 1 | 50.00%
Total | 152 | 100.00% | 2 | 100.00%

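As an illustration (not part of kernel/smp.c): a sketch of a conditional caller with an invented per-cpu counter. @cond_func is evaluated with preemption disabled for every online CPU, and only CPUs for which it returns true receive the IPI; GFP_KERNEL means the internal cpumask allocation may sleep.

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(unsigned int, example_pending);

/* Called once per online CPU, with preemption disabled. */
static bool example_has_pending(int cpu, void *info)
{
	return per_cpu(example_pending, cpu) != 0;
}

/* Runs on each CPU that reported pending work. */
static void example_drain(void *info)
{
	this_cpu_write(example_pending, 0);
}

static void example_drain_all(void)
{
	on_each_cpu_cond(example_has_pending, example_drain, NULL, true,
			 GFP_KERNEL);
}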
EXPORT_SYMBOL(on_each_cpu_cond);
static void do_nothing(void *unused) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 9 | 100.00% | 1 | 100.00%
Total | 9 | 100.00% | 1 | 100.00%

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
thomas gleixner | 20 | 100.00% | 1 | 100.00%
Total | 20 | 100.00% | 1 | 100.00%

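As an illustration (not part of kernel/smp.c): a sketch of the pointer-update pattern described in the comment above, in the spirit of the pm_idle use it mentions. The hook pointer and helper are invented names; after kick_all_cpus_sync() returns, every CPU has taken the dummy IPI and therefore left any idle-style code that could still have been using the old pointer value.

#include <linux/smp.h>
#include <linux/compiler.h>

static void (*example_idle_hook)(void);

static void example_set_idle_hook(void (*new_hook)(void))
{
	WRITE_ONCE(example_idle_hook, new_hook);

	/* Wait until all CPUs have been forced out of the code using the old pointer. */
	kick_all_cpus_sync();
}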
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * wake_up_all_idle_cpus tries to break out of idle all cpus that are
 * currently idle, including cpus that are idle polling; non-idle cpus
 * are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
liu chuansheng | 35 | 100.00% | 1 | 100.00%
Total | 35 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

Overall Contributors

Person | Tokens | Prop | Commits | CommitProp
americo wang | 279 | 13.72% | 2 | 3.45%
jens axboe | 277 | 13.62% | 2 | 3.45%
peter zijlstra | 221 | 10.87% | 2 | 3.45%
gilad ben-yossef | 219 | 10.77% | 2 | 3.45%
rusty russell | 201 | 9.88% | 3 | 5.17%
linus torvalds | 152 | 7.47% | 3 | 5.17%
frederic weisbecker | 139 | 6.83% | 5 | 8.62%
srivatsa s. bhat | 119 | 5.85% | 2 | 3.45%
shaohua li | 102 | 5.01% | 1 | 1.72%
liu chuansheng | 44 | 2.16% | 1 | 1.72%
thomas gleixner | 38 | 1.87% | 2 | 3.45%
ingo molnar | 31 | 1.52% | 2 | 3.45%
christoph hellwig | 29 | 1.43% | 2 | 3.45%
andrew morton | 27 | 1.33% | 1 | 1.72%
milton d. miller | 26 | 1.28% | 3 | 5.17%
tejun heo | 19 | 0.93% | 2 | 3.45%
borislav petkov | 18 | 0.88% | 1 | 1.72%
suresh siddha | 18 | 0.88% | 2 | 3.45%
david daney | 13 | 0.64% | 1 | 1.72%
jan kara | 13 | 0.64% | 1 | 1.72%
davidlohr bueso | 8 | 0.39% | 2 | 3.45%
akinobu mita | 5 | 0.25% | 1 | 1.72%
h. peter anvin | 4 | 0.20% | 1 | 1.72%
christoph lameter | 4 | 0.20% | 1 | 1.72%
nick piggin | 4 | 0.20% | 1 | 1.72%
david howells | 4 | 0.20% | 1 | 1.72%
sheng yang | 3 | 0.15% | 1 | 1.72%
takao indoh | 3 | 0.15% | 1 | 1.72%
david john | 3 | 0.15% | 1 | 1.72%
mel gorman | 3 | 0.15% | 1 | 1.72%
steven rostedt | 2 | 0.10% | 1 | 1.72%
roman gushchin | 1 | 0.05% | 1 | 1.72%
xiao guangrong | 1 | 0.05% | 1 | 1.72%
yinghai lu | 1 | 0.05% | 1 | 1.72%
liguang | 1 | 0.05% | 1 | 1.72%
paul gortmaker | 1 | 0.05% | 1 | 1.72%
xie xiuqi | 1 | 0.05% | 1 | 1.72%
Total | 2034 | 100.00% | 58 | 100.00%