cregit-Linux how code gets into the kernel

Release 4.11 arch/arm/kernel/smp.c

Directory: arch/arm/kernel
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>


#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
/* Filled in by __cpu_up() and read by the secondary boot path. */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
/* -1 means no CPU is currently being released from the holding pen. */
volatile int pen_release = -1;


/* The software-generated interrupts (SGIs) this kernel uses for IPIs. */
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	IPI_CPU_BACKTRACE,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
};

/* Completed by a freshly-booted secondary in secondary_start_kernel();
 * __cpu_up() waits on it (with a timeout) before declaring success. */
static DECLARE_COMPLETION(cpu_running);

/* Platform-supplied SMP operations, installed once via smp_set_ops(). */
static struct smp_operations smp_ops __ro_after_init;


/*
 * Install the platform's SMP operations. A NULL argument is ignored,
 * leaving the current (zero-initialised) operations in place.
 */
void __init smp_set_ops(const struct smp_operations *ops)
{
	if (!ops)
		return;

	smp_ops = *ops;
}

Contributors

PersonTokensPropCommitsCommitProp
Marc Zyngier2095.24%150.00%
Masahiro Yamada14.76%150.00%
Total21100.00%2100.00%

;
/*
 * Convert a pgd pointer into the form the secondary boot code expects:
 * a PFN under LPAE (the value would not fit in 32 bits otherwise),
 * a physical address without LPAE.
 */
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgd_phys = virt_to_phys(pgd);

#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(pgd_phys);
#else
	return pgd_phys;
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Cyril Chemparathy1751.52%150.00%
Russell King1648.48%150.00%
Total33100.00%2100.00%


/*
 * Boot the secondary CPU @cpu, running @idle as its idle thread.
 * Returns 0 on success, -ENOSYS if the platform provides no boot hook,
 * -EIO if the CPU was started but never came online, or the platform's
 * boot error code.
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	/* The secondary may start with caches off; push the data to RAM. */
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	/* Wipe the boot parameters so a stale secondary cannot reuse them. */
	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King10763.31%847.06%
Jonathan Austin2816.57%15.88%
Geert Uytterhoeven148.28%15.88%
Catalin Marinas84.73%15.88%
Will Deacon63.55%211.76%
Al Viro31.78%15.88%
Nico Pitre10.59%15.88%
Thomas Gleixner10.59%15.88%
Cyril Chemparathy10.59%15.88%
Total169100.00%17100.00%

/* Platform specific SMP operations: let the platform enumerate its CPUs. */
void __init smp_init_cpus(void)
{
	if (!smp_ops.smp_init_cpus)
		return;

	smp_ops.smp_init_cpus();
}

Contributors

PersonTokensPropCommitsCommitProp
Marc Zyngier19100.00%2100.00%
Total19100.00%2100.00%


int platform_can_secondary_boot(void) { return !!smp_ops.smp_boot_secondary; }

Contributors

PersonTokensPropCommitsCommitProp
Geert Uytterhoeven14100.00%1100.00%
Total14100.00%1100.00%


/* Non-zero if CPU hotplug is built in and the platform can kill a CPU. */
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return smp_ops.cpu_kill ? 1 : 0;
#else
	return 0;
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Stephen Warren24100.00%1100.00%
Total24100.00%1100.00%

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Ask the platform to cut power/clocks to @cpu. Without a platform
 * hook this is a no-op that reports success (non-zero).
 */
static int platform_cpu_kill(unsigned int cpu)
{
	if (!smp_ops.cpu_kill)
		return 1;

	return smp_ops.cpu_kill(cpu);
}

Contributors

PersonTokensPropCommitsCommitProp
Marc Zyngier27100.00%2100.00%
Total27100.00%2100.00%


/*
 * Give the platform a veto/cleanup hook before @cpu is taken offline.
 * Without a hook, disabling is always allowed (returns 0).
 */
static int platform_cpu_disable(unsigned int cpu)
{
	if (!smp_ops.cpu_disable)
		return 0;

	return smp_ops.cpu_disable(cpu);
}

Contributors

PersonTokensPropCommitsCommitProp
Marc Zyngier2488.89%266.67%
Stephen Boyd311.11%133.33%
Total27100.00%3100.00%


/*
 * May @cpu be hot-unplugged? Requires a cpu_die hook; the platform's
 * cpu_can_disable hook decides if present, otherwise every CPU except
 * CPU0 may go (CPU0 is special on many platforms, e.g. for tick IRQs).
 */
int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	return smp_ops.cpu_can_disable ? smp_ops.cpu_can_disable(cpu)
				       : cpu != 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Stephen Boyd3587.50%150.00%
Marc Zyngier512.50%150.00%
Total40100.00%2100.00%

/*
 * __cpu_disable runs on the processor to be shutdown.
 * Returns 0 on success or the platform's veto error code.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King5594.83%360.00%
Lorenzo Pieralisi23.45%120.00%
Anton Vorontsov11.72%120.00%
Total58100.00%5100.00%

/* Completed by the dying CPU in arch_cpu_idle_dead(). */
static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King5598.21%480.00%
Florian Fainelli11.79%120.00%
Total56100.00%5100.00%

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov sp, %0\n"
	" mov fp, #0\n"
	" b secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King5798.28%787.50%
Stephen Boyd11.72%112.50%
Total58100.00%8100.00%

#endif /* CONFIG_HOTPLUG_CPU */ /* * Called by both boot and secondaries to move global data into * per-processor storage. */
/*
 * Called by both boot and secondaries to record this CPU's
 * loops_per_jiffy, CPU ID register and topology in per-CPU storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *info = &per_cpu(cpu_data, cpuid);

	info->loops_per_jiffy = loops_per_jiffy;
	info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King2970.73%133.33%
Lorenzo Pieralisi717.07%133.33%
Vincent Guittot512.20%133.33%
Total41100.00%3100.00%

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King7955.63%736.84%
Will Deacon2819.72%210.53%
Thomas Gleixner107.04%315.79%
Colin Cross64.23%15.26%
Manfred Spraul53.52%15.26%
Rusty Russell42.82%15.26%
Rob Herring32.11%15.26%
Lucas Stach32.11%15.26%
Nicholas Piggin32.11%15.26%
Vegard Nossum10.70%15.26%
Total142100.00%19100.00%


/*
 * All CPUs are up: print the aggregate BogoMIPS figure and sanity-check
 * the hypervisor mode state across CPUs.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long lpj_total = 0;
	int cpu;

	for_each_online_cpu(cpu)
		lpj_total += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       lpj_total / (500000/HZ),
	       (lpj_total / (5000/HZ)) % 100);

	hyp_mode_check();
}

Contributors

PersonTokensPropCommitsCommitProp
Pavel Machek5480.60%133.33%
Russell King1014.93%133.33%
Dave P Martin34.48%133.33%
Total67100.00%3100.00%


/* Point the boot CPU's per-CPU offset register at its per-CPU area. */
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	set_my_cpu_offset(per_cpu_offset(cpu));
}

Contributors

PersonTokensPropCommitsCommitProp
Rob Herring1164.71%150.00%
Russell King635.29%150.00%
Total17100.00%2100.00%


/*
 * Prepare for SMP bring-up: record boot CPU info/topology, clamp the
 * requested CPU count, and hand over to the platform's prepare hook.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platforms smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King5683.58%350.00%
Stephen Boyd710.45%116.67%
Vincent Guittot34.48%116.67%
Rusty Russell11.49%116.67%
Total67100.00%6100.00%

/* Low-level IPI trigger, installed by the interrupt controller driver. */
static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

/* Install @fn as the IPI trigger; first caller wins, later calls are ignored. */
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King2480.00%133.33%
Rob Herring413.33%133.33%
Nico Pitre26.67%133.33%
Total30100.00%3100.00%

/* Human-readable IPI names, used by /proc/interrupts and the trace events. */
static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

/* Emit the ipi_raise trace event, then fire the IPI via the installed hook. */
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

Contributors

PersonTokensPropCommitsCommitProp
Nico Pitre3296.97%150.00%
Paul E. McKenney13.03%150.00%
Total33100.00%2100.00%


/*
 * Print per-CPU IPI counts (one row per IPI type) into @p,
 * e.g. for /proc/interrupts.
 */
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int c, ipi;

	for (ipi = 0; ipi < NR_IPI; ipi++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", ipi);

		for_each_online_cpu(c)
			seq_printf(p, "%10u ",
				   __get_irq_stat(c, ipi_irqs[ipi]));

		seq_printf(p, " %s\n", ipi_types[ipi]);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King8198.78%583.33%
Nico Pitre11.22%116.67%
Total82100.00%6100.00%


/* Total number of IPIs (all types) received by @cpu. */
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 total = 0;
	int nr;

	for (nr = 0; nr < NR_IPI; nr++)
		total += __get_irq_stat(cpu, ipi_irqs[nr]);

	return total;
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King45100.00%2100.00%
Total45100.00%2100.00%


/* Send a function-call IPI to every CPU in @mask. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

Contributors

PersonTokensPropCommitsCommitProp
Nico Pitre18100.00%1100.00%
Total18100.00%1100.00%


/* Send a wakeup IPI (handled as a no-op wake event) to @mask. */
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

Contributors

PersonTokensPropCommitsCommitProp
Nico Pitre18100.00%1100.00%
Total18100.00%1100.00%


/* Send a function-call IPI to a single CPU. */
void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

Contributors

PersonTokensPropCommitsCommitProp
Nico Pitre1794.44%150.00%
Marc Zyngier15.56%150.00%
Total18100.00%2100.00%

#ifdef CONFIG_IRQ_WORK
/*
 * Kick the local CPU to run its pending irq_work, but only if the
 * interrupt controller can actually deliver a self-IPI.
 */
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}

Contributors

PersonTokensPropCommitsCommitProp
Nico Pitre2295.65%150.00%
Frédéric Weisbecker14.35%150.00%
Total23100.00%2100.00%

#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
/* Deliver the broadcast clock tick to @mask via IPI_TIMER. */
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King1794.44%266.67%
Mark Rutland15.56%133.33%
Total18100.00%3100.00%

#endif

/* Serialises the "stopping" diagnostics across CPUs. */
static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 *
 * Marks this CPU offline, masks interrupts, and spins forever.
 * Never returns.
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King6296.88%480.00%
Thomas Gleixner23.12%120.00%
Total64100.00%5100.00%

/* Per-CPU completion pointer, fired by the IPI_COMPLETION handler. */
static DEFINE_PER_CPU(struct completion *, cpu_completion);

/*
 * Arrange for @completion to be completed when @cpu receives
 * IPI_COMPLETION; returns the IPI number for the caller to raise.
 */
int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

Contributors

PersonTokensPropCommitsCommitProp
Nico Pitre25100.00%1100.00%
Total25100.00%1100.00%


/* IPI_COMPLETION handler: complete whatever was registered for @cpu. */
static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

Contributors

PersonTokensPropCommitsCommitProp
Nico Pitre20100.00%1100.00%
Total20100.00%1100.00%

/*
 * Main handler for inter-processor interrupts - entry point from the
 * exception path; forwards to handle_IPI().
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King1359.09%360.00%
Shawn Guo836.36%120.00%
Rabin Vincent14.55%120.00%
Total22100.00%5100.00%


/*
 * Dispatch an incoming IPI. Accounts and traces known IPI numbers
 * (0 <= ipinr < NR_IPI) and logs unknown ones.
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		/* Nothing to do - the wake-from-WFI itself was the point. */
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		/* scheduler_ipi() handles its own irq_enter/exit needs. */
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		/* Route printk through the NMI-safe path while backtracing. */
		printk_nmi_enter();
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		printk_nmi_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King11052.63%947.37%
Nico Pitre4320.57%210.53%
Stephen Boyd2411.48%315.79%
Shawn Guo125.74%15.26%
Mark Rutland62.87%15.26%
Petr Mladek62.87%15.26%
Jens Axboe52.39%15.26%
Peter Zijlstra31.44%15.26%
Total209100.00%19100.00%


/* Poke @cpu with a reschedule IPI. */
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King18100.00%3100.00%
Total18100.00%3100.00%


/*
 * Stop all CPUs but the calling one (used e.g. on panic/shutdown).
 * Best-effort: waits up to roughly one second, then warns if any
 * secondary is still online.
 */
void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King4859.26%555.56%
Rusty Russell1214.81%111.11%
Catalin Marinas1113.58%111.11%
Javier Martinez Canillas911.11%111.11%
Joe Perches11.23%111.11%
Total81100.00%9100.00%

/*
 * not supported here - profiling multiplier changes are rejected.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King1292.31%266.67%
Catalin Marinas17.69%133.33%
Total13100.00%3100.00%

#ifdef CONFIG_CPU_FREQ

/* Per-CPU reference loops_per_jiffy and the frequency it was captured at. */
static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
/* Same reference pair for the global loops_per_jiffy. */
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

/*
 * cpufreq transition notifier: rescale the global and per-CPU
 * loops_per_jiffy to the new frequency, unless the platform's delay
 * loop is frequency-invariant (CPUFREQ_CONST_LOOPS).
 *
 * Rescaling is done on PRECHANGE when speeding up and on POSTCHANGE
 * when slowing down, so udelay() never under-delays mid-transition.
 */
static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	/* Capture the reference values the first time we see this CPU. */
	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}

Contributors

PersonTokensPropCommitsCommitProp
Richard Zhao178100.00%1100.00%
Total178100.00%1100.00%

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

/* Hook the loops_per_jiffy rescaling callback into cpufreq transitions. */
static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

Contributors

PersonTokensPropCommitsCommitProp
Richard Zhao18100.00%1100.00%
Total18100.00%1100.00%

core_initcall(register_cpufreq_notifier);

#endif

/* Raise the backtrace IPI on the CPUs in @mask. */
static void raise_nmi(cpumask_t *mask)
{
	smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

Contributors

PersonTokensPropCommitsCommitProp
Russell King17100.00%1100.00%
Total17100.00%1100.00%


/*
 * Request stack backtraces from the CPUs in @mask (optionally excluding
 * the calling CPU), delivered via the IPI_CPU_BACKTRACE handler.
 */
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}

Contributors

PersonTokensPropCommitsCommitProp
Chris Metcalf1150.00%150.00%
Russell King1150.00%150.00%
Total22100.00%2100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Russell King113251.20%4739.50%
Richard Zhao24711.17%10.84%
Nico Pitre22710.27%43.36%
Marc Zyngier1185.34%54.20%
Stephen Boyd924.16%65.04%
Pavel Machek542.44%10.84%
Will Deacon381.72%65.04%
Jonathan Austin311.40%10.84%
Geert Uytterhoeven281.27%21.68%
Stephen Warren241.09%10.84%
Shawn Guo200.90%10.84%
Catalin Marinas200.90%21.68%
Rob Herring180.81%21.68%
Cyril Chemparathy180.81%10.84%
Rusty Russell170.77%21.68%
Thomas Gleixner170.77%65.04%
Vincent Guittot110.50%10.84%
Chris Metcalf110.50%10.84%
Lorenzo Pieralisi90.41%21.68%
Javier Martinez Canillas90.41%10.84%
Ingo Molnar70.32%32.52%
Mark Rutland70.32%21.68%
Colin Cross60.27%10.84%
Petr Mladek60.27%10.84%
Dave P Martin60.27%10.84%
Jens Axboe50.23%10.84%
Manfred Spraul50.23%10.84%
Alexey Dobriyan30.14%10.84%
Nicholas Piggin30.14%10.84%
Jamie Iles30.14%10.84%
Al Viro30.14%10.84%
Lucas Stach30.14%10.84%
Peter Zijlstra30.14%10.84%
Florian Fainelli10.05%10.84%
Masahiro Yamada10.05%10.84%
Vegard Nossum10.05%10.84%
Rabin Vincent10.05%10.84%
Paul E. McKenney10.05%10.84%
Frédéric Weisbecker10.05%10.84%
Kees Cook10.05%10.84%
Joe Perches10.05%10.84%
Arun Sharma10.05%10.84%
Anton Vorontsov10.05%10.84%
Total2211100.00%119100.00%
Directory: arch/arm/kernel
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.