Release 4.14 arch/ia64/kernel/smp.c
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>   IA64-SMP functions. Reorganized
 *          the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
 *          calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>   Fixes for Bootstrap Processor
 *          & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm      Update to bring it in sync with new command-line processing
 *          scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *          smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>
/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned short [NR_CPUS],
				     shadow_flush_counts);
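Editor's note: a quick check of the "4 entries/cacheline" figure in the comment above. The sketch below is not part of the kernel source (demo_counts is an illustrative name); it verifies that __aligned__(32) pads each entry to a 32-byte stride, so four entries fill one 128-byte cacheline, which is the line size the comment implies.

/* Editor's illustration, not kernel code. */
struct demo_counts {
	unsigned int count;
} __attribute__((__aligned__(32)));

/* __aligned__(32) rounds sizeof up to the alignment */
_Static_assert(sizeof(struct demo_counts) == 32,
	       "each entry occupies a 32-byte stride");
_Static_assert(4 * sizeof(struct demo_counts) == 128,
	       "four entries per 128-byte cacheline");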
#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_CALL_FUNC_SINGLE	2
#define IPI_KDUMP_CPU_STOP	3

/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, ipi_operation);

extern void cpu_halt (void);
static void
stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 12 | 46.15% | 1 | 14.29% |
Andrew Morton | 6 | 23.08% | 1 | 14.29% |
Linus Torvalds | 4 | 15.38% | 2 | 28.57% |
Srivatsa S. Bhat | 2 | 7.69% | 1 | 14.29% |
Hidetoshi Seto | 1 | 3.85% | 1 | 14.29% |
Ingo Molnar | 1 | 3.85% | 1 | 14.29% |
Total | 26 | 100.00% | 7 | 100.00% |
void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 91 | 59.87% | 3 | 27.27% |
Linus Torvalds | 24 | 15.79% | 1 | 9.09% |
Zou Nan hai | 15 | 9.87% | 1 | 9.09% |
Jens Axboe | 10 | 6.58% | 1 | 9.09% |
David Mosberger-Tang | 7 | 4.61% | 3 | 27.27% |
Peter Chubb | 4 | 2.63% | 1 | 9.09% |
Simon Horman | 1 | 0.66% | 1 | 9.09% |
Total | 152 | 100.00% | 11 | 100.00% |
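Editor's note: the dispatch pattern in handle_IPI() is easier to see in isolation. Below is a minimal, runnable user-space model of it, not kernel code: C11 atomics stand in for set_bit()/xchg(), __builtin_ctzl() plays the role of ffz(~ops) (both yield the lowest set bit), and send_op()/handle_ops() are illustrative names. Senders OR a request bit into a shared word; the handler atomically swaps the word to zero and services every bit it captured, so a request posted while the handler runs triggers another pass of the outer loop instead of being lost.

/* Editor's illustration, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

#define OP_CALL_FUNC	0
#define OP_CPU_STOP	1

static _Atomic unsigned long pending_ops;

static void send_op(int op)
{
	atomic_fetch_or(&pending_ops, 1UL << op);	/* like set_bit() */
}

static void handle_ops(void)
{
	unsigned long ops;

	while ((ops = atomic_exchange(&pending_ops, 0)) != 0) {	/* like xchg() */
		do {
			int which = __builtin_ctzl(ops);	/* like ffz(~ops) */

			ops &= ~(1UL << which);
			switch (which) {
			case OP_CALL_FUNC:
				printf("call-func request\n");
				break;
			case OP_CPU_STOP:
				printf("stop request\n");
				break;
			}
		} while (ops);
	}
}

int main(void)
{
	send_op(OP_CALL_FUNC);
	send_op(OP_CPU_STOP);
	handle_ops();		/* services both requests, in bit order */
	return 0;
}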
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 30 | 81.08% | 4 | 66.67% |
David Mosberger-Tang | 5 | 13.51% | 1 | 16.67% |
Linus Torvalds | 2 | 5.41% | 1 | 16.67% |
Total | 37 | 100.00% | 6 | 100.00% |
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 29 | 85.29% | 2 | 50.00% |
John Hawkes | 4 | 11.76% | 1 | 25.00% |
David Mosberger-Tang | 1 | 2.94% | 1 | 25.00% |
Total | 34 | 100.00% | 4 | 100.00% |
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(const struct cpumask *mask, int op)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Xiantao Zhang | 27 | 77.14% | 1 | 33.33% |
Rusty Russell | 5 | 14.29% | 1 | 33.33% |
Linus Torvalds (pre-git) | 3 | 8.57% | 1 | 33.33% |
Total | 35 | 100.00% | 3 | 100.00% |
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 17 | 65.38% | 1 | 25.00% |
John Hawkes | 4 | 15.38% | 1 | 25.00% |
Xiantao Zhang | 3 | 11.54% | 1 | 25.00% |
Rusty Russell | 2 | 7.69% | 1 | 25.00% |
Total | 26 | 100.00% | 4 | 100.00% |
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Zou Nan hai | 9 | 75.00% | 1 | 50.00% |
Al Viro | 3 | 25.00% | 1 | 50.00% |
Total | 12 | 100.00% | 2 | 100.00% |
void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;

	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if (kdump_status[cpu] == 0)
				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Zou Nan hai | 49 | 94.23% | 1 | 50.00% |
Al Viro | 3 | 5.77% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
#endif
/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 13 | 68.42% | 1 | 50.00% |
Linus Torvalds | 6 | 31.58% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(smp_send_reschedule);
/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jack Steiner | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
void
smp_local_flush_tlb(void)
{
	/*
	 * Use atomic ops. Otherwise, the load/increment/store sequence from
	 * a "++" operation can have the line stolen between the load & store.
	 * The overhead of the atomic op is negligible in this case & offers
	 * significant benefit for the brief periods where lots of cpus
	 * are simultaneously flushing TLBs.
	 */
	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
	local_flush_tlb_all();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jack Steiner | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
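Editor's note: the comment in smp_local_flush_tlb() describes a classic lost-update race. The runnable user-space sketch below makes it concrete; it is not kernel code, and C11 atomics plus pthreads stand in for ia64_fetchadd() and multiple CPUs. The atomic counter always ends at exactly 200000, while the plain "++" counter usually comes up short because both threads can load the same old value before either stores.

/* Editor's illustration, not kernel code. Build with: cc -pthread */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static _Atomic unsigned int flush_count;	/* one indivisible read-modify-write */
static unsigned int racy_count;			/* separate load, add, store */

static void *flusher(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		atomic_fetch_add(&flush_count, 1);	/* like ia64_fetchadd(1, ..., acq) */
		racy_count++;				/* increments can be lost here */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, flusher, NULL);
	pthread_create(&b, NULL, flusher, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("atomic=%u racy=%u\n", atomic_load(&flush_count), racy_count);
	return 0;
}

The racy result varies from run to run, which is exactly why the kernel pays for the atomic op here.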
#define FLUSH_DELAY	5 /* Usec backoff to eliminate excessive cacheline bouncing */
void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
	unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
	cpumask_t cpumask = xcpumask;
	int mycpu, cpu, flush_mycpu = 0;

	preempt_disable();
	mycpu = smp_processor_id();

	for_each_cpu(cpu, &cpumask)
		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;

	mb();
	for_each_cpu(cpu, &cpumask) {
		if (cpu == mycpu)
			flush_mycpu = 1;
		else
			smp_send_local_flush_tlb(cpu);
	}

	if (flush_mycpu)
		smp_local_flush_tlb();

	for_each_cpu(cpu, &cpumask)
		while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
			udelay(FLUSH_DELAY);

	preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jack Steiner | 113 | 89.68% | 1 | 33.33% |
Robin Holt | 7 | 5.56% | 1 | 33.33% |
Rusty Russell | 6 | 4.76% | 1 | 33.33% |
Total | 126 | 100.00% | 3 | 100.00% |
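Editor's note: smp_flush_tlb_cpumask() uses a snapshot-then-wait handshake: record each target's flush counter, send the IPIs, then poll with a small backoff until every counter moves past its snapshot, which proves each target ran smp_local_flush_tlb() at least once after the request. The runnable sketch below models the same three steps in user space; it is not kernel code, pthreads stand in for CPUs, nanosleep() for udelay(), and all names are illustrative.

/* Editor's illustration, not kernel code. Build with: cc -pthread */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define NTARGETS 3

static _Atomic unsigned int done_count[NTARGETS];
static _Atomic int flush_requested[NTARGETS];

static void *target(void *arg)
{
	int cpu = (int)(long)arg;

	while (!atomic_load(&flush_requested[cpu]))
		;					/* wait for the "IPI" */
	/* ... a real target would flush its local TLB here ... */
	atomic_fetch_add(&done_count[cpu], 1);		/* signal completion */
	return NULL;
}

int main(void)
{
	pthread_t t[NTARGETS];
	unsigned int snap[NTARGETS];
	struct timespec backoff = { 0, 5000 };		/* ~5 usec, like FLUSH_DELAY */

	for (long cpu = 0; cpu < NTARGETS; cpu++)
		pthread_create(&t[cpu], NULL, target, (void *)cpu);

	/* 1. Snapshot every target's counter. */
	for (int cpu = 0; cpu < NTARGETS; cpu++)
		snap[cpu] = atomic_load(&done_count[cpu]);

	/* 2. Fire the requests (the kernel sends an IPI instead). */
	for (int cpu = 0; cpu < NTARGETS; cpu++)
		atomic_store(&flush_requested[cpu], 1);

	/* 3. Poll until each counter moves past its snapshot. */
	for (int cpu = 0; cpu < NTARGETS; cpu++)
		while (atomic_load(&done_count[cpu]) == snap[cpu])
			nanosleep(&backoff, NULL);

	for (int cpu = 0; cpu < NTARGETS; cpu++)
		pthread_join(t[cpu], NULL);
	printf("all %d targets flushed\n", NTARGETS);
	return 0;
}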
void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 23 | 88.46% | 1 | 25.00% |
David Mosberger-Tang | 2 | 7.69% | 2 | 50.00% |
Andrew Morton | 1 | 3.85% | 1 | 25.00% |
Total | 26 | 100.00% | 4 | 100.00% |
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	cpumask_var_t cpus;

	preempt_disable();
	/* this happens for the common case of a single-threaded fork(): */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}
	if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) {
		smp_call_function((void (*)(void *))local_finish_flush_tlb_mm,
				  mm, 1);
	} else {
		cpumask_copy(cpus, mm_cpumask(mm));
		smp_call_function_many(cpus,
			(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
		free_cpumask_var(cpus);
	}
	local_irq_disable();
	local_finish_flush_tlb_mm(mm);
	local_irq_enable();
	preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dimitri Sivanich | 67 | 50.00% | 2 | 28.57% |
David Mosberger-Tang | 51 | 38.06% | 2 | 28.57% |
Andrew Morton | 7 | 5.22% | 1 | 14.29% |
Peter Chubb | 6 | 4.48% | 1 | 14.29% |
Rusty Russell | 3 | 2.24% | 1 | 14.29% |
Total | 134 | 100.00% | 7 | 100.00% |
void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 7 | 46.67% | 1 | 33.33% |
Jens Axboe | 6 | 40.00% | 1 | 33.33% |
Linus Torvalds | 2 | 13.33% | 1 | 33.33% |
Total | 15 | 100.00% | 3 | 100.00% |
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 7 | 38.89% | 1 | 25.00% |
Linus Torvalds | 5 | 27.78% | 1 | 25.00% |
Rusty Russell | 5 | 27.78% | 1 | 25.00% |
Linus Torvalds (pre-git) | 1 | 5.56% | 1 | 25.00% |
Total | 18 | 100.00% | 4 | 100.00% |
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 6 | 50.00% | 1 | 50.00% |
Linus Torvalds (pre-git) | 6 | 50.00% | 1 | 50.00% |
Total | 12 | 100.00% | 2 | 100.00% |
int
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 308 | 30.80% | 6 | 11.76% |
Jack Steiner | 194 | 19.40% | 1 | 1.96% |
Zou Nan hai | 84 | 8.40% | 1 | 1.96% |
Linus Torvalds | 80 | 8.00% | 3 | 5.88% |
David Mosberger-Tang | 78 | 7.80% | 11 | 21.57% |
Dimitri Sivanich | 67 | 6.70% | 2 | 3.92% |
Andrew Morton | 47 | 4.70% | 2 | 3.92% |
Xiantao Zhang | 31 | 3.10% | 1 | 1.96% |
Jens Axboe | 27 | 2.70% | 1 | 1.96% |
Rusty Russell | 25 | 2.50% | 5 | 9.80% |
Peter Chubb | 10 | 1.00% | 2 | 3.92% |
John Hawkes | 8 | 0.80% | 1 | 1.96% |
Robin Holt | 7 | 0.70% | 1 | 1.96% |
Al Viro | 6 | 0.60% | 1 | 1.96% |
Simon Arlott | 6 | 0.60% | 1 | 1.96% |
Marcelo Tosatti | 5 | 0.50% | 1 | 1.96% |
Tejun Heo | 3 | 0.30% | 2 | 3.92% |
Matt Domsch | 3 | 0.30% | 1 | 1.96% |
Matthew Wilcox | 2 | 0.20% | 1 | 1.96% |
Srivatsa S. Bhat | 2 | 0.20% | 1 | 1.96% |
Simon Horman | 2 | 0.20% | 1 | 1.96% |
Adrian Bunk | 1 | 0.10% | 1 | 1.96% |
Hidetoshi Seto | 1 | 0.10% | 1 | 1.96% |
Fenghua Yu | 1 | 0.10% | 1 | 1.96% |
Arun Sharma | 1 | 0.10% | 1 | 1.96% |
Ingo Molnar | 1 | 0.10% | 1 | 1.96% |
Total | 1000 | 100.00% | 51 | 100.00% |