Release 4.14 arch/sh/kernel/smp.c
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
struct plat_smp_ops *mp_ops = NULL;
/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };
void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
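register_smp_ops() is how a platform plugs its CPU bring-up callbacks into this file before the generic SMP code runs. A minimal sketch of a caller, assuming the plat_smp_ops hook layout (prepare_cpus/start_cpu/send_ipi plus the hotplug hooks); the my_* names are hypothetical, not real platform code:

/*
 * Hypothetical platform setup: the my_* helpers are invented for
 * illustration.  register_smp_ops() must run early, during platform
 * setup, before smp_prepare_cpus() dereferences mp_ops.
 */
static struct plat_smp_ops my_smp_ops = {
        .prepare_cpus   = my_prepare_cpus,      /* hypothetical: probe secondaries */
        .start_cpu      = my_start_cpu,         /* hypothetical: release a CPU into the kernel */
        .send_ipi       = my_send_ipi,          /* hypothetical: raise an SMP_MSG_* IPI */
        .cpu_disable    = native_cpu_disable,   /* generic fallbacks from this file */
        .cpu_die        = native_cpu_die,
        .play_dead      = native_play_dead,
};

void __init my_plat_smp_setup(void)
{
        register_smp_ops(&my_smp_ops);
}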
static inline void smp_store_cpu_info(unsigned int cpu)
{
        struct sh_cpuinfo *c = cpu_data + cpu;

        memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
        c->loops_per_jiffy = loops_per_jiffy;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 23 | 57.50% | 2 | 66.67% |
Andrew Morton | 17 | 42.50% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();

        init_new_context(current, &init_mm);
        current_thread_info()->cpu = cpu;

        mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 25 | 51.02% | 1 | 20.00% |
Paul Mundt | 20 | 40.82% | 2 | 40.00% |
Rusty Russell | 4 | 8.16% | 2 | 40.00% |
Total | 49 | 100.00% | 5 | 100.00% |
void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        __cpu_number_map[0] = cpu;
        __cpu_logical_map[0] = cpu;

        set_cpu_online(cpu, true);
        set_cpu_possible(cpu, true);

        per_cpu(cpu_state, cpu) = CPU_ONLINE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 24 | 46.15% | 1 | 20.00% |
Paul Mundt | 24 | 46.15% | 3 | 60.00% |
Rusty Russell | 4 | 7.69% | 1 | 20.00% |
Total | 52 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_HOTPLUG_CPU
void native_cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 10; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        if (system_state == SYSTEM_RUNNING)
                                pr_info("CPU %u is now offline\n", cpu);

                        return;
                }

                msleep(100);
        }

        pr_err("CPU %u didn't die...\n", cpu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 70 | 100.00% | 1 | 100.00% |
Total | 70 | 100.00% | 1 | 100.00% |
int native_cpu_disable(unsigned int cpu)
{
        return cpu == 0 ? -EPERM : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __this_cpu_write(cpu_state, CPU_DEAD);
        local_irq_disable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 26 | 89.66% | 1 | 50.00% |
Christoph Lameter | 3 | 10.34% | 1 | 50.00% |
Total | 29 | 100.00% | 2 | 100.00% |
void native_play_dead(void)
{
        play_dead_common();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
int __cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        int ret;

        ret = mp_ops->cpu_disable(cpu);
        if (ret)
                return ret;

        /*
         * Take this CPU offline.  Once we clear this, we can't return,
         * and we must not schedule until we're ready to give up the cpu.
         */
        set_cpu_online(cpu, false);

        /*
         * OK - migrate IRQs away from this CPU
         */
        migrate_irqs();

        /*
         * Flush user cache and TLB mappings, and then remove this CPU
         * from the vm mask set of all processes.
         */
        flush_cache_all();
#ifdef CONFIG_MMU
        local_flush_tlb_all();
#endif
        clear_tasks_mm_cpumask(cpu);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 59 | 90.77% | 1 | 33.33% |
Rich Felker | 5 | 7.69% | 1 | 33.33% |
Anton Vorontsov | 1 | 1.54% | 1 | 33.33% |
Total | 65 | 100.00% | 3 | 100.00% |
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
        return -ENOSYS;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 10 | 76.92% | 1 | 50.00% |
Matt Fleming | 3 | 23.08% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
void native_cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
void native_play_dead(void)
{
        BUG();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
#endif
asmlinkage void start_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        struct mm_struct *mm = &init_mm;

        enable_mmu();
        mmgrab(mm);
        mmget(mm);
        current->active_mm = mm;
#ifdef CONFIG_MMU
        enter_lazy_tlb(mm, current);
        local_flush_tlb_all();
#endif

        per_cpu_trap_init();

        preempt_disable();

        notify_cpu_starting(cpu);

        local_irq_enable();

        calibrate_delay();

        smp_store_cpu_info(cpu);

        set_cpu_online(cpu, true);
        per_cpu(cpu_state, cpu) = CPU_ONLINE;

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 82 | 82.00% | 5 | 41.67% |
Rich Felker | 5 | 5.00% | 1 | 8.33% |
Thomas Gleixner | 4 | 4.00% | 2 | 16.67% |
Manfred Spraul | 4 | 4.00% | 1 | 8.33% |
Matt Fleming | 3 | 3.00% | 1 | 8.33% |
Vegard Nossum | 2 | 2.00% | 2 | 16.67% |
Total | 100 | 100.00% | 12 | 100.00% |
extern struct {
        unsigned long sp;
        unsigned long bss_start;
        unsigned long bss_end;
        void *start_kernel_fn;
        void *cpu_init_fn;
        void *thread_info;
} stack_start;
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
        unsigned long timeout;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 107 | 74.31% | 5 | 50.00% |
Andrew Morton | 29 | 20.14% | 1 | 10.00% |
Thomas Gleixner | 5 | 3.47% | 2 | 20.00% |
Nicholas Piggin | 2 | 1.39% | 1 | 10.00% |
Oleg Nesterov | 1 | 0.69% | 1 | 10.00% |
Total | 144 | 100.00% | 10 | 100.00% |
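__cpu_up() stages the bring-up handshake: it fills stack_start so the head.S entry path can pick up its stack pointer and C entry function, flushes that block to memory, asks the platform to start the secondary at _stext, then polls for up to one second until start_secondary() marks the CPU online. What start_cpu() actually does is entirely platform-defined; a minimal sketch with invented register names, to make the handshake concrete:

/*
 * Hypothetical mp_ops->start_cpu() implementation.  MY_BOOT_VECTOR() and
 * MY_RESET_CTRL() are invented registers, not real SH hardware: point the
 * secondary's reset vector at the kernel entry point (whose head.S code
 * consumes stack_start), then release the CPU from reset.
 */
static void my_start_cpu(unsigned int cpu, unsigned long entry_point)
{
        __raw_writel(entry_point, MY_BOOT_VECTOR(cpu));
        __raw_writel(1, MY_RESET_CTRL(cpu));
}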
void __init smp_cpus_done(unsigned int max_cpus)
{
        unsigned long bogosum = 0;
        int cpu;

        for_each_online_cpu(cpu)
                bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 50 | 80.65% | 1 | 50.00% |
Andrew Morton | 12 | 19.35% | 1 | 50.00% |
Total | 62 | 100.00% | 2 | 100.00% |
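The two divisors in the printk split the summed loops_per_jiffy into the integer and two-decimal fractional parts of BogoMIPS: one BogoMIPS is 500000 delay-loop iterations per second, i.e. 500000/HZ per jiffy. A self-contained worked example with invented numbers (HZ and bogosum chosen only for illustration):

/* Worked example: HZ = 100, bogosum = 1245184 (both made up). */
#include <stdio.h>

int main(void)
{
        unsigned long bogosum = 1245184, hz = 100;
        unsigned long whole = bogosum / (500000 / hz);          /* 1245184 / 5000 = 249 */
        unsigned long frac  = (bogosum / (5000 / hz)) % 100;    /* 24903 % 100 = 3 */

        printf("SMP: ... (%lu.%02lu BogoMIPS).\n", whole, frac); /* "249.03" */
        return 0;
}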
void smp_send_reschedule(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 14 | 82.35% | 1 | 50.00% |
Paul Mundt | 3 | 17.65% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 12 | 41.38% | 2 | 33.33% |
Rusty Russell | 6 | 20.69% | 1 | 16.67% |
Jens Axboe | 6 | 20.69% | 1 | 16.67% |
Paul Mundt | 5 | 17.24% | 2 | 33.33% |
Total | 29 | 100.00% | 6 | 100.00% |
void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 10 | 58.82% | 1 | 33.33% |
Andrew Morton | 4 | 23.53% | 1 | 33.33% |
Paul Mundt | 3 | 17.65% | 1 | 33.33% |
Total | 17 | 100.00% | 3 | 100.00% |
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 23 | 79.31% | 2 | 50.00% |
Rusty Russell | 5 | 17.24% | 1 | 25.00% |
Rich Felker | 1 | 3.45% | 1 | 25.00% |
Total | 29 | 100.00% | 4 | 100.00% |
static void ipi_timer(void)
{
        irq_enter();
        tick_receive_broadcast();
        irq_exit();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 16 | 94.12% | 2 | 66.67% |
Rich Felker | 1 | 5.88% | 1 | 33.33% |
Total | 17 | 100.00% | 3 | 100.00% |
#endif
void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_RESCHEDULE:
                scheduler_ipi();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case SMP_MSG_TIMER:
                ipi_timer();
                break;
#endif
        default:
                printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
                       smp_processor_id(), __func__, msg);
                break;
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 55 | 87.30% | 2 | 50.00% |
Rich Felker | 5 | 7.94% | 1 | 25.00% |
Peter Zijlstra | 3 | 4.76% | 1 | 25.00% |
Total | 63 | 100.00% | 4 | 100.00% |
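smp_message_recv() is the single demultiplex point for all SH IPIs: the platform's interrupt handler works out which message was raised and forwards it here. A minimal sketch of such a handler, assuming each SMP_MSG_* got its own IRQ line with the message number passed as the dev_id cookie at request_irq() time; the my_* name and cookie encoding are assumptions, and a real platform would also ack its IPI mailbox here:

/* Hypothetical platform IPI handler feeding smp_message_recv(). */
static irqreturn_t my_ipi_handler(int irq, void *dev_id)
{
        unsigned int message = (unsigned int)(unsigned long)dev_id;

        smp_message_recv(message);

        return IRQ_HANDLED;
}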
/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_MMU
static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
        } else {
                int i;

                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 80 | 95.24% | 1 | 50.00% |
Rusty Russell | 4 | 4.76% | 1 | 50.00% |
Total | 84 | 100.00% | 2 | 100.00% |
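The same decide-then-flush shape recurs in flush_tlb_range() and flush_tlb_page() below. Distilled into one hypothetical helper (flush_or_lazy_invalidate() is invented for illustration, not something this file defines), the pattern reads:

/*
 * Sketch of the pattern shared by flush_tlb_mm(), flush_tlb_range() and
 * flush_tlb_page(): IPI other CPUs only when the mm may be live there;
 * otherwise zero their contexts so switch_mm() allocates a fresh ASID,
 * invalidating the stale entries lazily.  Purely illustrative.
 */
static void flush_or_lazy_invalidate(struct mm_struct *mm,
                                     void (*ipi_fn)(void *), void *info,
                                     void (*local_fn)(void *))
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                /* mm may be active on another CPU: flush it there, now */
                smp_call_function(ipi_fn, info, 1);
        } else {
                int i;

                /* sole user, running here: just force new ASIDs elsewhere */
                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }

        local_fn(info);         /* the local flush always happens */
        preempt_enable();
}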
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};
static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
        } else {
                int i;

                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 124 | 96.88% | 1 | 50.00% |
Rusty Russell | 4 | 3.12% | 1 | 50.00% |
Total | 128 | 100.00% | 2 | 100.00% |
static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
        } else {
                int i;

                for_each_online_cpu(i)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 109 | 96.46% | 1 | 50.00% |
Rusty Russell | 4 | 3.54% | 1 | 50.00% |
Total | 113 | 100.00% | 2 | 100.00% |
static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_one(fd->addr1, fd->addr2);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;
        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
        local_flush_tlb_one(asid, vaddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 50 | 100.00% | 1 | 100.00% |
Total | 50 | 100.00% | 1 | 100.00% |
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paul Mundt | 1343 | 80.66% | 14 | 33.33% |
Andrew Morton | 204 | 12.25% | 2 | 4.76% |
Rusty Russell | 31 | 1.86% | 5 | 11.90% |
Rich Felker | 30 | 1.80% | 2 | 4.76% |
Jens Axboe | 16 | 0.96% | 1 | 2.38% |
Thomas Gleixner | 9 | 0.54% | 4 | 9.52% |
Matt Fleming | 6 | 0.36% | 2 | 4.76% |
Peter Zijlstra | 5 | 0.30% | 1 | 2.38% |
Manfred Spraul | 4 | 0.24% | 1 | 2.38% |
Ingo Molnar | 4 | 0.24% | 2 | 4.76% |
Christoph Lameter | 3 | 0.18% | 1 | 2.38% |
Evgeniy Polyakov | 3 | 0.18% | 1 | 2.38% |
Vegard Nossum | 2 | 0.12% | 2 | 4.76% |
Nicholas Piggin | 2 | 0.12% | 1 | 2.38% |
Oleg Nesterov | 1 | 0.06% | 1 | 2.38% |
Anton Vorontsov | 1 | 0.06% | 1 | 2.38% |
Arun Sharma | 1 | 0.06% | 1 | 2.38% |
Total | 1665 | 100.00% | 42 | 100.00% |