Contributors: 18

Author | Tokens | Token Proportion | Commits | Commit Proportion
Ralf Baechle | 201 | 63.81% | 4 | 16.67%
Hidehiro Kawai | 35 | 11.11% | 1 | 4.17%
Corey Minyard | 19 | 6.03% | 1 | 4.17%
Dengcheng Zhu | 10 | 3.17% | 2 | 8.33%
Rusty Russell | 8 | 2.54% | 1 | 4.17%
Linus Torvalds | 6 | 1.90% | 1 | 4.17%
Alexander Nyberg | 6 | 1.90% | 1 | 4.17%
Marcin Nowakowski | 5 | 1.59% | 1 | 4.17%
Linus Torvalds (pre-git) | 5 | 1.59% | 3 | 12.50%
Nicolas Schichan | 4 | 1.27% | 1 | 4.17%
Franck Bui-Huu | 3 | 0.95% | 1 | 4.17%
Ingo Molnar | 3 | 0.95% | 1 | 4.17%
Nathan T. Lynch | 3 | 0.95% | 1 | 4.17%
Deng-Cheng Zhu | 2 | 0.63% | 1 | 4.17%
David Howells | 2 | 0.63% | 1 | 4.17%
Eric W. Biederman | 1 | 0.32% | 1 | 4.17%
Mike Rapoport | 1 | 0.32% | 1 | 4.17%
Greg Kroah-Hartman | 1 | 0.32% | 1 | 4.17%
Total | 315 | | 24 |

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

/* Keeps track of which CPU is the crashing one. */
static int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;

#ifdef CONFIG_SMP
static void crash_shutdown_secondary(void *passed_regs)
{
        struct pt_regs *regs = passed_regs;
        int cpu = smp_processor_id();

        /*
         * If we are passed registers, use those. Otherwise get the
         * regs from the last interrupt, which should be correct, as
         * we are in an interrupt. But if the regs are not there,
         * pull them from the top of the stack. They are probably
         * wrong, but we need something to keep from crashing again.
         */
        if (!regs)
                regs = get_irq_regs();
        if (!regs)
                regs = task_pt_regs(current);

        if (!cpu_online(cpu))
                return;

        /* We won't be sent IPIs any more. */
        set_cpu_online(cpu, false);

        local_irq_disable();
        if (!cpumask_test_cpu(cpu, &cpus_in_crash))
                crash_save_cpu(regs, cpu);
        cpumask_set_cpu(cpu, &cpus_in_crash);

        while (!atomic_read(&kexec_ready_to_reboot))
                cpu_relax();

        kexec_reboot();

        /* NOTREACHED */
}
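
/*
 * Ask every other online CPU to run crash_shutdown_secondary() and
 * wait, for at most roughly 10 seconds, for them to check in.
 */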
static void crash_kexec_prepare_cpus(void)
{
        static int cpus_stopped;
        unsigned int msecs;
        unsigned int ncpus;

        if (cpus_stopped)
                return;

        ncpus = num_online_cpus() - 1; /* Excluding the panic CPU */

        smp_call_function(crash_shutdown_secondary, NULL, 0);
        smp_wmb();

        /*
         * The crash CPU sends an IPI and waits up to about 10 seconds
         * for the other CPUs to respond.
         */
        pr_emerg("Sending IPI to other cpus...\n");
        msecs = 10000;
        while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
                cpu_relax();
                mdelay(1);
        }

        cpus_stopped = 1;
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
        if (_crash_smp_send_stop)
                _crash_smp_send_stop();

        crash_kexec_prepare_cpus();
}

#else /* !defined(CONFIG_SMP) */
static void crash_kexec_prepare_cpus(void) {}
#endif /* !defined(CONFIG_SMP) */
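
/*
 * Runs on the CPU that hit the crash: disable interrupts, record and
 * save its register state, bring the other CPUs to a stop, and mark
 * it as having entered the crash path.
 */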
void default_machine_crash_shutdown(struct pt_regs *regs)
{
        local_irq_disable();
        crashing_cpu = smp_processor_id();
        crash_save_cpu(regs, crashing_cpu);
        crash_kexec_prepare_cpus();
        cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
}