Contributors: 12

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Mark Rutland | 166 | 42.24% | 9 | 37.50% |
| Ashwin Chaugule | 73 | 18.58% | 2 | 8.33% |
| Marc Zyngier | 50 | 12.72% | 2 | 8.33% |
| Yunfeng Ye | 39 | 9.92% | 1 | 4.17% |
| Will Deacon | 20 | 5.09% | 2 | 8.33% |
| Sudeep Holla | 11 | 2.80% | 1 | 4.17% |
| Liviu Dudau | 9 | 2.29% | 1 | 4.17% |
| Sami Tolvanen | 9 | 2.29% | 1 | 4.17% |
| Catalin Marinas | 8 | 2.04% | 2 | 8.33% |
| Jean-Philippe Brucker | 5 | 1.27% | 1 | 4.17% |
| Thomas Gleixner | 2 | 0.51% | 1 | 4.17% |
| Vladimir Murzin | 1 | 0.25% | 1 | 4.17% |
| Total | 393 | | 24 | |
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "psci: " fmt

#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/psci.h>
#include <linux/mm.h>

#include <uapi/linux/psci.h>

#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/smp_plat.h>
/* No per-CPU setup is needed when CPUs are brought up via PSCI. */
static int __init cpu_psci_cpu_init(unsigned int cpu)
{
	return 0;
}

static int __init cpu_psci_cpu_prepare(unsigned int cpu)
{
	if (!psci_ops.cpu_on) {
		pr_err("no cpu_on method, not booting CPU%d\n", cpu);
		return -ENODEV;
	}

	return 0;
}

/*
 * Ask the PSCI firmware to power on the target CPU (identified by its
 * MPIDR via cpu_logical_map()) and have it enter the kernel at the
 * physical address of secondary_entry.
 */
static int cpu_psci_cpu_boot(unsigned int cpu)
{
	phys_addr_t pa_secondary_entry = __pa_symbol(secondary_entry);
	int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry);
	if (err && err != -EPERM)
		pr_err("failed to boot CPU%d (%d)\n", cpu, err);

	return err;
}

#ifdef CONFIG_HOTPLUG_CPU
static bool cpu_psci_cpu_can_disable(unsigned int cpu)
{
	return !psci_tos_resident_on(cpu);
}

static int cpu_psci_cpu_disable(unsigned int cpu)
{
	/* Fail early if we don't have CPU_OFF support */
	if (!psci_ops.cpu_off)
		return -EOPNOTSUPP;

	/* Trusted OS will deny CPU_OFF */
	if (psci_tos_resident_on(cpu))
		return -EPERM;

	return 0;
}

static void cpu_psci_cpu_die(unsigned int cpu)
{
	/*
	 * There are no known implementations of PSCI actually using the
	 * power state field, pass a sensible default for now.
	 */
	u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
		    PSCI_0_2_POWER_STATE_TYPE_SHIFT;

	psci_ops.cpu_off(state);
}

static int cpu_psci_cpu_kill(unsigned int cpu)
{
	int err;
	unsigned long start, end;

	if (!psci_ops.affinity_info)
		return 0;
	/*
	 * cpu_kill could race with cpu_die and we can
	 * potentially end up declaring this cpu undead
	 * while it is dying. So, try again a few times.
	 */
	start = jiffies;
	end = start + msecs_to_jiffies(100);
	do {
		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
		if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
			pr_info("CPU%d killed (polled %d ms)\n", cpu,
				jiffies_to_msecs(jiffies - start));
			return 0;
		}

		usleep_range(100, 1000);
	} while (time_before(jiffies, end));

	pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
			cpu, err);

	return -ETIMEDOUT;
}
#endif

const struct cpu_operations cpu_psci_ops = {
	.name		= "psci",
	.cpu_init	= cpu_psci_cpu_init,
	.cpu_prepare	= cpu_psci_cpu_prepare,
	.cpu_boot	= cpu_psci_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_can_disable = cpu_psci_cpu_can_disable,
	.cpu_disable	= cpu_psci_cpu_disable,
	.cpu_die	= cpu_psci_cpu_die,
	.cpu_kill	= cpu_psci_cpu_kill,
#endif
};
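
For context, nothing in this file selects cpu_psci_ops itself; the arm64 cpu_ops layer matches each CPU's boot method from the device tree ("enable-method = \"psci\"") or ACPI against a table of supported implementations. Below is a minimal sketch of that table, assuming the naming used by recent kernels in arch/arm64/kernel/cpu_ops.c (dt_supported_cpu_ops, smp_spin_table_ops); exact identifiers and lookup helpers vary between kernel versions.

/* Sketch only: not part of psci.c. Assumes recent arch/arm64/kernel/cpu_ops.c naming. */
#include <linux/init.h>		/* __initconst */

#include <asm/cpu_ops.h>	/* struct cpu_operations */

/* These declarations are normally provided by asm/cpu_ops.h. */
extern const struct cpu_operations smp_spin_table_ops;
extern const struct cpu_operations cpu_psci_ops;

/*
 * Table walked when resolving a CPU's device-tree "enable-method" string;
 * the "psci" method resolves to the cpu_psci_ops structure defined above.
 */
static const struct cpu_operations *const dt_supported_cpu_ops[] __initconst = {
	&smp_spin_table_ops,
	&cpu_psci_ops,
	NULL,
};

Once a CPU's entry is resolved at boot, the generic arm64 SMP code invokes its hooks (cpu_prepare and cpu_boot when bringing the CPU online, and the hotplug callbacks above when taking it offline).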