Contributors: 22
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Heiko Carstens |
747 |
45.16% |
43 |
42.16% |
Martin Schwidefsky |
490 |
29.63% |
23 |
22.55% |
Alexander Gordeev |
164 |
9.92% |
6 |
5.88% |
Linus Torvalds |
127 |
7.68% |
7 |
6.86% |
Linus Torvalds (pre-git) |
65 |
3.93% |
4 |
3.92% |
Niklas Schnelle |
9 |
0.54% |
1 |
0.98% |
David Hildenbrand |
8 |
0.48% |
1 |
0.98% |
Art Haas |
8 |
0.48% |
1 |
0.98% |
Ingo Molnar |
6 |
0.36% |
2 |
1.96% |
Gerald Schaefer |
5 |
0.30% |
1 |
0.98% |
Sebastian Andrzej Siewior |
4 |
0.24% |
1 |
0.98% |
Andreas Krebbel |
4 |
0.24% |
2 |
1.96% |
Michael Holzheu |
3 |
0.18% |
1 |
0.98% |
Christian Ehrhardt |
3 |
0.18% |
1 |
0.98% |
Stoyan Gaydarov |
3 |
0.18% |
1 |
0.98% |
Christoph Lameter |
2 |
0.12% |
1 |
0.98% |
Vasily Gorbik |
1 |
0.06% |
1 |
0.98% |
Tejun Heo |
1 |
0.06% |
1 |
0.98% |
Greg Kroah-Hartman |
1 |
0.06% |
1 |
0.98% |
Vegard Nossum |
1 |
0.06% |
1 |
0.98% |
Jan Engelhardt |
1 |
0.06% |
1 |
0.98% |
Sven Schnelle |
1 |
0.06% |
1 |
0.98% |
Total |
1654 |
|
102 |
|
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/stop_machine.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/text-patching.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/sclp.h>
#include <asm/smp.h>
/* ELF hardware capability bits exported to user space; filled by setup_hwcaps(). */
unsigned long __read_mostly elf_hwcap;
/* ELF platform string ("z10", "z196", ...); filled by setup_elf_platform(). */
char elf_platform[ELF_PLATFORM_SIZE];
/* Per-CPU data shown in /proc/cpuinfo. */
struct cpu_info {
	unsigned int cpu_mhz_dynamic;	/* current CPU speed (upper half of ECAG value) */
	unsigned int cpu_mhz_static;	/* nominal CPU speed (lower half of ECAG value) */
	struct cpuid cpu_id;		/* CPU id as returned by get_cpu_id() */
};
static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* Per-CPU spin counter used by stop_machine_yield() to limit yield attempts. */
static DEFINE_PER_CPU(int, cpu_relax_retry);
/* True if the machine reports CPU speeds via ECAG (see cpu_detect_mhz_feature()). */
static bool machine_has_cpu_mhz;
/*
 * Detect whether the CPU-speed attribute is available: facility 34
 * must be installed and the ECAG query must return a valid value.
 */
void __init cpu_detect_mhz_feature(void)
{
	if (!test_facility(34))
		return;
	if (__ecag(ECAG_CPU_ATTRIBUTE, 0) == -1UL)
		return;
	machine_has_cpu_mhz = true;
}
/*
 * Refresh the cached CPU speeds for the calling CPU. The ECAG value
 * packs the dynamic speed in the upper and the static speed in the
 * lower 32 bits. Runs as an IPI callback (arg is unused).
 */
static void update_cpu_mhz(void *arg)
{
	struct cpu_info *info = this_cpu_ptr(&cpu_info);
	unsigned long val = __ecag(ECAG_CPU_ATTRIBUTE, 0);

	info->cpu_mhz_dynamic = val >> 32;
	info->cpu_mhz_static = val & 0xffffffff;
}
/*
 * Re-read the CPU speeds on all online CPUs after a speed change.
 * Always adjusts jiffies first; the per-CPU update only happens when
 * the machine actually reports CPU speeds.
 */
void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (!machine_has_cpu_mhz)
		return;
	on_each_cpu(update_cpu_mhz, NULL, 0);
}
/*
 * Busy-wait hook for stop_machine(): after spin_retry iterations,
 * find the next CPU in the mask (wrapping around, excluding ourselves)
 * and yield to it if its vcpu is currently preempted by the hypervisor.
 */
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int self = smp_processor_id();
	int target;

	if (__this_cpu_inc_return(cpu_relax_retry) < spin_retry)
		return;
	__this_cpu_write(cpu_relax_retry, 0);
	target = cpumask_next_wrap(self, cpumask, self, false);
	if (target >= nr_cpu_ids)
		return;
	if (arch_vcpu_is_preempted(target))
		smp_yield_cpu(target);
}
/* IPI callback: run sync_core() on the receiving CPU (info is unused). */
static void do_sync_core(void *info)
{
	sync_core();
}
/* Execute sync_core() on every online CPU and wait for completion. */
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}
/* Like text_poke_sync(), but with CPU hotplug excluded for the duration. */
void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	/* Cache this CPU's id for /proc/cpuinfo. */
	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	/*
	 * Attach this CPU to init_mm: take a reference, make it the
	 * active mm and enter lazy TLB mode. A freshly initialized CPU
	 * must not have a user mm (hence the BUG_ON).
	 */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
static void show_facilities(struct seq_file *m)
{
unsigned int bit;
seq_puts(m, "facilities :");
for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
seq_printf(m, " %d", bit);
seq_putc(m, '\n');
}
/*
 * Print the machine-wide part of /proc/cpuinfo: vendor, CPU count,
 * bogomips, thread id limit, hwcap feature names, facility bits, cache
 * info and one id line per online CPU.
 */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* Feature names indexed by HWCAP_NR_* bit number; printed when the
	 * corresponding bit is set in elf_hwcap. */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3] = "esan3",
		[HWCAP_NR_ZARCH] = "zarch",
		[HWCAP_NR_STFLE] = "stfle",
		[HWCAP_NR_MSA] = "msa",
		[HWCAP_NR_LDISP] = "ldisp",
		[HWCAP_NR_EIMM] = "eimm",
		[HWCAP_NR_DFP] = "dfp",
		[HWCAP_NR_HPAGE] = "edat",
		[HWCAP_NR_ETF3EH] = "etf3eh",
		[HWCAP_NR_HIGH_GPRS] = "highgprs",
		[HWCAP_NR_TE] = "te",
		[HWCAP_NR_VXRS] = "vx",
		[HWCAP_NR_VXRS_BCD] = "vxd",
		[HWCAP_NR_VXRS_EXT] = "vxe",
		[HWCAP_NR_GS] = "gs",
		[HWCAP_NR_VXRS_EXT2] = "vxe2",
		[HWCAP_NR_VXRS_PDE] = "vxp",
		[HWCAP_NR_SORT] = "sort",
		[HWCAP_NR_DFLT] = "dflt",
		[HWCAP_NR_VXRS_PDE2] = "vxp2",
		[HWCAP_NR_NNPA] = "nnpa",
		[HWCAP_NR_PCI_MIO] = "pcimio",
		[HWCAP_NR_SIE] = "sie",
	};
	int i, cpu;

	/* Keep the name table in sync with the HWCAP_NR_* definitions. */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id : IBM/S390\n"
		   "# processors : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	/* One legacy id line per online CPU, as stored by cpu_init(). */
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
/*
 * setup_hwcaps - fill elf_hwcap with the capabilities of this machine.
 *
 * Most bits are derived from the stfle facility bits via test_facility();
 * a few come from machine flags (MACHINE_HAS_*) or sclp. HWCAP_ESAN3,
 * HWCAP_ZARCH, HWCAP_EIMM and HWCAP_HIGH_GPRS are set unconditionally.
 * Always returns 0 (initcall convention).
 */
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;
	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;
	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;
	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;
	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;
	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;
	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;
	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;
	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;
	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;
	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;
	/*
	 * vector - the extension bits are only reported when the base
	 * vector facility (129) is installed.
	 */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}
	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;
	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;
	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;
	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;
	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;
	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;
	return 0;
}
arch_initcall(setup_hwcaps);
/*
 * setup_elf_platform - derive the ELF platform string from the machine
 * type and feed the CPU id into the entropy pool. Unknown machine types
 * fall back to "z10". Always returns 0 (initcall convention).
 */
static int __init setup_elf_platform(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	case 0x3931:
	case 0x3932:
		strcpy(elf_platform, "z16");
		break;
	default: /* Use "z10" as default. */
		strcpy(elf_platform, "z10");
		break;
	}
	return 0;
}
arch_initcall(setup_elf_platform);
/*
 * Print the topology lines for logical CPU n. Emits nothing when the
 * kernel is built without CONFIG_SCHED_TOPOLOGY.
 */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id : %d\n", topology_core_id(n));
	seq_printf(m, "book id : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
/* Print the cached CPU id (version/identification/machine) of CPU n. */
static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);

	seq_printf(m, "version : %02X\n", id->version);
	seq_printf(m, "identification : %06X\n", id->ident);
	seq_printf(m, "machine : %04X\n", id->machine);
}
/*
 * Print the dynamic and static CPU speed of CPU n; emits nothing when
 * the machine does not report CPU speeds.
 */
static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
	struct cpu_info *info;

	if (!machine_has_cpu_mhz)
		return;
	info = per_cpu_ptr(&cpu_info, n);
	seq_printf(m, "cpu MHz dynamic : %d\n", info->cpu_mhz_dynamic);
	seq_printf(m, "cpu MHz static : %d\n", info->cpu_mhz_static);
}
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 *
 * The iterator position v is the cpu number plus one; the machine-wide
 * summary is printed once, before the first online CPU.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu = (unsigned long) v - 1;

	if (cpu == cpumask_first(cpu_online_mask))
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number : %ld\n", cpu);
	show_cpu_topology(m, cpu);
	show_cpu_ids(m, cpu);
	show_cpu_mhz(m, cpu);
	return 0;
}
/*
 * Advance *pos to the next online cpu (or the first one for *pos == 0)
 * and return it as an iterator token (cpu number + 1, so that a valid
 * position is never NULL). Returns NULL when the mask is exhausted.
 */
static inline void *c_update(loff_t *pos)
{
	*pos = *pos ? cpumask_next(*pos - 1, cpu_online_mask)
		    : cpumask_first(cpu_online_mask);
	if (*pos >= nr_cpu_ids)
		return NULL;
	return (void *)*pos + 1;
}
/*
 * seq_file start: block CPU hotplug for the whole iteration (released
 * in c_stop) and return the first position.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}
/* seq_file next: step to the following online cpu. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}
/* seq_file stop: drop the hotplug lock taken in c_start. */
static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}
/* seq_file operations backing the cpuinfo interface. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};