Linux 4.11: arch/arm64/include/asm/sysreg.h

/*
 * Macros for accessing system registers with older binutils.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H

#include <linux/stringify.h>

/*
 * ARMv8 ARM reserves the following encoding for system registers:
 * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
 *  C5.2, version:ARM DDI 0487A.f)
 *      [20-19] : Op0
 *      [18-16] : Op1
 *      [15-12] : CRn
 *      [11-8]  : CRm
 *      [7-5]   : Op2
 */

#define Op0_shift	19

#define Op0_mask	0x3

#define Op1_shift	16

#define Op1_mask	0x7

#define CRn_shift	12

#define CRn_mask	0xf

#define CRm_shift	8

#define CRm_mask	0xf

#define Op2_shift	5

#define Op2_mask	0x7


#define sys_reg(op0, op1, crn, crm, op2) \
	(((op0) << Op0_shift) | ((op1) << Op1_shift) | \
         ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
         ((op2) << Op2_shift))


#define sys_reg_Op0(id)	(((id) >> Op0_shift) & Op0_mask)

#define sys_reg_Op1(id)	(((id) >> Op1_shift) & Op1_mask)

#define sys_reg_CRn(id)	(((id) >> CRn_shift) & CRn_mask)

#define sys_reg_CRm(id)	(((id) >> CRm_shift) & CRm_mask)

#define sys_reg_Op2(id)	(((id) >> Op2_shift) & Op2_mask)
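
/*
 * Worked example (illustrative, derived from the macros above): the
 * SYS_MIDR_EL1 definition below is sys_reg(3, 0, 0, 0, 0), i.e.
 *
 *	(3 << Op0_shift) = 3 << 19 = 0x180000
 *
 * so sys_reg_Op0(SYS_MIDR_EL1) == 3 and every other field extracts as 0.
 * OR-ing this encoding into the MRS skeleton used by mrs_s below
 * (0xd5200000) gives 0xd5380000, the encoding of "mrs xN, midr_el1" with
 * the destination register number in bits [4:0].
 */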

#ifndef CONFIG_BROKEN_GAS_INST

#ifdef __ASSEMBLY__

#define __emit_inst(x)			.inst (x)
#else

#define __emit_inst(x)			".inst " __stringify((x)) "\n\t"
#endif

#else  /* CONFIG_BROKEN_GAS_INST */

#ifndef CONFIG_CPU_BIG_ENDIAN

#define __INSTR_BSWAP(x)		(x)
#else  /* CONFIG_CPU_BIG_ENDIAN */

#define __INSTR_BSWAP(x)		((((x) << 24) & 0xff000000)     | \
                                         (((x) <<  8) & 0x00ff0000)     | \
                                         (((x) >>  8) & 0x0000ff00)     | \
                                         (((x) >> 24) & 0x000000ff))
#endif	/* CONFIG_CPU_BIG_ENDIAN */

#ifdef __ASSEMBLY__

#define __emit_inst(x)			.long __INSTR_BSWAP(x)
#else  /* __ASSEMBLY__ */

#define __emit_inst(x)			".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
#endif	/* __ASSEMBLY__ */

#endif	/* CONFIG_BROKEN_GAS_INST */
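
/*
 * Note (informational, summarising the definitions above): __emit_inst(x)
 * emits the 32-bit opcode x verbatim. With an assembler that understands
 * .inst it expands to ".inst (x)"; with CONFIG_BROKEN_GAS_INST it falls
 * back to ".long", byte-swapping the value on big-endian builds so the
 * instruction is still stored in little-endian byte order.
 */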


#define SYS_MIDR_EL1			sys_reg(3, 0, 0, 0, 0)

#define SYS_MPIDR_EL1			sys_reg(3, 0, 0, 0, 5)

#define SYS_REVIDR_EL1			sys_reg(3, 0, 0, 0, 6)


#define SYS_ID_PFR0_EL1			sys_reg(3, 0, 0, 1, 0)

#define SYS_ID_PFR1_EL1			sys_reg(3, 0, 0, 1, 1)

#define SYS_ID_DFR0_EL1			sys_reg(3, 0, 0, 1, 2)

#define SYS_ID_MMFR0_EL1		sys_reg(3, 0, 0, 1, 4)

#define SYS_ID_MMFR1_EL1		sys_reg(3, 0, 0, 1, 5)

#define SYS_ID_MMFR2_EL1		sys_reg(3, 0, 0, 1, 6)

#define SYS_ID_MMFR3_EL1		sys_reg(3, 0, 0, 1, 7)


#define SYS_ID_ISAR0_EL1		sys_reg(3, 0, 0, 2, 0)

#define SYS_ID_ISAR1_EL1		sys_reg(3, 0, 0, 2, 1)

#define SYS_ID_ISAR2_EL1		sys_reg(3, 0, 0, 2, 2)

#define SYS_ID_ISAR3_EL1		sys_reg(3, 0, 0, 2, 3)

#define SYS_ID_ISAR4_EL1		sys_reg(3, 0, 0, 2, 4)

#define SYS_ID_ISAR5_EL1		sys_reg(3, 0, 0, 2, 5)

#define SYS_ID_MMFR4_EL1		sys_reg(3, 0, 0, 2, 6)


#define SYS_MVFR0_EL1			sys_reg(3, 0, 0, 3, 0)

#define SYS_MVFR1_EL1			sys_reg(3, 0, 0, 3, 1)

#define SYS_MVFR2_EL1			sys_reg(3, 0, 0, 3, 2)


#define SYS_ID_AA64PFR0_EL1		sys_reg(3, 0, 0, 4, 0)

#define SYS_ID_AA64PFR1_EL1		sys_reg(3, 0, 0, 4, 1)


#define SYS_ID_AA64DFR0_EL1		sys_reg(3, 0, 0, 5, 0)

#define SYS_ID_AA64DFR1_EL1		sys_reg(3, 0, 0, 5, 1)


#define SYS_ID_AA64ISAR0_EL1		sys_reg(3, 0, 0, 6, 0)

#define SYS_ID_AA64ISAR1_EL1		sys_reg(3, 0, 0, 6, 1)


#define SYS_ID_AA64MMFR0_EL1		sys_reg(3, 0, 0, 7, 0)

#define SYS_ID_AA64MMFR1_EL1		sys_reg(3, 0, 0, 7, 1)

#define SYS_ID_AA64MMFR2_EL1		sys_reg(3, 0, 0, 7, 2)


#define SYS_CNTFRQ_EL0			sys_reg(3, 3, 14, 0, 0)

#define SYS_CTR_EL0			sys_reg(3, 3, 0, 0, 1)

#define SYS_DCZID_EL0			sys_reg(3, 3, 0, 0, 7)


#define REG_PSTATE_PAN_IMM		sys_reg(0, 0, 4, 0, 4)

#define REG_PSTATE_UAO_IMM		sys_reg(0, 0, 4, 0, 3)


#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM |   \
                                      (!!x)<<8 | 0x1f)

#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM |   \
                                      (!!x)<<8 | 0x1f)
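
/*
 * Illustrative use (not part of the original header): the SET_PSTATE_*
 * macros expand to a raw MSR (immediate) encoding, so from C they are
 * typically wrapped in an asm statement, e.g.:
 *
 *	asm(SET_PSTATE_PAN(1));		set PSTATE.PAN
 *	asm(SET_PSTATE_UAO(0));		clear PSTATE.UAO
 *
 * (!!x) reduces the argument to 0 or 1 and places it in the immediate
 * (CRm) field, bits [11:8]; 0x1f is Rt == 31.
 */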

/* Common SCTLR_ELx flags. */

#define SCTLR_ELx_EE    (1 << 25)

#define SCTLR_ELx_I	(1 << 12)

#define SCTLR_ELx_SA	(1 << 3)

#define SCTLR_ELx_C	(1 << 2)

#define SCTLR_ELx_A	(1 << 1)

#define SCTLR_ELx_M	1


#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
                         SCTLR_ELx_SA | SCTLR_ELx_I)

/* SCTLR_EL1 specific flags. */

#define SCTLR_EL1_UCI		(1 << 26)

#define SCTLR_EL1_SPAN		(1 << 23)

#define SCTLR_EL1_UCT		(1 << 15)

#define SCTLR_EL1_SED		(1 << 8)

#define SCTLR_EL1_CP15BEN	(1 << 5)

/* id_aa64isar0 */

#define ID_AA64ISAR0_RDM_SHIFT		28

#define ID_AA64ISAR0_ATOMICS_SHIFT	20

#define ID_AA64ISAR0_CRC32_SHIFT	16

#define ID_AA64ISAR0_SHA2_SHIFT		12

#define ID_AA64ISAR0_SHA1_SHIFT		8

#define ID_AA64ISAR0_AES_SHIFT		4

/* id_aa64pfr0 */

#define ID_AA64PFR0_GIC_SHIFT		24

#define ID_AA64PFR0_ASIMD_SHIFT		20

#define ID_AA64PFR0_FP_SHIFT		16

#define ID_AA64PFR0_EL3_SHIFT		12

#define ID_AA64PFR0_EL2_SHIFT		8

#define ID_AA64PFR0_EL1_SHIFT		4

#define ID_AA64PFR0_EL0_SHIFT		0


#define ID_AA64PFR0_FP_NI		0xf

#define ID_AA64PFR0_FP_SUPPORTED	0x0

#define ID_AA64PFR0_ASIMD_NI		0xf

#define ID_AA64PFR0_ASIMD_SUPPORTED	0x0

#define ID_AA64PFR0_EL1_64BIT_ONLY	0x1

#define ID_AA64PFR0_EL0_64BIT_ONLY	0x1

#define ID_AA64PFR0_EL0_32BIT_64BIT	0x2

/* id_aa64mmfr0 */

#define ID_AA64MMFR0_TGRAN4_SHIFT	28

#define ID_AA64MMFR0_TGRAN64_SHIFT	24

#define ID_AA64MMFR0_TGRAN16_SHIFT	20

#define ID_AA64MMFR0_BIGENDEL0_SHIFT	16

#define ID_AA64MMFR0_SNSMEM_SHIFT	12

#define ID_AA64MMFR0_BIGENDEL_SHIFT	8

#define ID_AA64MMFR0_ASID_SHIFT		4

#define ID_AA64MMFR0_PARANGE_SHIFT	0


#define ID_AA64MMFR0_TGRAN4_NI		0xf

#define ID_AA64MMFR0_TGRAN4_SUPPORTED	0x0

#define ID_AA64MMFR0_TGRAN64_NI		0xf

#define ID_AA64MMFR0_TGRAN64_SUPPORTED	0x0

#define ID_AA64MMFR0_TGRAN16_NI		0x0

#define ID_AA64MMFR0_TGRAN16_SUPPORTED	0x1

/* id_aa64mmfr1 */

#define ID_AA64MMFR1_PAN_SHIFT		20

#define ID_AA64MMFR1_LOR_SHIFT		16

#define ID_AA64MMFR1_HPD_SHIFT		12

#define ID_AA64MMFR1_VHE_SHIFT		8

#define ID_AA64MMFR1_VMIDBITS_SHIFT	4

#define ID_AA64MMFR1_HADBS_SHIFT	0


#define ID_AA64MMFR1_VMIDBITS_8		0

#define ID_AA64MMFR1_VMIDBITS_16	2

/* id_aa64mmfr2 */

#define ID_AA64MMFR2_LVA_SHIFT		16

#define ID_AA64MMFR2_IESB_SHIFT		12

#define ID_AA64MMFR2_LSM_SHIFT		8

#define ID_AA64MMFR2_UAO_SHIFT		4

#define ID_AA64MMFR2_CNP_SHIFT		0

/* id_aa64dfr0 */

#define ID_AA64DFR0_PMSVER_SHIFT	32

#define ID_AA64DFR0_CTX_CMPS_SHIFT	28

#define ID_AA64DFR0_WRPS_SHIFT		20

#define ID_AA64DFR0_BRPS_SHIFT		12

#define ID_AA64DFR0_PMUVER_SHIFT	8

#define ID_AA64DFR0_TRACEVER_SHIFT	4

#define ID_AA64DFR0_DEBUGVER_SHIFT	0


#define ID_ISAR5_RDM_SHIFT		24

#define ID_ISAR5_CRC32_SHIFT		16

#define ID_ISAR5_SHA2_SHIFT		12

#define ID_ISAR5_SHA1_SHIFT		8

#define ID_ISAR5_AES_SHIFT		4

#define ID_ISAR5_SEVL_SHIFT		0


#define MVFR0_FPROUND_SHIFT		28

#define MVFR0_FPSHVEC_SHIFT		24

#define MVFR0_FPSQRT_SHIFT		20

#define MVFR0_FPDIVIDE_SHIFT		16

#define MVFR0_FPTRAP_SHIFT		12

#define MVFR0_FPDP_SHIFT		8

#define MVFR0_FPSP_SHIFT		4

#define MVFR0_SIMD_SHIFT		0


#define MVFR1_SIMDFMAC_SHIFT		28

#define MVFR1_FPHP_SHIFT		24

#define MVFR1_SIMDHP_SHIFT		20

#define MVFR1_SIMDSP_SHIFT		16

#define MVFR1_SIMDINT_SHIFT		12

#define MVFR1_SIMDLS_SHIFT		8

#define MVFR1_FPDNAN_SHIFT		4

#define MVFR1_FPFTZ_SHIFT		0



#define ID_AA64MMFR0_TGRAN4_SHIFT	28

#define ID_AA64MMFR0_TGRAN64_SHIFT	24

#define ID_AA64MMFR0_TGRAN16_SHIFT	20


#define ID_AA64MMFR0_TGRAN4_NI		0xf

#define ID_AA64MMFR0_TGRAN4_SUPPORTED	0x0

#define ID_AA64MMFR0_TGRAN64_NI		0xf

#define ID_AA64MMFR0_TGRAN64_SUPPORTED	0x0

#define ID_AA64MMFR0_TGRAN16_NI		0x0

#define ID_AA64MMFR0_TGRAN16_SUPPORTED	0x1

#if defined(CONFIG_ARM64_4K_PAGES)

#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN4_SHIFT

#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN4_SUPPORTED
#elif defined(CONFIG_ARM64_16K_PAGES)

#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN16_SHIFT

#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN16_SUPPORTED
#elif defined(CONFIG_ARM64_64K_PAGES)

#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN64_SHIFT

#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN64_SUPPORTED
#endif


/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */

#define SYS_MPIDR_SAFE_VAL		(1UL << 31)

#ifdef __ASSEMBLY__

	.irp	num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	.equ	.L__reg_num_x\num, \num
	.endr
	.equ	.L__reg_num_xzr, 31

	
	.macro	mrs_s, rt, sreg
	__emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
	.endm

	.macro	msr_s, sreg, rt
	__emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
	.endm
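
/*
 * Illustrative use from assembly (a sketch, not part of the original
 * header): the macros take the numeric encodings produced by sys_reg(),
 * e.g. the SYS_* constants defined above:
 *
 *	mrs_s	x2, SYS_ID_AA64MMFR0_EL1	read ID_AA64MMFR0_EL1 into x2
 *
 * msr_s takes its operands the other way round: msr_s <sreg>, <rt>.
 */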

#else

#include <linux/types.h>

asm(
"       .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
"       .equ    .L__reg_num_x\\num, \\num\n"
"       .endr\n"
"       .equ    .L__reg_num_xzr, 31\n"
"\n"
"       .macro  mrs_s, rt, sreg\n"
        __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
"       .endm\n"
"\n"
"       .macro  msr_s, sreg, rt\n"
        __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
"       .endm\n"
);

/*
 * Unlike read_cpuid, calls to read_sysreg are never expected to be
 * optimized away or replaced with synthetic values.
 */
#define read_sysreg(r) ({                                      \
        u64 __val;                                              \
        asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
        __val;                                                  \
})
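
/*
 * Illustrative use (not from the original header): the register name is
 * passed as it would be written in assembly, e.g.:
 *
 *	u64 midr = read_sysreg(midr_el1);
 */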

/*
 * The "Z" constraint normally means a zero immediate, but when combined with
 * the "%x0" template means XZR.
 */
#define write_sysreg(v, r) do {                                    \
        u64 __val = (u64)v;                                     \
        asm volatile("msr " __stringify(r) ", %x0"              \
                     : : "rZ" (__val));                         \
} while (0)
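
/*
 * Illustrative use (not from the original header): thanks to the "rZ"
 * constraint and the "%x0" template, writing a constant zero lets the
 * compiler pick XZR directly, e.g.:
 *
 *	write_sysreg(0, tpidrro_el0);
 *	write_sysreg(val, sctlr_el1);	(as config_sctlr_el1() below does)
 */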

/*
 * For registers without architectural names, or simply unsupported by
 * GAS.
 */
#define read_sysreg_s(r) ({                                              \
        u64 __val;                                                      \
        asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val));       \
        __val;                                                          \
})

#define write_sysreg_s(v, r) do {                                    \
        u64 __val = (u64)v;                                             \
        asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
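
/*
 * Illustrative use (not from the original header): the _s variants take a
 * sys_reg() encoding, e.g. one of the SYS_* constants defined above:
 *
 *	u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
 */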


static inline void config_sctlr_el1(u32 clear, u32 set)
{
	u32 val;

	val = read_sysreg(sctlr_el1);
	val &= ~clear;
	val |= set;
	write_sysreg(val, sctlr_el1);
}
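
/*
 * Illustrative use (not from the original header), e.g. clearing one
 * SCTLR_EL1 control bit while leaving the rest of the register unchanged:
 *
 *	config_sctlr_el1(SCTLR_EL1_SED, 0);
 */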


#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_SYSREG_H */
