Contributors: 19

Author                         Tokens  Token Proportion  Commits  Commit Proportion
Andrew Lutomirski                  36            24.16%        6             18.75%
Kirill A. Shutemov                 27            18.12%        5             15.62%
Chris Snook                        12             8.05%        1              3.12%
Gabriel Krisman Bertazi            12             8.05%        1              3.12%
Peter Zijlstra                     11             7.38%        2              6.25%
Dave Hansen                        11             7.38%        2              6.25%
Stephen Wilson                      7             4.70%        1              3.12%
Andi Kleen                          6             4.03%        3              9.38%
Thomas Gleixner                     6             4.03%        1              3.12%
Len Brown                           4             2.68%        1              3.12%
H. Peter Anvin                      3             2.01%        1              3.12%
Venkatesh Pallipadi                 3             2.01%        1              3.12%
Richard Kennedy                     3             2.01%        1              3.12%
Luiz Fernando N. Capitulino         2             1.34%        1              3.12%
Jeremy Fitzhardinge                 2             1.34%        1              3.12%
Arun Sharma                         1             0.67%        1              3.12%
Greg Kroah-Hartman                  1             0.67%        1              3.12%
Sebastian Andrzej Siewior           1             0.67%        1              3.12%
Michel Lespinasse                   1             0.67%        1              3.12%
Total                             149                         32


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_H
#define _ASM_X86_MMU_H

#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/bits.h>

/* Uprobes on this MM assume 32-bit code */
#define MM_CONTEXT_UPROBE_IA32		0
/* vsyscall page is accessible on this MM */
#define MM_CONTEXT_HAS_VSYSCALL		1
/* Do not allow changing LAM mode */
#define MM_CONTEXT_LOCK_LAM		2
/* Allow LAM and SVA coexisting */
#define MM_CONTEXT_FORCE_TAGGED_SVA	3
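
/*
 * The values above are bit numbers, not masks: they index into the
 * 'flags' word of mm_context_t below and are consumed with the atomic
 * bitops.  A minimal sketch (illustrative only, assuming a valid
 * struct mm_struct *mm):
 *
 *	if (test_bit(MM_CONTEXT_UPROBE_IA32, &mm->context.flags))
 *		;	/* treat this mm as 32-bit for uprobes */
 *	set_bit(MM_CONTEXT_LOCK_LAM, &mm->context.flags);
 */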

/*
 * x86 has arch-specific MMU state beyond what lives in mm_struct.
 */
typedef struct {
	/*
	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
	 * be reused, and zero is not a valid ctx_id.
	 */
	u64 ctx_id;
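
	/*
	 * Illustrative sketch: the TLB code caches (ctx_id, tlb_gen)
	 * pairs per CPU, so a check like
	 *
	 *	if (cached_ctx_id == mm->context.ctx_id)
	 *
	 * (cached_ctx_id being a hypothetical per-CPU copy) can safely
	 * recognize this mm later; because ctx_ids are never recycled,
	 * a match cannot be a false positive from a reused mm_struct.
	 */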

	/*
	 * Any code that needs to do any sort of TLB flushing for this
	 * mm will first make its changes to the page tables, then
	 * increment tlb_gen, then flush.  This lets the low-level
	 * flushing code keep track of what needs flushing.
	 *
	 * This is not used on Xen PV.
	 */
	atomic64_t tlb_gen;
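
	/*
	 * The ordering is the point.  A sketch of the protocol (the
	 * real increment helper is inc_mm_tlb_gen() in tlbflush.h):
	 *
	 *	... change the page tables ...
	 *	atomic64_inc_return(&mm->context.tlb_gen);
	 *	... flush, e.g. send flush IPIs ...
	 *
	 * A CPU whose per-CPU view of tlb_gen lags this value knows it
	 * may still hold stale translations for this mm.
	 */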

#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct rw_semaphore	ldt_usr_sem;
	struct ldt_struct	*ldt;
#endif

#ifdef CONFIG_X86_64
	unsigned long flags;
#endif

#ifdef CONFIG_ADDRESS_MASKING
	/* Active LAM mode:  X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
	unsigned long lam_cr3_mask;

	/* Significant bits of the virtual address. Excludes tag bits. */
	u64 untag_mask;
#endif
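
	/*
	 * Sketch of how the two masks are consumed (illustrative):
	 * lam_cr3_mask is ORed into the CR3 value written on context
	 * switch, while untag_mask strips the LAM tag bits from a user
	 * pointer before it is dereferenced:
	 *
	 *	untagged = addr & mm->context.untag_mask;
	 */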

	struct mutex lock;
	void __user *vdso;			/* vdso base address */
	const struct vdso_image *vdso_image;	/* vdso image in use */

	atomic_t perf_rdpmc_allowed;	/* nonzero if rdpmc is allowed */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * One bit per protection key says whether userspace can
	 * use it or not.  Protected by mmap_lock.
	 */
	u16 pkey_allocation_map;
	s16 execute_only_pkey;
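
	/*
	 * Illustrative one-liners (not real helpers): pkey 'k' is
	 * allocated iff its bit is set,
	 *
	 *	mm->context.pkey_allocation_map & (1U << k)
	 *
	 * and execute_only_pkey is -1 until an execute-only key has
	 * been allocated.
	 */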
#endif
} mm_context_t;

#define INIT_MM_CONTEXT(mm)						\
	.context = {							\
		.ctx_id = 1,						\
		.lock = __MUTEX_INITIALIZER(mm.context.lock),		\
	}
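
/*
 * Sketch of the intended consumer (mm/init-mm.c does roughly this for
 * the static init_mm):
 *
 *	struct mm_struct init_mm = {
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 *
 * Only init_mm gets the fixed ctx_id of 1; every other mm receives a
 * fresh, never-reused ctx_id at init_new_context() time.
 */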

void leave_mm(int cpu);
#define leave_mm leave_mm

#endif /* _ASM_X86_MMU_H */