Contributors: 23
Author Tokens Token Proportion Commits Commit Proportion
Andi Kleen 39 20.74% 5 11.90%
Jeremy Fitzhardinge 37 19.68% 7 16.67%
Yinghai Lu 29 15.43% 4 9.52%
Kirill A. Shutemov 21 11.17% 2 4.76%
Jacob Shin 12 6.38% 2 4.76%
Toshi Kani 8 4.26% 1 2.38%
Linus Torvalds (pre-git) 7 3.72% 3 7.14%
Vivek Goyal 6 3.19% 3 7.14%
Dave Hansen 6 3.19% 1 2.38%
Arjan van de Ven 3 1.60% 1 2.38%
Pavel Tatashin 3 1.60% 1 2.38%
Tom Lendacky 3 1.60% 1 2.38%
David Woodhouse 2 1.06% 1 2.38%
Jaswinder Singh Rajput 2 1.06% 1 2.38%
Kees Cook 2 1.06% 1 2.38%
Robert P. J. Day 1 0.53% 1 2.38%
Anshuman Khandual 1 0.53% 1 2.38%
Thomas Gleixner 1 0.53% 1 2.38%
Randy Dunlap 1 0.53% 1 2.38%
Greg Kroah-Hartman 1 0.53% 1 2.38%
Tejun Heo 1 0.53% 1 2.38%
Arnd Bergmann 1 0.53% 1 2.38%
Wei Yang 1 0.53% 1 2.38%
Total 188 42


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_DEFS_H
#define _ASM_X86_PAGE_DEFS_H

#include <linux/const.h>
#include <linux/types.h>
#include <linux/mem_encrypt.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT		CONFIG_PAGE_SHIFT
/* Page size in bytes; _AC() applies the UL suffix only in C (not asm). */
#define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
/* Mask that clears the in-page offset bits of an address. */
#define PAGE_MASK		(~(PAGE_SIZE-1))

/*
 * Mask covering every valid virtual-address bit.
 * __VIRTUAL_MASK_SHIFT is provided by the per-arch header included
 * further down (page_32_types.h / page_64_types.h).
 */
#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)

/*
 * Cast P*D_MASK to a signed type so that it is sign-extended if
 * virtual addresses are 32-bits but physical addresses are larger
 * (ie, 32-bit PAE).
 */
#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_MASK) & __PHYSICAL_MASK)
#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_MASK) & __PHYSICAL_MASK)

/* Huge pages are PMD-sized; derive size/mask/order from PMD_SHIFT. */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

/* Number of hugetlb page-size states (hstates) supported. */
#define HUGE_MAX_HSTATE 2

/* Start of the kernel's direct mapping of physical memory. */
#define PAGE_OFFSET		((unsigned long)__PAGE_OFFSET)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

/*
 * Physical address where kernel should be loaded: CONFIG_PHYSICAL_START
 * rounded up to the next CONFIG_PHYSICAL_ALIGN boundary.
 */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
				+ (CONFIG_PHYSICAL_ALIGN - 1)) \
				& ~(CONFIG_PHYSICAL_ALIGN - 1))

/* Virtual address the kernel image runs at. */
#define __START_KERNEL		(__START_KERNEL_map + LOAD_PHYSICAL_ADDR)

/* Pull in the word-size specific layout constants for the build. */
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
/* Largest mapping granularity ioremap() may use: 1 GiB pages on 64-bit. */
#define IOREMAP_MAX_ORDER       (PUD_SHIFT)
#else
#include <asm/page_32_types.h>
/* Largest mapping granularity ioremap() may use: 2/4 MiB pages on 32-bit. */
#define IOREMAP_MAX_ORDER       (PMD_SHIFT)
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

/*
 * Mask covering every valid physical-address bit.  With
 * CONFIG_DYNAMIC_PHYSICAL_MASK it is a runtime variable (so features such
 * as memory encryption can reduce it); otherwise it is a compile-time
 * constant derived from __PHYSICAL_MASK_SHIFT.
 */
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
extern phys_addr_t physical_mask;
#define __PHYSICAL_MASK		physical_mask
#else
#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
#endif

/* Returns non-zero if /dev/mem access to the given pfn is permitted. */
extern int devmem_is_allowed(unsigned long pagenr);

/* Highest directly-mapped pfns, maintained by the init/mm code. */
extern unsigned long max_low_pfn_mapped;
extern unsigned long max_pfn_mapped;

static inline phys_addr_t get_max_mapped(void)
{
	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}

/* True if every pfn in [start_pfn, end_pfn) is in the direct mapping. */
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);

/* Arch hook: set up boot-time memory/node information. */
extern void initmem_init(void);

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_X86_PAGE_DEFS_H */