Contributors: 12

Author              Tokens  Token Proportion  Commits  Commit Proportion
Ryan Roberts           132            57.89%        1              7.14%
Nicholas Piggin         43            18.86%        3             21.43%
Andrey Konovalov        15             6.58%        1              7.14%
Ingo Molnar              9             3.95%        1              7.14%
Anshuman Khandual        6             2.63%        1              7.14%
Mark Rutland             5             2.19%        1              7.14%
Baolin Wang              5             2.19%        1              7.14%
Christophe Leroy         4             1.75%        1              7.14%
Rusty Russell            3             1.32%        1              7.14%
Toshi Kani               3             1.32%        1              7.14%
Ard Biesheuvel           2             0.88%        1              7.14%
Steven Price             1             0.44%        1              7.14%
Total                  228           100.00%       14            100.00%


#ifndef _ASM_ARM64_VMALLOC_H
#define _ASM_ARM64_VMALLOC_H
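
/*
 * arm64 vmalloc support: architecture hooks consumed by the generic
 * vmalloc code (mm/vmalloc.c) when deciding whether block mappings and
 * contiguous (PTE_CONT) mappings may be used in place of single pages.
 */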

#include <asm/page.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	/*
	 * Software page-table walkers (e.g. the ptdump debugfs code)
	 * can't handle intermediate table entries being removed from
	 * under them, as huge vmap may do.
	 */
	return pud_sect_supported() &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	/* See arch_vmap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}
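
/*
 * Illustrative sketch (not part of this header): the generic huge-vmap
 * code in mm/vmalloc.c is expected to consult the two hooks above
 * before installing a block mapping, roughly:
 *
 *	if (arch_vmap_pud_supported(prot) &&
 *	    IS_ALIGNED(addr, PUD_SIZE) &&
 *	    IS_ALIGNED(phys_addr, PUD_SIZE) &&
 *	    end - addr >= PUD_SIZE)
 *		... install a PUD block mapping ...
 *
 * with an equivalent PMD-level check, and a fall-back to PTE mappings.
 */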

#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr,
						unsigned long end, u64 pfn,
						unsigned int max_page_shift)
{
	/*
	 * If the block is at least CONT_PTE_SIZE in size, and is naturally
	 * aligned in both virtual and physical space, then we can pte-map the
	 * block using the PTE_CONT bit for more efficient use of the TLB.
	 */
	if (max_page_shift < CONT_PTE_SHIFT)
		return PAGE_SIZE;

	if (end - addr < CONT_PTE_SIZE)
		return PAGE_SIZE;

	if (!IS_ALIGNED(addr, CONT_PTE_SIZE))
		return PAGE_SIZE;

	if (!IS_ALIGNED(PFN_PHYS(pfn), CONT_PTE_SIZE))
		return PAGE_SIZE;

	return CONT_PTE_SIZE;
}
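
/*
 * Worked example, assuming a 4K granule (where CONT_PTE_SIZE is
 * 16 * PAGE_SIZE = 64KiB):
 *
 *	addr and PFN_PHYS(pfn) both 64KiB-aligned, end - addr >= 64KiB
 *		-> returns CONT_PTE_SIZE, the range is contpte-mapped
 *	addr only 4KiB-aligned (e.g. 0x...1000)
 *		-> returns PAGE_SIZE, a single page is mapped
 *
 * The 16K and 64K granules have larger CONT_PTE geometries (2MiB), but
 * the logic is identical.
 */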

#define arch_vmap_pte_range_unmap_size arch_vmap_pte_range_unmap_size
static inline unsigned long arch_vmap_pte_range_unmap_size(unsigned long addr,
							   pte_t *ptep)
{
	/*
	 * The caller handles alignment so it's sufficient just to check
	 * PTE_CONT.
	 */
	return pte_valid_cont(__ptep_get(ptep)) ? CONT_PTE_SIZE : PAGE_SIZE;
}
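
/*
 * Illustrative sketch: a caller tearing down a vmap region can use the
 * returned size to step over a whole contiguous block at once, e.g.:
 *
 *	size = arch_vmap_pte_range_unmap_size(addr, ptep);
 *	...clear the entries...
 *	addr += size;
 *	ptep += size >> PAGE_SHIFT;
 *
 * The real loop lives in the generic unmap code; this is only its shape.
 */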

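/*
 * Report the largest PTE-level mapping shift usable for an allocation
 * of @size bytes: CONT_PTE_SHIFT if at least one full contiguous block
 * fits, otherwise plain PAGE_SHIFT.
 */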
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	if (size >= CONT_PTE_SIZE)
		return CONT_PTE_SHIFT;

	return PAGE_SHIFT;
}

#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

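/*
 * Upgrade @prot to the Normal Tagged memory type so the mapping can
 * carry MTE allocation tags. The generic code is expected to apply
 * this when hardware tag-based KASAN is in use, roughly:
 *
 *	if (kasan_hw_tags_enabled())
 *		prot = arch_vmap_pgprot_tagged(prot);
 *
 * (illustrative; the real call site is in mm/vmalloc.c).
 */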
#define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return pgprot_tagged(prot);
}

#endif /* _ASM_ARM64_VMALLOC_H */