Contributors: 27
Author                     Tokens  Token %  Commits  Commit %
Gerald Schaefer               231   36.38%        4     7.02%
Alexandre Ghiti                76    11.97%       12    21.05%
Peter Xu                       62     9.76%        3     5.26%
Linus Torvalds (pre-git)       45     7.09%        6    10.53%
Andi Kleen                     40     6.30%        2     3.51%
Linus Torvalds                 24     3.78%        2     3.51%
Naoya Horiguchi                23     3.62%        2     3.51%
David S. Miller                21     3.31%        3     5.26%
Andrew Morton                  15     2.36%        3     5.26%
Chris Metcalf                  14     2.20%        1     1.75%
Punit Agrawal                  10     1.57%        1     1.75%
Catalin Marinas                10     1.57%        1     1.75%
Hugh Dickins                    9     1.42%        2     3.51%
David Mosberger-Tang            7     1.10%        1     1.75%
Andy Whitcroft                  7     1.10%        1     1.75%
Will Deacon                     6     0.94%        1     1.75%
Anthony Iliopoulos              6     0.94%        1     1.75%
Aneesh Kumar K.V                5     0.79%        1     1.75%
Sam Ravnborg                    5     0.79%        1     1.75%
Kenneth W Chen                  4     0.63%        1     1.75%
Ryan Roberts                    4     0.63%        1     1.75%
David Gibson                    3     0.47%        2     3.51%
Baolin Wang                     3     0.47%        1     1.75%
Anshuman Khandual               2     0.31%        1     1.75%
Christophe Leroy                1     0.16%        1     1.75%
Greg Kroah-Hartman              1     0.16%        1     1.75%
Rick Edgecombe                  1     0.16%        1     1.75%
Total                         635                57


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H

#include <linux/swap.h>
#include <linux/swapops.h>
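
/*
 * Generic implementations of the hugetlb page table helpers, expressed in
 * terms of the normal pte_*() primitives.  Every helper wrapped in an
 * #ifndef __HAVE_ARCH_* block can be overridden: the architecture defines
 * the macro and supplies its own version in <asm/hugetlb.h> before
 * including this file, roughly along these lines:
 *
 *	#define __HAVE_ARCH_HUGE_PTE_CLEAR
 *	static inline void huge_pte_clear(struct mm_struct *mm,
 *			unsigned long addr, pte_t *ptep, unsigned long sz)
 *	{
 *		...
 *	}
 *
 *	#include <asm-generic/hugetlb.h>
 */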

static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

static inline unsigned long huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline unsigned long huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite_novma(pte);
}

#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
#endif

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

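/*
 * Marking a huge pte uffd-wp also write protects it, so that a later
 * write faults and can be reported to userspace via userfaultfd.
 */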
static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
	return huge_pte_wrprotect(pte_mkuffd_wp(pte));
}

static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
{
	return pte_clear_uffd_wp(pte);
}

static inline int huge_pte_uffd_wp(pte_t pte)
{
	return pte_uffd_wp(pte);
}

#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
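/*
 * The size argument lets architectures where a huge page may be mapped by
 * several contiguous page table entries know how much to clear; this
 * generic version simply ignores it.
 */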
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	pte_clear(mm, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
		unsigned long addr, unsigned long end,
		unsigned long floor, unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}
#endif

#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
	return ptep_clear_flush(vma, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTE_NONE
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
#endif

/* Please refer to comments above pte_none_mostly() for the usage */
static inline int huge_pte_none_mostly(pte_t pte)
{
	return huge_pte_none(pte) || is_pte_marker(pte);
}

#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
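/*
 * Generic validation of a hugetlb mapping request: both the address and
 * the length must be multiples of the huge page size of the file's hstate.
 */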
static inline int prepare_hugepage_range(struct file *file,
		unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;

	return 0;
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep,
		pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
#endif

#ifndef __HAVE_ARCH_HUGE_PTEP_GET
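/*
 * The generic version just reads the entry.  Architectures where a huge
 * page may be mapped by a block of contiguous ptes (e.g. arm64) override
 * this to fold the dirty/young bits of all the entries into the returned
 * pte.
 */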
static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	return ptep_get(ptep);
}
#endif

#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
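/*
 * Gigantic pages are huge pages too large for the buddy allocator (for
 * example 1 GB pages on x86-64).  By default they can only be allocated
 * and freed at runtime when the architecture selects
 * CONFIG_ARCH_HAS_GIGANTIC_PAGE.
 */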
static inline bool gigantic_page_runtime_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE);
}
#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */

#endif /* _ASM_GENERIC_HUGETLB_H */