Contributors: 23
Author                    Tokens  Token Proportion  Commits  Commit Proportion
Aneesh Kumar K.V              47            14.87%       10             20.41%
David Gibson                  44            13.92%        4              8.16%
Gerald Schaefer               40            12.66%        4              8.16%
Becky Bruce                   37            11.71%        2              4.08%
Christophe Leroy              34            10.76%        6             12.24%
Linus Torvalds (pre-git)      16             5.06%        3              6.12%
Linus Torvalds                15             4.75%        1              2.04%
David S. Miller               11             3.48%        2              4.08%
Baolin Wang                    9             2.85%        1              2.04%
Alexandre Ghiti                9             2.85%        3              6.12%
Andi Kleen                     9             2.85%        1              2.04%
Nicholas Piggin                9             2.85%        1              2.04%
Benjamin Herrenschmidt         8             2.53%        1              2.04%
Adam Litke                     5             1.58%        1              2.04%
Anshuman Khandual              4             1.27%        1              2.04%
Hari Bathini                   4             1.27%        1              2.04%
Michael Ellerman               4             1.27%        1              2.04%
Anton Blanchard                3             0.95%        1              2.04%
Kenneth W Chen                 2             0.63%        1              2.04%
Paul Mackerras                 2             0.63%        1              2.04%
Andrew Morton                  2             0.63%        1              2.04%
Nick Child                     1             0.32%        1              2.04%
Greg Kroah-Hartman             1             0.32%        1              2.04%
Total                        316                         49


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

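/*
 * Pull in the MMU specific huge page definitions: 64-bit Book3S (hash and
 * radix), e500, or the 8xx, each of which supplies its own page table
 * layout and helpers.
 */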
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
#elif defined(CONFIG_PPC_E500)
#include <asm/nohash/hugetlb-e500.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */

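/* Set early in boot when huge page support must be disabled for this kernel */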
extern bool hugetlb_disabled;

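/* Chooses the default huge page size for the running MMU at init time */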
void __init hugetlbpage_init_defaultsize(void);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

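/*
 * With the 64-bit hash MMU the address space is managed in slices, each with
 * a fixed base page size; a range inside a hugepage slice can only ever map
 * huge pages.  Radix and the other MMUs have no such restriction.
 */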
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
		return slice_is_hugepage_only_range(mm, addr, len);
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range

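/*
 * Each __HAVE_ARCH_HUGE_* define below tells <asm-generic/hugetlb.h>,
 * included at the end of this section, to skip its generic fallback
 * because powerpc provides its own implementation of that helper.
 */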
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		     pte_t pte, unsigned long sz);

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
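/*
 * Atomically clear a huge page PTE and return the old value: pte_update()
 * is asked to clear every bit (~0UL), set nothing, and treat the entry as
 * huge (final argument 1) so that any huge page specific handling in the
 * per-MMU pte_update() applies.
 */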
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}

#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
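/*
 * Tear down a huge page mapping: clear the PTE, flush the corresponding
 * TLB entry, and hand the old PTE value back to the caller.
 */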
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
	return pte;
}

#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
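/*
 * Fix up a huge PTE on fault: set the access/dirty bits requested by the
 * fault handler.  Returns non-zero when the entry was actually changed.
 */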
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

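/*
 * Reserve CMA areas at early boot for gigantic huge pages (see the
 * hugetlb_cma= command line option) before memory becomes fragmented.
 */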
void __init gigantic_hugetlb_cma_reserve(void);

#include <asm-generic/hugetlb.h>

#else /* ! CONFIG_HUGETLB_PAGE */
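/* Empty stubs so that callers do not need CONFIG_HUGETLB_PAGE checks */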
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}

static inline void __init hugetlbpage_init_defaultsize(void)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */