Contributors: 26
Author Tokens Token Proportion Commits Commit Proportion
Aneesh Kumar K.V 58 16.81% 12 21.05%
David Gibson 47 13.62% 5 8.77%
Becky Bruce 47 13.62% 2 3.51%
Gerald Schaefer 40 11.59% 4 7.02%
Christophe Leroy 20 5.80% 5 8.77%
Linus Torvalds (pre-git) 16 4.64% 3 5.26%
Alexandre Ghiti 12 3.48% 4 7.02%
David S. Miller 11 3.19% 2 3.51%
Linus Torvalds 10 2.90% 1 1.75%
Baolin Wang 9 2.61% 1 1.75%
Hugh Dickins 9 2.61% 2 3.51%
Nicholas Piggin 9 2.61% 1 1.75%
Benjamin Herrenschmidt 8 2.32% 1 1.75%
Andrew Morton 8 2.32% 2 3.51%
David Mosberger-Tang 8 2.32% 1 1.75%
Andi Kleen 6 1.74% 1 1.75%
Adam Litke 5 1.45% 1 1.75%
Hari Bathini 4 1.16% 1 1.75%
Anshuman Khandual 4 1.16% 1 1.75%
Michael Ellerman 4 1.16% 1 1.75%
Anton Blanchard 3 0.87% 1 1.75%
Kenneth W Chen 2 0.58% 1 1.75%
Paul Mackerras 2 0.58% 1 1.75%
Greg Kroah-Hartman 1 0.29% 1 1.75%
Nick Child 1 0.29% 1 1.75%
Mathieu Malaterre 1 0.29% 1 1.75%
Total 345 57


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
#elif defined(CONFIG_PPC_E500)
#include <asm/nohash/hugetlb-e500.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */

extern bool hugetlb_disabled;

void __init hugetlbpage_init_defaultsize(void);

int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

/*
 * Report whether [addr, addr + len) lies in a region that may only
 * hold huge pages.  Only the 64-bit hash MMU uses the slice mechanism
 * to carve out hugepage-only ranges; with radix (or without the hash
 * MMU compiled in) there is no such restriction, so answer 0.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) || radix_enabled())
		return 0;

	return slice_is_hugepage_only_range(mm, addr, len);
}
#define is_hugepage_only_range is_hugepage_only_range

#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
/*
 * Atomically clear a huge PTE and return its previous value.
 *
 * Delegates to pte_update() with a clear mask of ~0UL (drop every bit)
 * and a set mask of 0; the trailing 1 presumably flags this as a huge
 * mapping so the platform pte_update() handles it appropriately —
 * NOTE(review): confirm against the per-platform pte_update() signature.
 */
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
}

#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
/*
 * Clear a huge PTE and flush any cached translation for @addr in @vma.
 * Returns the PTE value that was present before the clear.
 */
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	pte_t old_pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);

	flush_hugetlb_page(vma, addr);
	return old_pte;
}

#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

void gigantic_hugetlb_cma_reserve(void) __init;
#include <asm-generic/hugetlb.h>

#else /* ! CONFIG_HUGETLB_PAGE */
/* No-op stub: with CONFIG_HUGETLB_PAGE disabled there are no huge
 * mappings, so there is never a huge-page translation to flush.
 */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

/* Stubs so generic code referencing hugepd helpers still compiles;
 * hugepd_shift() collapses to 0 and hugepte_offset() finds nothing.
 */
#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}


/* No-op stub: no CMA reservation for gigantic pages is needed when
 * CONFIG_HUGETLB_PAGE is disabled.
 */
static inline void __init gigantic_hugetlb_cma_reserve(void)
{
}

/* No-op stub: no default hugepage size to set up without hugetlb. */
static inline void __init hugetlbpage_init_defaultsize(void)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

#endif /* _ASM_POWERPC_HUGETLB_H */