Contributors: 27
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Gerald Schaefer |
233 |
37.76% |
4 |
7.27% |
Alexandre Ghiti |
95 |
15.40% |
12 |
21.82% |
Peter Xu |
45 |
7.29% |
3 |
5.45% |
Linus Torvalds (pre-git) |
45 |
7.29% |
6 |
10.91% |
Linus Torvalds |
24 |
3.89% |
2 |
3.64% |
Chris Metcalf |
24 |
3.89% |
1 |
1.82% |
Andi Kleen |
22 |
3.57% |
2 |
3.64% |
Naoya Horiguchi |
19 |
3.08% |
2 |
3.64% |
Andrew Morton |
15 |
2.43% |
3 |
5.45% |
Oscar Salvador |
12 |
1.94% |
1 |
1.82% |
Punit Agrawal |
10 |
1.62% |
1 |
1.82% |
Catalin Marinas |
10 |
1.62% |
1 |
1.82% |
Hugh Dickins |
9 |
1.46% |
2 |
3.64% |
Andy Whitcroft |
7 |
1.13% |
1 |
1.82% |
David Mosberger-Tang |
7 |
1.13% |
1 |
1.82% |
Anthony Iliopoulos |
6 |
0.97% |
1 |
1.82% |
Will Deacon |
6 |
0.97% |
1 |
1.82% |
Aneesh Kumar K.V |
5 |
0.81% |
1 |
1.82% |
Kenneth W Chen |
4 |
0.65% |
1 |
1.82% |
Ryan Roberts |
4 |
0.65% |
1 |
1.82% |
David S. Miller |
4 |
0.65% |
1 |
1.82% |
Baolin Wang |
3 |
0.49% |
1 |
1.82% |
David Gibson |
3 |
0.49% |
2 |
3.64% |
Anshuman Khandual |
2 |
0.32% |
1 |
1.82% |
Greg Kroah-Hartman |
1 |
0.16% |
1 |
1.82% |
Rick Edgecombe |
1 |
0.16% |
1 |
1.82% |
Christophe Leroy |
1 |
0.16% |
1 |
1.82% |
Total |
617 |
|
55 |
|
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_HUGETLB_H
#define _ASM_GENERIC_HUGETLB_H
#include <linux/swap.h>
#include <linux/swapops.h>
/* Build a huge-page PTE for @page with protection @pgprot; same encoding as mk_pte(). */
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
return mk_pte(page, pgprot);
}
/* Test the write-permission bit of a huge PTE (non-zero if writable). */
static inline unsigned long huge_pte_write(pte_t pte)
{
return pte_write(pte);
}
/* Test the dirty bit of a huge PTE (non-zero if dirty). */
static inline unsigned long huge_pte_dirty(pte_t pte)
{
return pte_dirty(pte);
}
/*
 * Mark a huge PTE writable. Uses the _novma variant since no VMA is
 * available here to consult for per-VMA write semantics.
 */
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
return pte_mkwrite_novma(pte);
}
#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
/* Generic fallback: write-protect a huge PTE; arches may override. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
return pte_wrprotect(pte);
}
#endif
/* Mark a huge PTE dirty. */
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
return pte_mkdirty(pte);
}
/* Apply new protection bits @newprot to a huge PTE, keeping the PFN. */
static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
return pte_modify(pte, newprot);
}
#ifndef __HAVE_ARCH_HUGE_PTE_MKUFFD_WP
/*
 * Generic fallback: set the userfaultfd write-protect marker on a huge
 * PTE. The entry is also hardware write-protected so the next write
 * faults and can be routed to the uffd handler.
 */
static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
	pte_t marked = pte_mkuffd_wp(pte);

	return huge_pte_wrprotect(marked);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR_UFFD_WP
/* Generic fallback: clear the userfaultfd write-protect marker. */
static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
{
return pte_clear_uffd_wp(pte);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTE_UFFD_WP
/* Generic fallback: test the userfaultfd write-protect marker. */
static inline int huge_pte_uffd_wp(pte_t pte)
{
return pte_uffd_wp(pte);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
/*
 * Generic fallback: clear the huge PTE at @ptep. @sz is the huge page
 * size; it is unused here but arch overrides may need it when one huge
 * mapping spans multiple contiguous entries.
 */
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long sz)
{
pte_clear(mm, addr, ptep);
}
#endif
#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
/*
 * Generic fallback: free page-table pages for the hugetlb range
 * [@addr, @end), bounded by @floor/@ceiling, via the regular
 * free_pgd_range() path.
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
free_pgd_range(tlb, addr, end, floor, ceiling);
}
#endif
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
/*
 * Generic fallback: install huge PTE @pte at @ptep. @sz is the huge
 * page size; unused here, but arch overrides may use it (e.g. for
 * contiguous-PTE mappings).
 */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz)
{
set_pte_at(mm, addr, ptep, pte);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
/* Generic fallback: atomically read and clear the huge PTE at @ptep. */
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
return ptep_get_and_clear(mm, addr, ptep);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
/*
 * Generic fallback: clear the huge PTE at @ptep and flush the TLB for
 * @addr, returning the old entry.
 */
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
return ptep_clear_flush(vma, addr, ptep);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTE_NONE
/* Generic fallback: true if the huge PTE is entirely empty. */
static inline int huge_pte_none(pte_t pte)
{
return pte_none(pte);
}
#endif
/* Please refer to comments above pte_none_mostly() for the usage */
#ifndef __HAVE_ARCH_HUGE_PTE_NONE_MOSTLY
/*
 * Generic fallback: true if the huge PTE is empty or holds only a PTE
 * marker (i.e. no real page mapping).
 */
static inline int huge_pte_none_mostly(pte_t pte)
{
return huge_pte_none(pte) || is_pte_marker(pte);
}
#endif
#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
/*
 * Generic fallback: arch hook to validate a prospective hugepage
 * mapping of [@addr, @addr + @len). No generic constraints, so always
 * succeed (return 0).
 */
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
return 0;
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
/* Generic fallback: write-protect the huge PTE at @ptep in place. */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
ptep_set_wrprotect(mm, addr, ptep);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
/*
 * Generic fallback: update access/dirty bits of the huge PTE at @ptep
 * to those in @pte. Returns non-zero if the entry changed (caller then
 * flushes the TLB).
 */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
#endif
#ifndef __HAVE_ARCH_HUGE_PTEP_GET
/*
 * Generic fallback: read the huge PTE at @ptep. @mm and @addr are
 * unused here; arch overrides may need them (e.g. to fold contiguous
 * entries into one logical PTE).
 */
static inline pte_t huge_ptep_get(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
return ptep_get(ptep);
}
#endif
#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
/*
 * Generic fallback: gigantic pages can be allocated/freed at runtime
 * whenever the arch advertises CONFIG_ARCH_HAS_GIGANTIC_PAGE.
 */
static inline bool gigantic_page_runtime_supported(void)
{
return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE);
}
#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */
#endif /* _ASM_GENERIC_HUGETLB_H */