Release 4.14 arch/sparc/include/asm/hugetlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SPARC64_HUGETLB_H
#define _ASM_SPARC64_HUGETLB_H
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
#ifdef CONFIG_HUGETLB_PAGE
/*
 * One entry of the PUD huge-page patch table. NOTE(review): @addr is
 * presumably the address of a kernel instruction to patch and @insn the
 * replacement instruction encoding -- confirm against the code that
 * walks __pud_huge_patch..__pud_huge_patch_end.
 */
struct pud_huge_patch_entry {
unsigned int addr;
unsigned int insn;
};
/* Start/end markers of the patch table; defined outside this header
 * (presumably by the linker script) -- TODO confirm. */
extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
#endif
/* Install huge PTE @pte at @addr in @mm; out-of-line arch implementation. */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
/* Clear the huge PTE at @addr in @mm and return the value it held. */
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep);
/*
 * sparc64 reserves no address range exclusively for huge pages,
 * so no range ever qualifies as hugepage-only.
 */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	/* Both the start address and the length must be hstate-aligned. */
	if ((addr | len) & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 38 | 62.30% | 1 | 33.33% |
Nitin Gupta | 18 | 29.51% | 1 | 33.33% |
Andi Kleen | 5 | 8.20% | 1 | 33.33% |
Total | 61 | 100.00% | 3 | 100.00% |
/* Intentionally empty: sparc64 needs no extra work for this hook. */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
/* A huge PTE is "none" exactly when the underlying PTE is none. */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
/* Return @pte with its write permission cleared (plain PTE semantics). */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
/*
 * Write-protect the huge mapping at @addr: reinstall the current
 * PTE value with its write permission cleared.
 */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 28 | 70.00% | 1 | 50.00% |
Dave Kleikamp | 12 | 30.00% | 1 | 50.00% |
Total | 40 | 100.00% | 2 | 100.00% |
/*
 * Update the huge PTE at @addr if it differs from @pte; flush the TLB
 * entry when a change was made. Returns 1 if the PTE changed, 0 if not.
 */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	if (pte_same(*ptep, pte))
		return 0;

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	flush_tlb_page(vma, addr);
	return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 37 | 55.22% | 1 | 50.00% |
Dave Kleikamp | 30 | 44.78% | 1 | 50.00% |
Total | 67 | 100.00% | 2 | 100.00% |
/* Read and return the huge PTE that @ptep points to. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t entry = *ptep;

	return entry;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
/* No arch-specific page flags to clear on sparc64: intentionally empty. */
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Will Deacon | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
/*
 * Free page-table pages backing the huge mapping in [addr, end),
 * bounded by @floor/@ceiling; out-of-line arch implementation.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
unsigned long end, unsigned long floor,
unsigned long ceiling);
#endif /* _ASM_SPARC64_HUGETLB_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 242 | 65.05% | 1 | 11.11% |
Nitin Gupta | 68 | 18.28% | 3 | 33.33% |
Dave Kleikamp | 42 | 11.29% | 1 | 11.11% |
Will Deacon | 11 | 2.96% | 1 | 11.11% |
Andi Kleen | 5 | 1.34% | 1 | 11.11% |
Gerald Schaefer | 3 | 0.81% | 1 | 11.11% |
Greg Kroah-Hartman | 1 | 0.27% | 1 | 11.11% |
Total | 372 | 100.00% | 9 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.