Release 4.14: arch/tile/include/asm/hugetlb.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_HUGETLB_H
#define _ASM_TILE_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);
        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 61 | 100.00% | 1 | 100.00% |
Total | 61 | 100.00% | 1 | 100.00% |
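As a quick illustration of the alignment check in prepare_hugepage_range() above: huge_page_mask(h) is the complement of (huge page size - 1), so ANDing with its inverse isolates the low-order offset bits, which must be zero for both addr and len. The sketch below uses an assumed 2 MB huge page size purely for demonstration; tile's actual huge page sizes depend on configuration.

/* Illustration only: alignment test equivalent to the checks above,
 * using an assumed 2 MB huge page size (not necessarily tile's). */
#include <stdio.h>

int main(void)
{
        unsigned long huge_size = 2UL << 20;       /* assumed 2 MB */
        unsigned long mask = ~(huge_size - 1);     /* analogue of huge_page_mask(h) */
        unsigned long addr = 0x40200000UL;         /* 2 MB aligned */
        unsigned long len  = 0x00300000UL;         /* 3 MB: not a multiple of 2 MB */

        printf("addr & ~mask = %#lx (0 means aligned, OK)\n", addr & ~mask);
        printf("len  & ~mask = %#lx (nonzero means -EINVAL)\n", len & ~mask);
        return 0;
}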
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                          unsigned long addr, unsigned long end,
                                          unsigned long floor,
                                          unsigned long ceiling)
{
        free_pgd_range(tlb, addr, end, floor, ceiling);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 30 | 100.00% | 2 | 100.00% |
Total | 30 | 100.00% | 2 | 100.00% |
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
        return ptep_get_and_clear(mm, addr, ptep);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 30 | 100.00% | 1 | 100.00% |
Total | 30 | 100.00% | 1 | 100.00% |
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        ptep_clear_flush(vma, addr, ptep);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        ptep_set_wrprotect(mm, addr, ptep);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Will Deacon | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_HUGETLB_SUPER_PAGES
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        size_t pagesize = huge_page_size(hstate_vma(vma));
        if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
                entry = pte_mksuper(entry);
        return entry;
}
Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
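The condition in arch_make_huge_pte() marks a huge page as "super" (HV_PTE_SUPER) only when its size is not one of the natural page-table level sizes, PMD_SIZE or PUD_SIZE. A minimal sketch of that predicate follows, using assumed 2 MB and 1 GB values as stand-ins for tile's real PMD_SIZE and PUD_SIZE:

/* Sketch only: which sizes the predicate above would tag as "super",
 * with assumed PMD_SIZE = 2 MB and PUD_SIZE = 1 GB (placeholder values). */
#include <stdio.h>

int main(void)
{
        unsigned long pmd_size = 2UL << 20;     /* assumed */
        unsigned long pud_size = 1UL << 30;     /* assumed */
        unsigned long sizes[] = { 64UL << 10, 2UL << 20, 16UL << 20, 1UL << 30 };
        int i;

        for (i = 0; i < 4; i++) {
                int super = (sizes[i] != pud_size && sizes[i] != pmd_size);
                printf("%10lu bytes -> %s\n", sizes[i],
                       super ? "pte_mksuper() applied" : "natural level size");
        }
        return 0;
}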
#define arch_make_huge_pte arch_make_huge_pte
/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
enum {
        HUGE_SHIFT_PGDIR = 0,
        HUGE_SHIFT_PMD = 1,
        HUGE_SHIFT_PAGE = 2,
        HUGE_SHIFT_ENTRIES
};
extern int huge_shift[HUGE_SHIFT_ENTRIES];
#endif
#endif /* _ASM_TILE_HUGETLB_H */
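The huge_shift[] array is only declared in this header; its values are set up elsewhere in the tile code (arch/tile/mm/hugetlbpage.c in this release). As the comment above suggests, each entry scales the page size of the corresponding page-table level for PTEs carrying the HV_PTE_SUPER bit. The sketch below shows that scaling idea only; the base shifts and the example huge_shift[] entries are assumptions for illustration, not tile's actual constants:

/* Sketch only: how an extra log2 shift per level could scale the base
 * page sizes.  The base shifts (30/21/12) and the example huge_shift[]
 * entries are placeholder assumptions, not tile's real values. */
#include <stdio.h>

enum {
        HUGE_SHIFT_PGDIR = 0,
        HUGE_SHIFT_PMD = 1,
        HUGE_SHIFT_PAGE = 2,
        HUGE_SHIFT_ENTRIES
};

int main(void)
{
        int base_shift[HUGE_SHIFT_ENTRIES] = { 30, 21, 12 };  /* PGDIR, PMD, PAGE (assumed) */
        int huge_shift[HUGE_SHIFT_ENTRIES] = { 0, 0, 4 };     /* e.g. 4 KB << 4 = 64 KB */
        int i;

        for (i = 0; i < HUGE_SHIFT_ENTRIES; i++)
                printf("level %d: effective huge page size = %lu bytes\n",
                       i, 1UL << (base_shift[i] + huge_shift[i]));
        return 0;
}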
Overall Contributors
Person | Tokens | Tokens % | Commits | Commits % |
Chris Metcalf | 432 | 96.86% | 3 | 60.00% |
Will Deacon | 11 | 2.47% | 1 | 20.00% |
Gerald Schaefer | 3 | 0.67% | 1 | 20.00% |
Total | 446 | 100.00% | 5 | 100.00% |