Release 4.8 arch/powerpc/include/asm/hugetlb.h
#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H
#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>
extern struct kmem_cache *hugepte_cache;
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb-radix.h>
/*
* This should work for other subarchs too. But right now we use the
* new format only for 64bit book3s
*/
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        /*
         * We have only four bits to encode the MMU page size.
         */
        BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
        return __va(hpd.pd & HUGEPD_ADDR_MASK);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 38 | 90.48% | 1 | 50.00% |
| paul mackerras | 4 | 9.52% | 1 | 50.00% |
| Total | 42 | 100.00% | 2 | 100.00% |
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
        return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 22 | 100.00% | 1 | 100.00% |
| Total | 22 | 100.00% | 1 | 100.00% |
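The two helpers above assume a packed hugepd layout: the page-table pointer lives in the high bits of hpd.pd and a four-bit MMU page-size index sits at bit 2 (hence the BUILD_BUG_ON and the >> 2). A minimal standalone sketch of that pack/unpack scheme, using hypothetical DEMO_* masks rather than the kernel's real HUGEPD_* constants:

#include <assert.h>
#include <stdint.h>

/* Hypothetical layout mirroring the helpers above: address in the high
 * bits, a 4-bit page-size index at bit 2. Mask values are illustrative,
 * not the kernel's actual HUGEPD_ADDR_MASK/HUGEPD_SHIFT_MASK. */
#define DEMO_ADDR_MASK  (~0xfffUL)      /* assumed: low 12 bits hold flags */
#define DEMO_SHIFT_MASK 0x3cUL          /* assumed: psize field, bits 2-5 */

static uint64_t demo_pack(uint64_t addr, unsigned int psize)
{
        assert(psize <= 0xf);           /* only four bits, as BUILD_BUG_ON enforces */
        return (addr & DEMO_ADDR_MASK) | (psize << 2);
}

static unsigned int demo_psize(uint64_t pd)
{
        return (pd & DEMO_SHIFT_MASK) >> 2;     /* same extraction as hugepd_mmu_psize() */
}

int main(void)
{
        uint64_t pd = demo_pack(0x10000000UL, 0xe);
        return demo_psize(pd) == 0xe ? 0 : 1;   /* psize round-trips */
}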
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 20 | 100.00% | 1 | 100.00% |
| Total | 20 | 100.00% | 1 | 100.00% |
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__flush_hugetlb_page(vma, vmaddr);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 29 | 100.00% | 1 | 100.00% |
| Total | 29 | 100.00% | 1 | 100.00% |
static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
                                              unsigned long vmaddr)
{
        if (radix_enabled())
                return radix__local_flush_hugetlb_page(vma, vmaddr);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 29 | 100.00% | 1 | 100.00% |
| Total | 29 | 100.00% | 1 | 100.00% |
#else
static inline pte_t *hugepd_page(hugepd_t hpd)
{
        BUG_ON(!hugepd_ok(hpd));
        return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| becky bruce | 38 | 100.00% | 1 | 100.00% |
| Total | 38 | 100.00% | 1 | 100.00% |
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
        return hpd.pd & HUGEPD_SHIFT_MASK;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| becky bruce | 18 | 100.00% | 1 | 100.00% |
| Total | 18 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_PPC_BOOK3S_64 */
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
                                    unsigned pdshift)
{
        /*
         * On FSL BookE, we have multiple higher-level table entries that
         * point to the same hugepte. Just use the first one since they're
         * all identical. So for that case, idx = 0.
         */
        unsigned long idx = 0;
        pte_t *dir = hugepd_page(hpd);

#ifndef CONFIG_PPC_FSL_BOOK3E
        idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

        return dir + idx;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| becky bruce | 62 | 95.38% | 2 | 66.67% |
| aneesh kumar | 3 | 4.62% | 1 | 33.33% |
| Total | 65 | 100.00% | 3 | 100.00% |
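On non-FSL platforms, the index math above selects one hugepte within the directory. A worked example under assumed sizes (a hugepd covering a 1 GB range, so pdshift = 30, filled with 16 MB huge pages, so hugepd_shift() returns 24):

#include <stdio.h>

int main(void)
{
        unsigned long addr = 0x4123000000UL;    /* arbitrary example address */
        unsigned int pdshift = 30, hshift = 24; /* assumed region/page shifts */

        /* Same computation as hugepte_offset(): the offset within the 1 GB
         * region, counted in 16 MB huge pages -> bits 24-29 of addr. */
        unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> hshift;

        printf("idx = %lu\n", idx);             /* prints idx = 35 */
        return 0;
}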
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
                                 unsigned long addr, unsigned *shift);
void flush_dcache_icache_hugepage(struct page *page);
#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr,
                                         unsigned long len)
{
        return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| becky bruce | 23 | 100.00% | 1 | 100.00% |
| Total | 23 | 100.00% | 1 | 100.00% |
#endif
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
                            pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                            unsigned long end, unsigned long floor,
                            unsigned long ceiling);
/*
* The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
* to override the version in mm/hugetlb.c
*/
#define vma_mmu_pagesize vma_mmu_pagesize
/*
* If the arch doesn't supply something else, assume that hugepage
* size aligned regions are ok without further preparation.
*/
static inline int prepare_hugepage_range(struct file *file,
                                         unsigned long addr, unsigned long len)
{
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (addr & ~huge_page_mask(h))
                return -EINVAL;
        return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| gerald schaefer | 38 | 62.30% | 1 | 33.33% |
| jon tollefson | 18 | 29.51% | 1 | 33.33% |
| andi kleen | 5 | 8.20% | 1 | 33.33% |
| Total | 61 | 100.00% | 3 | 100.00% |
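huge_page_mask(h) clears the offset bits within one huge page, so len & ~huge_page_mask(h) is non-zero exactly when len is not a multiple of the huge page size (and likewise for addr). A small sketch of that check, assuming 16 MB huge pages:

#include <stdio.h>

int main(void)
{
        unsigned long mask = ~((1UL << 24) - 1); /* assumed 16 MB huge pages */
        unsigned long ok_len  = 0x1000000UL;     /* 16 MB: a multiple, accepted */
        unsigned long bad_len = 0x1800000UL;     /* 24 MB: not a multiple */

        printf("%s\n", (ok_len & ~mask) ? "-EINVAL" : "ok");   /* ok */
        printf("%s\n", (bad_len & ~mask) ? "-EINVAL" : "ok");  /* -EINVAL */
        return 0;
}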
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| david gibson | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
        return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
        return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| david gibson | 38 | 62.30% | 1 | 33.33% |
| becky bruce | 21 | 34.43% | 1 | 33.33% |
| aneesh kumar | 2 | 3.28% | 1 | 33.33% |
| Total | 61 | 100.00% | 3 | 100.00% |
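Passing ~0UL as the clear mask (and 0 as the set mask) makes pte_update() zero every bit of the PTE while returning its previous value. A sketch of that read-modify-write semantics; demo_pte_update() here is a hypothetical, non-atomic stand-in (the kernel version uses larx/stcx. loops):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for pte_update(): store (old & ~clr) | set,
 * return the old value. Not atomic, unlike the real kernel helper. */
static uint64_t demo_pte_update(uint64_t *ptep, uint64_t clr, uint64_t set)
{
        uint64_t old = *ptep;
        *ptep = (old & ~clr) | set;
        return old;
}

int main(void)
{
        uint64_t pte = 0x8000000000000186ULL;   /* arbitrary example PTE bits */
        uint64_t old = demo_pte_update(&pte, ~0ULL, 0);

        /* Matches huge_ptep_get_and_clear(): new PTE is 0, old bits returned. */
        printf("old=%#llx new=%#llx\n",
               (unsigned long long)old, (unsigned long long)pte);
        return 0;
}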
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
{
        pte_t pte;

        pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
        flush_hugetlb_page(vma, addr);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| david gibson | 24 | 55.81% | 1 | 33.33% |
| gerald schaefer | 18 | 41.86% | 1 | 33.33% |
| aneesh kumar | 1 | 2.33% | 1 | 33.33% |
| Total | 43 | 100.00% | 3 | 100.00% |
static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| gerald schaefer | 16 | 100.00% | 1 | 100.00% |
| Total | 16 | 100.00% | 1 | 100.00% |
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| gerald schaefer | 16 | 100.00% | 1 | 100.00% |
| Total | 16 | 100.00% | 1 | 100.00% |
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                             unsigned long addr, pte_t *ptep,
                                             pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
        /*
         * The "return 1" forces a call of update_mmu_cache, which will write
         * a TLB entry. Without this, platforms that don't do a write of the
         * TLB entry in the TLB miss handler asm will fault ad infinitum.
         */
        ptep_set_access_flags(vma, addr, ptep, pte, dirty);
        return 1;
#else
        return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| gerald schaefer | 40 | 62.50% | 1 | 33.33% |
| becky bruce | 24 | 37.50% | 2 | 66.67% |
| Total | 64 | 100.00% | 3 | 100.00% |
static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| gerald schaefer | 15 | 100.00% | 1 | 100.00% |
| Total | 15 | 100.00% | 1 | 100.00% |
static inline void arch_clear_hugepage_flags(struct page *page)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| will deacon | 11 | 100.00% | 1 | 100.00% |
| Total | 11 | 100.00% | 1 | 100.00% |
#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| becky bruce | 15 | 100.00% | 1 | 100.00% |
| Total | 15 | 100.00% | 1 | 100.00% |
#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
                                    unsigned pdshift)
{
        return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| aneesh kumar | 21 | 100.00% | 2 | 100.00% |
| Total | 21 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_HUGETLB_PAGE */
/*
* FSL Book3E platforms require special gpage handling - the gpages
* are reserved early in the boot process by memblock instead of via
* the .dts as on IBM platforms.
*/
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| becky bruce | 8 | 100.00% | 1 | 100.00% |
| Total | 8 | 100.00% | 1 | 100.00% |
#endif
#endif /* _ASM_POWERPC_HUGETLB_H */
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| becky bruce | 287 | 34.25% | 6 | 26.09% |
| gerald schaefer | 201 | 23.99% | 4 | 17.39% |
| aneesh kumar | 184 | 21.96% | 6 | 26.09% |
| david gibson | 123 | 14.68% | 2 | 8.70% |
| jon tollefson | 18 | 2.15% | 1 | 4.35% |
| will deacon | 11 | 1.31% | 1 | 4.35% |
| andi kleen | 5 | 0.60% | 1 | 4.35% |
| mel gorman | 5 | 0.60% | 1 | 4.35% |
| paul mackerras | 4 | 0.48% | 1 | 4.35% |
| Total | 838 | 100.00% | 23 | 100.00% |