Release 4.12: include/linux/huge_mm.h
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                         struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                         pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                         struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
#endif
extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                          unsigned long addr,
                                          pmd_t *pmd,
                                          unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
                                  struct vm_area_struct *vma,
                                  pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
                        struct vm_area_struct *vma,
                        pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                            unsigned long addr, unsigned long end,
                            unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                          unsigned long new_addr, unsigned long old_end,
                          pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                           unsigned long addr, pgprot_t newprot,
                           int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
                       pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
                       pud_t *pud, pfn_t pfn, bool write);
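The vmf_insert_pfn_pmd()/vmf_insert_pfn_pud() helpers let a driver install a huge device mapping directly from its fault handler (DAX is the in-tree caller). Below is a minimal sketch of such a caller; mydrv_huge_fault() and mydrv_phys_for() are hypothetical names, and the physical-address lookup is assumed to come from the device's own memory layout:

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical: resolve the device physical address backing this fault */
static phys_addr_t mydrv_phys_for(struct vm_fault *vmf)
{
        return 0x100000000ULL + ((phys_addr_t)vmf->pgoff << PAGE_SHIFT);
}

/* Hypothetical fault path: map one PMD-sized chunk of device memory */
static int mydrv_huge_fault(struct vm_fault *vmf)
{
        unsigned long addr = vmf->address & HPAGE_PMD_MASK;
        pfn_t pfn = phys_to_pfn_t(mydrv_phys_for(vmf), PFN_DEV | PFN_MAP);

        /* Returns a VM_FAULT_* code, e.g. VM_FAULT_NOPAGE on success */
        return vmf_insert_pfn_pmd(vmf->vma, addr, vmf->pmd,
                                  pfn, vmf->flags & FAULT_FLAG_WRITE);
}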
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
        TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
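Each enumerator above is a bit index into the global transparent_hugepage_flags word (declared further down), so every runtime policy check reduces to a bit test. A one-line illustrative helper, not part of the header:

/* Illustrative only: "always" mode is a single bit test, just as the
 * transparent_hugepage_enabled() macro below performs for real. */
static inline bool thp_always_enabled(void)
{
        return transparent_hugepage_flags & (1UL << TRANSPARENT_HUGEPAGE_FLAG);
}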
struct kobject;
struct kobj_attribute;
extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                          struct kobj_attribute *attr,
                                          const char *buf, size_t count,
                                          enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                         struct kobj_attribute *attr, char *buf,
                                         enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))
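For concreteness, here is how these evaluate on one common configuration; the numbers are architecture-dependent and shown only as a worked example:

/* x86-64 with 4 KiB base pages (illustrative):
 *   HPAGE_PMD_SHIFT = 21  ->  HPAGE_PMD_SIZE = 2 MiB, HPAGE_PMD_NR = 512
 *   HPAGE_PUD_SHIFT = 30  ->  HPAGE_PUD_SIZE = 1 GiB
 */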
extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
#define transparent_hugepage_enabled(__vma)                             \
        ((transparent_hugepage_flags &                                  \
          (1<<TRANSPARENT_HUGEPAGE_FLAG) ||                             \
          (transparent_hugepage_flags &                                 \
           (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&                   \
           ((__vma)->vm_flags & VM_HUGEPAGE))) &&                       \
         !((__vma)->vm_flags & VM_NOHUGEPAGE) &&                        \
         !is_vma_temporary_stack(__vma))
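Unrolled for readability, the macro asks: is the global mode "always", or is it "madvise" with this VMA opted in via MADV_HUGEPAGE; and in either case the VMA must be neither marked MADV_NOHUGEPAGE nor a temporary stack. The following sketch restates the same logic as a function (hypothetical, for clarity only):

static inline bool thp_enabled_for(struct vm_area_struct *vma)
{
        bool always = transparent_hugepage_flags &
                      (1UL << TRANSPARENT_HUGEPAGE_FLAG);
        bool madvised = (transparent_hugepage_flags &
                         (1UL << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) &&
                        (vma->vm_flags & VM_HUGEPAGE);

        return (always || madvised) &&
               !(vma->vm_flags & VM_NOHUGEPAGE) &&
               !is_vma_temporary_stack(vma);
}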
#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()                                \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */
extern unsigned long transparent_hugepage_flags;
extern unsigned long thp_get_unmapped_area(struct file *filp,
                unsigned long addr, unsigned long len, unsigned long pgoff,
                unsigned long flags);
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 16 | 80.00% | 1 | 33.33% |
Shaohua Li | 3 | 15.00% | 1 | 33.33% |
Andrea Arcangeli | 1 | 5.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
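split_huge_page() is the common entry point; passing a NULL list simply leaves the resulting tail pages on the usual LRU lists rather than on a caller-supplied list. A minimal sketch of a caller, assuming the caller already holds a reference on the page (the page must be locked across the call):

#include <linux/pagemap.h>

/* Hypothetical caller: split a THP back into base pages.
 * Returns 0 on success, non-zero if the page could not be split. */
static int example_split(struct page *page)
{
        int ret;

        lock_page(page);
        ret = split_huge_page(page);
        unlock_page(page);
        return ret;
}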
void deferred_split_huge_page(struct page *page);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (pmd_trans_huge(*____pmd)                            \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        } while (0)
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        } while (0)
extern int hugepage_madvise(struct vm_area_struct *vma,
                            unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                  unsigned long start,
                                  unsigned long end,
                                  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
                                         struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
                                         struct vm_area_struct *vma);

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Naoya Horiguchi | 47 | 79.66% | 1 | 20.00% |
Dan J Williams | 6 | 10.17% | 1 | 20.00% |
Sasha Levin | 3 | 5.08% | 1 | 20.00% |
Kirill A. Shutemov | 2 | 3.39% | 1 | 20.00% |
Chen Gang S | 1 | 1.69% | 1 | 20.00% |
Total | 59 | 100.00% | 5 | 100.00% |
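Callers use the NULL return to choose between the huge path and the regular pte path. A sketch of the idiom, with a hypothetical walker helper (mmap_sem is assumed held, as the VM_BUG_ON above enforces):

static bool example_walk_pmd(pmd_t *pmd, struct vm_area_struct *vma)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

        if (!ptl)
                return false;   /* not huge: caller falls back to ptes */

        /* *pmd is a stable huge (or devmap) entry while ptl is held */
        spin_unlock(ptl);
        return true;
}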
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 59 | 100.00% | 1 | 100.00% |
Total | 59 | 100.00% | 1 | 100.00% |
static inline int hpage_nr_pages(struct page *page)
{
        if (unlikely(PageTransHuge(page)))
                return HPAGE_PMD_NR;
        return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rik Van Riel | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
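On a configuration where HPAGE_PMD_NR is 512, a THP head page therefore counts as 512 base pages; callers use this to scale per-page statistics. A trivial hypothetical example:

/* Hypothetical: scale an accounting counter by the compound size */
static void example_account(struct page *page, atomic_long_t *counter)
{
        atomic_long_add(hpage_nr_pages(page), counter);
}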
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
        /* huge_zero_page can be freed by its shrinker under memory pressure */
        return ACCESS_ONCE(huge_zero_page) == page;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yalin Wang | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return is_huge_zero_page(pmd_page(pmd));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
#define hpage_nr_pages(x) 1
#define transparent_hugepage_enabled(__vma) 0
static inline void prep_transhuge_page(struct page *page) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shaohua Li | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static inline int split_huge_page(struct page *page)
{
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrea Arcangeli | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
static inline void deferred_split_huge_page(struct page *page) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct page *page) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Rientjes | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct page *page) {}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 23 | 100.00% | 2 | 100.00% |
Total | 23 | 100.00% | 2 | 100.00% |
#define split_huge_pud(__vma, __pmd, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrea Arcangeli | 26 | 100.00% | 3 | 100.00% |
Total | 26 | 100.00% | 3 | 100.00% |
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrea Arcangeli | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Naoya Horiguchi | 17 | 85.00% | 1 | 50.00% |
Kirill A. Shutemov | 3 | 15.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 15 | 83.33% | 2 | 50.00% |
Jan Kara | 2 | 11.11% | 1 | 25.00% |
Kirill A. Shutemov | 1 | 5.56% | 1 | 25.00% |
Total | 18 | 100.00% | 4 | 100.00% |
static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yalin Wang | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
        return;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 7 | 53.85% | 1 | 50.00% |
Aaron Lu | 6 | 46.15% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd, int flags)
{
        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
                unsigned long addr, pud_t *pud, int flags)
{
        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* _LINUX_HUGE_MM_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 416 | 28.36% | 3 | 5.17% |
Andrea Arcangeli | 330 | 22.49% | 8 | 13.79% |
Kirill A. Shutemov | 257 | 17.52% | 17 | 29.31% |
Naoya Horiguchi | 95 | 6.48% | 3 | 5.17% |
Johannes Weiner | 47 | 3.20% | 2 | 3.45% |
Yalin Wang | 41 | 2.79% | 1 | 1.72% |
Dan J Williams | 35 | 2.39% | 3 | 5.17% |
Rik Van Riel | 35 | 2.39% | 1 | 1.72% |
Mel Gorman | 31 | 2.11% | 4 | 6.90% |
David Rientjes | 31 | 2.11% | 3 | 5.17% |
Toshi Kani | 31 | 2.11% | 1 | 1.72% |
MinChan Kim | 27 | 1.84% | 1 | 1.72% |
Shaohua Li | 26 | 1.77% | 1 | 1.72% |
Aaron Lu | 20 | 1.36% | 2 | 3.45% |
Will Deacon | 11 | 0.75% | 1 | 1.72% |
Jan Kara | 10 | 0.68% | 1 | 1.72% |
Alex Shi | 10 | 0.68% | 1 | 1.72% |
Aneesh Kumar K.V | 5 | 0.34% | 1 | 1.72% |
David Shaohua Li | 4 | 0.27% | 1 | 1.72% |
Sasha Levin | 3 | 0.20% | 1 | 1.72% |
Huang Ying | 1 | 0.07% | 1 | 1.72% |
Chen Gang S | 1 | 0.07% | 1 | 1.72% |
Total | 1467 | 100.00% | 58 | 100.00% |