/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */

extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;

extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
				 vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
				   bool install_pmd);

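/*
 * Propagate khugepaged registration across fork: if the parent mm was
 * registered with khugepaged (MMF_VM_HUGEPAGE set), enter the new mm too.
 */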
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		__khugepaged_enter(mm);
}

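/*
 * Tear-down hook: unregister the mm from khugepaged, but only if it was
 * previously registered (MMF_VM_HUGEPAGE set).
 */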
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
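/* THP compiled out: khugepaged does not exist, so all hooks are no-ops. */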
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
}
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr, bool install_pmd)
{
	return 0;
}

static inline void khugepaged_min_free_kbytes_update(void)
{
}

static inline bool current_is_khugepaged(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */