Contributors: 19
Author Tokens Token Proportion Commits Commit Proportion
Andrea Arcangeli 102 38.93% 4 13.79%
Song Liu 28 10.69% 1 3.45%
Linus Torvalds (pre-git) 26 9.92% 4 13.79%
Vijay Balakrishna 20 7.63% 1 3.45%
Johannes Weiner 15 5.73% 1 3.45%
Kirill A. Shutemov 13 4.96% 2 6.90%
Zach O'Keefe 11 4.20% 1 3.45%
Christoph Hellwig 10 3.82% 1 3.45%
Yang Shi 8 3.05% 2 6.90%
Srivatsa Vaddagiri 6 2.29% 1 3.45%
Lorenzo Stoakes 4 1.53% 2 6.90%
Linus Torvalds 4 1.53% 1 3.45%
David Rientjes 4 1.53% 1 3.45%
Mel Gorman 3 1.15% 2 6.90%
Andrew Morton 3 1.15% 1 3.45%
Balbir Singh 2 0.76% 1 3.45%
Dhaval Giani 1 0.38% 1 3.45%
Usama Arif 1 0.38% 1 3.45%
Greg Kroah-Hartman 1 0.38% 1 3.45%
Total 262 29


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

#include <linux/mm.h>

/* Max empty (none) PTEs tolerated when collapsing a range; sysfs-tunable. */
extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* sysfs attribute group exposing khugepaged tunables. */
extern struct attribute_group khugepaged_attr_group;

/* One-time init/teardown of the khugepaged subsystem. */
extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
/* (Re)evaluate whether the khugepaged kernel thread should be running. */
extern int start_stop_khugepaged(void);
/* Register/unregister an mm with khugepaged scanning. */
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
/* Consider registering @vma's mm with khugepaged based on @vm_flags. */
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
				 vm_flags_t vm_flags);
/* Notify khugepaged that min_free_kbytes changed. */
extern void khugepaged_min_free_kbytes_update(void);
/* True iff the calling task is the khugepaged kernel thread. */
extern bool current_is_khugepaged(void);
/* Collapse a PTE-mapped THP at @addr; optionally install the PMD mapping. */
extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
				   bool install_pmd);

/*
 * Called on fork: if the parent mm (@oldmm) was registered with
 * khugepaged (MMF_VM_HUGEPAGE set), register the child @mm as well.
 */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (!mm_flags_test(MMF_VM_HUGEPAGE, oldmm))
		return;

	__khugepaged_enter(mm);
}

/*
 * Called on mm teardown: unregister @mm from khugepaged, but only if it
 * was ever registered (MMF_VM_HUGEPAGE set).
 */
static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (!mm_flags_test(MMF_VM_HUGEPAGE, mm))
		return;

	__khugepaged_exit(mm);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* THP disabled: no khugepaged tracking to inherit on fork; no-op. */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
/* THP disabled: nothing to unregister on mm teardown; no-op. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
/* THP disabled: VMAs are never eligible for khugepaged; no-op. */
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
}
/* THP disabled: nothing can be collapsed; return 0 so callers proceed. */
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr, bool install_pmd)
{
	return 0;
}

/* THP disabled: khugepaged does not run, nothing to update; no-op. */
static inline void khugepaged_min_free_kbytes_update(void)
{
}

/* THP disabled: the khugepaged thread never exists, so current is never it. */
static inline bool current_is_khugepaged(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */