Contributors: 13

Author | Tokens | Token Proportion | Commits | Commit Proportion
Song Muchun | 223 | 59.79% | 7 | 29.17%
Mike Kravetz | 68 | 18.23% | 2 | 8.33%
van der Linden, Frank | 42 | 11.26% | 4 | 16.67%
Usama Arif | 20 | 5.36% | 2 | 8.33%
Alexander Duyck | 5 | 1.34% | 1 | 4.17%
Andrew Morton | 3 | 0.80% | 1 | 4.17%
David Rientjes | 3 | 0.80% | 1 | 4.17%
Adrian Bunk | 2 | 0.54% | 1 | 4.17%
David Gibson | 2 | 0.54% | 1 | 4.17%
Gang Li | 2 | 0.54% | 1 | 4.17%
Chris Forbes | 1 | 0.27% | 1 | 4.17%
Greg Kroah-Hartman | 1 | 0.27% | 1 | 4.17%
Vegard Nossum | 1 | 0.27% | 1 | 4.17%
Total | 373 | | 24 |

// SPDX-License-Identifier: GPL-2.0
/*
* HugeTLB Vmemmap Optimization (HVO)
*
* Copyright (c) 2020, ByteDance. All rights reserved.
*
* Author: Muchun Song <songmuchun@bytedance.com>
*/
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/memblock.h>
/*
* Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
* Documentation/mm/vmemmap_dedup.rst.
*/
#define HUGETLB_VMEMMAP_RESERVE_SIZE PAGE_SIZE
#define HUGETLB_VMEMMAP_RESERVE_PAGES (HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
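/*
 * For example, assuming a 4 KiB PAGE_SIZE and a 64-byte struct page (the
 * common x86-64 layout), HUGETLB_VMEMMAP_RESERVE_PAGES is 4096 / 64 = 64:
 * the struct pages of the first 64 base pages of a HugeTLB folio stay backed
 * by a real vmemmap page, while the rest of the folio's vmemmap can be
 * remapped and freed.
 */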
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
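/*
 * hugetlb_vmemmap_restore_folio() rebuilds the full vmemmap mapping of a
 * folio (undoing HVO, allocating vmemmap pages as needed), while
 * hugetlb_vmemmap_optimize_folio() remaps the folio's tail vmemmap pages to
 * a single shared page and frees the remainder to the buddy allocator. The
 * *_folios() variants apply the same operation to a list of folios;
 * hugetlb_vmemmap_restore_folios() moves folios whose vmemmap is no longer
 * optimized onto non_hvo_folios.
 */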
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct list_head *folio_list,
struct list_head *non_hvo_folios);
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list);
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
void hugetlb_vmemmap_init_early(int nid);
void hugetlb_vmemmap_init_late(int nid);
#endif
static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
return pages_per_huge_page(h) * sizeof(struct page);
}
/*
* Return the size, in bytes, of the vmemmap associated with a HugeTLB page
* that can be optimized away and freed to the buddy allocator.
*/
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;
if (!is_power_of_2(sizeof(struct page)))
return 0;
return size > 0 ? size : 0;
}
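/*
 * Worked example, again assuming a 4 KiB PAGE_SIZE and a 64-byte struct
 * page: a 2 MiB HugeTLB page spans 512 base pages, so hugetlb_vmemmap_size()
 * is 512 * 64 = 32 KiB (8 vmemmap pages). Subtracting the one reserved page
 * leaves 28 KiB (7 pages) that HVO can hand back to the buddy allocator.
 * The is_power_of_2() check guards against struct page sizes that would let
 * a struct page straddle a vmemmap page boundary, in which case the tail
 * vmemmap pages would not hold identical content and could not be
 * deduplicated.
 */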
#else
static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
return 0;
}
static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
struct list_head *folio_list,
struct list_head *non_hvo_folios)
{
list_splice_init(folio_list, non_hvo_folios);
return 0;
}
static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
}
static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
}
static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h,
struct list_head *folio_list)
{
}
static inline void hugetlb_vmemmap_init_early(int nid)
{
}
static inline void hugetlb_vmemmap_init_late(int nid)
{
}
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
return hugetlb_vmemmap_optimizable_size(h) != 0;
}
#endif /* _LINUX_HUGETLB_VMEMMAP_H */
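
For context, a minimal sketch of how this API is typically consumed from mm/hugetlb.c. The function below is hypothetical and only illustrates the expected ordering: optimize after a folio has been prepared as a HugeTLB page, restore before it is handed back to the buddy allocator.

#include <linux/hugetlb.h>
#include "hugetlb_vmemmap.h"

/* Hypothetical caller illustrating the optimize/restore pairing. */
static void example_hvo_lifecycle(struct hstate *h, struct folio *folio)
{
	/* After the folio is set up as a HugeTLB page: free its tail vmemmap. */
	if (hugetlb_vmemmap_optimizable(h))
		hugetlb_vmemmap_optimize_folio(h, folio);

	/* ... */

	/* Before dissolving the folio, the full vmemmap must exist again. */
	if (hugetlb_vmemmap_restore_folio(h, folio))
		pr_warn("hugetlb: vmemmap restore failed, folio not freed\n");
}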