Release 4.14: arch/x86/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
* IA-32 Huge TLB Page Support for Kernel.
*
* Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
*/
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>
#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;	/* virtual page frame number */
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
WARN_ON(!PageHead(page));
return page;
}
int pmd_huge(pmd_t pmd)
{
return 0;
}
int pud_huge(pud_t pud)
{
return 0;
}
#else
/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
return !pmd_none(pmd) &&
(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David Gibson | 14 | 46.67% | 1 | 20.00% |
Naoya Horiguchi | 11 | 36.67% | 1 | 20.00% |
Andrew Morton | 3 | 10.00% | 2 | 40.00% |
Christoph Lameter | 2 | 6.67% | 1 | 20.00% |
Total | 30 | 100.00% | 5 | 100.00% |
int pud_huge(pud_t pud)
{
return !!(pud_val(pud) & _PAGE_PSE);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andi Kleen | 20 | 100.00% | 2 | 100.00% |
Total | 20 | 100.00% | 2 | 100.00% |
#endif
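The live versions of pmd_huge() and pud_huge() above reduce to bit tests on the raw page-table entry: a pmd is hugetlb-related if it is non-empty and is either a PSE mapping or a non-present (migration/hwpoison) entry, while a pud only needs the PSE bit. A minimal userspace sketch of the same logic, using hypothetical constants that mirror x86's _PAGE_PRESENT and _PAGE_PSE bits (bits 0 and 7):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical mirrors of the x86 PTE bits used by pmd_huge()/pud_huge(). */
#define X86_PAGE_PRESENT (1UL << 0)
#define X86_PAGE_PSE     (1UL << 7)

/* Model of pmd_huge(): true for a present PSE mapping, and also for a
 * non-present but non-empty entry (migration or hwpoison swap entry). */
static int model_pmd_huge(uint64_t pmd)
{
	return pmd != 0 &&
	       (pmd & (X86_PAGE_PRESENT | X86_PAGE_PSE)) != X86_PAGE_PRESENT;
}

/* Model of pud_huge(): only the PSE bit matters. */
static int model_pud_huge(uint64_t pud)
{
	return !!(pud & X86_PAGE_PSE);
}

int main(void)
{
	printf("present+PSE:  %d\n", model_pmd_huge(X86_PAGE_PRESENT | X86_PAGE_PSE)); /* 1 */
	printf("present only: %d\n", model_pmd_huge(X86_PAGE_PRESENT));                /* 0: normal pmd */
	printf("migration:    %d\n", model_pmd_huge(0x42));                            /* 1: non-present, non-empty */
	printf("empty:        %d\n", model_pmd_huge(0));                               /* 0 */
	printf("pud PSE:      %d\n", model_pud_huge(X86_PAGE_PSE));                    /* 1 */
	return 0;
}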
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.low_limit = get_mmap_base(1);
/*
* If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
* in the full address space.
*/
info.high_limit = in_compat_syscall() ?
task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
return vm_unmapped_area(&info);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Yanmin Zhang | 40 | 37.74% | 1 | 14.29% |
Michel Lespinasse | 31 | 29.25% | 1 | 14.29% |
Andi Kleen | 11 | 10.38% | 1 | 14.29% |
Dmitry Safonov | 9 | 8.49% | 1 | 14.29% |
Kirill A. Shutemov | 8 | 7.55% | 2 | 28.57% |
Wolfgang Wander | 7 | 6.60% | 1 | 14.29% |
Total | 106 | 100.00% | 7 | 100.00% |
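The align_mask expression above keeps only the offset bits between the base page size and the huge page size, which forces vm_unmapped_area() to return hugepage-aligned addresses. A small sketch of the arithmetic, assuming 4 KiB base pages and a 2 MiB huge page size (the x86-64 PMD case):

#include <stdio.h>

int main(void)
{
	/* Assumed sizes: 4 KiB base pages, 2 MiB huge pages. */
	unsigned long page_size = 1UL << 12;
	unsigned long huge_size = 1UL << 21;

	unsigned long page_mask = ~(page_size - 1);   /* PAGE_MASK         */
	unsigned long huge_mask = ~(huge_size - 1);   /* huge_page_mask(h) */

	/* Same expression as info.align_mask above. */
	unsigned long align_mask = page_mask & ~huge_mask;

	/* 0x1ff000: bits 12..20, i.e. page-aligned but not yet 2M-aligned. */
	printf("align_mask = %#lx\n", align_mask);
	return 0;
}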
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct vm_unmapped_area_info info;
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = get_mmap_base(0);
/*
* If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
* in the full address space.
*/
if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
addr = vm_unmapped_area(&info);
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
if (addr & ~PAGE_MASK) {
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
info.high_limit = TASK_SIZE_LOW;
addr = vm_unmapped_area(&info);
}
return addr;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Michel Lespinasse | 54 | 33.54% | 1 | 14.29% |
Yanmin Zhang | 48 | 29.81% | 1 | 14.29% |
Kirill A. Shutemov | 21 | 13.04% | 1 | 14.29% |
Wolfgang Wander | 17 | 10.56% | 1 | 14.29% |
Andi Kleen | 15 | 9.32% | 1 | 14.29% |
Dmitry Safonov | 4 | 2.48% | 1 | 14.29% |
Xiao Guangrong | 2 | 1.24% | 1 | 14.29% |
Total | 161 | 100.00% | 7 | 100.00% |
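The fallback above depends on vm_unmapped_area() returning either a page-aligned address or a negative errno cast to unsigned long, so a non-zero result of addr & ~PAGE_MASK identifies the error case. An illustration of that idiom with stand-in return values:

#include <stdio.h>

#define PAGE_MASK (~((1UL << 12) - 1))
#define ENOMEM    12

int main(void)
{
	/* A page-aligned success value and a negative errno both fit in an
	 * unsigned long; the low bits distinguish the two cases. */
	unsigned long ok   = 0x7f0000000000UL;        /* aligned: success  */
	unsigned long fail = (unsigned long)-ENOMEM;  /* 0xfff...ff4: error */

	printf("ok   is error? %d\n", (ok & ~PAGE_MASK) != 0);   /* 0 */
	printf("fail is error? %d\n", (fail & ~PAGE_MASK) != 0); /* 1 */
	return 0;
}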
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct hstate *h = hstate_file(file);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
if (len & ~huge_page_mask(h))
return -EINVAL;
addr = mpx_unmapped_area_check(addr, len, flags);
if (IS_ERR_VALUE(addr))
return addr;
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
}
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vm_start_gap(vma)))
return addr;
}
if (mm->get_unmapped_area == arch_get_unmapped_area)
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
else
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Yanmin Zhang | 143 | 67.77% | 1 | 16.67% |
Benjamin Herrenschmidt | 24 | 11.37% | 1 | 16.67% |
Kirill A. Shutemov | 21 | 9.95% | 1 | 16.67% |
Andi Kleen | 20 | 9.48% | 2 | 33.33% |
Hugh Dickins | 3 | 1.42% | 1 | 16.67% |
Total | 211 | 100.00% | 6 | 100.00% |
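This arch hook is what ultimately services hugetlb placement for userspace calls such as the one sketched below, which maps a single 2 MiB huge page anonymously. It assumes the kernel has huge pages reserved, e.g. via /proc/sys/vm/nr_hugepages:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL << 20; /* one 2 MiB huge page */

	/* MAP_HUGETLB routes the search through hugetlb_get_unmapped_area();
	 * with a NULL hint, either the bottom-up or top-down helper runs,
	 * depending on the process's mmap layout. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)"); /* likely no huge pages reserved */
		return 1;
	}

	memset(p, 0, len); /* fault the huge page in */
	munmap(p, len);
	return 0;
}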
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
if (ps == PMD_SIZE) {
hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
} else {
hugetlb_bad_size();
printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
ps >> 20);
return 0;
}
return 1;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andi Kleen | 74 | 91.36% | 1 | 33.33% |
Borislav Petkov | 4 | 4.94% | 1 | 33.33% |
Vaishali Thakkar | 3 | 3.70% | 1 | 33.33% |
Total | 81 | 100.00% | 3 | 100.00% |
__setup("hugepagesz=", setup_hugepagesz);
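memparse() accepts the usual K/M/G suffixes, so the boot parameter forms this handler sees are e.g. hugepagesz=2M and hugepagesz=1G. A simplified stand-in for that parsing (not the kernel's memparse, just a sketch of the suffix arithmetic):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's memparse(): parse a number with
 * an optional K/M/G suffix. */
static unsigned long parse_size(const char *s)
{
	char *end;
	unsigned long val = strtoul(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 30; break;
	case 'M': case 'm': val <<= 20; break;
	case 'K': case 'k': val <<= 10; break;
	}
	return val;
}

int main(void)
{
	/* The two sizes setup_hugepagesz() accepts on x86-64. */
	printf("hugepagesz=2M -> %lu bytes\n", parse_size("2M")); /* 2097152    */
	printf("hugepagesz=1G -> %lu bytes\n", parse_size("1G")); /* 1073741824 */
	return 0;
}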
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
/* With compaction or CMA we can allocate gigantic pages at runtime */
if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Kirill A. Shutemov | 30 | 85.71% | 1 | 33.33% |
Borislav Petkov | 4 | 11.43% | 1 | 33.33% |
Vlastimil Babka | 1 | 2.86% | 1 | 33.33% |
Total | 35 | 100.00% | 3 | 100.00% |
arch_initcall(gigantic_pages_init);
#endif
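With this initcall registered, 1 GiB huge pages can also be requested after boot, given enough contiguous (or CMA) memory. A sketch that asks for two of them through the standard sysfs knob; the count is illustrative and the write requires root:

#include <stdio.h>

int main(void)
{
	/* Standard sysfs interface for the 1 GiB hstate registered above. */
	const char *knob =
		"/sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";

	FILE *f = fopen(knob, "w"); /* needs root */
	if (!f) {
		perror("fopen");
		return 1;
	}

	/* Ask the kernel to allocate two gigantic pages at runtime. */
	fprintf(f, "2\n");
	fclose(f);
	return 0;
}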
#endif
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Yanmin Zhang | 235 | 31.50% | 1 | 3.12% |
Andi Kleen | 152 | 20.38% | 4 | 12.50% |
Kirill A. Shutemov | 93 | 12.47% | 5 | 15.62% |
Michel Lespinasse | 85 | 11.39% | 1 | 3.12% |
Andrew Morton | 36 | 4.83% | 5 | 15.62% |
Benjamin Herrenschmidt | 24 | 3.22% | 1 | 3.12% |
Wolfgang Wander | 24 | 3.22% | 1 | 3.12% |
David Gibson | 21 | 2.82% | 1 | 3.12% |
Dmitry Safonov | 19 | 2.55% | 1 | 3.12% |
Vlastimil Babka | 18 | 2.41% | 1 | 3.12% |
Naoya Horiguchi | 13 | 1.74% | 2 | 6.25% |
Borislav Petkov | 8 | 1.07% | 1 | 3.12% |
Hugh Dickins | 3 | 0.40% | 1 | 3.12% |
Vaishali Thakkar | 3 | 0.40% | 1 | 3.12% |
Ingo Molnar | 3 | 0.40% | 1 | 3.12% |
Jeremy Fitzhardinge | 3 | 0.40% | 1 | 3.12% |
Xiao Guangrong | 2 | 0.27% | 1 | 3.12% |
Christoph Lameter | 2 | 0.27% | 1 | 3.12% |
Punit Agrawal | 1 | 0.13% | 1 | 3.12% |
Greg Kroah-Hartman | 1 | 0.13% | 1 | 3.12% |
Total | 746 | 100.00% | 32 | 100.00% |