Release 4.14 arch/x86/mm/hugetlbpage.c

// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry.  Otherwise, it returns 0.
 */

int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
David Gibson        14      46.67%   1        20.00%
Naoya Horiguchi     11      36.67%   1        20.00%
Andrew Morton       3       10.00%   2        40.00%
Christoph Lameter   2       6.67%    1        20.00%
Total               30      100.00%  5        100.00%


int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

Contributors

Person       Tokens  Prop     Commits  CommitProp
Andi Kleen   20      100.00%  2        100.00%
Total        20      100.00%  2        100.00%
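
The two predicates above reduce to simple bit tests on the raw entry value. The snippet below is a standalone sketch, not kernel code: the _PAGE_PRESENT and _PAGE_PSE values mirror the real x86 flag bits (bit 0 and bit 7), but the helper name and the sample entry values are hypothetical, chosen only to show which combinations pmd_huge() treats as hugetlb related.

/*
 * Standalone sketch of the pmd_huge() check; flag values mirror x86
 * (_PAGE_PRESENT = bit 0, _PAGE_PSE = bit 7), sample entries are made up.
 */
#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_PSE	0x080UL

static int pmd_huge_like(unsigned long val)
{
	/*
	 * none (zero)                      -> 0
	 * present without PSE (page table) -> 0
	 * present + PSE (huge mapping)     -> 1
	 * non-present but non-zero
	 * (migration/hwpoison entry)       -> 1
	 */
	return val != 0 &&
		(val & (_PAGE_PRESENT | _PAGE_PSE)) != _PAGE_PRESENT;
}

int main(void)
{
	printf("none:         %d\n", pmd_huge_like(0));				/* 0 */
	printf("page table:   %d\n", pmd_huge_like(_PAGE_PRESENT));		/* 0 */
	printf("huge mapping: %d\n", pmd_huge_like(_PAGE_PRESENT | _PAGE_PSE));	/* 1 */
	printf("migration:    %d\n", pmd_huge_like(0x8400UL));			/* 1, hypothetical non-present entry */
	return 0;
}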

#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_compat_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Yanmin Zhang         40      37.74%   1        14.29%
Michel Lespinasse    31      29.25%   1        14.29%
Andi Kleen           11      10.38%   1        14.29%
Dmitry Safonov       9       8.49%    1        14.29%
Kirill A. Shutemov   8       7.55%    2        28.57%
Wolfgang Wander      7       6.60%    1        14.29%
Total                106     100.00%  7        100.00%
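
For orientation, info.align_mask in the function above asks vm_unmapped_area() for huge-page alignment. A minimal standalone sketch of that arithmetic, assuming the usual x86 values of 4 KiB base pages and a 2 MiB huge page (the macro names here are local to the sketch):

/*
 * Standalone illustration of info.align_mask = PAGE_MASK & ~huge_page_mask(h),
 * assuming 4 KiB base pages and a 2 MiB huge page; not kernel code.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PAGE_MASK	(~((1UL << SKETCH_PAGE_SHIFT) - 1))
#define SKETCH_HPAGE_SHIFT	21	/* 2 MiB huge page */
#define SKETCH_HPAGE_MASK	(~((1UL << SKETCH_HPAGE_SHIFT) - 1))

int main(void)
{
	unsigned long align_mask = SKETCH_PAGE_MASK & ~SKETCH_HPAGE_MASK;

	/*
	 * Prints 0x1ff000: the address bits that must be zero for the mapping
	 * to start on a 2 MiB boundary, excluding the sub-page bits that
	 * vm_unmapped_area() never produces anyway.
	 */
	printf("align_mask = %#lx\n", align_mask);
	return 0;
}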


static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Michel Lespinasse    54      33.54%   1        14.29%
Yanmin Zhang         48      29.81%   1        14.29%
Kirill A. Shutemov   21      13.04%   1        14.29%
Wolfgang Wander      17      10.56%   1        14.29%
Andi Kleen           15      9.32%    1        14.29%
Dmitry Safonov       4       2.48%    1        14.29%
Xiao Guangrong       2       1.24%    1        14.29%
Total                161     100.00%  7        100.00%


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Yanmin Zhang             143     67.77%   1        16.67%
Benjamin Herrenschmidt   24      11.37%   1        16.67%
Kirill A. Shutemov       21      9.95%    1        16.67%
Andi Kleen               20      9.48%    2        33.33%
Hugh Dickins             3       1.42%    1        16.67%
Total                    211     100.00%  6        100.00%
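
hugetlb_get_unmapped_area() is the arch hook that places a huge-page mapping when userspace mmap()s a hugetlbfs file or passes MAP_HUGETLB. A minimal userspace sketch that exercises this path, assuming a 2 MiB default huge page size and that huge pages have already been reserved (e.g. via /proc/sys/vm/nr_hugepages):

/*
 * Userspace sketch: map one anonymous 2 MiB huge page. Anonymous
 * MAP_HUGETLB mappings are backed by hugetlbfs, so address selection
 * goes through hugetlb_get_unmapped_area(). Assumes huge pages are
 * reserved; otherwise mmap() typically fails with ENOMEM.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_2M	(2UL * 1024 * 1024)

int main(void)
{
	void *p = mmap(NULL, HPAGE_2M, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, HPAGE_2M);		/* touch the huge page */
	printf("huge page mapped at %p\n", p);
	munmap(p, HPAGE_2M);
	return 0;
}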

#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Andi Kleen         74      91.36%   1        33.33%
Borislav Petkov    4       4.94%    1        33.33%
Vaishali Thakkar   3       3.70%    1        33.33%
Total              81      100.00%  3        100.00%

__setup("hugepagesz=", setup_hugepagesz);

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
Kirill A. Shutemov   30      85.71%   1        33.33%
Borislav Petkov      4       11.43%   1        33.33%
Vlastimil Babka      1       2.86%    1        33.33%
Total                35      100.00%  3        100.00%

arch_initcall(gigantic_pages_init);
#endif
#endif
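
Because gigantic_pages_init() registers the 1 GiB hstate whenever the CPU has GB-page support and compaction or CMA is configured, 1 GiB pages can then be requested at runtime through the standard hugepages sysfs interface. A small sketch of that interface (the sysfs path is the stock layout; error handling is minimal and root privileges are assumed):

/*
 * Sketch: request two 1 GiB huge pages at runtime via sysfs. The file is
 * only present when the 1 GiB hstate was registered as above; the kernel
 * may allocate fewer pages than requested if memory is fragmented.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "2\n");
	fclose(f);
	return 0;
}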

Overall Contributors

Person                   Tokens  Prop     Commits  CommitProp
Yanmin Zhang             235     31.50%   1        3.12%
Andi Kleen               152     20.38%   4        12.50%
Kirill A. Shutemov       93      12.47%   5        15.62%
Michel Lespinasse        85      11.39%   1        3.12%
Andrew Morton            36      4.83%    5        15.62%
Benjamin Herrenschmidt   24      3.22%    1        3.12%
Wolfgang Wander          24      3.22%    1        3.12%
David Gibson             21      2.82%    1        3.12%
Dmitry Safonov           19      2.55%    1        3.12%
Vlastimil Babka          18      2.41%    1        3.12%
Naoya Horiguchi          13      1.74%    2        6.25%
Borislav Petkov          8       1.07%    1        3.12%
Hugh Dickins             3       0.40%    1        3.12%
Vaishali Thakkar         3       0.40%    1        3.12%
Ingo Molnar              3       0.40%    1        3.12%
Jeremy Fitzhardinge      3       0.40%    1        3.12%
Xiao Guangrong           2       0.27%    1        3.12%
Christoph Lameter        2       0.27%    1        3.12%
Punit Agrawal            1       0.13%    1        3.12%
Greg Kroah-Hartman       1       0.13%    1        3.12%
Total                    746     100.00%  32       100.00%