cregit-Linux: how code gets into the kernel

Release 4.11 arch/x86/mm/hugetlbpage.c

Directory: arch/x86/mm
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* pick the 4K subpage that @address falls in within the huge page */
	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
 * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, returns 0.
 */

int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

Contributors

Person            Tokens  Prop     Commits  Commit Prop
David Gibson      19      63.33%   1        50.00%
Naoya Horiguchi   11      36.67%   1        50.00%
Total             30      100.00%  2        100.00%
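
The single-expression test above folds several cases together. A self-contained sketch of the same decision on raw entry values (the bit constants are the x86 ones; pmd_none() is approximated here by a zero value, so this is illustration only, not kernel code):

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL		/* x86: bit 0 */
#define _PAGE_PSE	0x080UL		/* x86: bit 7, "page size extension" */

/* Mirrors the pmd_huge() expression above on a raw value. */
static int pmd_huge_bits(unsigned long val)
{
	return val != 0 &&
		(val & (_PAGE_PRESENT | _PAGE_PSE)) != _PAGE_PRESENT;
}

int main(void)
{
	printf("%d\n", pmd_huge_bits(_PAGE_PRESENT | _PAGE_PSE));	/* 1: present huge mapping */
	printf("%d\n", pmd_huge_bits(0x12345000UL));			/* 1: non-present (migration/hwpoison) entry */
	printf("%d\n", pmd_huge_bits(_PAGE_PRESENT));			/* 0: present, non-huge pmd */
	printf("%d\n", pmd_huge_bits(0UL));				/* 0: empty entry */
	return 0;
}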


int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

Contributors

Person       Tokens  Prop     Commits  Commit Prop
Andi Kleen   20      100.00%  2        100.00%
Total        20      100.00%  2        100.00%

#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

Contributors

Person               Tokens  Prop     Commits  Commit Prop
Yanmin Zhang         40      42.11%   1        20.00%
Michel Lespinasse    32      33.68%   1        20.00%
Andi Kleen           11      11.58%   1        20.00%
Wolfgang Wander      7       7.37%    1        20.00%
Kirill A. Shutemov   5       5.26%    1        20.00%
Total                95      100.00%  5        100.00%
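
The align_mask assignment is the part that produces huge-page alignment. A worked example for 2 MB huge pages on x86_64 (constants assumed for illustration):

	huge_page_mask(h)               = ~(2 MB - 1) = 0xffffffffffe00000
	~huge_page_mask(h)              =   2 MB - 1  = 0x00000000001fffff
	PAGE_MASK & ~huge_page_mask(h)  =               0x00000000001ff000

so vm_unmapped_area() must pick an address whose bits 12-20 are zero; together with page alignment that yields a 2 MB aligned address.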


static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

Contributors

Person              Tokens  Prop     Commits  Commit Prop
Michel Lespinasse   57      38.78%   1        20.00%
Yanmin Zhang        53      36.05%   1        20.00%
Wolfgang Wander     17      11.56%   1        20.00%
Andi Kleen          15      10.20%   1        20.00%
Xiao Guangrong      5       3.40%    1        20.00%
Total               147     100.00%  5        100.00%


unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Yanmin Zhang             145     76.72%   1        25.00%
Benjamin Herrenschmidt   24      12.70%   1        25.00%
Andi Kleen               20      10.58%   2        50.00%
Total                    189     100.00%  4        100.00%
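
For context, applications reach hugetlb_get_unmapped_area() through an ordinary mmap() of hugetlb-backed memory; the anonymous MAP_HUGETLB case also lands here because the kernel backs it with a hugetlbfs file. A minimal user-space sketch (assumes 2 MB huge pages are configured and free, e.g. via /proc/sys/vm/nr_hugepages):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;		/* one 2 MB huge page (assumed size) */
	void *p;

	/* addr is NULL and MAP_FIXED is not set, so the kernel chooses the
	 * address via hugetlb_get_unmapped_area() and the helpers above. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("huge page mapping at %p\n", p);
	munmap(p, len);
	return 0;
}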

#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}

Contributors

Person             Tokens  Prop     Commits  Commit Prop
Andi Kleen         74      91.36%   1        33.33%
Borislav Petkov    4       4.94%    1        33.33%
Vaishali Thakkar   3       3.70%    1        33.33%
Total              81      100.00%  3        100.00%

__setup("hugepagesz=", setup_hugepagesz);
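
For illustration, the sizes this parser accepts map to boot command-line options like the following (a sketch; actual reservation also depends on the hugepages= count, and the 1 GB case needs the GBPAGES CPU feature checked above):

	hugepagesz=2M hugepages=64     # PMD_SIZE: adds the 2 MB hstate, reserves 64 pages
	hugepagesz=1G hugepages=2      # PUD_SIZE: accepted only with X86_FEATURE_GBPAGES
	hugepagesz=4M                  # rejected: hugetlb_bad_size(), "hugepagesz: Unsupported page size 4 M"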

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}

Contributors

Person               Tokens  Prop     Commits  Commit Prop
Kirill A. Shutemov   30      85.71%   1        33.33%
Borislav Petkov      4       11.43%   1        33.33%
Vlastimil Babka      1       2.86%    1        33.33%
Total                35      100.00%  3        100.00%

arch_initcall(gigantic_pages_init);
#endif
#endif
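
Once this initcall has registered the 1 GB hstate, gigantic pages can be allocated at runtime through the standard hugetlb sysfs interface, for example (a sketch; assumes a kernel built with the compaction/CMA options from the #if above, a CPU with gigantic-page support, and enough contiguous memory):

	# request two 1 GB pages at runtime
	echo 2 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
	cat /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages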

Overall Contributors

Person                   Tokens  Prop     Commits  Commit Prop
Yanmin Zhang             242     35.12%   1        4.35%
Andi Kleen               152     22.06%   4        17.39%
Michel Lespinasse        89      12.92%   1        4.35%
Kirill A. Shutemov       45      6.53%    2        8.70%
Andrew Morton            31      4.50%    4        17.39%
David Gibson             29      4.21%    1        4.35%
Wolfgang Wander          24      3.48%    1        4.35%
Benjamin Herrenschmidt   24      3.48%    1        4.35%
Vlastimil Babka          18      2.61%    1        4.35%
Naoya Horiguchi          13      1.89%    2        8.70%
Borislav Petkov          8       1.16%    1        4.35%
Xiao Guangrong           5       0.73%    1        4.35%
Ingo Molnar              3       0.44%    1        4.35%
Jeremy Fitzhardinge      3       0.44%    1        4.35%
Vaishali Thakkar         3       0.44%    1        4.35%
Total                    689     100.00%  23       100.00%
Created with cregit.