cregit-Linux: how code gets into the kernel

Release 4.18 arch/x86/mm/init_64.c

Directory: arch/x86/mm
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

#include "mm_internal.h"

#include "ident_map.c"

/*
 * NOTE: pagetable_init() allocates all the fixmap page tables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;

/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);


int force_personality32;

/*
 * noexec32=on|off
 * Control non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */

static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Yinghai Lu44100.00%1100.00%
Total44100.00%1100.00%

__setup("noexec32=", nonx32_setup);
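For illustration only (not part of this file): a minimal userspace sketch of how a 32-bit process can query whether READ_IMPLIES_EXEC is in effect, the personality bit that force_personality32 feeds into for 32-bit tasks. It uses the standard personality(2) query idiom; compiling it as a 32-bit binary to observe the effect of noexec32=off is assumed.

/*
 * Illustrative userspace sketch, not kernel code: report whether the
 * READ_IMPLIES_EXEC personality bit is currently set for this process.
 */
#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
	int persona = personality(0xffffffff);	/* query only, no change */

	if (persona == -1) {
		perror("personality");
		return 1;
	}
	printf("READ_IMPLIES_EXEC is %s\n",
	       (persona & READ_IMPLIES_EXEC) ? "set" : "clear");
	return 0;
}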
static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		const pgd_t *pgd_ref = pgd_offset_k(addr);
		struct page *page;

		/* Check for overflow */
		if (addr < start)
			break;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov187100.00%2100.00%
Total187100.00%2100.00%


static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{
	unsigned long addr;

	for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd_ref = pgd_offset_k(addr);
		const p4d_t *p4d_ref;
		struct page *page;

		/*
		 * With folded p4d, pgd_none() is always false, we need to
		 * handle synchronization on the p4d level.
		 */
		MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
		p4d_ref = p4d_offset(pgd_ref, addr);

		if (p4d_none(*p4d_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			p4d_t *p4d;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(addr);
			p4d = p4d_offset(pgd, addr);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!p4d_none(*p4d_ref) && !p4d_none(*p4d))
				BUG_ON(p4d_page_vaddr(*p4d) != p4d_page_vaddr(*p4d_ref));

			if (p4d_none(*p4d))
				set_p4d(p4d, *p4d_ref);

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Haicheng Li10146.98%111.11%
Kirill A. Shutemov5224.19%333.33%
Jeremy Fitzhardinge2411.16%111.11%
Baoquan He167.44%111.11%
Yasuaki Ishimatsu156.98%111.11%
Linus Torvalds41.86%111.11%
Andrea Arcangeli31.40%111.11%
Total215100.00%9100.00%

/*
 * When memory is added, make sure all the processes' MMs have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov33100.00%2100.00%
Total33100.00%2100.00%

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
		      after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen7193.42%233.33%
Thomas Gleixner22.63%116.67%
Marcin Ślusarz11.32%116.67%
Ingo Molnar11.32%116.67%
Andrew Morton11.32%116.67%
Total76100.00%6100.00%


static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		p4d_t *p4d = (p4d_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, p4d);
		if (p4d != p4d_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       p4d, p4d_offset(pgd, 0));
	}
	return p4d_offset(pgd, vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov5973.75%125.00%
Tejun Heo1417.50%125.00%
Andi Kleen56.25%125.00%
Eduardo Pereira Habkost22.50%125.00%
Total80100.00%4100.00%


static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)spp_getpage();
		p4d_populate(&init_mm, p4d, pud);
		if (pud != pud_offset(p4d, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pud, pud_offset(p4d, 0));
	}
	return pud_offset(p4d, vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo4758.75%133.33%
Kirill A. Shutemov2936.25%133.33%
Andi Kleen45.00%133.33%
Total80100.00%3100.00%


static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #02! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen5872.50%342.86%
Tejun Heo1620.00%114.29%
Jeremy Fitzhardinge45.00%114.29%
Ingo Molnar11.25%114.29%
Kirill A. Shutemov11.25%114.29%
Total80100.00%7100.00%


static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #03!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen3954.93%233.33%
Tejun Heo2636.62%116.67%
Jeremy Fitzhardinge45.63%116.67%
Ingo Molnar11.41%116.67%
Kirill A. Shutemov11.41%116.67%
Total71100.00%6100.00%


static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{
	pmd_t *pmd = fill_pmd(pud, vaddr);
	pte_t *pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one_kernel(vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo2446.15%120.00%
Andi Kleen1834.62%240.00%
Kirill A. Shutemov917.31%120.00%
Andrew Lutomirski11.92%120.00%
Total52100.00%5100.00%


void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{
	p4d_t *p4d = p4d_page + p4d_index(vaddr);
	pud_t *pud = fill_pud(p4d, vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov47100.00%1100.00%
Total47100.00%1100.00%


void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud = pud_page + pud_index(vaddr);

	__set_pte_vaddr(pud, vaddr, new_pte);
}

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov36100.00%1100.00%
Total36100.00%1100.00%


void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	p4d_t *p4d_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}

	p4d_page = p4d_offset(pgd, 0);
	set_pte_vaddr_p4d(p4d_page, vaddr, pteval);
}

Contributors

PersonTokensPropCommitsCommitProp
Eduardo Pereira Habkost6587.84%133.33%
Kirill A. Shutemov810.81%133.33%
Tejun Heo11.35%133.33%
Total74100.00%3100.00%


pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	p4d = fill_p4d(pgd, vaddr);
	pud = fill_pud(p4d, vaddr);

	return fill_pmd(pud, vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo2748.21%250.00%
Jack Steiner1526.79%125.00%
Kirill A. Shutemov1425.00%125.00%
Total56100.00%4100.00%


pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo30100.00%2100.00%
Total30100.00%2100.00%

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size, enum page_cache_mode cache) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pgprot_t prot; pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) | pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache))); BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { pgd = pgd_offset_k((unsigned long)__va(phys)); if (pgd_none(*pgd)) { p4d = (p4d_t *) spp_getpage(); set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE | _PAGE_USER)); } p4d = p4d_offset(pgd, (unsigned long)__va(phys)); if (p4d_none(*p4d)) { pud = (pud_t *) spp_getpage(); set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE | _PAGE_USER)); } pud = pud_offset(p4d, (unsigned long)__va(phys)); if (pud_none(*pud)) { pmd = (pmd_t *) spp_getpage(); set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); } pmd = pmd_offset(pud, phys); BUG_ON(!pmd_none(*pmd)); set_pmd(pmd, __pmd(phys | pgprot_val(prot))); } }

Contributors

PersonTokensPropCommitsCommitProp
Jack Steiner19369.18%133.33%
Kirill A. Shutemov5921.15%133.33%
Juergen Gross279.68%133.33%
Total279100.00%3100.00%


void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

Contributors

PersonTokensPropCommitsCommitProp
Jack Steiner2295.65%150.00%
Juergen Gross14.35%150.00%
Total23100.00%2100.00%


void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

Contributors

PersonTokensPropCommitsCommitProp
Jack Steiner2295.65%150.00%
Juergen Gross14.35%150.00%
Total23100.00%2100.00%

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end. _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void) { unsigned long vaddr = __START_KERNEL_map; unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE; unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; pmd_t *pmd = level2_kernel_pgt; /* * Native path, max_pfn_mapped is not set yet. * Xen has valid max_pfn_mapped set in * arch/x86/xen/mmu.c:xen_setup_kernel_pagetable(). */ if (max_pfn_mapped) vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { if (pmd_none(*pmd)) continue; if (vaddr < (unsigned long) _text || vaddr > end) set_pmd(pmd, __pmd(0)); } }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Gleixner8171.68%120.00%
Yinghai Lu3026.55%240.00%
Hugh Dickins10.88%120.00%
Joerg Roedel10.88%120.00%
Total113100.00%5100.00%

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end, pgprot_t prot) { unsigned long pages = 0, paddr_next; unsigned long paddr_last = paddr_end; pte_t *pte; int i; pte = pte_page + pte_index(paddr); i = pte_index(paddr); for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) { paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE; if (paddr >= paddr_end) { if (!after_bootmem && !e820__mapped_any(paddr & PAGE_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & PAGE_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) set_pte(pte, __pte(0)); continue; } /* * We will re-use the existing mapping. * Xen for example has some special requirements, like mapping * pagetable pages as RO. So assume someone who pre-setup * these mappings are more intelligent. */ if (!pte_none(*pte)) { if (!after_bootmem) pages++; continue; } if (0) pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte); pages++; set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot)); paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE; } update_page_count(PG_LEVEL_4K, pages); return paddr_last; }

Contributors

PersonTokensPropCommitsCommitProp
Jeremy Fitzhardinge11753.42%18.33%
Yinghai Lu4922.37%325.00%
Thomas Garnier3315.07%18.33%
Suresh B. Siddha83.65%216.67%
Jan Beulich52.28%18.33%
Ingo Molnar41.83%216.67%
Dave Hansen20.91%18.33%
Andi Kleen10.46%18.33%
Total219100.00%12100.00%

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical address have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end, unsigned long page_size_mask, pgprot_t prot) { unsigned long pages = 0, paddr_next; unsigned long paddr_last = paddr_end; int i = pmd_index(paddr); for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) { pmd_t *pmd = pmd_page + pmd_index(paddr); pte_t *pte; pgprot_t new_prot = prot; paddr_next = (paddr & PMD_MASK) + PMD_SIZE; if (paddr >= paddr_end) { if (!after_bootmem && !e820__mapped_any(paddr & PMD_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & PMD_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) set_pmd(pmd, __pmd(0)); continue; } if (!pmd_none(*pmd)) { if (!pmd_large(*pmd)) { spin_lock(&init_mm.page_table_lock); pte = (pte_t *)pmd_page_vaddr(*pmd); paddr_last = phys_pte_init(pte, paddr, paddr_end, prot); spin_unlock(&init_mm.page_table_lock); continue; } /* * If we are ok with PG_LEVEL_2M mapping, then we will * use the existing mapping, * * Otherwise, we will split the large page mapping but * use the same existing protection bits except for * large page, so that we don't violate Intel's TLB * Application note (317080) which says, while changing * the page sizes, new and old translations should * not differ with respect to page frame and * attributes. */ if (page_size_mask & (1 << PG_LEVEL_2M)) { if (!after_bootmem) pages++; paddr_last = paddr_next; continue; } new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); } if (page_size_mask & (1<<PG_LEVEL_2M)) { pages++; spin_lock(&init_mm.page_table_lock); set_pte((pte_t *)pmd, pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT, __pgprot(pgprot_val(prot) | _PAGE_PSE))); spin_unlock(&init_mm.page_table_lock); paddr_last = paddr_next; continue; } pte = alloc_low_page(); paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot); spin_lock(&init_mm.page_table_lock); pmd_populate_kernel(&init_mm, pmd, pte); spin_unlock(&init_mm.page_table_lock); } update_page_count(PG_LEVEL_2M, pages); return paddr_last; }

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen7219.57%621.43%
Yinghai Lu6818.48%725.00%
Jan Beulich6718.21%414.29%
Jeremy Fitzhardinge4913.32%27.14%
Suresh B. Siddha4411.96%13.57%
Thomas Garnier297.88%13.57%
Keith Mannthey246.52%13.57%
Matt Tolentino51.36%13.57%
Ingo Molnar41.09%27.14%
Hugh Dickins30.82%13.57%
Dave Hansen20.54%13.57%
Thomas Gleixner10.27%13.57%
Total368100.00%28100.00%

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical address do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end, unsigned long page_size_mask) { unsigned long pages = 0, paddr_next; unsigned long paddr_last = paddr_end; unsigned long vaddr = (unsigned long)__va(paddr); int i = pud_index(vaddr); for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) { pud_t *pud; pmd_t *pmd; pgprot_t prot = PAGE_KERNEL; vaddr = (unsigned long)__va(paddr); pud = pud_page + pud_index(vaddr); paddr_next = (paddr & PUD_MASK) + PUD_SIZE; if (paddr >= paddr_end) { if (!after_bootmem && !e820__mapped_any(paddr & PUD_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & PUD_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) set_pud(pud, __pud(0)); continue; } if (!pud_none(*pud)) { if (!pud_large(*pud)) { pmd = pmd_offset(pud, 0); paddr_last = phys_pmd_init(pmd, paddr, paddr_end, page_size_mask, prot); __flush_tlb_all(); continue; } /* * If we are ok with PG_LEVEL_1G mapping, then we will * use the existing mapping. * * Otherwise, we will split the gbpage mapping but use * the same existing protection bits except for large * page, so that we don't violate Intel's TLB * Application note (317080) which says, while changing * the page sizes, new and old translations should * not differ with respect to page frame and * attributes. */ if (page_size_mask & (1 << PG_LEVEL_1G)) { if (!after_bootmem) pages++; paddr_last = paddr_next; continue; } prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); } if (page_size_mask & (1<<PG_LEVEL_1G)) { pages++; spin_lock(&init_mm.page_table_lock); set_pte((pte_t *)pud, pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); spin_unlock(&init_mm.page_table_lock); paddr_last = paddr_next; continue; } pmd = alloc_low_page(); paddr_last = phys_pmd_init(pmd, paddr, paddr_end, page_size_mask, prot); spin_lock(&init_mm.page_table_lock); pud_populate(&init_mm, pud, pmd); spin_unlock(&init_mm.page_table_lock); } __flush_tlb_all(); update_page_count(PG_LEVEL_1G, pages); return paddr_last; }

Contributors

PersonTokensPropCommitsCommitProp
Matt Tolentino7820.86%14.00%
Andi Kleen6216.58%520.00%
Thomas Garnier5815.51%28.00%
Yinghai Lu5213.90%624.00%
Jan Beulich4411.76%312.00%
Suresh B. Siddha3810.16%28.00%
Keith Mannthey246.42%14.00%
Jeremy Fitzhardinge123.21%28.00%
Ingo Molnar41.07%28.00%
Dave Hansen20.53%14.00%
Total374100.00%25100.00%


static unsigned long __meminit phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, unsigned long page_size_mask) { unsigned long paddr_next, paddr_last = paddr_end; unsigned long vaddr = (unsigned long)__va(paddr); int i = p4d_index(vaddr); if (!pgtable_l5_enabled()) return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask); for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) { p4d_t *p4d; pud_t *pud; vaddr = (unsigned long)__va(paddr); p4d = p4d_page + p4d_index(vaddr); paddr_next = (paddr & P4D_MASK) + P4D_SIZE; if (paddr >= paddr_end) { if (!after_bootmem && !e820__mapped_any(paddr & P4D_MASK, paddr_next, E820_TYPE_RAM) && !e820__mapped_any(paddr & P4D_MASK, paddr_next, E820_TYPE_RESERVED_KERN)) set_p4d(p4d, __p4d(0)); continue; } if (!p4d_none(*p4d)) { pud = pud_offset(p4d, 0); paddr_last = phys_pud_init(pud, paddr, paddr_end, page_size_mask); __flush_tlb_all(); continue; } pud = alloc_low_page(); paddr_last = phys_pud_init(pud, paddr, paddr_end, page_size_mask); spin_lock(&init_mm.page_table_lock); p4d_populate(&init_mm, p4d, pud); spin_unlock(&init_mm.page_table_lock); } __flush_tlb_all(); return paddr_last; }

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov22083.33%428.57%
Pekka J Enberg197.20%17.14%
Matt Tolentino72.65%17.14%
Thomas Garnier62.27%17.14%
Yinghai Lu51.89%214.29%
Andi Kleen41.52%321.43%
Suresh B. Siddha20.76%17.14%
David Shaohua Li10.38%17.14%
Total264100.00%14100.00%

/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. The virtual and physical addresses have to be aligned on PMD
 * level down. It returns the last physical address mapped.
 */
unsigned long __meminit kernel_physical_mapping_init(unsigned long paddr_start, unsigned long paddr_end, unsigned long page_size_mask) { bool pgd_changed = false; unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last; paddr_last = paddr_end; vaddr = (unsigned long)__va(paddr_start); vaddr_end = (unsigned long)__va(paddr_end); vaddr_start = vaddr; for (; vaddr < vaddr_end; vaddr = vaddr_next) { pgd_t *pgd = pgd_offset_k(vaddr); p4d_t *p4d; vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE; if (pgd_val(*pgd)) { p4d = (p4d_t *)pgd_page_vaddr(*pgd); paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end), page_size_mask); continue; } p4d = alloc_low_page(); paddr_last = phys_p4d_init(p4d, __pa(vaddr), __pa(vaddr_end), page_size_mask); spin_lock(&init_mm.page_table_lock); if (pgtable_l5_enabled()) pgd_populate(&init_mm, pgd, p4d); else p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d); spin_unlock(&init_mm.page_table_lock); pgd_changed = true; } if (pgd_changed) sync_global_pgds(vaddr_start, vaddr_end - 1); __flush_tlb_all(); return paddr_last; }

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov4920.25%413.79%
Thomas Garnier3414.05%13.45%
Andi Kleen3414.05%724.14%
Yinghai Lu3112.81%724.14%
Pekka J Enberg3112.81%13.45%
Haicheng Li197.85%13.45%
Jeremy Fitzhardinge177.02%26.90%
Matt Tolentino135.37%13.45%
Jan Beulich72.89%13.45%
Wen Congyang20.83%13.45%
Fengguang Wu20.83%13.45%
Suresh B. Siddha20.83%13.45%
David Shaohua Li10.41%13.45%
Total242100.00%29100.00%

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

Contributors

PersonTokensPropCommitsCommitProp
Yinghai Lu1359.09%233.33%
Tang Chen522.73%116.67%
Tejun Heo313.64%233.33%
Stefan Agner14.55%116.67%
Total22100.00%6100.00%

#endif
void __init paging_init(void) { sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); /* * clear the default setting with node 0 * note: don't use nodes_clear here, that is really clearing when * numa support is not compiled in, and later node_set_state * will not set it back. */ node_clear_state(0, N_MEMORY); if (N_MEMORY != N_NORMAL_MEMORY) node_clear_state(0, N_NORMAL_MEMORY); zone_sizes_init(); }

Contributors

PersonTokensPropCommitsCommitProp
Lai Jiangshan1332.50%112.50%
Andi Kleen922.50%225.00%
Yinghai Lu820.00%225.00%
Pekka J Enberg717.50%225.00%
Matt Tolentino37.50%112.50%
Total40100.00%8100.00%

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG

/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size) { unsigned long end_pfn = PFN_UP(start + size); if (end_pfn > max_pfn) { max_pfn = end_pfn; max_low_pfn = end_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; } }

Contributors

PersonTokensPropCommitsCommitProp
Shaohui Zheng56100.00%1100.00%
Total56100.00%1100.00%


int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap, bool want_memblock) { int ret; ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); WARN_ON_ONCE(ret); /* update max_pfn, max_low_pfn and high_memory */ update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Michal Hocko5588.71%150.00%
Christoph Hellwig711.29%150.00%
Total62100.00%2100.00%


int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, bool want_memblock) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; init_memory_mapping(start, start + size); return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); }

Contributors

PersonTokensPropCommitsCommitProp
Matt Tolentino3252.46%111.11%
Keith Mannthey914.75%111.11%
Christoph Hellwig711.48%111.11%
Michal Hocko58.20%333.33%
Yasunori Goto46.56%111.11%
Dan J Williams23.28%111.11%
Gary Hade23.28%111.11%
Total61100.00%9100.00%

#define PAGE_INUSE 0xFD
static void __meminit free_pagetable(struct page *page, int order) { unsigned long magic; unsigned int nr_pages = 1 << order; /* bootmem page has reserved flag */ if (PageReserved(page)) { __ClearPageReserved(page); magic = (unsigned long)page->freelist; if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) { while (nr_pages--) put_page_bootmem(page++); } else while (nr_pages--) free_reserved_page(page++); } else free_pages((unsigned long)page_address(page), order); }

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang9492.16%133.33%
Jiang Liu76.86%133.33%
Yasuaki Ishimatsu10.98%133.33%
Total102100.00%3100.00%


static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{
	if (altmap)
		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
	else
		free_pagetable(page, get_order(PMD_SIZE));
}

Contributors

PersonTokensPropCommitsCommitProp
Dan J Williams41100.00%1100.00%
Total41100.00%1100.00%


static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov8194.19%150.00%
Wen Congyang55.81%150.00%
Total86100.00%2100.00%


static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang6474.42%133.33%
Kirill A. Shutemov2124.42%133.33%
Dave Hansen11.16%133.33%
Total86100.00%3100.00%


static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	/* free a pud table */
	free_pagetable(p4d_page(*p4d), 0);
	spin_lock(&init_mm.page_table_lock);
	p4d_clear(p4d);
	spin_unlock(&init_mm.page_table_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang6879.07%133.33%
Kirill A. Shutemov1719.77%133.33%
Dave Hansen11.16%133.33%
Total86100.00%3100.00%


static void __meminit remove_pte_table(pte_t *pte_start, unsigned long addr,
		unsigned long end, bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang27799.28%150.00%
Kefeng Wang20.72%150.00%
Total279100.00%2100.00%


static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, bool direct, struct vmem_altmap *altmap) { unsigned long next, pages = 0; pte_t *pte_base; pmd_t *pmd; void *page_addr; pmd = pmd_start + pmd_index(addr); for (; addr < end; addr = next, pmd++) { next = pmd_addr_end(addr, end); if (!pmd_present(*pmd)) continue; if (pmd_large(*pmd)) { if (IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(next, PMD_SIZE)) { if (!direct) free_hugepage_table(pmd_page(*pmd), altmap); spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); pages++; } else { /* If here, we are freeing vmemmap pages. */ memset((void *)addr, PAGE_INUSE, next - addr); page_addr = page_address(pmd_page(*pmd)); if (!memchr_inv(page_addr, PAGE_INUSE, PMD_SIZE)) { free_hugepage_table(pmd_page(*pmd), altmap); spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); } } continue; } pte_base = (pte_t *)pmd_page_vaddr(*pmd); remove_pte_table(pte_base, addr, next, direct); free_pte_table(pte_base, pmd); } /* Call free_pmd_table() in remove_pud_table(). */ if (direct) update_page_count(PG_LEVEL_2M, -pages); }

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang27096.77%133.33%
Christoph Hellwig72.51%133.33%
Dan J Williams20.72%133.33%
Total279100.00%3100.00%


static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, struct vmem_altmap *altmap, bool direct) { unsigned long next, pages = 0; pmd_t *pmd_base; pud_t *pud; void *page_addr; pud = pud_start + pud_index(addr); for (; addr < end; addr = next, pud++) { next = pud_addr_end(addr, end); if (!pud_present(*pud)) continue; if (pud_large(*pud)) { if (IS_ALIGNED(addr, PUD_SIZE) && IS_ALIGNED(next, PUD_SIZE)) { if (!direct) free_pagetable(pud_page(*pud), get_order(PUD_SIZE)); spin_lock(&init_mm.page_table_lock); pud_clear(pud); spin_unlock(&init_mm.page_table_lock); pages++; } else { /* If here, we are freeing vmemmap pages. */ memset((void *)addr, PAGE_INUSE, next - addr); page_addr = page_address(pud_page(*pud)); if (!memchr_inv(page_addr, PAGE_INUSE, PUD_SIZE)) { free_pagetable(pud_page(*pud), get_order(PUD_SIZE)); spin_lock(&init_mm.page_table_lock); pud_clear(pud); spin_unlock(&init_mm.page_table_lock); } } continue; } pmd_base = pmd_offset(pud, 0); remove_pmd_table(pmd_base, addr, next, direct, altmap); free_pmd_table(pmd_base, pud); } if (direct) update_page_count(PG_LEVEL_1G, -pages); }

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang27396.47%133.33%
Christoph Hellwig72.47%133.33%
Kirill A. Shutemov31.06%133.33%
Total283100.00%3100.00%


static void __meminit remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end, struct vmem_altmap *altmap, bool direct) { unsigned long next, pages = 0; pud_t *pud_base; p4d_t *p4d; p4d = p4d_start + p4d_index(addr); for (; addr < end; addr = next, p4d++) { next = p4d_addr_end(addr, end); if (!p4d_present(*p4d)) continue; BUILD_BUG_ON(p4d_large(*p4d)); pud_base = pud_offset(p4d, 0); remove_pud_table(pud_base, addr, next, altmap, direct); /* * For 4-level page tables we do not want to free PUDs, but in the * 5-level case we should free them. This code will have to change * to adapt for boot-time switching between 4 and 5 level page tables. */ if (pgtable_l5_enabled()) free_pud_table(pud_base, p4d); } if (direct) update_page_count(PG_LEVEL_512G, -pages); }

Contributors

PersonTokensPropCommitsCommitProp
Kirill A. Shutemov13292.31%466.67%
Christoph Hellwig74.90%116.67%
Jérôme Glisse42.80%116.67%
Total143100.00%6100.00%

/* start and end are both virtual addresses. */
static void __meminit remove_pagetable(unsigned long start, unsigned long end, bool direct, struct vmem_altmap *altmap) { unsigned long next; unsigned long addr; pgd_t *pgd; p4d_t *p4d; for (addr = start; addr < end; addr = next) { next = pgd_addr_end(addr, end); pgd = pgd_offset_k(addr); if (!pgd_present(*pgd)) continue; p4d = p4d_offset(pgd, 0); remove_p4d_table(p4d, addr, next, altmap, direct); } flush_tlb_all(); }

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang7974.53%120.00%
Yasuaki Ishimatsu1211.32%120.00%
Kirill A. Shutemov87.55%240.00%
Christoph Hellwig76.60%120.00%
Total106100.00%5100.00%


void __ref vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { remove_pagetable(start, end, false, altmap); }

Contributors

PersonTokensPropCommitsCommitProp
Tang Chen2066.67%133.33%
Christoph Hellwig723.33%133.33%
Johannes Weiner310.00%133.33%
Total30100.00%3100.00%

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit kernel_physical_mapping_remove(unsigned long start, unsigned long end) { start = (unsigned long)__va(start); end = (unsigned long)__va(end); remove_pagetable(start, end, true, NULL); }

Contributors

PersonTokensPropCommitsCommitProp
Tang Chen4695.83%150.00%
Christoph Hellwig24.17%150.00%
Total48100.00%2100.00%


int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) { unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; struct page *page = pfn_to_page(start_pfn); struct zone *zone; int ret; /* With altmap the first mapped page is offset from @start */ if (altmap) page += vmem_altmap_offset(altmap); zone = page_zone(page); ret = __remove_pages(zone, start_pfn, nr_pages, altmap); WARN_ON_ONCE(ret); kernel_physical_mapping_remove(start, start + size); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang5757.00%125.00%
Dan J Williams3333.00%125.00%
Christoph Hellwig77.00%125.00%
Tang Chen33.00%125.00%
Total100100.00%4100.00%

#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;
static void __init register_page_bootmem_info(void) { #ifdef CONFIG_NUMA int i; for_each_online_node(i) register_page_bootmem_info_node(NODE_DATA(i)); #endif }

Contributors

PersonTokensPropCommitsCommitProp
Yinghai Lu29100.00%1100.00%
Total29100.00%1100.00%


void __init mem_init(void) { pci_iommu_alloc(); /* clear_bss() already clear the empty_zero_page */ /* this will put all memory onto the freelists */ free_all_bootmem(); after_bootmem = 1; x86_init.hyper.init_after_bootmem(); /* * Must be done after boot memory is put on freelist, because here we * might set fields in deferred struct pages that have not yet been * initialized, and free_all_bootmem() initializes all the reserved * deferred pages for us. */ register_page_bootmem_info(); /* Register memory areas for /proc/kcore */ if (get_gate_vma(&init_mm)) kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); mem_init_print_info(NULL); }

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen3150.82%321.43%
Pavel Tatashin1118.03%214.29%
jia zhang914.75%214.29%
Jiang Liu34.92%214.29%
Andrew Lutomirski23.28%17.14%
Muli Ben-Yehuda23.28%17.14%
Kamezawa Hiroyuki11.64%17.14%
Yinghai Lu11.64%17.14%
Jon Mason11.64%17.14%
Total61100.00%14100.00%

int kernel_set_to_readonly;
void set_kernel_text_rw(void) { unsigned long start = PFN_ALIGN(_text); unsigned long end = PFN_ALIGN(__stop___ex_table); if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read write\n", start, end); /* * Make the kernel identity mapping for text RW. Kernel text * mapping will always be RO. Refer to the comment in * static_protections() in pageattr.c */ set_memory_rw(start, (end - start) >> PAGE_SHIFT); }

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt5194.44%133.33%
Suresh B. Siddha35.56%266.67%
Total54100.00%3100.00%


void set_kernel_text_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long end = PFN_ALIGN(__stop___ex_table); if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read only\n", start, end); /* * Set the kernel identity mapping for text RO. */ set_memory_ro(start, (end - start) >> PAGE_SHIFT); }

Contributors

PersonTokensPropCommitsCommitProp
Steven Rostedt5194.44%133.33%
Suresh B. Siddha35.56%266.67%
Total54100.00%3100.00%


void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));

	debug_checkwx();

	/*
	 * Do this after all of the manipulation of the
	 * kernel text page tables is complete.
	 */
	pti_clone_kernel_text();
}

Contributors

PersonTokensPropCommitsCommitProp
Suresh B. Siddha8034.78%210.53%
Andi Kleen4318.70%15.26%
Arjan van de Ven3816.52%315.79%
Kees Cook146.09%15.26%
Alexander Duyck146.09%15.26%
Steven Rostedt104.35%210.53%
Linus Torvalds83.48%15.26%
Stephen D. Smalley52.17%210.53%
Jan Beulich41.74%15.26%
Mathieu Desnoyers41.74%15.26%
Dave Hansen41.74%15.26%
Jiang Liu20.87%15.26%
Ingo Molnar20.87%15.26%
Yinghai Lu20.87%15.26%
Total230100.00%19100.00%


int kern_addr_valid(unsigned long addr) { unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; if (above != 0 && above != -1UL) return 0; pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) return 0; p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) return 0; pud = pud_offset(p4d, addr); if (pud_none(*pud)) return 0; if (pud_large(*pud)) return pfn_valid(pud_pfn(*pud)); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return 0; if (pmd_large(*pmd)) return pfn_valid(pmd_pfn(*pmd)); pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) return 0; return pfn_valid(pte_pfn(*pte)); }

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen15778.50%360.00%
Kirill A. Shutemov2512.50%120.00%
Mel Gorman189.00%120.00%
Total200100.00%5100.00%

/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hotremoved. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order) { unsigned long size = 1UL << order; if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE) return -EINVAL; set_memory_block_size = size; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Mike Travis39100.00%1100.00%
Total39100.00%1100.00%


static unsigned long probe_memory_block_size(void) { unsigned long boot_mem_end = max_pfn << PAGE_SHIFT; unsigned long bz; /* If memory block size has been set, then use it */ bz = set_memory_block_size; if (bz) goto done; /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */ if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) { bz = MIN_MEMORY_BLOCK_SIZE; goto done; } /* Find the largest allowed block size that aligns to memory end */ for (bz = MAX_BLOCK_SIZE; bz > MIN_MEMORY_BLOCK_SIZE; bz >>= 1) { if (IS_ALIGNED(boot_mem_end, bz)) break; } done: pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20); return bz; }

Contributors

PersonTokensPropCommitsCommitProp
Pavel Tatashin5662.22%120.00%
Yinghai Lu1820.00%120.00%
Nathan Fontenot77.78%120.00%
Mike Travis66.67%120.00%
Seth Jennings33.33%120.00%
Total90100.00%5100.00%

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void) { if (!memory_block_size_probed) memory_block_size_probed = probe_memory_block_size(); return memory_block_size_probed; }

Contributors

PersonTokensPropCommitsCommitProp
Yinghai Lu21100.00%1100.00%
Total21100.00%1100.00%
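For illustration only (not part of this file): a small userspace sketch that reads back the value memory_block_size_bytes() feeds to the memory-hotplug core, which is exposed through sysfs as a hex string.

/*
 * Illustrative userspace sketch, not kernel code: print the memory block
 * size reported by the kernel.
 */
#include <stdio.h>

int main(void)
{
	unsigned long block_size;
	FILE *f = fopen("/sys/devices/system/memory/block_size_bytes", "r");

	if (!f) {
		perror("block_size_bytes");
		return 1;
	}
	if (fscanf(f, "%lx", &block_size) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("memory block size: %lu MiB\n", block_size >> 20);
	return 0;
}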

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
static int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { unsigned long addr; unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; for (addr = start; addr < end; addr = next) { next = pmd_addr_end(addr, end); pgd = vmemmap_pgd_populate(addr, node); if (!pgd) return -ENOMEM; p4d = vmemmap_p4d_populate(pgd, addr, node); if (!p4d) return -ENOMEM; pud = vmemmap_pud_populate(p4d, addr, node); if (!pud) return -ENOMEM; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { void *p; if (altmap) p = altmap_alloc_block_buf(PMD_SIZE, altmap); else p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (p) { pte_t entry; entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE); set_pmd(pmd, __pmd(pte_val(entry))); /* check to see if we have contiguous blocks */ if (p_end != p || node_start != node) { if (p_start) pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", addr_start, addr_end-1, p_start, p_end-1, node_start); addr_start = addr; node_start = node; p_start = p; } addr_end = addr + PMD_SIZE; p_end = p + PMD_SIZE; continue; } else if (altmap) return -ENOMEM; /* no fallback */ } else if (pmd_large(*pmd)) { vmemmap_verify((pte_t *)pmd, node, addr, next); continue; } if (vmemmap_populate_basepages(addr, next, node)) return -ENOMEM; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Lameter16048.78%18.33%
Johannes Weiner5617.07%325.00%
Yinghai Lu5015.24%216.67%
Kirill A. Shutemov257.62%18.33%
Dan J Williams175.18%216.67%
Christoph Hellwig154.57%18.33%
Thomas Gleixner41.22%18.33%
Jeremy Fitzhardinge10.30%18.33%
Total328100.00%12100.00%


int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { int err; if (boot_cpu_has(X86_FEATURE_PSE)) err = vmemmap_populate_hugepages(start, end, node, altmap); else if (altmap) { pr_err_once("%s: no cpu support for altmap allocations\n", __func__); err = -ENOMEM; } else err = vmemmap_populate_basepages(start, end, node); if (!err) sync_global_pgds(start, end - 1); return err; }

Contributors

PersonTokensPropCommitsCommitProp
Johannes Weiner5053.76%225.00%
Dan J Williams2526.88%112.50%
Haicheng Li66.45%112.50%
Borislav Petkov44.30%112.50%
Christoph Lameter33.23%112.50%
Christoph Hellwig33.23%112.50%
Wen Congyang22.15%112.50%
Total93100.00%8100.00%

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr, struct page *start_page, unsigned long nr_pages) { unsigned long addr = (unsigned long)start_page; unsigned long end = (unsigned long)(start_page + nr_pages); unsigned long next; pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; unsigned int nr_pmd_pages; struct page *page; for (; addr < end; addr = next) { pte_t *pte = NULL; pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) { next = (addr + PAGE_SIZE) & PAGE_MASK; continue; } get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO); p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) { next = (addr + PAGE_SIZE) & PAGE_MASK; continue; } get_page_bootmem(section_nr, p4d_page(*p4d), MIX_SECTION_INFO); pud = pud_offset(p4d, addr); if (pud_none(*pud)) { next = (addr + PAGE_SIZE) & PAGE_MASK; continue; } get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO); if (!boot_cpu_has(X86_FEATURE_PSE)) { next = (addr + PAGE_SIZE) & PAGE_MASK; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) continue; get_page_bootmem(section_nr, pmd_page(*pmd), MIX_SECTION_INFO); pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) continue; get_page_bootmem(section_nr, pte_page(*pte), SECTION_INFO); } else { next = pmd_addr_end(addr, end); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) continue; nr_pmd_pages = 1 << get_order(PMD_SIZE); page = pmd_page(*pmd); while (nr_pmd_pages--) get_page_bootmem(section_nr, page++, SECTION_INFO); } } }

Contributors

PersonTokensPropCommitsCommitProp
Yasuaki Ishimatsu30484.21%125.00%
Kirill A. Shutemov4813.30%125.00%
Baoquan He51.39%125.00%
Borislav Petkov41.11%125.00%
Total361100.00%4100.00%

#endif
void __meminit vmemmap_populate_print_last(void) { if (p_start) { pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n", addr_start, addr_end-1, p_start, p_end-1, node_start); p_start = NULL; p_end = NULL; node_start = 0; } }

Contributors

PersonTokensPropCommitsCommitProp
Yinghai Lu4497.78%150.00%
Dan J Williams12.22%150.00%
Total45100.00%2100.00%

#endif

Overall Contributors

PersonTokensPropCommitsCommitProp
Wen Congyang119817.44%31.59%
Kirill A. Shutemov116516.96%73.70%
Andi Kleen70410.25%2111.11%
Yinghai Lu5217.59%2613.76%
Yasuaki Ishimatsu3455.02%42.12%
Jack Steiner2533.68%10.53%
Jeremy Fitzhardinge2293.33%63.17%
Tejun Heo1912.78%52.65%
Suresh B. Siddha1802.62%63.17%
Christoph Lameter1672.43%10.53%
Thomas Garnier1642.39%21.06%
Matt Tolentino1412.05%10.53%
Jan Beulich1301.89%52.65%
Haicheng Li1261.83%21.06%
Dan J Williams1241.81%42.12%
Steven Rostedt1151.67%21.06%
Johannes Weiner1091.59%31.59%
Thomas Gleixner971.41%52.65%
Pavel Tatashin771.12%31.59%
Tang Chen771.12%42.12%
Christoph Hellwig761.11%52.65%
Eduardo Pereira Habkost670.98%10.53%
Pekka J Enberg620.90%42.12%
Michal Hocko600.87%31.59%
Keith Mannthey590.86%21.06%
Shaohui Zheng570.83%10.53%
Mike Travis510.74%10.53%
Arjan van de Ven380.55%31.59%
Juergen Gross290.42%10.53%
Dave Hansen270.39%31.59%
Ingo Molnar220.32%63.17%
Baoquan He210.31%21.06%
Mel Gorman180.26%10.53%
Alexander Duyck140.20%10.53%
Kees Cook140.20%10.53%
Lai Jiangshan130.19%10.53%
Linus Torvalds130.19%31.59%
Jiang Liu120.17%42.12%
Nathan Fontenot110.16%10.53%
jia zhang90.13%21.06%
Borislav Petkov80.12%10.53%
Yasunori Goto70.10%10.53%
Seth Jennings60.09%10.53%
Stephen D. Smalley50.07%21.06%
Muli Ben-Yehuda50.07%10.53%
Hugh Dickins40.06%21.06%
Mathieu Desnoyers40.06%10.53%
Jérôme Glisse40.06%10.53%
Konrad Rzeszutek Wilk30.04%10.53%
Randy Dunlap30.04%10.53%
Andrea Arcangeli30.04%10.53%
David Howells30.04%10.53%
Benjamin Herrenschmidt30.04%10.53%
Andrew Lutomirski30.04%21.06%
Fengguang Wu20.03%10.53%
Kefeng Wang20.03%10.53%
Harvey Harrison20.03%10.53%
Marcin Ślusarz20.03%10.53%
David Shaohua Li20.03%10.53%
Gary Hade20.03%10.53%
Pavel Machek10.01%10.53%
Kamezawa Hiroyuki10.01%10.53%
David Vrabel10.01%10.53%
Jon Mason10.01%10.53%
Stefan Agner10.01%10.53%
Andrew Morton10.01%10.53%
Laura Abbott10.01%10.53%
Joerg Roedel10.01%10.53%
Zhang Yanfei10.01%10.53%
Total6868100.00%189100.00%