cregit-Linux how code gets into the kernel

Release 4.14 arch/arm64/mm/init.c

Directory: arch/arm64/mm
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */

s64 memstart_addr __ro_after_init = -1;

phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_BLK_DEV_INITRD

static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                initrd_start = start;
                initrd_end = start + size;
        }
        return 0;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          60   93.75%        1      50.00%
Rob Herring               4    6.25%        1      50.00%
Total                    64  100.00%        2     100.00%
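
For illustration, early_initrd() above expects a physical start address and a size separated by a comma (the handler is registered via early_param() just below), and memparse() accepts K/M/G suffixes. A boot argument such as the following, with a purely hypothetical load address, would set initrd_start and initrd_end:

        initrd=0x84000000,16M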

early_param("initrd", early_initrd); #endif #ifdef CONFIG_KEXEC_CORE /* * reserve_crashkernel() - reserves memory for crash kernel * * This function reserves memory area given in "crashkernel=" kernel command * line parameter. The memory reserved is used by dump capture kernel when * primary kernel is crashing. */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_base, crash_size;
        int ret;

        ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                                &crash_size, &crash_base);
        /* no crashkernel= or invalid value specified */
        if (ret || !crash_size)
                return;

        crash_size = PAGE_ALIGN(crash_size);

        if (crash_base == 0) {
                /* Current arm64 boot protocol requires 2MB alignment */
                crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
                                crash_size, SZ_2M);
                if (crash_base == 0) {
                        pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
                                crash_size);
                        return;
                }
        } else {
                /* User specifies base address explicitly. */
                if (!memblock_is_region_memory(crash_base, crash_size)) {
                        pr_warn("cannot reserve crashkernel: region is not memory\n");
                        return;
                }

                if (memblock_is_region_reserved(crash_base, crash_size)) {
                        pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
                        return;
                }

                if (!IS_ALIGNED(crash_base, SZ_2M)) {
                        pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
                        return;
                }
        }
        memblock_reserve(crash_base, crash_size);

        pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
                crash_base, crash_base + crash_size, crash_size >> 20);

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro         184  100.00%        1     100.00%
Total                   184  100.00%        1     100.00%
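
As a usage sketch (values hypothetical), reserve_crashkernel() accepts both forms parsed by parse_crashkernel(): a bare size, for which a 2MB-aligned base is chosen below ARCH_LOW_ADDRESS_LIMIT, or an explicit size@base naming existing, unreserved, 2MB-aligned memory:

        crashkernel=512M
        crashkernel=512M@0x60000000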


static void __init kexec_reserve_crashkres_pages(void)
{
#ifdef CONFIG_HIBERNATION
        phys_addr_t addr;
        struct page *page;

        if (!crashk_res.end)
                return;

        /*
         * To reduce the size of hibernation image, all the pages are
         * marked as Reserved initially.
         */
        for (addr = crashk_res.start; addr < (crashk_res.end + 1);
             addr += PAGE_SIZE) {
                page = phys_to_page(addr);
                SetPageReserved(page);
        }
#endif
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro          67  100.00%        1     100.00%
Total                    67  100.00%        1     100.00%

#else
static void __init reserve_crashkernel(void) { }

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro           8  100.00%        1     100.00%
Total                     8  100.00%        1     100.00%


static void __init kexec_reserve_crashkres_pages(void) { }

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro           8  100.00%        1     100.00%
Total                     8  100.00%        1     100.00%

#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
                const char *uname, int depth, void *data)
{
        const __be32 *reg;
        int len;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;

        reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
        if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
                return 1;

        elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
        elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

        return 1;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro         102  100.00%        1     100.00%
Total                   102  100.00%        1     100.00%
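
The property consumed above lives in the /chosen node of the flattened device tree, and is typically injected by the kexec tooling when loading the dump-capture kernel. A minimal sketch, assuming two-cell addresses and sizes (per dt_root_addr_cells/dt_root_size_cells) and hypothetical values:

        /chosen {
                linux,elfcorehdr = <0x0 0x9fe00000 0x0 0x10000>;
        };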

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about primary kernel's core image and is used by a dump
 * capture kernel to access the system memory on primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
        of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

        if (!elfcorehdr_size)
                return;

        if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
                pr_warn("elfcorehdr is overlapped\n");
                return;
        }

        memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

        pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
                elfcorehdr_size >> 10, elfcorehdr_addr);
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro          57  100.00%        1     100.00%
Total                    57  100.00%        1     100.00%

#else
static void __init reserve_elfcorehdr(void) { }

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro           8  100.00%        1     100.00%
Total                     8  100.00%        1     100.00%

#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
        phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
        return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          36   97.30%        1      50.00%
JiSheng Zhang             1    2.70%        1      50.00%
Total                    37  100.00%        2     100.00%
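
Worked example: if DRAM starts at 0x8080000000, masking with GENMASK_ULL(63, 32) yields an offset of 0x8000000000, so the ZONE_DMA limit becomes min(0x8100000000, end of DRAM) and 32-bit devices are assumed to reach it via a DMA offset. If DRAM starts below 4GB, the offset is 0 and the limit is simply min(4GB, end of DRAM).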

#ifdef CONFIG_NUMA
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};

        if (IS_ENABLED(CONFIG_ZONE_DMA))
                max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
        max_zone_pfns[ZONE_NORMAL] = max;

        free_area_init_nodes(max_zone_pfns);
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Ganapatrao Kulkarni      56  100.00%        1     100.00%
Total                    56  100.00%        1     100.00%

#else
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
        struct memblock_region *reg;
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        unsigned long max_dma = min;

        memset(zone_size, 0, sizeof(zone_size));

        /* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
        max_dma = PFN_DOWN(arm64_dma_phys_limit);
        zone_size[ZONE_DMA] = max_dma - min;
#endif
        zone_size[ZONE_NORMAL] = max - max_dma;

        memcpy(zhole_size, zone_size, sizeof(zhole_size));

        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start >= max)
                        continue;

#ifdef CONFIG_ZONE_DMA
                if (start < max_dma) {
                        unsigned long dma_end = min(end, max_dma);
                        zhole_size[ZONE_DMA] -= dma_end - start;
                }
#endif
                if (end > max_dma) {
                        unsigned long normal_end = min(end, max);
                        unsigned long normal_start = max(start, max_dma);
                        zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
                }
        }

        free_area_init_node(0, zone_size, min, zhole_size);
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas         198   94.74%        4      66.67%
Robin Murphy              9    4.31%        1      16.67%
Will Deacon               2    0.96%        1      16.67%
Total                   209  100.00%        6     100.00%

#endif /* CONFIG_NUMA */

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_map_memory(pfn << PAGE_SHIFT);
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          16   94.12%        1      50.00%
Ard Biesheuvel            1    5.88%        1      50.00%
Total                    17  100.00%        2     100.00%

EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void) { }

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas           7   87.50%        1      50.00%
JiSheng Zhang             1   12.50%        1      50.00%
Total                     8  100.00%        2     100.00%

#else
static void __init arm64_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                int nid = memblock_get_region_node(reg);

                memory_present(nid, memblock_region_memory_base_pfn(reg),
                                memblock_region_memory_end_pfn(reg));
        }
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          33   73.33%        1      25.00%
Mark Rutland              6   13.33%        1      25.00%
Ganapatrao Kulkarni       5   11.11%        1      25.00%
JiSheng Zhang             1    2.22%        1      25.00%
Total                    45  100.00%        4     100.00%

#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
        if (!p)
                return 1;

        memory_limit = memparse(p, &p) & PAGE_MASK;
        pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

        return 0;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Mark Rutland             43  100.00%        1     100.00%
Total                    43  100.00%        1     100.00%

early_param("mem", early_mem);
static int __init early_init_dt_scan_usablemem(unsigned long node,
                const char *uname, int depth, void *data)
{
        struct memblock_region *usablemem = data;
        const __be32 *reg;
        int len;

        if (depth != 1 || strcmp(uname, "chosen") != 0)
                return 0;

        reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
        if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
                return 1;

        usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
        usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

        return 1;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro         113  100.00%        1     100.00%
Total                   113  100.00%        1     100.00%


static void __init fdt_enforce_memory_region(void)
{
        struct memblock_region reg = {
                .size = 0,
        };

        of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

        if (reg.size)
                memblock_cap_memory_range(reg.base, reg.size);
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
AKASHI Takahiro          45  100.00%        1     100.00%
Total                    45  100.00%        1     100.00%
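
fdt_enforce_memory_region() caps memblock to the range named by the linux,usable-memory-range property scanned above, which lets a crash dump kernel confine itself to the crashkernel reservation. A minimal sketch, again assuming two-cell addresses and sizes and hypothetical values:

        /chosen {
                linux,usable-memory-range = <0x0 0x60000000 0x0 0x20000000>;
        };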


void __init arm64_memblock_init(void)
{
        const s64 linear_region_size = -(s64)PAGE_OFFSET;

        /* Handle linux,usable-memory-range property */
        fdt_enforce_memory_region();

        /*
         * Ensure that the linear region takes up exactly half of the kernel
         * virtual address space. This way, we can distinguish a linear address
         * from a kernel/module/vmalloc address by testing a single bit.
         */
        BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

        /*
         * Select a suitable value for the base of physical memory.
         */
        memstart_addr = round_down(memblock_start_of_DRAM(),
                                   ARM64_MEMSTART_ALIGN);

        /*
         * Remove the memory that we will not be able to cover with the
         * linear mapping. Take care not to clip the kernel which may be
         * high in memory.
         */
        memblock_remove(max_t(u64, memstart_addr + linear_region_size,
                        __pa_symbol(_end)), ULLONG_MAX);
        if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
                /* ensure that memstart_addr remains sufficiently aligned */
                memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
                                         ARM64_MEMSTART_ALIGN);
                memblock_remove(0, memstart_addr);
        }

        /*
         * Apply the memory limit if it was set. Since the kernel may be loaded
         * high up in memory, add back the kernel region that must be accessible
         * via the linear mapping.
         */
        if (memory_limit != (phys_addr_t)ULLONG_MAX) {
                memblock_mem_limit_remove_map(memory_limit);
                memblock_add(__pa_symbol(_text), (u64)(_end - _text));
        }

        if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
                /*
                 * Add back the memory we just removed if it results in the
                 * initrd to become inaccessible via the linear mapping.
                 * Otherwise, this is a no-op
                 */
                u64 base = initrd_start & PAGE_MASK;
                u64 size = PAGE_ALIGN(initrd_end) - base;

                /*
                 * We can only add back the initrd memory if we don't end up
                 * with more memory than we can address via the linear mapping.
                 * It is up to the bootloader to position the kernel and the
                 * initrd reasonably close to each other (i.e., within 32 GB of
                 * each other) so that all granule/#levels combinations can
                 * always access both.
                 */
                if (WARN(base < memblock_start_of_DRAM() ||
                         base + size > memblock_start_of_DRAM() +
                                       linear_region_size,
                        "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
                        initrd_start = 0;
                } else {
                        memblock_remove(base, size); /* clear MEMBLOCK_ flags */
                        memblock_add(base, size);
                        memblock_reserve(base, size);
                }
        }

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern u16 memstart_offset_seed;
                u64 range = linear_region_size -
                            (memblock_end_of_DRAM() - memblock_start_of_DRAM());

                /*
                 * If the size of the linear region exceeds, by a sufficient
                 * margin, the size of the region that the available physical
                 * memory spans, randomize the linear region as well.
                 */
                if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
                        range = range / ARM64_MEMSTART_ALIGN + 1;
                        memstart_addr -= ARM64_MEMSTART_ALIGN *
                                         ((range * memstart_offset_seed) >> 16);
                }
        }

        /*
         * Register the kernel text, kernel data, initrd, and initial
         * pagetables with memblock.
         */
        memblock_reserve(__pa_symbol(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                memblock_reserve(initrd_start, initrd_end - initrd_start);

                /* the generic initrd code expects virtual addresses */
                initrd_start = __phys_to_virt(initrd_start);
                initrd_end = __phys_to_virt(initrd_end);
        }
#endif

        early_init_fdt_scan_reserved_mem();

        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                arm64_dma_phys_limit = max_zone_dma_phys();
        else
                arm64_dma_phys_limit = PHYS_MASK + 1;

        reserve_crashkernel();

        reserve_elfcorehdr();

        dma_contiguous_reserve(arm64_dma_phys_limit);

        memblock_allow_resize();
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Ard Biesheuvel          272   75.77%        7      33.33%
Catalin Marinas          56   15.60%        4      19.05%
AKASHI Takahiro          10    2.79%        3      14.29%
Laura Abbott              7    1.95%        2       9.52%
Mark Rutland              5    1.39%        2       9.52%
Rob Herring               5    1.39%        1       4.76%
Marek Szyprowski          3    0.84%        1       4.76%
Dennis Chen               1    0.28%        1       4.76%
Total                   359  100.00%       21     100.00%
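
To make the BUILD_BUG_ON above concrete: with VA_BITS=48, PAGE_OFFSET is 0xffff800000000000, so linear_region_size = -(s64)PAGE_OFFSET = 2^47 bytes (128TB), exactly half of the kernel's 256TB half of the virtual address space; with VA_BITS=39 the linear region is 2^38 bytes (256GB). A single address bit therefore distinguishes linear-map addresses from kernel/module/vmalloc addresses.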


void __init bootmem_init(void)
{
        unsigned long min, max;

        min = PFN_UP(memblock_start_of_DRAM());
        max = PFN_DOWN(memblock_end_of_DRAM());

        early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

        max_pfn = max_low_pfn = max;

        arm64_numa_init();
        /*
         * Sparsemem tries to allocate bootmem in memory_present(), so must be
         * done after the fixed reservations.
         */
        arm64_memory_present();

        sparse_init();
        zone_sizes_init(min, max);

        high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
        memblock_dump_all();
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          60   73.17%        1      33.33%
Vladimir Murzin          11   13.41%        1      33.33%
Ganapatrao Kulkarni      11   13.41%        1      33.33%
Total                    82  100.00%        3     100.00%

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and round start upwards and end
         * downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these, free the section of the
         * memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          96  100.00%        1     100.00%
Total                    96  100.00%        1     100.00%

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist due
                 * to SPARSEMEM sections which aren't present.
                 */
                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
                /*
                 * If we had a previous bank, and there is a space between the
                 * current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas         117   96.69%        1      50.00%
Dave P Martin             4    3.31%        1      50.00%
Total                   121  100.00%        2     100.00%

#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb_init(1);
        else
                swiotlb_force = SWIOTLB_NO_FORCE;

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
        free_unused_memmap();
#endif
        /* this will put all unused low memory onto the freelists */
        free_all_bootmem();

        kexec_reserve_crashkres_pages();

        mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
        pr_notice("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
                  MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
        pr_notice("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                  MLM(MODULES_VADDR, MODULES_END));
        pr_notice("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
                  MLG(VMALLOC_START, VMALLOC_END));
        pr_notice("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                  MLK_ROUNDUP(_text, _etext));
        pr_notice("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                  MLK_ROUNDUP(__start_rodata, __init_begin));
        pr_notice("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                  MLK_ROUNDUP(__init_begin, __init_end));
        pr_notice("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                  MLK_ROUNDUP(_sdata, _edata));
        pr_notice("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
                  MLK_ROUNDUP(__bss_start, __bss_stop));
        pr_notice("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
                  MLK(FIXADDR_START, FIXADDR_TOP));
        pr_notice("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                  MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        pr_notice("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
                  MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
        pr_notice("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
                  MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
                      (unsigned long)virt_to_page(high_memory)));
#endif
        pr_notice("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
                  MLM(__phys_to_virt(memblock_start_of_DRAM()),
                      (unsigned long)high_memory));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can be
         * detected at build time already.
         */
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

        /*
         * Make sure we chose the upper bound of sizeof(struct page)
         * correctly.
         */
        BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));

        if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get anywhere without
                 * overcommit, so turn it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas         191   52.47%        4      18.18%
Ard Biesheuvel           75   20.60%        6      27.27%
Kefeng Wang              35    9.62%        2       9.09%
Mark Rutland             13    3.57%        1       4.55%
Linus Walleij            12    3.30%        1       4.55%
JiSheng Zhang            12    3.30%        1       4.55%
Jiang Liu                 7    1.92%        2       9.09%
Jeremy Linton             6    1.65%        1       4.55%
Alexander Graf            5    1.37%        1       4.55%
AKASHI Takahiro           3    0.82%        1       4.55%
Ganapatrao Kulkarni       3    0.82%        1       4.55%
Geert Uytterhoeven        2    0.55%        1       4.55%
Total                   364  100.00%       22     100.00%


void free_initmem(void)
{
        free_reserved_area(lm_alias(__init_begin), lm_alias(__init_end),
                           0, "unused kernel");
        /*
         * Unmap the __init region but leave the VM area in place. This
         * prevents the region from being reused for kernel modules, which
         * is not supported by kallsyms.
         */
        unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Kefeng Wang              17   40.48%        1      16.67%
Ard Biesheuvel           13   30.95%        2      33.33%
Catalin Marinas           9   21.43%        1      16.67%
Laura Abbott              2    4.76%        1      16.67%
Jiang Liu                 1    2.38%        1      16.67%
Total                    42  100.00%        6     100.00%

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd)
                free_reserved_area((void *)start, (void *)end, 0, "initrd");
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          26   68.42%        1      20.00%
Jiang Liu                11   28.95%        3      60.00%
Wang Long                 1    2.63%        1      20.00%
Total                    38  100.00%        5     100.00%


static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas          18  100.00%        1     100.00%
Total                    18  100.00%        1     100.00%

__setup("keepinitrd", keepinitrd_setup); #endif /* * Dump out memory limit information on panic. */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
        if (memory_limit != (phys_addr_t)ULLONG_MAX) {
                pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
        } else {
                pr_emerg("Memory Limit: none\n");
        }
        return 0;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Ard Biesheuvel           50  100.00%        1     100.00%
Total                    50  100.00%        1     100.00%
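
On panic, the notifier registered below emits one line derived from the format strings above, e.g. "Memory Limit: 512 MB" (value illustrative, for a boot with mem=512M) or "Memory Limit: none" when no limit was set.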

static struct notifier_block mem_limit_notifier = {
        .notifier_call = dump_mem_limit,
};
static int __init register_mem_limit_dumper(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &mem_limit_notifier);
        return 0;
}

Contributors

Person               Tokens     Prop  Commits  CommitProp
Ard Biesheuvel           21  100.00%        1     100.00%
Total                    21  100.00%        1     100.00%

__initcall(register_mem_limit_dumper);

Overall Contributors

Person               Tokens     Prop  Commits  CommitProp
Catalin Marinas        1041   41.76%        7      11.86%
AKASHI Takahiro         632   25.35%        4       6.78%
Ard Biesheuvel          463   18.57%       14      23.73%
Mark Rutland             87    3.49%        5       8.47%
Ganapatrao Kulkarni      86    3.45%        2       3.39%
Kefeng Wang              55    2.21%        3       5.08%
JiSheng Zhang            20    0.80%        3       5.08%
Jiang Liu                19    0.76%        5       8.47%
Laura Abbott             15    0.60%        2       3.39%
Rob Herring              14    0.56%        1       1.69%
Linus Walleij            12    0.48%        1       1.69%
Vladimir Murzin          11    0.44%        1       1.69%
Robin Murphy              9    0.36%        1       1.69%
Jeremy Linton             6    0.24%        1       1.69%
Alexander Graf            5    0.20%        1       1.69%
Dave P Martin             4    0.16%        1       1.69%
Leif Lindholm             3    0.12%        1       1.69%
Marek Szyprowski          3    0.12%        1       1.69%
Wang Long                 2    0.08%        1       1.69%
Geert Uytterhoeven        2    0.08%        1       1.69%
Will Deacon               2    0.08%        1       1.69%
Andre Przywara            1    0.04%        1       1.69%
Dennis Chen               1    0.04%        1       1.69%
Total                  2493  100.00%       59     100.00%