Release 4.11 mm/nobootmem.c
/*
* bootmem - A boot-time physical memory allocator and configurator
*
* Copyright (C) 1999 Ingo Molnar
* 1999 Kanoj Sarcar, SGI
* 2008 Johannes Weiner
*
* Access to this subsystem has to be serialized externally (which is true
* for the boot process anyway).
*/
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <asm/bug.h>
#include <asm/io.h>
#include "internal.h"
#ifndef CONFIG_HAVE_MEMBLOCK
#error CONFIG_HAVE_MEMBLOCK not defined
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
u64 goal, u64 limit)
{
void *ptr;
u64 addr;
ulong flags = choose_memblock_flags();
if (limit > memblock.current_limit)
limit = memblock.current_limit;
again:
addr = memblock_find_in_range_node(size, align, goal, limit, nid,
flags);
if (!addr && (flags & MEMBLOCK_MIRROR)) {
flags &= ~MEMBLOCK_MIRROR;
pr_warn("Could not allocate %pap bytes of mirrored memory\n",
&size);
goto again;
}
if (!addr)
return NULL;
if (memblock_reserve(addr, size))
return NULL;
ptr = phys_to_virt(addr);
memset(ptr, 0, size);
/*
* The min_count is set to 0 so that bootmem allocated blocks
* are never reported as leaks.
*/
kmemleak_alloc(ptr, size, 0, 0);
return ptr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 91 | 61.07% | 1 | 12.50% |
Tony Luck | 39 | 26.17% | 2 | 25.00% |
JoonSoo Kim | 6 | 4.03% | 1 | 12.50% |
Philipp Hachtmann | 6 | 4.03% | 1 | 12.50% |
Grygorii Strashko | 4 | 2.68% | 1 | 12.50% |
Tejun Heo | 3 | 2.01% | 2 | 25.00% |
Total | 149 | 100.00% | 8 | 100.00% |
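The retry logic in __alloc_memory_core_early() is worth calling out: the first search runs with the flags returned by choose_memblock_flags(), and if mirrored memory was requested but is exhausted, MEMBLOCK_MIRROR is cleared and the search is repeated against ordinary memory. A minimal userspace sketch of that fallback pattern (try_alloc() is a hypothetical stand-in for memblock_find_in_range_node(), and the flag value is redefined locally):

#include <stdio.h>

#define MEMBLOCK_MIRROR 0x2	/* local stand-in for the memblock flag */

/* Hypothetical finder: pretend mirrored memory is already exhausted. */
static unsigned long long try_alloc(unsigned long long size, unsigned int flags)
{
	return (flags & MEMBLOCK_MIRROR) ? 0 : 0x100000ULL;
}

int main(void)
{
	unsigned int flags = MEMBLOCK_MIRROR;
	unsigned long long addr;

again:
	addr = try_alloc(4096, flags);
	if (!addr && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		printf("mirrored memory exhausted, retrying without it\n");
		goto again;
	}
	printf("allocated at 0x%llx\n", addr);
	return 0;
}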
/*
* free_bootmem_late - free bootmem pages directly to page allocator
* @addr: starting physical address of the range
* @size: size of the range in bytes
*
* This is only useful when the bootmem allocator has already been torn
* down, but we are still initializing the system. Pages are given directly
* to the page allocator, no bootmem metadata is updated because it is gone.
*/
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
unsigned long cursor, end;
kmemleak_free_part_phys(addr, size);
cursor = PFN_UP(addr);
end = PFN_DOWN(addr + size);
for (; cursor < end; cursor++) {
__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
totalram_pages++;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 67 | 95.71% | 1 | 33.33% |
Mel Gorman | 2 | 2.86% | 1 | 33.33% |
Catalin Marinas | 1 | 1.43% | 1 | 33.33% |
Total | 70 | 100.00% | 3 | 100.00% |
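A hedged usage sketch follows: the function name, address and size are hypothetical, but the call pattern is the one the comment above describes, namely freeing an early-reserved range from late __init code after the buddy allocator has taken over.

#include <linux/init.h>
#include <linux/bootmem.h>

/* Hypothetical example: a firmware table reserved during early boot is no
 * longer needed once the system is up, so hand its pages straight to the
 * buddy allocator. Address and size are illustrative only. */
static int __init release_fw_table(void)
{
	unsigned long fw_table_phys = 0x1f000000UL;	/* hypothetical */
	unsigned long fw_table_size = 2 * PAGE_SIZE;	/* hypothetical */

	free_bootmem_late(fw_table_phys, fw_table_size);
	return 0;
}
late_initcall(release_fw_table);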
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
int order;
while (start < end) {
order = min(MAX_ORDER - 1UL, __ffs(start));
while (start + (1UL << order) > end)
order--;
__free_pages_bootmem(pfn_to_page(start), start, order);
start += (1UL << order);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 40 | 53.33% | 1 | 33.33% |
Robin Holt | 33 | 44.00% | 1 | 33.33% |
Mel Gorman | 2 | 2.67% | 1 | 33.33% |
Total | 75 | 100.00% | 3 | 100.00% |
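The loop above releases the PFN range [start, end) as the largest naturally aligned power-of-two blocks it can: __ffs(start) caps the order by the alignment of the current PFN, MAX_ORDER - 1 caps it at the buddy allocator's maximum, and the inner loop shrinks the block until it fits before the end of the range. A minimal userspace sketch of the same splitting, assuming MAX_ORDER = 11 (the usual default) and a hypothetical PFN range [5, 40):

#include <stdio.h>

#define MAX_ORDER 11UL	/* assumption: the common default */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

/* Stand-in for __ffs(): index of the least significant set bit (the sketch
 * never passes 0, mirroring the kernel loop where start stays nonzero). */
static unsigned long lsb_index(unsigned long x)
{
	unsigned long i = 0;

	while (!(x & 1UL)) {
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long start = 5, end = 40;	/* hypothetical PFN range */

	while (start < end) {
		unsigned long order = min_ul(MAX_ORDER - 1UL, lsb_index(start));

		/* Shrink the block until it fits inside [start, end). */
		while (start + (1UL << order) > end)
			order--;

		printf("free pfn %lu..%lu (order %lu)\n",
		       start, start + (1UL << order) - 1, order);
		start += 1UL << order;
	}
	return 0;
}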
static unsigned long __init __free_memory_core(phys_addr_t start,
phys_addr_t end)
{
unsigned long start_pfn = PFN_UP(start);
unsigned long end_pfn = min_t(unsigned long,
PFN_DOWN(end), max_low_pfn);
if (start_pfn > end_pfn)
return 0;
__free_pages_memory(start_pfn, end_pfn);
return end_pfn - start_pfn;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
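Note the direction of the rounding: PFN_UP() rounds the start of the free region up to the next page boundary and PFN_DOWN() rounds the end down, so only pages that lie entirely inside [start, end) are released; partial pages at either edge stay reserved, matching the "Partial pages will be considered reserved" note in the free_bootmem() documentation below. A small standalone sketch, assuming 4 KiB pages and hypothetical, unaligned region boundaries:

#include <stdio.h>

#define PAGE_SHIFT 12ULL			/* assumption: 4 KiB pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	/* Hypothetical free region, not page aligned on either end. */
	unsigned long long start = 0x10000800ULL, end = 0x10004800ULL;

	printf("first fully contained pfn: %#llx\n", PFN_UP(start));	/* 0x10001 */
	printf("first excluded pfn:        %#llx\n", PFN_DOWN(end));	/* 0x10004 */
	/* Only the three full pages 0x10001..0x10003 would be released;
	 * the partial pages at the edges are left reserved. */
	return 0;
}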
static unsigned long __init free_low_memory_core_early(void)
{
unsigned long count = 0;
phys_addr_t start, end;
u64 i;
memblock_clear_hotplug(0, -1);
for_each_reserved_mem_region(i, &start, &end)
reserve_bootmem_region(start, end);
/*
 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
 * because in some cases, such as when Node0 has no RAM installed,
 * the low RAM will be on Node1.
 */
for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
NULL)
count += __free_memory_core(start, end);
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
{
phys_addr_t size;
/* Free memblock.reserved array if it was allocated */
size = get_allocated_memblock_reserved_regions_info(&start);
if (size)
count += __free_memory_core(start, start + size);
/* Free memblock.memory array if it was allocated */
size = get_allocated_memblock_memory_regions_info(&start);
if (size)
count += __free_memory_core(start, start + size);
}
#endif
return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 41 | 31.06% | 2 | 16.67% |
Philipp Hachtmann | 35 | 26.52% | 2 | 16.67% |
Tejun Heo | 27 | 20.45% | 2 | 16.67% |
Nathan Zimmer | 15 | 11.36% | 1 | 8.33% |
Xishi Qiu | 8 | 6.06% | 1 | 8.33% |
JoonSoo Kim | 2 | 1.52% | 1 | 8.33% |
Tony Luck | 2 | 1.52% | 1 | 8.33% |
Wanlong Gao | 1 | 0.76% | 1 | 8.33% |
Grygorii Strashko | 1 | 0.76% | 1 | 8.33% |
Total | 132 | 100.00% | 12 | 100.00% |
static int reset_managed_pages_done __initdata;
void reset_node_managed_pages(pg_data_t *pgdat)
{
struct zone *z;
for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
z->managed_pages = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 39 | 100.00% | 2 | 100.00% |
Total | 39 | 100.00% | 2 | 100.00% |
void __init reset_all_zones_managed_pages(void)
{
struct pglist_data *pgdat;
if (reset_managed_pages_done)
return;
for_each_online_pgdat(pgdat)
reset_node_managed_pages(pgdat);
reset_managed_pages_done = 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 21 | 67.74% | 2 | 50.00% |
Tang Chen | 5 | 16.13% | 1 | 25.00% |
Yinghai Lu | 5 | 16.13% | 1 | 25.00% |
Total | 31 | 100.00% | 4 | 100.00% |
/**
* free_all_bootmem - release free pages to the buddy allocator
*
* Returns the number of pages actually released.
*/
unsigned long __init free_all_bootmem(void)
{
unsigned long pages;
reset_all_zones_managed_pages();
pages = free_low_memory_core_early();
totalram_pages += pages;
return pages;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 24 | 85.71% | 2 | 40.00% |
Yinghai Lu | 2 | 7.14% | 1 | 20.00% |
Tejun Heo | 1 | 3.57% | 1 | 20.00% |
JoonSoo Kim | 1 | 3.57% | 1 | 20.00% |
Total | 28 | 100.00% | 5 | 100.00% |
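free_all_bootmem() is typically the single hand-off point from memblock to the buddy allocator. A hedged sketch of a typical call site, written as a simplified, hypothetical arch mem_init() rather than the code of any particular architecture:

#include <linux/bootmem.h>
#include <linux/mm.h>

/* Hypothetical, simplified arch mem_init(): hand all free low memory to
 * the buddy allocator and print the usual memory banner. */
void __init mem_init(void)
{
	/* max_mapnr / high_memory setup omitted for brevity */
	free_all_bootmem();		/* also updates totalram_pages, see above */
	mem_init_print_info(NULL);
}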
/**
* free_bootmem_node - mark a page range as usable
* @pgdat: node the range resides on
* @physaddr: starting physical address of the range
* @size: size of the range in bytes
*
* Partial pages will be considered reserved and left as they are.
*
* The range must reside completely on the specified node.
*/
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
unsigned long size)
{
memblock_free(physaddr, size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 24 | 96.00% | 1 | 50.00% |
Tejun Heo | 1 | 4.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
/**
* free_bootmem - mark a page range as usable
* @addr: starting physical address of the range
* @size: size of the range in bytes
*
* Partial pages will be considered reserved and left as they are.
*
* The range must be contiguous but may span node boundaries.
*/
void __init free_bootmem(unsigned long addr, unsigned long size)
{
memblock_free(addr, size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 20 | 95.24% | 1 | 50.00% |
Tejun Heo | 1 | 4.76% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
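A hedged usage sketch: the buffer name, address and size below are hypothetical. The point is simply that on a memblock-only kernel free_bootmem() is a thin wrapper that returns an early-reserved physical range to the pool of free boot memory, before free_all_bootmem() runs.

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/sizes.h>

/* Hypothetical example: give back an early-reserved scratch buffer once it
 * is no longer needed. Address and size are illustrative only. */
static void __init release_scratch(void)
{
	unsigned long scratch_phys = 0x02000000UL;	/* hypothetical */
	unsigned long scratch_size = SZ_1M;		/* hypothetical */

	free_bootmem(scratch_phys, scratch_size);
}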
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
void *ptr;
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc(size, GFP_NOWAIT);
restart:
ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align, goal, limit);
if (ptr)
return ptr;
if (goal != 0) {
goal = 0;
goto restart;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 85 | 98.84% | 1 | 50.00% |
Grygorii Strashko | 1 | 1.16% | 1 | 50.00% |
Total | 86 | 100.00% | 2 | 100.00% |
/**
* __alloc_bootmem_nopanic - allocate boot memory without panicking
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may happen on any node in the system.
*
* Returns NULL on failure.
*/
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
unsigned long goal)
{
unsigned long limit = -1UL;
return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
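A hedged usage sketch of the nopanic variant: the buffer name and size are hypothetical; what matters is that the caller must handle a NULL return, unlike with __alloc_bootmem() below.

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/printk.h>

/* Hypothetical optional early buffer: the nopanic variant returns NULL
 * instead of panicking, so the caller can degrade gracefully. */
static void __init setup_optional_buffer(void)
{
	void *buf;

	buf = __alloc_bootmem_nopanic(64 * 1024, SMP_CACHE_BYTES, 0);
	if (!buf) {
		pr_warn("optional early buffer unavailable, feature disabled\n");
		return;
	}
	/* buf is zeroed and lies below memblock.current_limit (see
	 * __alloc_memory_core_early() above). */
}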
static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);
if (mem)
return mem;
/*
* Whoops, we cannot satisfy the allocation request.
*/
pr_alert("bootmem alloc of %lu bytes failed!\n", size);
panic("Out of memory");
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 61 | 98.39% | 1 | 50.00% |
Joe Perches | 1 | 1.61% | 1 | 50.00% |
Total | 62 | 100.00% | 2 | 100.00% |
/**
* __alloc_bootmem - allocate boot memory
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may happen on any node in the system.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
unsigned long goal)
{
unsigned long limit = -1UL;
return ___alloc_bootmem(size, align, goal, limit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
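A hedged usage sketch: the table name and parameters are hypothetical. Because this variant panics on failure, it suits allocations the system cannot continue without, and the returned memory is already zeroed by __alloc_memory_core_early() above.

#include <linux/init.h>
#include <linux/bootmem.h>

/* Hypothetical table the system cannot run without: no NULL check is
 * needed because __alloc_bootmem() panics instead of returning NULL. */
static void __init alloc_core_table(void)
{
	void *table;

	table = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0 /* no preferred address */);
	/* table is page aligned and zeroed */
}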
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal,
unsigned long limit)
{
void *ptr;
again:
ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
goal, limit);
if (ptr)
return ptr;
ptr = __alloc_memory_core_early(NUMA_NO_NODE, size, align,
goal, limit);
if (ptr)
return ptr;
if (goal) {
goal = 0;
goto again;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 58 | 61.05% | 1 | 20.00% |
David S. Miller | 19 | 20.00% | 1 | 20.00% |
Johannes Weiner | 17 | 17.89% | 2 | 40.00% |
Grygorii Strashko | 1 | 1.05% | 1 | 20.00% |
Total | 95 | 100.00% | 5 | 100.00% |
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johannes Weiner | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
static void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal,
unsigned long limit)
{
void *ptr;
ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
if (ptr)
return ptr;
pr_alert("bootmem alloc of %lu bytes failed!\n", size);
panic("Out of memory");
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johannes Weiner | 64 | 92.75% | 2 | 33.33% |
Yinghai Lu | 2 | 2.90% | 1 | 16.67% |
Joe Perches | 1 | 1.45% | 1 | 16.67% |
Rashika Kheria | 1 | 1.45% | 1 | 16.67% |
David S. Miller | 1 | 1.45% | 1 | 16.67% |
Total | 69 | 100.00% | 6 | 100.00% |
/**
* __alloc_bootmem_node - allocate boot memory from a specific node
* @pgdat: node to allocate from
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may fall back to any node in the system if the specified node
* can not hold the requested memory.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johannes Weiner | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
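A hedged usage sketch for per-node data: NODE_DATA(nid) supplies the pg_data_t for the node, and the allocation falls back to other nodes (see ___alloc_bootmem_node_nopanic() above) if the requested node cannot satisfy it. The table and its parameters are hypothetical.

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>

/* Hypothetical per-node table: place it on the node it describes, with
 * automatic fallback to other nodes if that node has no usable memory. */
static void __init alloc_node_table(int nid)
{
	void *tbl;

	tbl = __alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE, PAGE_SIZE, 0);
	/* panics if no node at all can satisfy the request */
}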
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
return __alloc_bootmem_node(pgdat, size, align, goal);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
/**
* __alloc_bootmem_low - allocate low boot memory
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may happen on any node in the system.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
unsigned long goal)
{
return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
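A hedged usage sketch: the buffer is hypothetical; the _low variant simply caps the search limit at ARCH_LOW_ADDRESS_LIMIT, which is what a caller needs when the memory must be reachable by a device or mechanism restricted to low physical addresses.

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/sizes.h>

/* Hypothetical buffer for a consumer that can only address low memory. */
static void __init alloc_low_buffer(void)
{
	void *buf;

	buf = __alloc_bootmem_low(SZ_64K, PAGE_SIZE, 0);
	/* guaranteed to lie below ARCH_LOW_ADDRESS_LIMIT (panics otherwise) */
}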
void * __init __alloc_bootmem_low_nopanic(unsigned long size,
unsigned long align,
unsigned long goal)
{
return ___alloc_bootmem_nopanic(size, align, goal,
ARCH_LOW_ADDRESS_LIMIT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
/**
* __alloc_bootmem_low_node - allocate low boot memory from a specific node
* @pgdat: node to allocate from
* @size: size of the request in bytes
* @align: alignment of the region
* @goal: preferred starting address of the region
*
* The goal is dropped if it can not be satisfied and the allocation will
* fall back to memory below @goal.
*
* Allocation may fall back to any node in the system if the specified node
* can not hold the requested memory.
*
* The function panics if the request can not be satisfied.
*/
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
unsigned long align, unsigned long goal)
{
if (WARN_ON_ONCE(slab_is_available()))
return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
return ___alloc_bootmem_node(pgdat, size, align, goal,
ARCH_LOW_ADDRESS_LIMIT);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 55 | 96.49% | 1 | 50.00% |
Johannes Weiner | 2 | 3.51% | 1 | 50.00% |
Total | 57 | 100.00% | 2 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 845 | 61.63% | 5 | 13.51% |
Johannes Weiner | 198 | 14.44% | 2 | 5.41% |
Jiang Liu | 90 | 6.56% | 3 | 8.11% |
Tony Luck | 41 | 2.99% | 2 | 5.41% |
Philipp Hachtmann | 41 | 2.99% | 3 | 8.11% |
Tejun Heo | 33 | 2.41% | 5 | 13.51% |
Robin Holt | 33 | 2.41% | 1 | 2.70% |
David S. Miller | 20 | 1.46% | 1 | 2.70% |
Nathan Zimmer | 15 | 1.09% | 1 | 2.70% |
zijun_hu | 11 | 0.80% | 1 | 2.70% |
JoonSoo Kim | 9 | 0.66% | 2 | 5.41% |
Xishi Qiu | 8 | 0.58% | 1 | 2.70% |
Grygorii Strashko | 7 | 0.51% | 2 | 5.41% |
Tang Chen | 5 | 0.36% | 1 | 2.70% |
Igor Mammedov | 5 | 0.36% | 1 | 2.70% |
Mel Gorman | 4 | 0.29% | 1 | 2.70% |
Joe Perches | 2 | 0.15% | 1 | 2.70% |
Wanlong Gao | 1 | 0.07% | 1 | 2.70% |
Paul Gortmaker | 1 | 0.07% | 1 | 2.70% |
Catalin Marinas | 1 | 0.07% | 1 | 2.70% |
Rashika Kheria | 1 | 0.07% | 1 | 2.70% |
Total | 1371 | 100.00% | 37 | 100.00% |