Contributors: 24
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Yasunori Goto |
271 |
48.05% |
2 |
5.00% |
Song Muchun |
72 |
12.77% |
2 |
5.00% |
Yasuaki Ishimatsu |
50 |
8.87% |
3 |
7.50% |
Linus Torvalds (pre-git) |
42 |
7.45% |
4 |
10.00% |
Dan J Williams |
29 |
5.14% |
3 |
7.50% |
Dave Hansen |
16 |
2.84% |
3 |
7.50% |
Liu Shixin |
16 |
2.84% |
2 |
5.00% |
Andrea Arcangeli |
14 |
2.48% |
1 |
2.50% |
Badari Pulavarty |
10 |
1.77% |
1 |
2.50% |
Andi Kleen |
7 |
1.24% |
2 |
5.00% |
Matthew Wilcox |
6 |
1.06% |
1 |
2.50% |
Andy Whitcroft |
5 |
0.89% |
2 |
5.00% |
Kamezawa Hiroyuki |
4 |
0.71% |
2 |
5.00% |
Christoph Lameter |
3 |
0.53% |
1 |
2.50% |
Wen Congyang |
3 |
0.53% |
1 |
2.50% |
Yinghai Lu |
3 |
0.53% |
1 |
2.50% |
Jiang Liu |
3 |
0.53% |
2 |
5.00% |
JoonSoo Kim |
2 |
0.35% |
1 |
2.50% |
Oscar Salvador |
2 |
0.35% |
1 |
2.50% |
Wei Yang |
2 |
0.35% |
1 |
2.50% |
Adrian Bunk |
1 |
0.18% |
1 |
2.50% |
Greg Kroah-Hartman |
1 |
0.18% |
1 |
2.50% |
Linus Torvalds |
1 |
0.18% |
1 |
2.50% |
David Howells |
1 |
0.18% |
1 |
2.50% |
Total |
564 |
100.00% |
40 |
100.00% |
// SPDX-License-Identifier: GPL-2.0
/*
* Bootmem core functions.
*
* Copyright (c) 2020, Bytedance.
*
* Author: Muchun Song <songmuchun@bytedance.com>
*
*/
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>
#include <linux/kmemleak.h>
/*
 * Tag @page as holding bootmem metadata of the given @type (e.g.
 * SECTION_INFO / MIX_SECTION_INFO / NODE_INFO constants used by callers
 * below) and stash @info (a section number or node id at the call sites
 * in this file) in page->private.  An extra page reference is taken so
 * the page stays pinned while tagged; put_page_bootmem() releases it.
 */
void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
{
	page->index = type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}
/*
 * Drop one bootmem reference taken by get_page_bootmem().  When the
 * refcount returns to 1 (i.e. this was the last bootmem reference),
 * clear the tagging set up by get_page_bootmem(), notify kmemleak that
 * the backing memory is released, and return the page to the system via
 * free_reserved_page().
 */
void put_page_bootmem(struct page *page)
{
	unsigned long type = page->index;

	/* The page must currently carry a valid bootmem info type. */
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		/* Undo the tagging done in get_page_bootmem(). */
		page->index = 0;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
		free_reserved_page(page);
	}
}
#ifndef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Classic-sparsemem flavour: tag every page that backs the section's
 * memmap (SECTION_INFO) and the pages backing its usage bitmap
 * (MIX_SECTION_INFO) so memory hotplug knows they hold bootmem data.
 */
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	struct mem_section_usage *usage;
	struct mem_section *ms;
	struct page *memmap, *pg;
	unsigned long nr, npages, idx;

	nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	pg = virt_to_page(memmap);
	npages = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (idx = 0; idx < npages; idx++)
		get_page_bootmem(nr, pg + idx, SECTION_INFO);

	usage = ms->usage;
	pg = virt_to_page(usage);
	npages = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
	for (idx = 0; idx < npages; idx++)
		get_page_bootmem(nr, pg + idx, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
/*
 * Vmemmap flavour: the memmap pages are registered through the
 * arch-provided register_page_bootmem_memmap(); only the usage-bitmap
 * pages are tagged here (MIX_SECTION_INFO).
 */
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
	struct mem_section_usage *usage;
	struct mem_section *ms;
	struct page *memmap, *pg;
	unsigned long nr, npages, idx;

	nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, nr);
	register_page_bootmem_memmap(nr, memmap, PAGES_PER_SECTION);

	usage = ms->usage;
	pg = virt_to_page(usage);
	npages = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;
	for (idx = 0; idx < npages; idx++)
		get_page_bootmem(nr, pg + idx, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
/*
 * Tag all bootmem-allocated metadata belonging to @pgdat: the pages
 * holding the pglist_data structure itself (NODE_INFO), then every
 * section on the node via register_page_bootmem_info_section().
 */
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	int node = pgdat->node_id;
	unsigned long pfn, limit, npages, idx;
	struct page *pg;

	/* Pages backing the pglist_data structure. */
	npages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	pg = virt_to_page(pgdat);
	for (idx = 0; idx < npages; idx++)
		get_page_bootmem(node, pg + idx, NODE_INFO);

	/* register section info */
	limit = pgdat_end_pfn(pgdat);
	for (pfn = pgdat->node_start_pfn; pfn < limit; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other nodes.
		 */
		if (!pfn_valid(pfn))
			continue;
		if (early_pfn_to_nid(pfn) != node)
			continue;
		register_page_bootmem_info_section(pfn);
	}
}