Release 4.14: arch/x86/mm/numa_32.c
/*
* Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
* August 2002: added remote node KVA remap - Martin J. Bligh
*
* Copyright (C) 2002, IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include "numa_internal.h"
#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map     - the mapping between a pfn and owning node
 *                       physnode_map keeps track of the physical memory layout
 *                       of a generic numa node on a 64Mb break (each element
 *                       of the array will represent 64Mb of memory and will be
 *                       marked by the node id.  so, if the first gig is on
 *                       node 0, and the second gig is on node 1
 *                       physnode_map will contain:
 *
 *      physnode_map[0-15] = 0;
 *      physnode_map[16-31] = 1;
 *      physnode_map[32- ] = -1;
 */
s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
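At lookup time this array gives a constant-time pfn-to-node translation at the same 64Mb section granularity. The short sketch below only illustrates the indexing arithmetic; the in-tree counterpart on 32-bit x86 is the pfn_to_nid() helper in asm/mmzone_32.h, and the name example_pfn_to_nid here is purely illustrative.

/*
 * Illustrative sketch: translate a pfn to its owning node by indexing
 * physnode_map at PAGES_PER_SECTION (64Mb) granularity.  Not the in-tree
 * helper, just the same arithmetic spelled out.
 */
static inline int example_pfn_to_nid(unsigned long pfn)
{
        return (int)physnode_map[pfn / PAGES_PER_SECTION];
}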
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        printk(KERN_INFO "Node: %d, start_pfn: %lx, end_pfn: %lx\n",
               nid, start, end);
        printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
        printk(KERN_DEBUG "  ");
        start = round_down(start, PAGES_PER_SECTION);
        end = round_up(end, PAGES_PER_SECTION);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                physnode_map[pfn / PAGES_PER_SECTION] = nid;
                printk(KERN_CONT "%lx ", pfn);
        }
        printk(KERN_CONT "\n");
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Dave Hansen | 79 | 76.70% | 1 | 20.00% |
Petr Tesarik | 18 | 17.48% | 1 | 20.00% |
Yinghai Lu | 4 | 3.88% | 2 | 40.00% |
Tejun Heo | 2 | 1.94% | 1 | 20.00% |
Total | 103 | 100.00% | 5 | 100.00% |
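memory_present() above widens the node's pfn range to whole sections and stamps the owning node id into every 64Mb slot it covers. The self-contained user-space sketch below replays that fill logic; the 4 KiB page size, 64 MiB section size and two-node layout are assumptions chosen for illustration, not values taken from this file.

/*
 * User-space sketch (not kernel code) of the section-map fill done by
 * memory_present().  Constants assume 4 KiB pages and 64 MiB sections.
 */
#include <stdio.h>

#define SIM_PAGES_PER_SECTION 0x4000UL /* 64 MiB / 4 KiB pages */
#define SIM_MAX_SECTIONS      64

static signed char sim_physnode_map[SIM_MAX_SECTIONS] = {
        [0 ... SIM_MAX_SECTIONS - 1] = -1
};

static unsigned long sim_round_down(unsigned long x, unsigned long a)
{
        return x & ~(a - 1);
}

static unsigned long sim_round_up(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);
}

static void sim_memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start = sim_round_down(start, SIM_PAGES_PER_SECTION);
        end = sim_round_up(end, SIM_PAGES_PER_SECTION);
        for (pfn = start; pfn < end; pfn += SIM_PAGES_PER_SECTION)
                sim_physnode_map[pfn / SIM_PAGES_PER_SECTION] = nid;
}

int main(void)
{
        int i;

        /* Hypothetical layout: node 0 owns the first 896 MiB, node 1 the next 1 GiB. */
        sim_memory_present(0, 0x00000, 0x38000);
        sim_memory_present(1, 0x38000, 0x78000);

        for (i = 0; i < SIM_MAX_SECTIONS; i++)
                printf("section %2d -> node %d\n", i, sim_physnode_map[i]);
        return 0;
}

Run with those hypothetical ranges, it reports node 0 for sections 0-13, node 1 for sections 14-29 and -1 for the rest, mirroring the physnode_map example in the comment block above.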
unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        unsigned long nr_pages = end_pfn - start_pfn;

        if (!nr_pages)
                return 0;

        return (nr_pages + 1) * sizeof(struct page);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Dave Hansen | 46 | 100.00% | 1 | 100.00% |
Total | 46 | 100.00% | 1 | 100.00% |
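node_memmap_size_bytes() sizes the per-node mem_map: one struct page per pfn plus one extra entry of slack. As a back-of-envelope check, the user-space sketch below assumes 4 KiB pages and a 32-byte struct page; sizeof(struct page) varies with kernel configuration, so both numbers are illustrative.

#include <stdio.h>

int main(void)
{
        unsigned long nr_pages = (1UL << 30) / 4096; /* 1 GiB node, 4 KiB pages     */
        unsigned long page_sz = 32;                  /* assumed sizeof(struct page) */
        unsigned long bytes = (nr_pages + 1) * page_sz;

        printf("memmap: %lu bytes (~%lu MiB)\n", bytes, bytes >> 20);
        return 0;
}

Under these assumptions a 1 GiB node needs roughly 8 MiB of mem_map.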
#endif
extern unsigned long highend_pfn, highstart_pfn;
void __init initmem_init(void)
{
        x86_numa_init();

#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
               pages_to_mb(max_low_pfn));

        printk(KERN_DEBUG "max_low_pfn = %lx, highstart_pfn = %lx\n",
               max_low_pfn, highstart_pfn);
        printk(KERN_DEBUG "Low memory ends at vaddr %08lx\n",
               (ulong) pfn_to_kaddr(max_low_pfn));
        printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
               (ulong) pfn_to_kaddr(highstart_pfn));

        __vmalloc_start_set = true;
        setup_bootmem_allocator();
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Andrew Morton | 79 | 57.66% | 2 | 22.22% |
Jan Beulich | 35 | 25.55% | 1 | 11.11% |
Tejun Heo | 10 | 7.30% | 2 | 22.22% |
Yinghai Lu | 8 | 5.84% | 2 | 22.22% |
Laura Abbott | 4 | 2.92% | 1 | 11.11% |
Dave Hansen | 1 | 0.73% | 1 | 11.11% |
Total | 137 | 100.00% | 9 | 100.00% |
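initmem_init() splits physical memory at max_low_pfn: pfns below it stay in the permanently mapped lowmem, everything above is handed to HIGHMEM. The user-space sketch below replays that branch for a hypothetical 2 GiB machine with an 896 MiB lowmem limit (the common 3G/1G split); both pfn values are assumptions, not numbers taken from this file.

/*
 * User-space sketch of the CONFIG_HIGHMEM split performed in initmem_init().
 * Assumed values: 4 KiB pages, 2 GiB of RAM, 896 MiB of lowmem.
 */
#include <stdio.h>

#define SIM_PAGE_SHIFT 12

static unsigned long sim_pages_to_mb(unsigned long pages)
{
        return pages >> (20 - SIM_PAGE_SHIFT);
}

int main(void)
{
        unsigned long max_pfn = 0x80000;     /* 2 GiB of RAM   */
        unsigned long max_low_pfn = 0x38000; /* 896 MiB lowmem */
        unsigned long highstart_pfn, highend_pfn;

        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;

        printf("%luMB HIGHMEM available.\n",
               sim_pages_to_mb(highend_pfn - highstart_pfn));
        printf("%luMB LOWMEM available.\n", sim_pages_to_mb(max_low_pfn));
        return 0;
}

With these assumed values it prints 1152MB HIGHMEM and 896MB LOWMEM, matching the arithmetic behind the KERN_NOTICE lines above.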
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Dave Hansen | 126 | 37.17% | 2 | 10.00% |
Andrew Morton | 92 | 27.14% | 2 | 10.00% |
Jan Beulich | 35 | 10.32% | 1 | 5.00% |
Martin J. Bligh | 19 | 5.60% | 2 | 10.00% |
Petr Tesarik | 18 | 5.31% | 1 | 5.00% |
Yinghai Lu | 16 | 4.72% | 4 | 20.00% |
Tejun Heo | 15 | 4.42% | 3 | 15.00% |
Alexey Dobriyan | 7 | 2.06% | 1 | 5.00% |
Andy Whitcroft | 5 | 1.47% | 1 | 5.00% |
Laura Abbott | 4 | 1.18% | 1 | 5.00% |
Ravikiran G. Thirumalai | 1 | 0.29% | 1 | 5.00% |
Paul Gortmaker | 1 | 0.29% | 1 | 5.00% |
Total | 339 | 100.00% | 20 | 100.00% |