Release 4.8 arch/powerpc/mm/init_64.c
/*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Dave Engebretsen <engebret@us.ibm.com>
* Rework for PPC64 port.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#undef DEBUG
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>
#include "mmu_decl.h"
#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
static void pgd_ctor(void *addr)
{
memset(addr, 0, PGD_TABLE_SIZE);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Paul Mackerras | 12 | 63.16% | 1 | 33.33% |
| Alexey Dobriyan | 4 | 21.05% | 1 | 33.33% |
| Christoph Lameter | 3 | 15.79% | 1 | 33.33% |
| Total | 19 | 100.00% | 3 | 100.00% |
static void pud_ctor(void *addr)
{
memset(addr, 0, PUD_TABLE_SIZE);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Aneesh Kumar | 19 | 100.00% | 1 | 100.00% |
| Total | 19 | 100.00% | 1 | 100.00% |
static void pmd_ctor(void *addr)
{
memset(addr, 0, PMD_TABLE_SIZE);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Person | Tokens | Prop | Commits | CommitProp |
| alexey dobriyan | alexey dobriyan | 9 | 47.37% | 1 | 33.33% |
| aneesh kumar | aneesh kumar | 9 | 47.37% | 1 | 33.33% |
| paul mackerras | paul mackerras | 1 | 5.26% | 1 | 33.33% |
| Total | 19 | 100.00% | 3 | 100.00% |
struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
/*
* Create a kmem_cache() for pagetables. This is not used for PTE
* pages - they're linked to struct page, come from the normal free
* pages pool and have a different entry size (see real_pte_t) to
* everything else. Caches created by this function are used for all
* the higher level pagetables, and for hugepage pagetables.
*/
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
char *name;
unsigned long table_size = sizeof(void *) << shift;
unsigned long align = table_size;
/* When batching pgtable pointers for RCU freeing, we store
* the index size in the low bits. Table alignment must be
* big enough to fit it.
*
* Likewise, hugepage pagetable pointers contain a (different)
* shift value in the low bits. All tables must be aligned so
* as to leave enough 0 bits in the address to contain it. */
unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
HUGEPD_SHIFT_MASK + 1);
struct kmem_cache *new;
/* It would be nice if this was a BUILD_BUG_ON(), but at the
* moment, gcc doesn't seem to recognize is_power_of_2 as a
* constant expression; so much for that. */
BUG_ON(!is_power_of_2(minalign));
BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
if (PGT_CACHE(shift))
return; /* Already have a cache of this size */
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
kfree(name);
pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| David Gibson | 123 | 80.39% | 3 | 33.33% |
| Paul Mackerras | 10 | 6.54% | 1 | 11.11% |
| Yanjiang Jin | 5 | 3.27% | 1 | 11.11% |
| Benjamin Herrenschmidt | 5 | 3.27% | 1 | 11.11% |
| Aneesh Kumar | 5 | 3.27% | 1 | 11.11% |
| Alexey Dobriyan | 3 | 1.96% | 1 | 11.11% |
| Christoph Lameter | 2 | 1.31% | 1 | 11.11% |
| Total | 153 | 100.00% | 9 | 100.00% |
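For orientation, callers elsewhere in the tree obtain higher-level page tables from these caches through the PGT_CACHE(shift) lookup, which indexes the pgtable_cache[] array filled in above. The fragment below is an illustrative sketch only, not part of init_64.c: the helper names are made up and GFP_KERNEL is an assumption, but kmem_cache_alloc()/kmem_cache_free() are the standard slab calls such a caller would use. Note that the constructor registered above already zeroes new objects, so no explicit memset is needed.

/* Illustrative sketch (not from init_64.c): take a page-table page from
 * the cache registered for PGD_INDEX_SIZE and give it back again. */
static pgd_t *example_pgd_alloc(void)
{
	/* PGT_CACHE(shift) resolves to pgtable_cache[shift - 1]; the pgd_ctor
	 * constructor above has already zeroed the object. */
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static void example_pgd_free(pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}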
void pgtable_cache_init(void)
{
pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
/*
* In all current configs, when the PUD index exists it is the same
* size as either the PGD or the PMD index, except with THP enabled
* on book3s 64.
*/
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
panic("Couldn't allocate pgtable caches");
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
panic("Couldn't allocate pud pgtable caches");
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| David Gibson | 42 | 57.53% | 1 | 20.00% |
| Aneesh Kumar | 26 | 35.62% | 2 | 40.00% |
| Paul Mackerras | 4 | 5.48% | 1 | 20.00% |
| Alexey Dobriyan | 1 | 1.37% | 1 | 20.00% |
| Total | 73 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Given an address within the vmemmap, determine the pfn of the page that
* represents the start of the section it is within. Note that we have to
* do this by hand as the proffered address may not be correctly aligned.
* Subtraction of non-aligned pointers produces undefined results.
*/
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
unsigned long offset = page - ((unsigned long)(vmemmap));
/* Return the pfn of the start of the section. */
return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Andy Whitcroft | 41 | 97.62% | 1 | 50.00% |
| Michael Ellerman | 1 | 2.38% | 1 | 50.00% |
| Total | 42 | 100.00% | 2 | 100.00% |
/*
* Check if this vmemmap page is already initialised. If any section
* which overlaps this vmemmap page is initialised then this page is
* initialised already.
*/
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
unsigned long end = start + page_size;
start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));
for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
if (pfn_valid(page_to_pfn((struct page *)start)))
return 1;
return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Andy Whitcroft | 55 | 70.51% | 1 | 33.33% |
| Li Zhong | 22 | 28.21% | 1 | 33.33% |
| Michael Ellerman | 1 | 1.28% | 1 | 33.33% |
| Total | 78 | 100.00% | 3 | 100.00% |
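The divide-then-mask arithmetic in vmemmap_section_start(), and the way vmemmap_populated() walks section-sized strides of the vmemmap, can be seen in isolation with a small stand-alone model. This is user-space C for illustration only: the 64-byte struct page, the 2^16 pages-per-section figure and the vmemmap base used here are assumptions, not the kernel's actual values.

/* Stand-alone model of vmemmap_section_start(); toy sizes, illustration only. */
#include <stdio.h>

#define TOY_PAGE_STRUCT_SIZE	64UL			/* assumed sizeof(struct page) */
#define TOY_PFN_SECTION_SHIFT	16UL			/* assumed pfns per section = 2^16 */
#define TOY_PAGE_SECTION_MASK	(~((1UL << TOY_PFN_SECTION_SHIFT) - 1))
#define TOY_VMEMMAP_BASE	0xf000000000000000UL	/* assumed vmemmap virtual base */

static unsigned long toy_vmemmap_section_start(unsigned long page_addr)
{
	unsigned long offset = page_addr - TOY_VMEMMAP_BASE;

	/* Divide first, then mask: the address may not be struct-page aligned. */
	return (offset / TOY_PAGE_STRUCT_SIZE) & TOY_PAGE_SECTION_MASK;
}

int main(void)
{
	/* An address 100 struct pages plus 3 stray bytes into the vmemmap... */
	unsigned long addr = TOY_VMEMMAP_BASE + 100 * TOY_PAGE_STRUCT_SIZE + 3;

	/* ...still maps back to pfn 0, the start of section 0. */
	printf("section start pfn = %lu\n", toy_vmemmap_section_start(addr));
	return 0;
}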
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
struct vmemmap_backing *vmem_back;
/* get from freed entries first */
if (num_freed) {
num_freed--;
vmem_back = next;
next = next->list;
return vmem_back;
}
/* allocate a page when required and hand out chunks */
if (!num_left) {
next = vmemmap_alloc_block(PAGE_SIZE, node);
if (unlikely(!next)) {
WARN_ON(1);
return NULL;
}
num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
}
num_left--;
return next++;
}
static
__meminit void vmemmap_list_populate(unsigned long phys,
unsigned long start,
int node)
{
struct vmemmap_backing *vmem_back;
vmem_back = vmemmap_list_alloc(node);
if (unlikely(!vmem_back)) {
WARN_ON(1);
return;
}
vmem_back->phys = phys;
vmem_back->virt_addr = start;
vmem_back->list = vmemmap_list;
vmemmap_list = vmem_back;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Mark Nelson | 68 | 100.00% | 1 | 100.00% |
| Total | 68 | 100.00% | 1 | 100.00% |
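vmemmap_list_alloc() and vmemmap_list_populate() together implement a carve-a-page-into-chunks allocator with a singly linked free list that is refilled by vmemmap_list_free() further down. The stand-alone model below is illustrative only; the struct layout, the names and the 4 KiB page size are assumptions. It shows the same reuse-freed-entries-first behaviour.

/* Stand-alone model of the vmemmap_list allocator pattern; illustration only. */
#include <stdio.h>
#include <stdlib.h>

struct backing {
	unsigned long phys;
	unsigned long virt_addr;
	struct backing *list;
};

#define MODEL_PAGE_SIZE 4096UL	/* assumed page size for the model */

static struct backing *next_chunk;	/* bump pointer, or free-list head when num_freed > 0 */
static int num_left;
static int num_freed;

static struct backing *model_alloc(void)
{
	struct backing *b;

	/* Reuse freed entries first, exactly like vmemmap_list_alloc(). */
	if (num_freed) {
		num_freed--;
		b = next_chunk;
		next_chunk = next_chunk->list;
		return b;
	}

	/* Otherwise carve chunks out of a freshly allocated page. */
	if (!num_left) {
		next_chunk = malloc(MODEL_PAGE_SIZE);
		if (!next_chunk)
			return NULL;
		num_left = MODEL_PAGE_SIZE / sizeof(struct backing);
	}
	num_left--;
	return next_chunk++;
}

static void model_free(struct backing *b)
{
	/* Push onto the free list, mirroring the tail of vmemmap_list_free(). */
	b->list = next_chunk;
	next_chunk = b;
	num_freed++;
}

int main(void)
{
	struct backing *a = model_alloc();
	struct backing *b = model_alloc();

	model_free(a);
	/* The freed entry is handed out again before the bump pointer advances. */
	printf("reused: %s\n", model_alloc() == a ? "yes" : "no");
	(void)b;
	return 0;
}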
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */
start = _ALIGN_DOWN(start, page_size);
pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
for (; start < end; start += page_size) {
void *p;
int rc;
if (vmemmap_populated(start, page_size))
continue;
p = vmemmap_alloc_block(page_size, node);
if (!p)
return -ENOMEM;
vmemmap_list_populate(__pa(p), start, node);
pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p);
rc = vmemmap_create_mapping(start, page_size, __pa(p));
if (rc < 0) {
pr_warning(
"vmemmap_populate: Unable to create vmemmap mapping: %d\n",
rc);
return -EFAULT;
}
}
return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Li Zhong | 136 | 85.00% | 1 | 50.00% |
| David Gibson | 24 | 15.00% | 1 | 50.00% |
| Total | 160 | 100.00% | 2 | 100.00% |
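As a rough worked example of what a single pass of this loop covers: with a 16 MB vmemmap mapping size, a 64-byte struct page and a 64 KB base page size (all three figures are assumptions chosen for illustration), one allocated block describes 262144 page frames, i.e. 16 GB of RAM.

/* Worked example with assumed sizes: coverage of one vmemmap_populate() step. */
#include <stdio.h>

int main(void)
{
	unsigned long long vmemmap_page_size = 16ULL << 20;	/* assumed 16 MB mapping size */
	unsigned long long struct_page_size  = 64;		/* assumed sizeof(struct page) */
	unsigned long long base_page_size    = 64ULL << 10;	/* assumed 64 KB base pages */
	unsigned long long frames = vmemmap_page_size / struct_page_size;

	printf("one block describes %llu page frames (%llu GB of RAM)\n",
	       frames, (frames * base_page_size) >> 30);
	return 0;
}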
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
struct vmemmap_backing *vmem_back, *vmem_back_prev;
vmem_back_prev = vmem_back = vmemmap_list;
/* look for it with prev pointer recorded */
for (; vmem_back; vmem_back = vmem_back->list) {
if (vmem_back->virt_addr == start)
break;
vmem_back_prev = vmem_back;
}
if (unlikely(!vmem_back)) {
WARN_ON(1);
return 0;
}
/* remove it from vmemmap_list */
if (vmem_back == vmemmap_list) /* remove head */
vmemmap_list = vmem_back->list;
else
vmem_back_prev->list = vmem_back->list;
/* next point to this freed entry */
vmem_back->list = next;
next = vmem_back;
num_freed++;
return vmem_back->phys;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Li Zhong | 112 | 100.00% | 1 | 100.00% |
| Total | 112 | 100.00% | 1 | 100.00% |
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
start = _ALIGN_DOWN(start, page_size);
pr_debug("vmemmap_free %lx...%lx\n", start, end);
for (; start < end; start += page_size) {
unsigned long addr;
/*
* The section has already been marked as invalid, so if
* vmemmap_populated() returns true, some other section still
* lives in this page; skip it.
*/
if (vmemmap_populated(start, page_size))
continue;
addr = vmemmap_list_free(start);
if (addr) {
struct page *page = pfn_to_page(addr >> PAGE_SHIFT);
if (PageReserved(page)) {
/* allocated from bootmem */
if (page_size < PAGE_SIZE) {
/*
* this shouldn't happen, but if it is
* the case, leave the memory there
*/
WARN_ON_ONCE(1);
} else {
unsigned int nr_pages =
1 << get_order(page_size);
while (nr_pages--)
free_reserved_page(page++);
}
} else
free_pages((unsigned long)(__va(addr)),
get_order(page_size));
vmemmap_remove_mapping(start, page_size);
}
}
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Li Zhong | 86 | 49.43% | 1 | 16.67% |
| Andy Whitcroft | 69 | 39.66% | 1 | 16.67% |
| Benjamin Herrenschmidt | 10 | 5.75% | 2 | 33.33% |
| Mark Nelson | 5 | 2.87% | 1 | 16.67% |
| Johannes Weiner | 4 | 2.30% | 1 | 16.67% |
| Total | 174 | 100.00% | 6 | 100.00% |
#endif
void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long size)
{
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Nathan Fontenot | 17 | 100.00% | 1 | 100.00% |
| Total | 17 | 100.00% | 1 | 100.00% |
/*
* We do not have access to the sparsemem vmemmap, so we fall back to
* walking the list of sparsemem blocks which we already maintain for
* the sake of crashdump. In the long run, we might want to maintain
* a tree if performance of that linear walk becomes a problem.
*
* realmode_pfn_to_page functions can fail due to:
* 1) Real sparsemem blocks do not lie contiguously in RAM (they
* are in virtual address space, which is not available in real mode),
* so the requested page struct can be split between blocks and
* get_page/put_page may fail.
* 2) When huge pages are used, the get_page/put_page API will fail
* in real mode as the linked addresses in the page struct are virtual
* too.
*/
struct page *realmode_pfn_to_page(unsigned long pfn)
{
struct vmemmap_backing *vmem_back;
struct page *page;
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
if (pg_va < vmem_back->virt_addr)
continue;
/* Entries may have been freed from vmemmap_list, so check them all */
if ((pg_va + sizeof(struct page)) <=
(vmem_back->virt_addr + page_size)) {
page = (struct page *) (vmem_back->phys + pg_va -
vmem_back->virt_addr);
return page;
}
}
/* The page struct is probably split between real pages */
return NULL;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Alexey Kardashevskiy | 116 | 95.87% | 1 | 50.00% |
| Li Zhong | 5 | 4.13% | 1 | 50.00% |
| Total | 121 | 100.00% | 2 | 100.00% |
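A hedged sketch of how a real-mode caller might use this helper is shown below. The function name is hypothetical, and get_page_unless_zero() is used only as an example of a reference primitive that touches nothing but the refcount word, in line with the failure modes listed in the comment above; whether that is sufficient for any particular real-mode user is outside the scope of this sketch.

/* Illustrative sketch (not from init_64.c): resolving a pfn in real mode. */
static int example_realmode_get(unsigned long pfn)
{
	struct page *page = realmode_pfn_to_page(pfn);

	/* NULL means the struct page straddles two backing blocks or the pfn
	 * is not covered; the caller must fall back to virtual mode. */
	if (!page)
		return -EFAULT;

	/* Only the refcount word is touched here; following compound-page
	 * pointers would dereference virtual addresses unavailable in real mode. */
	if (!get_page_unless_zero(page))
		return -EFAULT;

	return 0;
}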
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
#elif defined(CONFIG_FLATMEM)
struct page *realmode_pfn_to_page(unsigned long pfn)
{
struct page *page = pfn_to_page(pfn);
return page;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Alexey Kardashevskiy | 24 | 100.00% | 1 | 100.00% |
| Total | 24 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
#ifdef CONFIG_PPC_STD_MMU_64
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
disable_radix = true;
return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Michael Ellerman | 18 | 100.00% | 1 | 100.00% |
| Total | 18 | 100.00% | 1 | 100.00% |
early_param("disable_radix", parse_disable_radix);
void __init mmu_early_init_devtree(void)
{
/* Disable radix mode based on kernel command line. */
if (disable_radix)
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
if (early_radix_enabled())
radix__early_init_devtree();
else
hash__early_init_devtree();
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Michael Ellerman | 30 | 93.75% | 4 | 66.67% |
| Aneesh Kumar | 2 | 6.25% | 2 | 33.33% |
| Total | 32 | 100.00% | 6 | 100.00% |
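Because parse_disable_radix() is registered through early_param("disable_radix", ...), radix can be turned off from the kernel command line; for example (the surrounding arguments are placeholders, only the final token matters here):

	root=/dev/sda2 ro disable_radix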
#endif /* CONFIG_PPC_STD_MMU_64 */
Overall Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Li Zhong | 407 | 27.63% | 3 | 6.98% |
| David Gibson | 199 | 13.51% | 6 | 13.95% |
| Andy Whitcroft | 172 | 11.68% | 1 | 2.33% |
| Paul Mackerras | 162 | 11.00% | 1 | 2.33% |
| Alexey Kardashevskiy | 158 | 10.73% | 1 | 2.33% |
| Mark Nelson | 140 | 9.50% | 1 | 2.33% |
| Michael Ellerman | 67 | 4.55% | 5 | 11.63% |
| Aneesh Kumar | 63 | 4.28% | 6 | 13.95% |
| Benjamin Herrenschmidt | 27 | 1.83% | 5 | 11.63% |
| Alexey Dobriyan | 17 | 1.15% | 1 | 2.33% |
| Nathan Fontenot | 17 | 1.15% | 1 | 2.33% |
| Sonny Rao | 10 | 0.68% | 1 | 2.33% |
| Kumar Gala | 9 | 0.61% | 2 | 4.65% |
| Christoph Lameter | 5 | 0.34% | 2 | 4.65% |
| Yanjiang Jin | 5 | 0.34% | 1 | 2.33% |
| Johannes Weiner | 4 | 0.27% | 1 | 2.33% |
| Tejun Heo | 3 | 0.20% | 1 | 2.33% |
| David S. Miller | 3 | 0.20% | 1 | 2.33% |
| Randy Dunlap | 3 | 0.20% | 1 | 2.33% |
| Yinghai Lu | 1 | 0.07% | 1 | 2.33% |
| Hugh Dickins | 1 | 0.07% | 1 | 2.33% |
| Total | 1473 | 100.00% | 43 | 100.00% |