cregit-Linux: how code gets into the kernel

Release 4.10: arch/powerpc/mm/init_64.c

Directory: arch/powerpc/mm
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */


#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */


phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);

phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */

static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
andy whitcroft                41   97.62%        1      50.00%
michael ellerman               1    2.38%        1      50.00%
Total                         42  100.00%        2     100.00%
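
To make the arithmetic above concrete, here is a minimal, self-contained userspace sketch of the same computation, using illustrative stand-in constants (a 64-byte struct page, 0x1000 pages per section, and a made-up vmemmap base; the real values depend on the kernel's PAGE_SHIFT and SECTION_SIZE_BITS). Dividing before masking is what makes an unaligned input safe: the byte offset is first converted to a pfn, and only then rounded down to a section boundary.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel constants used above. */
#define PAGE_STRUCT_SIZE   64UL                     /* sizeof(struct page) */
#define PAGES_PER_SECTION  0x1000UL                 /* pages per sparsemem section */
#define PAGE_SECTION_MASK  (~(PAGES_PER_SECTION - 1))
#define VMEMMAP_BASE       0xf000000000000000UL     /* made-up vmemmap start */

static unsigned long section_start_pfn(unsigned long page_addr)
{
        unsigned long offset = page_addr - VMEMMAP_BASE;

        /* Divide first, then mask: the address may not be struct-aligned. */
        return (offset / PAGE_STRUCT_SIZE) & PAGE_SECTION_MASK;
}

int main(void)
{
        /* An address 0x123456 bytes into the vmemmap lies in the struct
         * page for pfn 0x48d1; its section starts at pfn 0x4000. */
        printf("section start pfn: %#lx\n",
               section_start_pfn(VMEMMAP_BASE + 0x123456));
        return 0;
}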

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;

        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
andy whitcroft                55   70.51%        1      33.33%
li zhong                      22   28.21%        1      33.33%
michael ellerman               1    1.28%        1      33.33%
Total                         78  100.00%        3     100.00%

struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;

        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;
                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
mark nelson                   68  100.00%        1     100.00%
Total                         68  100.00%        1     100.00%
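
vmemmap_list_alloc() above is a small carve-and-recycle allocator: entries pushed back by vmemmap_list_free() (further down, under CONFIG_MEMORY_HOTPLUG) are reused first, and otherwise new entries are carved out of a page obtained from vmemmap_alloc_block(). The following is a minimal userspace analogue of the same pattern, a sketch only, with malloc() standing in for the kernel block allocator and a hypothetical CHUNK_SIZE in place of PAGE_SIZE:

#include <stdlib.h>

#define CHUNK_SIZE 4096UL              /* stand-in for PAGE_SIZE */

struct entry {
        struct entry *list;            /* chained like vmemmap_backing */
};

static struct entry *next;
static int num_left;
static int num_freed;

static struct entry *entry_alloc(void)
{
        struct entry *e;

        /* Get from freed entries first. */
        if (num_freed) {
                num_freed--;
                e = next;
                next = next->list;
                return e;
        }

        /* Allocate a chunk when required and hand out entries. */
        if (!num_left) {
                next = malloc(CHUNK_SIZE);
                if (!next)
                        return NULL;
                num_left = CHUNK_SIZE / sizeof(struct entry);
        }

        num_left--;
        return next++;
}

static void entry_free(struct entry *e)
{
        /* Push onto the free list; entry_alloc() pops it first. */
        e->list = next;
        next = e;
        num_freed++;
}

As in the kernel version, individual entries are recycled but the underlying chunks are never returned to the allocator.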


int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;
                int rc;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug(" * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                rc = vmemmap_create_mapping(start, page_size, __pa(p));
                if (rc < 0) {
                        pr_warning("vmemmap_populate: Unable to create vmemmap mapping: %d\n", rc);
                        return -EFAULT;
                }
        }

        return 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
li zhong                     136   85.00%        1      50.00%
david gibson                  24   15.00%        1      50.00%
Total                        160  100.00%        2     100.00%

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* next point to this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
li zhong                     112  100.00%        1     100.00%
Total                        112  100.00%        1     100.00%


void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long addr;

                /*
                 * The section has already been marked as invalid, so
                 * vmemmap_populated() returning true means some other
                 * sections still use this page; skip it.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (addr) {
                        struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                        if (PageReserved(page)) {
                                /* allocated from bootmem */
                                if (page_size < PAGE_SIZE) {
                                        /*
                                         * This shouldn't happen, but if it
                                         * does, leave the memory there.
                                         */
                                        WARN_ON_ONCE(1);
                                } else {
                                        unsigned int nr_pages =
                                                1 << get_order(page_size);
                                        while (nr_pages--)
                                                free_reserved_page(page++);
                                }
                        } else
                                free_pages((unsigned long)(__va(addr)),
                                           get_order(page_size));

                        vmemmap_remove_mapping(start, page_size);
                }
        }
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
li zhong                      86   49.43%        1      16.67%
andy whitcroft                69   39.66%        1      16.67%
benjamin herrenschmidt        10    5.75%        2      33.33%
mark nelson                    5    2.87%        1      16.67%
johannes weiner                4    2.30%        1      16.67%
Total                        174  100.00%        6     100.00%
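
The reserved-page branch above frees the backing block one base page at a time, so the count worth checking is 1 << get_order(page_size). A quick worked example with illustrative sizes (64 KB base pages and a 16 MB vmemmap mapping page; get_order() is re-implemented here in simplified form for a standalone build):

#include <stdio.h>

#define PAGE_SHIFT 16                          /* 64 KB base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Simplified get_order(): smallest order such that
 * (1 << order) base pages cover the requested size. */
static int get_order(unsigned long size)
{
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
                order++;
                size >>= 1;
        }
        return order;
}

int main(void)
{
        unsigned long mapping_size = 16UL << 20;   /* 16 MB */

        /* 16 MB / 64 KB = 256 pages -> order 8 -> 256 calls
         * to free_reserved_page() in the loop above. */
        printf("order %d, %lu pages\n", get_order(mapping_size),
               1UL << get_order(mapping_size));
        return 0;
}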

#endif
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
nathan fontenot               17  100.00%        1     100.00%
Total                         17  100.00%        1     100.00%

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) As real sparsemem blocks do not lie in RAM contiguously (they
 * are in virtual address space which is not available in real mode),
 * the requested page struct can be split between blocks so get_page/put_page
 * may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct vmemmap_backing *vmem_back;
        struct page *page;
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

        for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
                if (pg_va < vmem_back->virt_addr)
                        continue;

                /* After vmemmap_list entry free is possible, need check all */
                if ((pg_va + sizeof(struct page)) <=
                                (vmem_back->virt_addr + page_size)) {
                        page = (struct page *) (vmem_back->phys + pg_va -
                                        vmem_back->virt_addr);
                        return page;
                }
        }

        /* Probably that page struct is split between real pages */
        return NULL;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
alexey kardashevskiy         116   95.87%        1      50.00%
li zhong                       5    4.13%        1      50.00%
Total                        121  100.00%        2     100.00%
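
The loop above is a virtual-to-physical translation over the vmemmap_backing list: find a block whose virtual range wholly contains the requested struct page, then rebase the address from virt_addr onto phys. Below is a minimal sketch of that translation with a hypothetical two-block list (BLOCK_SIZE plays the role of the mmu_vmemmap_psize mapping size; addresses and physical bases are made up):

#include <stdio.h>

#define BLOCK_SIZE 0x10000UL           /* hypothetical mapping page size */

struct vmemmap_backing {
        struct vmemmap_backing *list;
        unsigned long phys;
        unsigned long virt_addr;
};

/* Walk the list; return the physical address backing [va, va + len),
 * or 0 if the object is split across blocks or unmapped. */
static unsigned long vmemmap_virt_to_phys(struct vmemmap_backing *head,
                                          unsigned long va, unsigned long len)
{
        struct vmemmap_backing *vb;

        for (vb = head; vb; vb = vb->list) {
                if (va < vb->virt_addr)
                        continue;
                if (va + len <= vb->virt_addr + BLOCK_SIZE)
                        return vb->phys + (va - vb->virt_addr);
        }
        return 0;
}

int main(void)
{
        struct vmemmap_backing b1 = { NULL, 0x2000000UL, 0xd000000000010000UL };
        struct vmemmap_backing b0 = { &b1,  0x1000000UL, 0xd000000000000000UL };

        /* 0x40 bytes at offset 0x80 into the second block -> 0x2000080. */
        printf("%#lx\n",
               vmemmap_virt_to_phys(&b0, 0xd000000000010080UL, 0x40));
        return 0;
}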

EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)
struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);

        return page;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
alexey kardashevskiy          24  100.00%        1     100.00%
Total                         24  100.00%        1     100.00%

EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */

#ifdef CONFIG_PPC_STD_MMU_64
static bool disable_radix;
static int __init parse_disable_radix(char *p)
{
        disable_radix = true;

        return 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
michael ellerman              18  100.00%        1     100.00%
Total                         18  100.00%        1     100.00%

early_param("disable_radix", parse_disable_radix);
void __init mmu_early_init_devtree(void)
{
        /* Disable radix mode based on kernel command line. */
        /* We don't yet have the machinery to do radix as a guest. */
        if (disable_radix || !(mfmsr() & MSR_HV))
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

        if (early_radix_enabled())
                radix__early_init_devtree();
        else
                hash__early_init_devtree();
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
michael ellerman              30   73.17%        4      57.14%
paul mackerras                 9   21.95%        1      14.29%
aneesh kumar                   2    4.88%        2      28.57%
Total                         41  100.00%        7     100.00%
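
For context on the flow here: early_param() registers parse_disable_radix() to run while the kernel scans its command line, well before mmu_early_init_devtree() picks an MMU mode, so booting with disable_radix forces the hash path above. The following is a simplified userspace analogue of that dispatch, a sketch only (the real parser tokenizes the command line and passes any text after '=' to the handler):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool disable_radix;

static int parse_disable_radix(char *p)
{
        disable_radix = true;
        return 0;
}

int main(void)
{
        /* Hypothetical kernel command line. */
        const char *cmdline = "root=/dev/sda2 disable_radix";
        char arg[] = "";

        /* Crude stand-in for early_param() matching. */
        if (strstr(cmdline, "disable_radix"))
                parse_disable_radix(arg);

        printf("MMU: %s\n", disable_radix ? "hash (radix disabled)"
                                          : "radix permitted");
        return 0;
}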

#endif /* CONFIG_PPC_STD_MMU_64 */

Overall Contributors

Person                    Tokens     Prop  Commits  CommitProp
li zhong                     407   34.20%        3       9.09%
andy whitcroft               172   14.45%        1       3.03%
alexey kardashevskiy         158   13.28%        1       3.03%
paul mackerras               143   12.02%        2       6.06%
mark nelson                  140   11.76%        1       3.03%
michael ellerman              67    5.63%        5      15.15%
david gibson                  29    2.44%        4      12.12%
benjamin herrenschmidt        19    1.60%        3       9.09%
nathan fontenot               17    1.43%        1       3.03%
sonny rao                     10    0.84%        1       3.03%
kumar gala                     9    0.76%        2       6.06%
johannes weiner                4    0.34%        1       3.03%
aneesh kumar                   4    0.34%        3       9.09%
david s. miller                3    0.25%        1       3.03%
tejun heo                      3    0.25%        1       3.03%
randy dunlap                   3    0.25%        1       3.03%
yinghai lu                     1    0.08%        1       3.03%
linus torvalds                 1    0.08%        1       3.03%
Total                       1190  100.00%       33     100.00%