cregit-Linux how code gets into the kernel

Release 4.14 arch/powerpc/mm/pgtable_64.c

/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>
#include <asm/powernv.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */

struct prtb_entry *process_tb;
struct patb_entry *partition_tb;

/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */

void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
                           unsigned long flags)
{
        unsigned long i;

        /* Make sure we have the base flags */
        if ((flags & _PAGE_PRESENT) == 0)
                flags |= pgprot_val(PAGE_KERNEL);

        /* We don't support the 4K PFN hack with ioremap */
        if (flags & H_PAGE_4K_PFN)
                return NULL;

        WARN_ON(pa & ~PAGE_MASK);
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        for (i = 0; i < size; i += PAGE_SIZE)
                if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
                        return NULL;

        return (void __iomem *)ea;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Paul Mackerras              76   59.38%        1      16.67%
Benjamin Herrenschmidt      51   39.84%        4      66.67%
Aneesh Kumar K.V             1    0.78%        1      16.67%
Total                      128  100.00%        6     100.00%

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
        WARN_ON(size & ~PAGE_MASK);

        unmap_kernel_range((unsigned long)ea, size);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt      40   86.96%        1      50.00%
Paul Mackerras               6   13.04%        1      50.00%
Total                       46  100.00%        2     100.00%
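
For a sense of how this pair is used together: a caller that manages its own slice of the IO virtual space (the PCI code does this for PHB IO windows) bolts pages in with __ioremap_at() and tears them down with __iounmap_at(). Below is a minimal hedged sketch, not taken from this file; the physical address, the virtual hole, and the helper names are invented for illustration.

/*
 * Hypothetical sketch: bolt a 64K MMIO window into a caller-managed
 * virtual hole, then unmap it again. All addresses are illustrative.
 */
static void __iomem *example_win;

static int example_map_window(void *hole, phys_addr_t phys)
{
        /* flags built the same way ioremap() builds them */
        example_win = __ioremap_at(phys, hole, 0x10000,
                                   pgprot_val(pgprot_noncached(__pgprot(0))));
        return example_win ? 0 : -ENOMEM;
}

static void example_unmap_window(void *hole)
{
        __iounmap_at(hole, 0x10000);    /* size must stay page aligned */
}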


void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
                               unsigned long flags, void *caller)
{
        phys_addr_t paligned;
        void __iomem *ret;

        /*
         * Choose an address to map it to.
         * Once the imalloc system is running, we use it.
         * Before that, we map using addresses going
         * up from ioremap_bot. imalloc will use
         * the addresses from ioremap_bot through
         * IMALLOC_END
         */
        paligned = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - paligned;

        if ((size == 0) || (paligned == 0))
                return NULL;

        if (slab_is_available()) {
                struct vm_struct *area;

                area = __get_vm_area_caller(size, VM_IOREMAP, ioremap_bot,
                                            IOREMAP_END, caller);
                if (area == NULL)
                        return NULL;

                area->phys_addr = paligned;
                ret = __ioremap_at(paligned, area->addr, size, flags);
                if (!ret)
                        vunmap(area->addr);
        } else {
                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
                if (ret)
                        ioremap_bot += size;
        }

        if (ret)
                ret += addr & ~PAGE_MASK;
        return ret;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Paul Mackerras             118   67.05%        1      14.29%
Benjamin Herrenschmidt      43   24.43%        3      42.86%
Michael Ellerman             8    4.55%        2      28.57%
Linas Vepstas                7    3.98%        1      14.29%
Total                      176  100.00%        7     100.00%


void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
                        unsigned long flags)
{
        return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt      33  100.00%        1     100.00%
Total                       33  100.00%        1     100.00%


void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt      60   85.71%        3      75.00%
Aneesh Kumar K.V            10   14.29%        1      25.00%
Total                       70  100.00%        4     100.00%
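
Most drivers never touch the __-prefixed helpers; they call ioremap() and let the kernel pick the virtual address. A hedged probe-style fragment (the register offset and function names are made up, not part of this file):

/* Hypothetical driver fragment: map device registers and read one. */
#define EXAMPLE_REG_STATUS      0x04    /* invented register offset */

static int example_probe(phys_addr_t regs_phys, unsigned long regs_len)
{
        void __iomem *regs = ioremap(regs_phys, regs_len);

        if (!regs)
                return -ENOMEM;
        pr_info("status: 0x%x\n", in_be32(regs + EXAMPLE_REG_STATUS));
        iounmap(regs);
        return 0;
}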


void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
        unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
        void *caller = __builtin_return_address(0);

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Anton Blanchard             60   85.71%        1      50.00%
Aneesh Kumar K.V            10   14.29%        1      50.00%
Total                       70  100.00%        2     100.00%


void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size,
                           unsigned long flags)
{
        void *caller = __builtin_return_address(0);

        /* writeable implies dirty for kernel addresses */
        if (flags & _PAGE_WRITE)
                flags |= _PAGE_DIRTY;

        /* we don't want to let _PAGE_EXEC leak out */
        flags &= ~_PAGE_EXEC;
        /*
         * Force kernel mapping.
         */
#if defined(CONFIG_PPC_BOOK3S_64)
        flags |= _PAGE_PRIVILEGED;
#else
        flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
        /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
         * which means that we just cleared supervisor access... oops ;-) This
         * restores it */
        flags |= _PAGE_BAP_SR;
#endif

        if (ppc_md.ioremap)
                return ppc_md.ioremap(addr, size, flags, caller);
        return __ioremap_caller(addr, size, flags, caller);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt      83   78.30%        5      62.50%
Aneesh Kumar K.V            22   20.75%        2      25.00%
Anton Blanchard              1    0.94%        1      12.50%
Total                      106  100.00%        8     100.00%
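
Note how ioremap_prot() sanitizes caller-supplied bits before mapping: writable implies dirty, execute is stripped, and user access is forced off. A caller wanting the standard uncached attributes could pass something like the following; this is a hedged sketch and the helper name is invented:

/* Hypothetical: request an explicitly cache-inhibited, guarded mapping. */
static void __iomem *example_map_guarded(phys_addr_t pa, unsigned long len)
{
        /* the same bits pgprot_noncached() produces for plain ioremap() */
        return ioremap_prot(pa, len,
                            pgprot_val(pgprot_noncached(__pgprot(0))));
}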

/*
 * Unmap an IO region and remove it from imalloc'd list.
 * Access to IO memory should be serialized by driver.
 */
void __iounmap(volatile void __iomem *token)
{
        void *addr;

        if (!slab_is_available())
                return;

        addr = (void *) ((unsigned long __force)
                         PCI_FIX_ADDR(token) & PAGE_MASK);
        if ((unsigned long)addr < ioremap_bot) {
                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
                       " at 0x%p\n", addr);
                return;
        }
        vunmap(addr);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Paul Mackerras              34   49.28%        1      33.33%
Benjamin Herrenschmidt      33   47.83%        1      33.33%
Michael Ellerman             2    2.90%        1      33.33%
Total                       69  100.00%        3     100.00%


void iounmap(volatile void __iomem *token)
{
        if (ppc_md.iounmap)
                ppc_md.iounmap(token);
        else
                __iounmap(token);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt      30  100.00%        2     100.00%
Total                       30  100.00%        2     100.00%

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
        if (pgd_huge(pgd))
                return pte_page(pgd_pte(pgd));
        return virt_to_page(pgd_page_vaddr(pgd));
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V35100.00%1100.00%
Total35100.00%1100.00%

#endif
struct page *pud_page(pud_t pud)
{
        if (pud_huge(pud))
                return pte_page(pud_pte(pud));
        return virt_to_page(pud_page_vaddr(pud));
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V35100.00%1100.00%
Total35100.00%1100.00%

/*
 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags.
 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
        if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
                return pte_page(pmd_pte(pmd));
        return virt_to_page(pmd_page_vaddr(pmd));
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V4088.89%375.00%
Oliver O'Halloran511.11%125.00%
Total45100.00%4100.00%

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
        void *pte_frag, *ret;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pte_frag;
        if (ret) {
                pte_frag = ret + PTE_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments mark PTE page NULL
                 */
                if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
                        pte_frag = NULL;
                mm->context.pte_frag = pte_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pte_t *)ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V90100.00%1100.00%
Total90100.00%1100.00%


static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
        void *ret = NULL;
        struct page *page;

        if (!kernel) {
                page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
                if (!page)
                        return NULL;
                if (!pgtable_page_ctor(page)) {
                        __free_page(page);
                        return NULL;
                }
        } else {
                page = alloc_page(PGALLOC_GFP);
                if (!page)
                        return NULL;
        }

        ret = page_address(page);
        spin_lock(&mm->page_table_lock);
        /*
         * If we find pgtable_page set, we return
         * the allocated page with single fragment
         * count.
         */
        if (likely(!mm->context.pte_frag)) {
                set_page_count(page, PTE_FRAG_NR);
                mm->context.pte_frag = ret + PTE_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pte_t *)ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V9866.22%125.00%
Balbir Singh3120.95%125.00%
Kirill A. Shutemov1812.16%125.00%
JoonSoo Kim10.68%125.00%
Total148100.00%4100.00%


pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
        pte_t *pte;

        pte = get_from_cache(mm);
        if (pte)
                return pte;

        return __alloc_for_cache(mm, kernel);
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V44100.00%2100.00%
Total44100.00%2100.00%

#endif /* CONFIG_PPC_64K_PAGES */
void pte_fragment_free(unsigned long *table, int kernel)
{
        struct page *page = virt_to_page(table);

        if (put_page_testzero(page)) {
                if (!kernel)
                        pgtable_page_dtor(page);
                free_hot_cold_page(page, 0);
        }
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V49100.00%2100.00%
Total49100.00%2100.00%
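
The fragment scheme above exists because a 64K page is far larger than the 4K a PTE page needs: one page is carved into PTE_FRAG_NR fragments of PTE_FRAG_SIZE bytes, handed out sequentially and refcounted through the backing struct page. A standalone, hedged illustration of the arithmetic (the _EX constants mirror the usual 64K-page book3s64 values; they are not the kernel's macros):

#include <stdio.h>

#define PAGE_SIZE_EX     (64 * 1024)
#define PTE_FRAG_SIZE_EX  (4 * 1024)
#define PTE_FRAG_NR_EX   (PAGE_SIZE_EX / PTE_FRAG_SIZE_EX)     /* 16 */

int main(void)
{
        unsigned long page = 0x10000;   /* pretend page_address() result */
        unsigned long frag = page;
        int n;

        for (n = 0; n < PTE_FRAG_NR_EX; n++) {
                printf("fragment %2d at 0x%lx\n", n, frag);
                frag += PTE_FRAG_SIZE_EX;
        }
        /*
         * After the last fragment the cursor is page aligned again,
         * which is exactly the condition get_from_cache() uses to
         * mark the cached PTE page exhausted.
         */
        printf("wrapped: %d\n", (frag & (PAGE_SIZE_EX - 1)) == 0);
        return 0;
}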

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        unsigned long pgf = (unsigned long)table;

        BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
        pgf |= shift;
        tlb_remove_table(tlb, (void *)pgf);
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V49100.00%1100.00%
Total49100.00%1100.00%


void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        if (!shift)
                /* PTE page needs special handling */
                pte_fragment_free(table, 0);
        else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V72100.00%2100.00%
Total72100.00%2100.00%

#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
        if (!shift) {
                /* PTE page needs special handling */
                pte_fragment_free(table, 0);
        } else {
                BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
                kmem_cache_free(PGT_CACHE(shift), table);
        }
}

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V52100.00%2100.00%
Total52100.00%2100.00%
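
Both variants rely on the same trick: page-table allocations are aligned beyond MAX_PGTABLE_INDEX_SIZE, so pgtable_free_tlb() can smuggle the index-size "shift" in the low bits of the pointer, and __tlb_remove_table() can recover both halves later. A standalone hedged sketch of that pointer tagging (INDEX_MASK is a stand-in, not the kernel constant):

#include <assert.h>
#include <stdint.h>

#define INDEX_MASK 0xful        /* stand-in for MAX_PGTABLE_INDEX_SIZE */

static void *pack(void *table, unsigned int shift)
{
        uintptr_t pgf = (uintptr_t)table;

        assert((pgf & INDEX_MASK) == 0);        /* alignment frees the bits */
        assert(shift <= INDEX_MASK);
        return (void *)(pgf | shift);
}

static void *unpack(void *tagged, unsigned int *shift)
{
        *shift = (uintptr_t)tagged & INDEX_MASK;
        return (void *)((uintptr_t)tagged & ~INDEX_MASK);
}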

#endif

#ifdef CONFIG_PPC_BOOK3S_64
void __init mmu_partition_table_init(void)
{
        unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
        unsigned long ptcr;

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
        partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
                                                MEMBLOCK_ALLOC_ANYWHERE));

        /* Initialize the Partition Table with no entries */
        memset((void *)partition_tb, 0, patb_size);

        /*
         * update partition table control register,
         * 64 K size.
         */
        ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
        mtspr(SPRN_PTCR, ptcr);
        powernv_set_nmmu_ptcr(ptcr);
}

Contributors

PersonTokensPropCommitsCommitProp
Paul Mackerras6880.00%150.00%
Alistair Popple1720.00%150.00%
Total85100.00%2100.00%
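
The PTCR write packs two things into one register: the physical base of the partition table and a size code equal to log2(table size) minus 12, so the 64K table implied by PATB_SIZE_SHIFT encodes as 4. A hedged one-liner checking that arithmetic (the _EX names are illustrative, not the kernel's):

#define PATB_SIZE_SHIFT_EX 16   /* 1 << 16 = 64K partition table */

static unsigned long example_ptcr(unsigned long patb_phys)
{
        /* base is size-aligned, so the low bits are free for the code */
        return patb_phys | (PATB_SIZE_SHIFT_EX - 12);   /* code = 4 */
}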


void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
                                   unsigned long dw1)
{
        unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

        partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        partition_tb[lpid].patb1 = cpu_to_be64(dw1);

        /*
         * Global flush of TLBs and partition table caches for this lpid.
         * The type of flush (hash or radix) depends on what the previous
         * use of this partition ID was, not the new use.
         */
        asm volatile("ptesync" : : : "memory");
        if (old & PATB_HR) {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
        } else {
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
        }
        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}

Contributors

PersonTokensPropCommitsCommitProp
Paul Mackerras7566.37%266.67%
Balbir Singh3833.63%133.33%
Total113100.00%3100.00%

EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
        if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
                pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
                return;
        }

        if (radix_enabled())
                radix__mark_rodata_ro();
        else
                hash__mark_rodata_ro();
}

Contributors

PersonTokensPropCommitsCommitProp
Balbir Singh35100.00%2100.00%
Total35100.00%2100.00%


void mark_initmem_nx(void)
{
        if (radix_enabled())
                radix__mark_initmem_nx();
        else
                hash__mark_initmem_nx();
}

Contributors

PersonTokensPropCommitsCommitProp
Michael Ellerman19100.00%1100.00%
Total19100.00%1100.00%

#endif

Overall Contributors

Person                  Tokens     Prop  Commits  CommitProp
Aneesh Kumar K.V           849   42.20%       17      34.00%
Paul Mackerras             482   23.96%        3       6.00%
Benjamin Herrenschmidt     388   19.28%        7      14.00%
Balbir Singh               112    5.57%        4       8.00%
Anton Blanchard             70    3.48%        3       6.00%
Michael Ellerman            38    1.89%        4       8.00%
Alistair Popple             20    0.99%        1       2.00%
Kirill A. Shutemov          18    0.89%        1       2.00%
Olof Johansson              10    0.50%        1       2.00%
Linas Vepstas                7    0.35%        1       2.00%
Oliver O'Halloran            5    0.25%        1       2.00%
Tejun Heo                    3    0.15%        1       2.00%
Stephen Rothwell             3    0.15%        1       2.00%
Paul Gortmaker               3    0.15%        1       2.00%
David Gibson                 2    0.10%        2       4.00%
JoonSoo Kim                  1    0.05%        1       2.00%
Yinghai Lu                   1    0.05%        1       2.00%
Total                     2012  100.00%       50     100.00%