Release 4.14: arch/powerpc/mm/hash_utils_64.c
/*
 * PowerPC64 port by Mike Corrigan and Dave Engebretsen
 *   {mikejc|engebret}@us.ibm.com
 *
 *    Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * 
 *    Module name: htab.c
 *
 *    Description:
 *      PowerPC Hashed Page Table functions
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */


#undef DEBUG
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/memblock.h>
#include <linux/context_tracking.h>
#include <linux/libfdt.h>

#include <asm/debugfs.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/copro.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
#include <asm/fadump.h>
#include <asm/firmware.h>
#include <asm/tm.h>
#include <asm/trace.h>
#include <asm/ps3.h>
#include <asm/pte-walk.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif


#define KB (1024)
#define MB (1024*KB)
#define GB (1024L*MB)

/*
 * Note:  pte   --> Linux PTE
 *        HPTE  --> PowerPC Hashed Page Table Entry
 *
 * Execution context:
 *   htab_initialize is called with the MMU off (of course), but
 *   the kernel has been copied down to zero so it can directly
 *   reference global data.  At this point it is very difficult
 *   to print debug info.
 *
 */


static unsigned long _SDR1;

struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
EXPORT_SYMBOL_GPL(mmu_psize_defs);

u8 hpte_page_sizes[1 << LP_BITS];
EXPORT_SYMBOL_GPL(hpte_page_sizes);

struct hash_pte *htab_address;
unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
EXPORT_SYMBOL_GPL(htab_hash_mask);
int mmu_linear_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
int mmu_vmemmap_psize = MMU_PAGE_4K;
#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
int mmu_highuser_ssize = MMU_SEGSIZE_256M;
u16 mmu_slb_size = 64;
EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
static DEFINE_SPINLOCK(linear_map_hash_lock);
#endif /* CONFIG_DEBUG_PAGEALLOC */

struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);

/*
 * These are the page size array definitions to be used when none are
 * provided by the firmware.
 */

/* Pre-POWER4 CPUs (4K pages only) */

static struct mmu_psize_def mmu_psize_defaults_old[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel = 0,
        },
};

/*
 * POWER4, GPUL, POWER5
 *
 * Support for 16MB large pages
 */

static struct mmu_psize_def mmu_psize_defaults_gp[] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.sllp	= 0,
		.penc   = {[MMU_PAGE_4K] = 0, [1 ... MMU_PAGE_COUNT - 1] = -1},
		.avpnm	= 0,
		.tlbiel = 1,
        },
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.sllp	= SLB_VSID_L,
		.penc   = {[0 ... MMU_PAGE_16M - 1] = -1, [MMU_PAGE_16M] = 0,
			    [MMU_PAGE_16M + 1 ... MMU_PAGE_COUNT - 1] = -1 },
		.avpnm	= 0x1UL,
		.tlbiel = 0,
        },
};

/*
 * 'R' and 'C' update notes:
 *  - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
 *    create writeable HPTEs without C set, because the hcall H_PROTECT
 *    that we use in that case will not update C
 *  - The above is however not a problem, because we also don't do that
 *    fancy "no flush" variant of eviction and we use H_REMOVE, which will
 *    do the right thing, so we avoid the R/C update race
 *  - Under bare metal, we do have the race, so we need R and C set
 *  - We make sure R is always set and never lost
 *  - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
 */

unsigned long htab_convert_pte_flags(unsigned long pteflags)
{
	unsigned long rflags = 0;

	/* _PAGE_EXEC -> NOEXEC */
	if ((pteflags & _PAGE_EXEC) == 0)
		rflags |= HPTE_R_N;
	/*
	 * PPP bits:
	 * Linux uses slb key 0 for kernel and 1 for user.
	 * kernel RW areas are mapped with PPP=0b000
	 * User area is mapped with PPP=0b010 for read/write
	 * or PPP=0b011 for read-only (including writeable but clean pages).
	 */
	if (pteflags & _PAGE_PRIVILEGED) {
		/*
		 * Kernel read only mapped with ppp bits 0b110
		 */
		if (!(pteflags & _PAGE_WRITE)) {
			if (mmu_has_feature(MMU_FTR_KERNEL_RO))
				rflags |= (HPTE_R_PP0 | 0x2);
			else
				rflags |= 0x3;
		}
	} else {
		if (pteflags & _PAGE_RWX)
			rflags |= 0x2;
		if (!((pteflags & _PAGE_WRITE) && (pteflags & _PAGE_DIRTY)))
			rflags |= 0x1;
	}
	/*
	 * We can't allow hardware to update hpte bits. Hence always
	 * set 'R' bit and set 'C' if it is a write fault
	 */
	rflags |= HPTE_R_R;

	if (pteflags & _PAGE_DIRTY)
		rflags |= HPTE_R_C;
	/*
	 * Add in WIG bits
	 */
	if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
		rflags |= HPTE_R_I;
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
		rflags |= (HPTE_R_I | HPTE_R_G);
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
	else
		/*
		 * Add memory coherence if cache inhibited is not set
		 */
		rflags |= HPTE_R_M;

	return rflags;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Aneesh Kumar K.V              132   71.35%        9       90.00%
Benjamin Herrenschmidt         53   28.65%        1       10.00%
Total                         185  100.00%       10      100.00%
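Editor's note: the PP encoding described in the comments above can be sanity-checked with a small standalone sketch. The PTE_* constants here are simplified stand-ins, not the kernel's _PAGE_* definitions, and the MMU_FTR_KERNEL_RO 0b110 case is omitted for brevity:

#include <stdio.h>

/* Stand-in PTE bits for illustration only; not the kernel's values */
#define PTE_WRITE      0x1UL
#define PTE_DIRTY      0x2UL
#define PTE_PRIVILEGED 0x4UL

/* Simplified model of the PP bits picked by htab_convert_pte_flags() */
static unsigned long pp_bits(unsigned long pte)
{
	unsigned long pp = 0;

	if (pte & PTE_PRIVILEGED) {
		if (!(pte & PTE_WRITE))
			pp = 0x3;	/* kernel RO (legacy encoding) */
		/* else: kernel RW stays 0b000 */
	} else {
		pp = 0x2;		/* user mapping */
		if (!((pte & PTE_WRITE) && (pte & PTE_DIRTY)))
			pp |= 0x1;	/* RO, or writeable but clean */
	}
	return pp;
}

int main(void)
{
	printf("kernel RW : PP=%lx\n", pp_bits(PTE_PRIVILEGED | PTE_WRITE)); /* 0 -> 0b000 */
	printf("user dirty: PP=%lx\n", pp_bits(PTE_WRITE | PTE_DIRTY));     /* 2 -> 0b010 */
	printf("user clean: PP=%lx\n", pp_bits(PTE_WRITE));                 /* 3 -> 0b011 */
	return 0;
}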


int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
		      unsigned long pstart, unsigned long prot,
		      int psize, int ssize)
{
	unsigned long vaddr, paddr;
	unsigned int step, shift;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	prot = htab_convert_pte_flags(prot);

	DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
	    vstart, vend, pstart, prot, psize, ssize);

	for (vaddr = vstart, paddr = pstart; vaddr < vend;
	     vaddr += step, paddr += step) {
		unsigned long hash, hpteg;
		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
		unsigned long vpn  = hpt_vpn(vaddr, vsid, ssize);
		unsigned long tprot = prot;

		/*
		 * If we hit a bad address return error.
		 */
		if (!vsid)
			return -1;
		/* Make kernel text executable */
		if (overlaps_kernel_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/* Make kvm guest trampolines executable */
		if (overlaps_kvm_tmp(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		/*
		 * If relocatable, check if it overlaps interrupt vectors that
		 * are copied down to real 0. For relocatable kernel
		 * (e.g. kdump case) we copy interrupt vectors down to real
		 * address 0. Mark that region as executable. This is
		 * because on p8 system with relocation on exception feature
		 * enabled, exceptions are raised with MMU (IR=DR=1) ON. Hence
		 * in order to execute the interrupt handlers in virtual
		 * mode the vector region need to be marked as executable.
		 */
		if ((PHYSICAL_START > MEMORY_START) &&
		    overlaps_interrupt_vector_text(vaddr, vaddr + step))
			tprot &= ~HPTE_R_N;

		hash = hpt_hash(vpn, shift, ssize);
		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

		BUG_ON(!mmu_hash_ops.hpte_insert);
		ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
					       HPTE_V_BOLTED, psize, psize,
					       ssize);

		if (ret < 0)
			break;

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (debug_pagealloc_enabled() &&
		    (paddr >> PAGE_SHIFT) < linear_map_hash_count)
			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
#endif /* CONFIG_DEBUG_PAGEALLOC */
	}
	return ret < 0 ? ret : 0;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Benjamin Herrenschmidt        116   37.42%        5       26.32%
Anton Blanchard                86   27.74%        3       15.79%
Paul Mackerras                 39   12.58%        3       15.79%
Mahesh Salgaonkar              23    7.42%        1        5.26%
Alexander Graf                 17    5.48%        1        5.26%
Aneesh Kumar K.V               16    5.16%        3       15.79%
Michael Ellerman                6    1.94%        1        5.26%
David Gibson                    4    1.29%        1        5.26%
JoonSoo Kim                     3    0.97%        1        5.26%
Total                         310  100.00%       19      100.00%


int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize)
{
	unsigned long vaddr;
	unsigned int step, shift;
	int rc;
	int ret = 0;

	shift = mmu_psize_defs[psize].shift;
	step = 1 << shift;

	if (!mmu_hash_ops.hpte_removebolted)
		return -ENODEV;

	for (vaddr = vstart; vaddr < vend; vaddr += step) {
		rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
		if (rc == -ENOENT) {
			ret = -ENOENT;
			continue;
		}
		if (rc < 0)
			return rc;
	}

	return ret;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Badari Pulavarty               79   66.39%        2       40.00%
David Gibson                   38   31.93%        2       40.00%
Benjamin Herrenschmidt          2    1.68%        1       20.00%
Total                         119  100.00%        5      100.00%

static bool disable_1tb_segments = false;
static int __init parse_disable_1tb_segments(char *p)
{
	disable_1tb_segments = true;
	return 0;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Oliver O'Halloran              18  100.00%        1      100.00%
Total                          18  100.00%        1      100.00%

early_param("disable_1tb_segments", parse_disable_1tb_segments);
static int __init htab_dt_scan_seg_sizes(unsigned long node,
					 const char *uname, int depth,
					 void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
	if (prop == NULL)
		return 0;
	for (; size >= 4; size -= 4, ++prop) {
		if (be32_to_cpu(prop[0]) == 40) {
			DBG("1T segment support detected\n");

			if (disable_1tb_segments) {
				DBG("1T segments disabled by command line\n");
				break;
			}

			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
			return 1;
		}
	}
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 0;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Paul Mackerras                124   81.05%        1       14.29%
Oliver O'Halloran              12    7.84%        1       14.29%
Olof Johansson                  6    3.92%        2       28.57%
Anton Blanchard                 4    2.61%        1       14.29%
Matt Evans                      4    2.61%        1       14.29%
Rob Herring                     3    1.96%        1       14.29%
Total                         153  100.00%        7      100.00%


static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x14:
		idx = MMU_PAGE_1M;
		break;
	case 0x18:
		idx = MMU_PAGE_16M;
		break;
	case 0x22:
		idx = MMU_PAGE_16G;
		break;
	}
	return idx;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Aneesh Kumar K.V               66  100.00%        1      100.00%
Total                          66  100.00%        1      100.00%


static int __init htab_dt_scan_page_sizes(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	int size = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	size /= 4;
	cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
	while (size > 0) {
		unsigned int base_shift = be32_to_cpu(prop[0]);
		unsigned int slbenc = be32_to_cpu(prop[1]);
		unsigned int lpnum = be32_to_cpu(prop[2]);
		struct mmu_psize_def *def;
		int idx, base_idx;

		size -= 3; prop += 3;
		base_idx = get_idx_from_shift(base_shift);
		if (base_idx < 0) {
			/* skip the pte encoding also */
			prop += lpnum * 2; size -= lpnum * 2;
			continue;
		}
		def = &mmu_psize_defs[base_idx];
		if (base_idx == MMU_PAGE_16M)
			cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;

		def->shift = base_shift;
		if (base_shift <= 23)
			def->avpnm = 0;
		else
			def->avpnm = (1 << (base_shift - 23)) - 1;
		def->sllp = slbenc;
		/*
		 * We don't know for sure what's up with tlbiel, so
		 * for now we only set it for 4K and 64K pages
		 */
		if (base_idx == MMU_PAGE_4K || base_idx == MMU_PAGE_64K)
			def->tlbiel = 1;
		else
			def->tlbiel = 0;

		while (size > 0 && lpnum) {
			unsigned int shift = be32_to_cpu(prop[0]);
			int penc = be32_to_cpu(prop[1]);

			prop += 2; size -= 2;
			lpnum--;

			idx = get_idx_from_shift(shift);
			if (idx < 0)
				continue;

			if (penc == -1)
				pr_err("Invalid penc for base_shift=%d "
				       "shift=%d\n", base_shift, shift);

			def->penc[idx] = penc;
			pr_info("base_shift=%d: shift=%d, sllp=0x%04lx,"
				" avpnm=0x%08lx, tlbiel=%d, penc=%d\n",
				base_shift, shift, def->sllp,
				def->avpnm, def->tlbiel, def->penc[idx]);
		}
	}

	return 1;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Benjamin Herrenschmidt        199   50.25%        3       25.00%
Aneesh Kumar K.V              120   30.30%        2       16.67%
Anton Blanchard                40   10.10%        2       16.67%
Paul Mackerras                 23    5.81%        1        8.33%
Michael Ellerman                7    1.77%        2       16.67%
Matt Evans                      4    1.01%        1        8.33%
Rob Herring                     3    0.76%        1        8.33%
Total                         396  100.00%       12      100.00%

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Scan for 16G memory blocks that have been set aside for huge pages
 * and reserve those blocks for 16G huge pages.
 */
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
					       const char *uname, int depth,
					       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be64 *addr_prop;
	const __be32 *page_count_prop;
	unsigned int expected_pages;
	long unsigned int phys_addr;
	long unsigned int block_size;

	/* We are scanning "memory" nodes only */
	if (type == NULL || strcmp(type, "memory") != 0)
		return 0;

	/*
	 * This property is the log base 2 of the number of virtual pages that
	 * will represent this memory block.
	 */
	page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
	if (page_count_prop == NULL)
		return 0;
	expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
	addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
	if (addr_prop == NULL)
		return 0;
	phys_addr = be64_to_cpu(addr_prop[0]);
	block_size = be64_to_cpu(addr_prop[1]);
	if (block_size != (16 * GB))
		return 0;
	printk(KERN_INFO "Huge page(16GB) memory: "
	       "addr = 0x%lX size = 0x%lX pages = %d\n",
	       phys_addr, block_size, expected_pages);
	if (phys_addr + block_size * expected_pages <= memblock_end_of_DRAM()) {
		memblock_reserve(phys_addr, block_size * expected_pages);
		pseries_add_gpage(phys_addr, block_size, expected_pages);
	}
	return 0;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Jon Tollefson                 196   91.16%        2       28.57%
Anton Blanchard                11    5.12%        1       14.29%
Rob Herring                     3    1.40%        1       14.29%
Yinghai Lu                      2    0.93%        1       14.29%
Rui Teng                        2    0.93%        1       14.29%
Aneesh Kumar K.V                1    0.47%        1       14.29%
Total                         215  100.00%        7      100.00%

#endif /* CONFIG_HUGETLB_PAGE */
static void mmu_psize_set_default_penc(void)
{
	int bpsize, apsize;

	for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
		for (apsize = 0; apsize < MMU_PAGE_COUNT; apsize++)
			mmu_psize_defs[bpsize].penc[apsize] = -1;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Aneesh Kumar K.V               52  100.00%        1      100.00%
Total                          52  100.00%        1      100.00%

#ifdef CONFIG_PPC_64K_PAGES
static bool might_have_hea(void)
{
	/*
	 * The HEA ethernet adapter requires awareness of the
	 * GX bus. Without that awareness we can easily assume
	 * we will never see an HEA ethernet device.
	 */
#ifdef CONFIG_IBMEBUS
	return !cpu_has_feature(CPU_FTR_ARCH_207S) &&
		firmware_has_feature(FW_FEATURE_SPLPAR);
#else
	return false;
#endif
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Alexander Graf                 26   83.87%        1       50.00%
Benjamin Herrenschmidt          5   16.13%        1       50.00%
Total                          31  100.00%        2      100.00%

#endif /* #ifdef CONFIG_PPC_64K_PAGES */
static void __init htab_scan_page_sizes(void)
{
	int rc;

	/* set the invalid penc to -1 */
	mmu_psize_set_default_penc();

	/* Default to 4K pages only */
	memcpy(mmu_psize_defs, mmu_psize_defaults_old,
	       sizeof(mmu_psize_defaults_old));

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(htab_dt_scan_page_sizes, NULL);
	if (rc == 0 && early_mmu_has_feature(MMU_FTR_16M_PAGE)) {
		/*
		 * Nothing in the device-tree, but the CPU supports 16M pages,
		 * so let's fallback on a known size list for 16M capable CPUs.
		 */
		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
		       sizeof(mmu_psize_defaults_gp));
	}

#ifdef CONFIG_HUGETLB_PAGE
	/* Reserve 16G huge page memory sections for huge pages */
	of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
#endif /* CONFIG_HUGETLB_PAGE */
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Benjamin Herrenschmidt         52   65.82%        3       42.86%
Michael Ellerman               21   26.58%        1       14.29%
Aneesh Kumar K.V                5    6.33%        2       28.57%
Matt Evans                      1    1.27%        1       14.29%
Total                          79  100.00%        7      100.00%

/*
 * Fill in the hpte_page_sizes[] array.
 * We go through the mmu_psize_defs[] array looking for all the
 * supported base/actual page size combinations.  Each combination
 * has a unique pagesize encoding (penc) value in the low bits of
 * the LP field of the HPTE.  For actual page sizes less than 1MB,
 * some of the upper LP bits are used for RPN bits, meaning that
 * we need to fill in several entries in hpte_page_sizes[].
 *
 * In diagrammatic form, with r = RPN bits and z = page size bits:
 *        PTE LP     actual page size
 *    rrrr rrrz	     >=8KB
 *    rrrr rrzz	     >=16KB
 *    rrrr rzzz	     >=32KB
 *    rrrr zzzz	     >=64KB
 *    ...
 *
 * The zzzz bits are implementation-specific but are chosen so that
 * no encoding for a larger page size uses the same value in its
 * low-order N bits as the encoding for the 2^(12+N) byte page size
 * (if it exists).
 */
static void init_hpte_page_sizes(void)
{
	long int ap, bp;
	long int shift, penc;

	for (bp = 0; bp < MMU_PAGE_COUNT; ++bp) {
		if (!mmu_psize_defs[bp].shift)
			continue;	/* not a supported page size */
		for (ap = bp; ap < MMU_PAGE_COUNT; ++ap) {
			penc = mmu_psize_defs[bp].penc[ap];
			if (penc == -1)
				continue;
			shift = mmu_psize_defs[ap].shift - LP_SHIFT;
			if (shift <= 0)
				continue;	/* should never happen */
			/*
			 * For page sizes less than 1MB, this loop
			 * replicates the entry for all possible values
			 * of the rrrr bits.
			 */
			while (penc < (1 << LP_BITS)) {
				hpte_page_sizes[penc] = (ap << 4) | bp;
				penc += 1 << shift;
			}
		}
	}
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Paul Mackerras                133  100.00%        1      100.00%
Total                         133  100.00%        1      100.00%
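Editor's note: a standalone illustration of the replication described in the comment above, assuming an 8-bit LP field starting at bit 12 (a 256-entry hpte_page_sizes[] array). A 64K actual page leaves 4 low LP bits for the encoding, so its penc lands in every 16th slot:

#include <stdio.h>

/* Assumed layout for illustration; the real values come from the
 * mmu-hash headers */
#define LP_SHIFT 12
#define LP_BITS  8

int main(void)
{
	/* Hypothetical: 64K actual page size (shift 16) with penc 1 */
	int penc = 1;
	int shift = 16 - LP_SHIFT;	/* 4 low LP bits hold the encoding */
	int slots = 0;

	/* Replicate the entry for every value of the upper "rrrr" bits,
	 * as the inner loop of init_hpte_page_sizes() does */
	while (penc < (1 << LP_BITS)) {
		slots++;
		penc += 1 << shift;
	}
	printf("encoding replicated into %d of %d slots\n",
	       slots, 1 << LP_BITS);	/* 16 of 256 */
	return 0;
}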


static void __init htab_init_page_sizes(void)
{
	init_hpte_page_sizes();

	if (!debug_pagealloc_enabled()) {
		/*
		 * Pick a size for the linear mapping. Currently, we only
		 * support 16M, 1M and 4K which is the default
		 */
		if (mmu_psize_defs[MMU_PAGE_16M].shift)
			mmu_linear_psize = MMU_PAGE_16M;
		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
			mmu_linear_psize = MMU_PAGE_1M;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/*
	 * Pick a size for the ordinary pages. Default is 4K, we support
	 * 64K for user mappings and vmalloc if supported by the processor.
	 * We only use 64k for ioremap if the processor
	 * (and firmware) support cache-inhibited large pages.
	 * If not, we use 4k and set mmu_ci_restrictions so that
	 * hash_page knows to switch processes that use cache-inhibited
	 * mappings to 4k pages.
	 */
	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
		mmu_virtual_psize = MMU_PAGE_64K;
		mmu_vmalloc_psize = MMU_PAGE_64K;
		if (mmu_linear_psize == MMU_PAGE_4K)
			mmu_linear_psize = MMU_PAGE_64K;
		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
			/*
			 * When running on pSeries using 64k pages for ioremap
			 * would stop us accessing the HEA ethernet. So if we
			 * have the chance of ever seeing one, stay at 4k.
			 */
			if (!might_have_hea())
				mmu_io_psize = MMU_PAGE_64K;
		} else
			mmu_ci_restrictions = 1;
	}
#endif /* CONFIG_PPC_64K_PAGES */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/*
	 * We try to use 16M pages for vmemmap if that is supported
	 * and we have at least 1G of RAM at boot
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift &&
	    memblock_phys_mem_size() >= 0x40000000)
		mmu_vmemmap_psize = MMU_PAGE_16M;
	else if (mmu_psize_defs[MMU_PAGE_64K].shift)
		mmu_vmemmap_psize = MMU_PAGE_64K;
	else
		mmu_vmemmap_psize = MMU_PAGE_4K;
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
	       "virtual = %d, io = %d"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       ", vmemmap = %d"
#endif
	       "\n",
	       mmu_psize_defs[mmu_linear_psize].shift,
	       mmu_psize_defs[mmu_virtual_psize].shift,
	       mmu_psize_defs[mmu_io_psize].shift
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	       , mmu_psize_defs[mmu_vmemmap_psize].shift
#endif
	       );
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Benjamin Herrenschmidt        134   67.00%        4       28.57%
Paul Mackerras                 37   18.50%        3       21.43%
JoonSoo Kim                     9    4.50%        1        7.14%
Michael Ellerman                8    4.00%        1        7.14%
Anton Blanchard                 5    2.50%        1        7.14%
Alexander Graf                  3    1.50%        1        7.14%
Matt Evans                      2    1.00%        1        7.14%
Olof Johansson                  1    0.50%        1        7.14%
Yinghai Lu                      1    0.50%        1        7.14%
Total                         200  100.00%       14      100.00%


static int __init htab_dt_scan_pftsize(unsigned long node,
				       const char *uname, int depth,
				       void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
	if (prop != NULL) {
		/* pft_size[0] is the NUMA CEC cookie */
		ppc64_pft_size = be32_to_cpu(prop[1]);
		return 1;
	}
	return 0;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Benjamin Herrenschmidt         85   87.63%        2       33.33%
Michael Ellerman                5    5.15%        1       16.67%
Anton Blanchard                 5    5.15%        2       33.33%
Rob Herring                     2    2.06%        1       16.67%
Total                          97  100.00%        6      100.00%


unsigned htab_shift_for_mem_size(unsigned long mem_size)
{
	unsigned memshift = __ilog2(mem_size);
	unsigned pshift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned pteg_shift;

	/* round mem_size up to next power of 2 */
	if ((1UL << memshift) < mem_size)
		memshift += 1;

	/* aim for 2 pages / pteg */
	pteg_shift = memshift - (pshift + 1);

	/*
	 * 2^11 PTEGS of 128 bytes each, ie. 2^18 bytes is the minimum htab
	 * size permitted by the architecture.
	 */
	return max(pteg_shift + 7, 18U);
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
David Gibson                   67  100.00%        1      100.00%
Total                          67  100.00%        1      100.00%
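Editor's note, a worked example of the sizing rule above: with 4K pages (pshift = 12) and 8GB of RAM (memshift = 33), pteg_shift = 33 - 13 = 20 and the returned order is 27, i.e. a 128MB hash table, 1/64 of RAM. A minimal standalone restatement of the same arithmetic, assuming a 4K base page:

#include <stdio.h>

/* Standalone restatement of htab_shift_for_mem_size() for illustration;
 * assumes a 4K base page (pshift = 12) and mem_size != 0 */
static unsigned htab_shift(unsigned long mem_size)
{
	unsigned memshift = 63 - __builtin_clzl(mem_size);	/* ilog2 */
	unsigned pshift = 12;
	unsigned pteg_shift;

	if ((1UL << memshift) < mem_size)	/* round up to a power of 2 */
		memshift += 1;
	pteg_shift = memshift - (pshift + 1);	/* aim for 2 pages per PTEG */
	/* each PTEG is 128 (2^7) bytes; 2^18 is the architectural minimum */
	return (pteg_shift + 7) > 18 ? pteg_shift + 7 : 18;
}

int main(void)
{
	printf("%u\n", htab_shift(1UL << 33));	/* 8GB -> 27 (128MB HPT) */
	printf("%u\n", htab_shift(1UL << 30));	/* 1GB -> 24 (16MB HPT)  */
	printf("%u\n", htab_shift(1UL << 20));	/* 1MB -> 18 (minimum)   */
	return 0;
}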


static unsigned long __init htab_get_table_size(void)
{
	/*
	 * If the hash size isn't already provided by the platform, we try to
	 * retrieve it from the device-tree. If it's not there either, we
	 * calculate it now based on the total RAM size
	 */
	if (ppc64_pft_size == 0)
		of_scan_flat_dt(htab_dt_scan_pftsize, NULL);
	if (ppc64_pft_size)
		return 1UL << ppc64_pft_size;

	return 1UL << htab_shift_for_mem_size(memblock_phys_mem_size());
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Benjamin Herrenschmidt         33   78.57%        1       20.00%
David Gibson                    4    9.52%        1       20.00%
Anton Blanchard                 3    7.14%        1       20.00%
Andrew Morton                   1    2.38%        1       20.00%
Adrian Bunk                     1    2.38%        1       20.00%
Total                          42  100.00%        5      100.00%

#ifdef CONFIG_MEMORY_HOTPLUG
void resize_hpt_for_hotplug(unsigned long new_mem_size)
{
	unsigned target_hpt_shift;

	if (!mmu_hash_ops.resize_hpt)
		return;

	target_hpt_shift = htab_shift_for_mem_size(new_mem_size);

	/*
	 * To avoid lots of HPT resizes if memory size is fluctuating
	 * across a boundary, we deliberately have some hysteresis
	 * here: we immediately increase the HPT size if the target
	 * shift exceeds the current shift, but we won't attempt to
	 * reduce unless the target shift is at least 2 below the
	 * current shift
	 */
	if ((target_hpt_shift > ppc64_pft_size)
	    || (target_hpt_shift < (ppc64_pft_size - 1))) {
		int rc;

		rc = mmu_hash_ops.resize_hpt(target_hpt_shift);
		if (rc)
			printk(KERN_WARNING
			       "Unable to resize hash page table to target order %d: %d\n",
			       target_hpt_shift, rc);
	}
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
David Gibson                   74  100.00%        1      100.00%
Total                          74  100.00%        1      100.00%
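Editor's note: the band is asymmetric, so a target order equal to the current one, or just one below it, leaves the HPT alone. A hypothetical standalone model of just that decision:

#include <stdio.h>

/* Hypothetical model of the resize decision above, for illustration */
static int should_resize(unsigned cur_shift, unsigned target_shift)
{
	/* grow immediately; shrink only when at least 2 orders smaller */
	return target_shift > cur_shift || target_shift < cur_shift - 1;
}

int main(void)
{
	printf("%d\n", should_resize(26, 27));	/* 1: grow right away */
	printf("%d\n", should_resize(26, 25));	/* 0: inside hysteresis band */
	printf("%d\n", should_resize(26, 24));	/* 1: shrink */
	return 0;
}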


int hash__create_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_bolt_mapping(start, end, __pa(start),
				   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
				   mmu_kernel_ssize);

	if (rc < 0) {
		int rc2 = htab_remove_mapping(start, end, mmu_linear_psize,
					      mmu_kernel_ssize);
		BUG_ON(rc2 && (rc2 != -ENOENT));
	}
	return rc;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
David Gibson                   43   58.11%        2       25.00%
Mike Kravetz                   22   29.73%        1       12.50%
Michael Ellerman                3    4.05%        1       12.50%
Paul Mackerras                  2    2.70%        1       12.50%
Anton Blanchard                 2    2.70%        1       12.50%
Benjamin Herrenschmidt          1    1.35%        1       12.50%
Reza Arbab                      1    1.35%        1       12.50%
Total                          74  100.00%        8      100.00%


int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
	int rc = htab_remove_mapping(start, end, mmu_linear_psize,
				     mmu_kernel_ssize);
	WARN_ON(rc < 0);
	return rc;
}

Contributors

Person                     Tokens     Prop  Commits  Commit Prop
Badari Pulavarty               23   62.16%        2       50.00%
David Gibson                   13   35.14%        1       25.00%
Reza Arbab                      1    2.70%        1       25.00%
Total                          37  100.00%        4      100.00%

#endif /* CONFIG_MEMORY_HOTPLUG */
static void update_hid_for_hash(void)
{
	unsigned long hid0;
	unsigned long rb