cregit-Linux: how code gets into the kernel

Release 4.7 arch/s390/mm/vmem.c

Directory: arch/s390/mm
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);


/* One contiguous range of physical memory added to the 1:1 mapping. */
struct memory_segment {
	struct list_head list;	/* node on the global mem_segs list */
	unsigned long start;	/* physical start address of the range */
	unsigned long size;	/* size of the range in bytes */
};

static LIST_HEAD(mem_segs);


static void __ref *vmem_alloc_pages(unsigned int order) { if (slab_is_available()) return (void *)__get_free_pages(GFP_KERNEL, order); return alloc_bootmem_pages((1 << order) * PAGE_SIZE); }

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens41100.00%1100.00%
Total41100.00%1100.00%


static inline pud_t *vmem_pud_alloc(void) { pud_t *pud = NULL; pud = vmem_alloc_pages(2); if (!pud) return NULL; clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4); return pud; }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky3978.00%250.00%
heiko carstensheiko carstens1122.00%250.00%
Total50100.00%4100.00%


static inline pmd_t *vmem_pmd_alloc(void) { pmd_t *pmd = NULL; pmd = vmem_alloc_pages(2); if (!pmd) return NULL; clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4); return pmd; }

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens3672.00%266.67%
martin schwidefskymartin schwidefsky1428.00%133.33%
Total50100.00%3100.00%


static pte_t __ref *vmem_pte_alloc(void) { pte_t *pte; if (slab_is_available()) pte = (pte_t *) page_table_alloc(&init_mm); else pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t), PTRS_PER_PTE * sizeof(pte_t)); if (!pte) return NULL; clear_table((unsigned long *) pte, _PAGE_INVALID, PTRS_PER_PTE * sizeof(pte_t)); return pte; }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky4454.32%342.86%
heiko carstensheiko carstens2935.80%342.86%
philipp hachtmannphilipp hachtmann89.88%114.29%
Total81100.00%7100.00%

/*
 * Add a physical memory range to the 1:1 mapping.
 * Uses 2GB region-third entries (EDAT2) or 1MB segment entries (EDAT1)
 * where the hardware supports them and the block is fully contained in
 * [start, start + size); otherwise falls back to 4KB page table entries.
 * @ro selects a write-protected mapping. Returns 0 or -ENOMEM.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		/*
		 * 2GB large mapping: only if EDAT2 is available, the 2GB
		 * block is aligned and entirely inside the range, and
		 * debug_pagealloc is off. The "address" test excludes the
		 * block containing address zero, which stays mapped with
		 * smaller entries.
		 */
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		/* Same idea with a 1MB segment entry for EDAT1 machines. */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}
		/* 4KB fallback: one pte per page. */
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens21360.34%642.86%
martin schwidefskymartin schwidefsky9627.20%535.71%
gerald schaefergerald schaefer3610.20%214.29%
christian borntraegerchristian borntraeger82.27%17.14%
Total353100.00%14100.00%

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Large (pud/pmd) mappings are cleared entirely; 4KB mappings only get
 * their ptes set to _PAGE_INVALID — the page tables themselves are not
 * freed. Finishes with a TLB flush for the whole range.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			/* Nothing mapped here: skip the whole pgd range. */
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			/* 2GB large mapping: clear the whole pud entry. */
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			/* 1MB large mapping: clear the whole pmd entry. */
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens16081.22%444.44%
gerald schaefergerald schaefer199.64%222.22%
martin schwidefskymartin schwidefsky189.14%333.33%
Total197100.00%9100.00%

/*
 * Add a backed mem_map array to the virtual mem_map array.
 * Allocates backing pages for [start, end) in the vmemmap area,
 * preferring 1MB large frames on EDAT1 machines. Returns 0 or -ENOMEM.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/*
			 * Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would have also page tables since
			 * vmemmap_populate gets called for each section
			 * separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
				/* Round up to the next 1MB boundary. */
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			/* Already backed by a large frame: skip ahead. */
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens25176.76%545.45%
martin schwidefskymartin schwidefsky6720.49%436.36%
johannes weinerjohannes weiner72.14%19.09%
gerald schaefergerald schaefer20.61%19.09%
Total327100.00%11100.00%


void vmemmap_free(unsigned long start, unsigned long end) { }

Contributors

PersonTokensPropCommitsCommitProp
tang chentang chen866.67%150.00%
johannes weinerjohannes weiner433.33%150.00%
Total12100.00%2100.00%

/*
 * Add a memory segment to the segment list if it doesn't overlap with
 * an already present segment. Rejects ranges beyond VMEM_MAX_PHYS or
 * ranges whose end wraps around (-ERANGE), and overlaps (-ENOSPC).
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	unsigned long seg_end = seg->start + seg->size;
	struct memory_segment *cur;

	if (seg_end > VMEM_MAX_PHYS || seg_end < seg->start)
		return -ERANGE;

	list_for_each_entry(cur, &mem_segs, list) {
		/* Overlap unless seg lies fully before or fully after cur. */
		if (seg->start < cur->start + cur->size &&
		    seg_end > cur->start)
			return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens102100.00%3100.00%
Total102100.00%3100.00%

/* Unlink a memory segment from the segment list. */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens19100.00%1100.00%
Total19100.00%1100.00%


static void __remove_shared_memory(struct memory_segment *seg) { remove_memory_segment(seg); vmem_remove_range(seg->start, seg->size); }

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens27100.00%1100.00%
Total27100.00%1100.00%


int vmem_remove_mapping(unsigned long start, unsigned long size) { struct memory_segment *seg; int ret; mutex_lock(&vmem_mutex); ret = -ENOENT; list_for_each_entry(seg, &mem_segs, list) { if (seg->start == start && seg->size == size) break; } if (seg->start != start || seg->size != size) goto out; ret = 0; __remove_shared_memory(seg); kfree(seg); out: mutex_unlock(&vmem_mutex); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens99100.00%2100.00%
Total99100.00%2100.00%


int vmem_add_mapping(unsigned long start, unsigned long size) { struct memory_segment *seg; int ret; mutex_lock(&vmem_mutex); ret = -ENOMEM; seg = kzalloc(sizeof(*seg), GFP_KERNEL); if (!seg) goto out; seg->start = start; seg->size = size; ret = insert_memory_segment(seg); if (ret) goto out_free; ret = vmem_add_mem(start, size, 0); if (ret) goto out_remove; goto out; out_remove: __remove_shared_memory(seg); out_free: kfree(seg); out: mutex_unlock(&vmem_mutex); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens12398.40%266.67%
gerald schaefergerald schaefer21.60%133.33%
Total125100.00%3100.00%

/*
 * Map whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 * The read-only kernel range [_stext, _eshared) is mapped with ro=1;
 * each memblock region is split against it as needed.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	struct memblock_region *reg;
	phys_addr_t start, end;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = reg->base + reg->size;
		if (start >= ro_end || end <= ro_start)
			/* Region entirely outside the ro range. */
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			/* Region entirely inside the ro range. */
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			/* Region overlaps the end of the ro range. */
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			/* Region overlaps the start of the ro range. */
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			/* Region contains the complete ro range. */
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
gerald schaefergerald schaefer17683.02%125.00%
philipp hachtmannphilipp hachtmann209.43%125.00%
heiko carstensheiko carstens167.55%250.00%
Total212100.00%4100.00%

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	struct memblock_region *reg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		/* Boot-time regions cannot overlap: result ignored. */
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens7081.40%150.00%
philipp hachtmannphilipp hachtmann1618.60%150.00%
Total86100.00%2100.00%

core_initcall(vmem_convert_memory_chunk);

Overall Contributors

PersonTokensPropCommitsCommitProp
heiko carstensheiko carstens126467.88%1548.39%
martin schwidefskymartin schwidefsky27814.93%825.81%
gerald schaefergerald schaefer24112.94%26.45%
philipp hachtmannphilipp hachtmann482.58%13.23%
johannes weinerjohannes weiner110.59%13.23%
christian borntraegerchristian borntraeger90.48%26.45%
tang chentang chen80.43%13.23%
tejun heotejun heo30.16%13.23%
Total1862100.00%31100.00%
Directory: arch/s390/mm
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
{% endraw %}