cregit-Linux how code gets into the kernel

Release 4.8 mm/nommu.c

Directory: mm
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPU's that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"


void *high_memory;

EXPORT_SYMBOL(high_memory);

struct page *mem_map;

unsigned long max_mapnr;

EXPORT_SYMBOL(max_mapnr);

unsigned long highest_memmap_pfn;

int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;

int heap_stack_gap = 0;


atomic_long_t mmap_pages_allocated;


EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */

static struct kmem_cache *vm_region_jar;

struct rb_root nommu_region_tree = RB_ROOT;

DECLARE_RWSEM(nommu_region_sem);


const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */

unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 61 | 58.10% | 3 | 60.00%
alan cox | 42 | 40.00% | 1 | 20.00%
michael hennerich | 2 | 1.90% | 1 | 20.00%
Total | 105 | 100.00% | 5 | 100.00%
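
As a hedged illustration (a hypothetical helper, not part of nommu.c): because the nommu kobjsize() reports the full backing allocation, the value it returns for a kmalloc'd object can exceed the size that was requested.

/* Hypothetical example: compare the requested size with what kobjsize()
 * reports for a kmalloc'd object.  The pointer comes from kmalloc(), so
 * kobjsize() falls through to ksize(). */
static void example_kobjsize(void)
{
	char *p = kmalloc(100, GFP_KERNEL);

	if (!p)
		return;
	pr_info("requested 100 bytes, backing object is %u bytes\n",
		kobjsize(p));
	kfree(p);
}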


long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 82 | 37.44% | 1 | 7.14%
david howells | 80 | 36.53% | 3 | 21.43%
sonic zhang | 16 | 7.31% | 1 | 7.14%
hugh dickins | 14 | 6.39% | 3 | 21.43%
greg ungerer | 13 | 5.94% | 1 | 7.14%
michel lespinasse | 7 | 3.20% | 2 | 14.29%
nick piggin | 4 | 1.83% | 1 | 7.14%
peter zijlstra | 2 | 0.91% | 1 | 7.14%
kirill a. shutemov | 1 | 0.46% | 1 | 7.14%
Total | 219 | 100.00% | 14 | 100.00%

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end incrementing the page count of a
 *   slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(current, current->mm, start, nr_pages, flags,
				pages, vmas, NULL);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
nick piggin | 61 | 82.43% | 1 | 14.29%
michel lespinasse | 5 | 6.76% | 2 | 28.57%
dave hansen | 3 | 4.05% | 1 | 14.29%
hugh dickins | 2 | 2.70% | 1 | 14.29%
peter zijlstra | 2 | 2.70% | 1 | 14.29%
ingo molnar | 1 | 1.35% | 1 | 14.29%
Total | 74 | 100.00% | 7 | 100.00%

EXPORT_SYMBOL(get_user_pages);
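
A minimal usage sketch (hypothetical, not part of this file) of the 4.8-era get_user_pages() signature shown above, pinning a single user page for write access; on !MMU the "pin" amounts to taking a reference on the backing page.

/* Hypothetical example: pin one user page for writing.  mmap_sem is
 * taken for reading around the call, as get_user_pages() expects the
 * caller to hold it. */
static int example_pin_user_page(unsigned long uaddr, struct page **pagep)
{
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(uaddr & PAGE_MASK, 1, 1 /* write */,
			     0 /* force */, pagep, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		return ret;
	return ret == 1 ? 0 : -EFAULT;
}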
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			   int write, int force, struct page **pages,
			   int *locked)
{
	return get_user_pages(start, nr_pages, write, force, pages, NULL);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
andrea arcangeli | 43 | 95.56% | 1 | 50.00%
ingo molnar | 2 | 4.44% | 1 | 50.00%
Total | 45 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       int write, int force, struct page **pages,
			       unsigned int gup_flags)
{
	long ret;

	down_read(&mm->mmap_sem);
	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
			       NULL, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
andrea arcangeli | 78 | 95.12% | 2 | 66.67%
dave hansen | 4 | 4.88% | 1 | 33.33%
Total | 82 | 100.00% | 3 | 100.00%

EXPORT_SYMBOL(__get_user_pages_unlocked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     int write, int force, struct page **pages)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 write, force, pages, 0);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
andrea arcangeli | 43 | 91.49% | 1 | 33.33%
dave hansen | 3 | 6.38% | 1 | 33.33%
ingo molnar | 1 | 2.13% | 1 | 33.33%
Total | 47 | 100.00% | 3 | 100.00%

EXPORT_SYMBOL(get_user_pages_unlocked);

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	       unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 48 | 100.00% | 1 | 100.00%
Total | 48 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);
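
A hedged sketch (hypothetical helper, not in nommu.c) of how a caller might use follow_pfn() on a VM_IO/VM_PFNMAP mapping; on !MMU the PFN is derived directly from the virtual address.

/* Hypothetical example: translate a user virtual address inside a
 * VM_IO/VM_PFNMAP vma into a page frame number.  The caller is assumed
 * to hold mm->mmap_sem. */
static int example_lookup_pfn(struct mm_struct *mm, unsigned long addr,
			      unsigned long *pfn)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start)
		return -EFAULT;

	return follow_pfn(vma, addr, pfn);
}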
void vfree(const void *addr) { kfree(addr); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 14 | 93.33% | 1 | 50.00%
christoph lameter | 1 | 6.67% | 1 | 50.00%
Total | 15 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(vfree);
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 26 | 81.25% | 1 | 25.00%
nick piggin | 4 | 12.50% | 1 | 25.00%
al viro | 1 | 3.12% | 1 | 25.00%
robert p. j. day | 1 | 3.12% | 1 | 25.00%
Total | 32 | 100.00% | 4 | 100.00%

EXPORT_SYMBOL(__vmalloc);
void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 88 | 100.00% | 1 | 100.00%
Total | 88 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vmalloc_user);
struct page *vmalloc_to_page(const void *addr) { return virt_to_page(addr); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
andrew morton | 17 | 94.44% | 1 | 50.00%
christoph lameter | 1 | 5.56% | 1 | 50.00%
Total | 18 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(vmalloc_to_page);
unsigned long vmalloc_to_pfn(const void *addr) { return page_to_pfn(virt_to_page(addr)); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
arjan van de ven | 19 | 95.00% | 1 | 50.00%
christoph lameter | 1 | 5.00% | 1 | 50.00%
Total | 20 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(vmalloc_to_pfn);
long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 29 | 56.86% | 1 | 50.00%
chen gang | 22 | 43.14% | 1 | 50.00%
Total | 51 | 100.00% | 2 | 100.00%


long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 51 | 100.00% | 1 | 100.00%
Total | 51 | 100.00% | 1 | 100.00%

/*
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size) { return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vmalloc);

/*
 * vzalloc - allocate virtually contiguous memory with zero fill
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size) { return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
dave young | 24 | 100.00% | 1 | 100.00%
Total | 24 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node) { return vmalloc(size); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
andrew morton | 19 | 100.00% | 1 | 100.00%
Total | 19 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node) { return vzalloc(size); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
dave young | 19 | 100.00% | 1 | 100.00%
Total | 19 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size) { return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size) { return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 20 | 100.00% | 1 | 100.00%
Total | 20 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 17 | 100.00% | 2 | 100.00%
Total | 17 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(vmalloc_32_user);
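
A hedged usage sketch (hypothetical, not part of this file) for the vmalloc() family above: on !MMU kernels these allocators are backed by kmalloc(), so the memory is physically contiguous, but callers use the same API as on MMU systems.

/* Hypothetical example: allocate, use and free a zeroed buffer with the
 * nommu vmalloc family.  Under the hood this is a kmalloc(). */
static int example_vmalloc_buffer(unsigned long len)
{
	void *buf = vzalloc(len);

	if (!buf)
		return -ENOMEM;

	/* ... fill and use buf ... */

	vfree(buf);
	return 0;
}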
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { BUG(); return NULL; }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 22 | 75.86% | 1 | 50.00%
greg ungerer | 7 | 24.14% | 1 | 50.00%
Total | 29 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(vmap);
void vunmap(const void *addr) { BUG(); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
alan cox | 12 | 92.31% | 1 | 50.00%
christoph lameter | 1 | 7.69% | 1 | 50.00%
Total | 13 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL(vunmap);
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) { BUG(); return NULL; }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 28 | 100.00% | 1 | 100.00%
Total | 28 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vm_map_ram);
void vm_unmap_ram(const void *mem, unsigned int count) { BUG(); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 17 | 100.00% | 1 | 100.00%
Total | 17 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vm_unmap_ram);
void vm_unmap_aliases(void) { }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 6 | 100.00% | 1 | 100.00%
Total | 6 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void) { }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
christoph hellwig | 4 | 57.14% | 1 | 50.00%
gideon israel dsouza | 3 | 42.86% | 1 | 50.00%
Total | 7 | 100.00% | 2 | 100.00%

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes) { BUG(); return NULL; }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 16 | 76.19% | 1 | 50.00%
david vrabel | 5 | 23.81% | 1 | 50.00%
Total | 21 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL_GPL(alloc_vm_area);
void free_vm_area(struct vm_struct *area) { BUG(); }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 13 | 100.00% | 1 | 100.00%
Total | 13 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(free_vm_area);
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) { return -EINVAL; }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
paul mundt | 23 | 100.00% | 1 | 100.00%
Total | 23 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(vm_insert_page);

/*
 * sys_brk() for the most part doesn't need the global kernel
 * lock, except when an application is doing something nasty
 * like trying to un-brk an area that has already been mapped
 * to a regular file. in this case, the unmapping will need
 * to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
kosaki motohiro | 18 | 46.15% | 1 | 20.00%
david howells | 17 | 43.59% | 2 | 40.00%
vladimir davydov | 2 | 5.13% | 1 | 20.00%
tejun heo | 2 | 5.13% | 1 | 20.00%
Total | 39 | 100.00% | 5 | 100.00%

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 96 | 65.31% | 3 | 50.00%
alan cox | 30 | 20.41% | 1 | 16.67%
paul mundt | 19 | 12.93% | 1 | 16.67%
greg ungerer | 2 | 1.36% | 1 | 16.67%
Total | 147 | 100.00% | 6 | 100.00%

#else
static void validate_nommu_regions(void) { }

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 7 | 100.00% | 2 | 100.00%
Total | 7 | 100.00% | 2 | 100.00%

#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 142 | 100.00% | 3 | 100.00%
Total | 142 | 100.00% | 3 | 100.00%

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 30 | 83.33% | 1 | 50.00%
greg ungerer | 6 | 16.67% | 1 | 50.00%
Total | 36 | 100.00% | 2 | 100.00%

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 48 | 100.00% | 3 | 100.00%
Total | 48 | 100.00% | 3 | 100.00%

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 102 | 98.08% | 3 | 75.00%
paul mundt | 2 | 1.92% | 1 | 25.00%
Total | 104 | 100.00% | 4 | 100.00%

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;

	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
bernd schmidt | 66 | 100.00% | 1 | 100.00%
Total | 66 | 100.00% | 1 | 100.00%

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 286 | 81.02% | 5 | 50.00%
namhyung kim | 47 | 13.31% | 1 | 10.00%
bernd schmidt | 9 | 2.55% | 1 | 10.00%
linus torvalds | 8 | 2.27% | 1 | 10.00%
davidlohr bueso | 2 | 0.57% | 1 | 10.00%
michel lespinasse | 1 | 0.28% | 1 | 10.00%
Total | 353 | 100.00% | 10 | 100.00%

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 110 | 59.78% | 2 | 28.57%
davidlohr bueso | 40 | 21.74% | 2 | 28.57%
namhyung kim | 26 | 14.13% | 1 | 14.29%
bernd schmidt | 7 | 3.80% | 1 | 14.29%
michel lespinasse | 1 | 0.54% | 1 | 14.29%
Total | 184 | 100.00% | 7 | 100.00%

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

Contributors

Person | Tokens | Prop | Commits | Commit Prop
david howells | 64 | 100.00% | 1 | 100.00%
Total | 64 | 100.00% | 1 | 100.00%

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *