cregit-Linux: how code gets into the kernel

Release 4.8, mm/vmalloc.c (directory: mm)
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

#include "internal.h"


struct vfree_deferred {
        struct llist_head list;
        struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);


static void free_work(struct work_struct *w)
{
        struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
        struct llist_node *llnode = llist_del_all(&p->list);
        while (llnode) {
                void *p = llnode;
                llnode = llist_next(llnode);
                __vunmap(p, 1);
        }
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
al viro | 65 | 100.00% | 1 | 100.00%
Total | 65 | 100.00% | 1 | 100.00%
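
The per-CPU vfree_deferred list and free_work() above exist so that vfree() can be called from contexts that must not sleep: the address is queued on a lock-free llist and the real freeing runs from a workqueue. As an illustration only, here is a minimal sketch of the producer side, approximating what vfree() does later in this file (it is not the exported implementation itself):

static void vfree_deferred_sketch(const void *addr)
{
        /* Per-CPU queue defined above; its wq is initialised elsewhere with free_work(). */
        struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);

        /* Reuse the first word of the object being freed as an llist node. */
        if (llist_add((struct llist_node *)addr, &p->list))
                schedule_work(&p->wq);          /* list was empty: kick the worker */
}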

/*** Page table manipulation functions ***/
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
        pte_t *pte;

        pte = pte_offset_kernel(pmd, addr);
        do {
                pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 43 | 56.58% | 6 | 60.00%
hugh dickins | 24 | 31.58% | 1 | 10.00%
david s. miller | 4 | 5.26% | 1 | 10.00%
christoph hellwig | 4 | 5.26% | 1 | 10.00%
ingo molnar | 1 | 1.32% | 1 | 10.00%
Total | 76 | 100.00% | 10 | 100.00%


static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_clear_huge(pmd))
                        continue;
                if (pmd_none_or_clear_bad(pmd))
                        continue;
                vunmap_pte_range(pmd, addr, next);
        } while (pmd++, addr = next, addr != end);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 36 | 41.86% | 2 | 22.22%
pre-git | 35 | 40.70% | 4 | 44.44%
toshi kani | 8 | 9.30% | 1 | 11.11%
christoph hellwig | 4 | 4.65% | 1 | 11.11%
andi kleen | 3 | 3.49% | 1 | 11.11%
Total | 86 | 100.00% | 9 | 100.00%


static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_clear_huge(pud))
                        continue;
                if (pud_none_or_clear_bad(pud))
                        continue;
                vunmap_pmd_range(pud, addr, next);
        } while (pud++, addr = next, addr != end);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
andi kleen | 45 | 52.33% | 1 | 25.00%
hugh dickins | 33 | 38.37% | 2 | 50.00%
toshi kani | 8 | 9.30% | 1 | 25.00%
Total | 86 | 100.00% | 4 | 100.00%


static void vunmap_page_range(unsigned long addr, unsigned long end)
{
        pgd_t *pgd;
        unsigned long next;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                vunmap_pud_range(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 48 | 60.76% | 2 | 22.22%
pre-git | 21 | 26.58% | 4 | 44.44%
benjamin herrenschmidt | 6 | 7.59% | 1 | 11.11%
nick piggin | 3 | 3.80% | 1 | 11.11%
andrew morton | 1 | 1.27% | 1 | 11.11%
Total | 79 | 100.00% | 9 | 100.00%


static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
        pte_t *pte;

        /*
         * nr is a running index into the array which helps higher level
         * callers keep track of where we're up to.
         */
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                struct page *page = pages[*nr];

                if (WARN_ON(!pte_none(*pte)))
                        return -EBUSY;
                if (WARN_ON(!page))
                        return -ENOMEM;
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*nr)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
pre-git | 48 | 35.29% | 4 | 40.00%
hugh dickins | 45 | 33.09% | 1 | 10.00%
nick piggin | 20 | 14.71% | 1 | 10.00%
andrew morton | 17 | 12.50% | 1 | 10.00%
christoph hellwig | 4 | 2.94% | 1 | 10.00%
oleg nesterov | 1 | 0.74% | 1 | 10.00%
ingo molnar | 1 | 0.74% | 1 | 10.00%
Total | 136 | 100.00% | 10 | 100.00%


static int vmap_pmd_range(pud_t *pud, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
        pmd_t *pmd;
        unsigned long next;

        pmd = pmd_alloc(&init_mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
        do {
                next = pmd_addr_end(addr, end);
                if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
                        return -ENOMEM;
        } while (pmd++, addr = next, addr != end);
        return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
andi kleen | 68 | 61.82% | 1 | 25.00%
hugh dickins | 34 | 30.91% | 1 | 25.00%
nick piggin | 6 | 5.45% | 1 | 25.00%
andrew morton | 2 | 1.82% | 1 | 25.00%
Total | 110 | 100.00% | 4 | 100.00%


static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
        pud_t *pud;
        unsigned long next;

        pud = pud_alloc(&init_mm, pgd, addr);
        if (!pud)
                return -ENOMEM;
        do {
                next = pud_addr_end(addr, end);
                if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
                        return -ENOMEM;
        } while (pud++, addr = next, addr != end);
        return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 67 | 60.91% | 1 | 14.29%
andi kleen | 16 | 14.55% | 1 | 14.29%
christoph hellwig | 8 | 7.27% | 1 | 14.29%
pre-git | 7 | 6.36% | 1 | 14.29%
nick piggin | 6 | 5.45% | 1 | 14.29%
andrew morton | 6 | 5.45% | 2 | 28.57%
Total | 110 | 100.00% | 7 | 100.00%

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
                                   pgprot_t prot, struct page **pages)
{
        pgd_t *pgd;
        unsigned long next;
        unsigned long addr = start;
        int err = 0;
        int nr = 0;

        BUG_ON(addr >= end);
        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
                if (err)
                        return err;
        } while (pgd++, addr = next, addr != end);

        return nr;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 40 | 34.78% | 1 | 8.33%
christoph hellwig | 22 | 19.13% | 1 | 8.33%
nick piggin | 19 | 16.52% | 1 | 8.33%
pre-git | 8 | 6.96% | 3 | 25.00%
adam lackorzynski | 7 | 6.09% | 1 | 8.33%
andi kleen | 6 | 5.22% | 1 | 8.33%
andrew morton | 6 | 5.22% | 1 | 8.33%
marcus alanen | 3 | 2.61% | 1 | 8.33%
figo zhang | 3 | 2.61% | 1 | 8.33%
tejun heo | 1 | 0.87% | 1 | 8.33%
Total | 115 | 100.00% | 12 | 100.00%


static int vmap_page_range(unsigned long start, unsigned long end,
                           pgprot_t prot, struct page **pages)
{
        int ret;

        ret = vmap_page_range_noflush(start, end, prot, pages);
        flush_cache_vmap(start, end);
        return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
tejun heo | 49 | 100.00% | 1 | 100.00%
Total | 49 | 100.00% | 1 | 100.00%
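
These page-table builders are not called directly by drivers; they sit under higher-level mappers such as map_vm_area() and vmap(), which appear later in this file. As an illustration only, a hypothetical sketch of mapping two already-allocated pages contiguously in kernel virtual space via vmap(), which ultimately reaches vmap_page_range():

static void *map_two_pages_sketch(struct page *pages[2])
{
        /* VM_MAP marks a vmap()ed area; PAGE_KERNEL gives a normal RW kernel mapping. */
        void *addr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);

        /* May be NULL on failure.  A later vunmap(addr) tears down the ptes
         * but does not free the pages themselves. */
        return addr;
}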


int is_vmalloc_or_module_addr(const void *x)
{
        /*
         * ARM, x86-64 and sparc64 put modules in a special place,
         * and fall back on vmalloc() if that fails. Others
         * just put it in the vmalloc space.
         */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
        unsigned long addr = (unsigned long)x;
        if (addr >= MODULES_VADDR && addr < MODULES_END)
                return 1;
#endif
        return is_vmalloc_addr(x);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 52 | 98.11% | 1 | 50.00%
russell king | 1 | 1.89% | 1 | 50.00%
Total | 53 | 100.00% | 2 | 100.00%
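
This check matters because virt_to_page() only works for the linear mapping; vmalloc and module addresses need the page-table walk done by vmalloc_to_page() below. A hedged, hypothetical sketch of that distinction:

static struct page *kernel_ptr_to_page_sketch(const void *p)
{
        if (is_vmalloc_or_module_addr(p))
                return vmalloc_to_page(p);      /* walk the kernel page tables */
        return virt_to_page(p);                 /* linear map: simple arithmetic */
}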

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
        unsigned long addr = (unsigned long) vmalloc_addr;
        struct page *page = NULL;
        pgd_t *pgd = pgd_offset_k(addr);

        /*
         * XXX we might need to change this if we add VIRTUAL_BUG_ON for
         * architectures that do not vmalloc module space
         */
        VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd)) {
                                pte_t *ptep, pte;

                                ptep = pte_offset_map(pmd, addr);
                                pte = *ptep;
                                if (pte_present(pte))
                                        page = pte_page(pte);
                                pte_unmap(ptep);
                        }
                }
        }
        return page;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 109 | 75.17% | 2 | 28.57%
nick piggin | 14 | 9.66% | 1 | 14.29%
vassili karpov | 12 | 8.28% | 1 | 14.29%
jiri slaby | 8 | 5.52% | 1 | 14.29%
ingo molnar | 1 | 0.69% | 1 | 14.29%
linus torvalds | 1 | 0.69% | 1 | 14.29%
Total | 145 | 100.00% | 7 | 100.00%

EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
        return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 15 | 75.00% | 2 | 66.67%
vassili karpov | 5 | 25.00% | 1 | 33.33%
Total | 20 | 100.00% | 3 | 100.00%
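
vmalloc_to_page() and vmalloc_to_pfn() are exported precisely because drivers that vmalloc() a buffer cannot use virt_to_page()/virt_to_phys() on it. As an illustration only (a hypothetical mmap helper, error handling trimmed), mapping the first page of a vmalloc() buffer into userspace:

static int mmap_first_page_sketch(struct vm_area_struct *vma, void *vbuf)
{
        unsigned long pfn = vmalloc_to_pfn(vbuf);

        /* Many drivers prefer vm_insert_page(vma, addr, vmalloc_to_page(vbuf));
         * remap_pfn_range() is shown here purely to exercise vmalloc_to_pfn(). */
        return remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
                               vma->vm_page_prot);
}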

EXPORT_SYMBOL(vmalloc_to_pfn);


/*** Global kva allocator ***/

#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
        struct rb_node *n = vmap_area_root.rb_node;

        while (n) {
                struct vmap_area *va;

                va = rb_entry(n, struct vmap_area, rb_node);
                if (addr < va->va_start)
                        n = n->rb_left;
                else if (addr >= va->va_end)
                        n = n->rb_right;
                else
                        return va;
        }

        return NULL;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 54 | 67.50% | 1 | 20.00%
james bottomley | 20 | 25.00% | 1 | 20.00%
hirofumi ogawa | 3 | 3.75% | 1 | 20.00%
daisuke hatayama | 2 | 2.50% | 1 | 20.00%
christoph hellwig | 1 | 1.25% | 1 | 20.00%
Total | 80 | 100.00% | 5 | 100.00%


static void __insert_vmap_area(struct vmap_area *va)
{
        struct rb_node **p = &vmap_area_root.rb_node;
        struct rb_node *parent = NULL;
        struct rb_node *tmp;

        while (*p) {
                struct vmap_area *tmp_va;

                parent = *p;
                tmp_va = rb_entry(parent, struct vmap_area, rb_node);
                if (va->va_start < tmp_va->va_end)
                        p = &(*p)->rb_left;
                else if (va->va_end > tmp_va->va_start)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&va->rb_node, parent, p);
        rb_insert_color(&va->rb_node, &vmap_area_root);

        /* address-sort this list */
        tmp = rb_prev(&va->rb_node);
        if (tmp) {
                struct vmap_area *prev;
                prev = rb_entry(tmp, struct vmap_area, rb_node);
                list_add_rcu(&va->list, &prev->list);
        } else
                list_add_rcu(&va->list, &vmap_area_list);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 169 | 88.95% | 1 | 20.00%
pre-git | 15 | 7.89% | 1 | 20.00%
namhyung kim | 4 | 2.11% | 1 | 20.00%
joonsoo kim | 1 | 0.53% | 1 | 20.00%
christoph lameter | 1 | 0.53% | 1 | 20.00%
Total | 190 | 100.00% | 5 | 100.00%

static void purge_vmap_area_lazy(void);

static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
                                unsigned long align,
                                unsigned long vstart, unsigned long vend,
                                int node, gfp_t gfp_mask)
{
        struct vmap_area *va;
        struct rb_node *n;
        unsigned long addr;
        int purged = 0;
        struct vmap_area *first;

        BUG_ON(!size);
        BUG_ON(offset_in_page(size));
        BUG_ON(!is_power_of_2(align));

        might_sleep_if(gfpflags_allow_blocking(gfp_mask));

        va = kmalloc_node(sizeof(struct vmap_area),
                        gfp_mask & GFP_RECLAIM_MASK, node);
        if (unlikely(!va))
                return ERR_PTR(-ENOMEM);

        /*
         * Only scan the relevant parts containing pointers to other objects
         * to avoid false negatives.
         */
        kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
        spin_lock(&vmap_area_lock);
        /*
         * Invalidate cache if we have more permissive parameters.
         * cached_hole_size notes the largest hole noticed _below_
         * the vmap_area cached in free_vmap_cache: if size fits
         * into that hole, we want to scan from vstart to reuse
         * the hole instead of allocating above free_vmap_cache.
         * Note that __free_vmap_area may update free_vmap_cache
         * without updating cached_hole_size or cached_align.
         */
        if (!free_vmap_cache ||
                        size < cached_hole_size ||
                        vstart < cached_vstart ||
                        align < cached_align) {
nocache:
                cached_hole_size = 0;
                free_vmap_cache = NULL;
        }
        /* record if we encounter less permissive parameters */
        cached_vstart = vstart;
        cached_align = align;

        /* find starting point for our search */
        if (free_vmap_cache) {
                first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
                addr = ALIGN(first->va_end, align);
                if (addr < vstart)
                        goto nocache;
                if (addr + size < addr)
                        goto overflow;

        } else {
                addr = ALIGN(vstart, align);
                if (addr + size < addr)
                        goto overflow;

                n = vmap_area_root.rb_node;
                first = NULL;

                while (n) {
                        struct vmap_area *tmp;
                        tmp = rb_entry(n, struct vmap_area, rb_node);
                        if (tmp->va_end >= addr) {
                                first = tmp;
                                if (tmp->va_start <= addr)
                                        break;
                                n = n->rb_left;
                        } else
                                n = n->rb_right;
                }

                if (!first)
                        goto found;
        }

        /* from the starting point, walk areas until a suitable hole is found */
        while (addr + size > first->va_start && addr + size <= vend) {
                if (addr + cached_hole_size < first->va_start)
                        cached_hole_size = first->va_start - addr;
                addr = ALIGN(first->va_end, align);
                if (addr + size < addr)
                        goto overflow;

                if (list_is_last(&first->list, &vmap_area_list))
                        goto found;

                first = list_next_entry(first, list);
        }

found:
        if (addr + size > vend)
                goto overflow;

        va->va_start = addr;
        va->va_end = addr + size;
        va->flags = 0;
        __insert_vmap_area(va);
        free_vmap_cache = &va->rb_node;
        spin_unlock(&vmap_area_lock);

        BUG_ON(!IS_ALIGNED(va->va_start, align));
        BUG_ON(va->va_start < vstart);
        BUG_ON(va->va_end > vend);

        return va;

overflow:
        spin_unlock(&vmap_area_lock);
        if (!purged) {
                purge_vmap_area_lazy();
                purged = 1;
                goto retry;
        }

        if (gfpflags_allow_blocking(gfp_mask)) {
                unsigned long freed = 0;
                blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
                if (freed > 0) {
                        purged = 0;
                        goto retry;
                }
        }

        if (printk_ratelimit())
                pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
                        size);
        kfree(va);
        return ERR_PTR(-EBUSY);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 485 | 82.48% | 3 | 20.00%
chris wilson | 49 | 8.33% | 1 | 6.67%
catalin marinas | 15 | 2.55% | 1 | 6.67%
hong zhi guo | 12 | 2.04% | 1 | 6.67%
glauber costa | 10 | 1.70% | 2 | 13.33%
wang xiaoqiang | 5 | 0.85% | 1 | 6.67%
ralph wuerthner | 5 | 0.85% | 1 | 6.67%
alexander kuleshov | 3 | 0.51% | 1 | 6.67%
geliang tang | 1 | 0.17% | 1 | 6.67%
johannes weiner | 1 | 0.17% | 1 | 6.67%
pintu kumar | 1 | 0.17% | 1 | 6.67%
joe perches | 1 | 0.17% | 1 | 6.67%
Total | 588 | 100.00% | 15 | 100.00%


int register_vmap_purge_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&vmap_notify_list, nb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
chris wilson | 19 | 100.00% | 1 | 100.00%
Total | 19 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
chris wilson | 19 | 100.00% | 1 | 100.00%
Total | 19 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
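
The purge notifiers give subsystems that cache their own vmap ranges a chance to release address space once alloc_vmap_area() has already purged the lazy list and is about to fail; the void * argument is the &freed counter passed to blocking_notifier_call_chain() above, and the allocator retries if it comes back non-zero. As an illustration only, with my_driver_drop_vmap_cache() a hypothetical helper returning how much it released:

static int my_vmap_purge_cb(struct notifier_block *nb,
                            unsigned long event, void *ptr)
{
        unsigned long *freed = ptr;

        *freed += my_driver_drop_vmap_cache();  /* hypothetical; any non-zero total triggers a retry */
        return NOTIFY_DONE;
}

static struct notifier_block my_vmap_purge_nb = {
        .notifier_call = my_vmap_purge_cb,
};

/* at init:  register_vmap_purge_notifier(&my_vmap_purge_nb);   */
/* at exit:  unregister_vmap_purge_notifier(&my_vmap_purge_nb); */
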
static void __free_vmap_area(struct vmap_area *va)
{
        BUG_ON(RB_EMPTY_NODE(&va->rb_node));

        if (free_vmap_cache) {
                if (va->va_end < cached_vstart) {
                        free_vmap_cache = NULL;
                } else {
                        struct vmap_area *cache;
                        cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
                        if (va->va_start <= cache->va_start) {
                                free_vmap_cache = rb_prev(&va->rb_node);
                                /*
                                 * We don't try to update cached_hole_size or
                                 * cached_align, but it won't go very wrong.
                                 */
                        }
                }
        }
        rb_erase(&va->rb_node, &vmap_area_root);
        RB_CLEAR_NODE(&va->rb_node);
        list_del_rcu(&va->list);

        /*
         * Track the highest possible candidate for pcpu area
         * allocation.  Areas outside of vmalloc area can be returned
         * here too, consider only end addresses which fall inside
         * vmalloc area proper.
         */
        if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
                vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

        kfree_rcu(va, rcu_head);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 117 | 80.69% | 2 | 50.00%
tejun heo | 26 | 17.93% | 1 | 25.00%
lai jiangshan | 2 | 1.38% | 1 | 25.00%
Total | 145 | 100.00% | 4 | 100.00%

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
        spin_lock(&vmap_area_lock);
        __free_vmap_area(va);
        spin_unlock(&vmap_area_lock);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 28 | 100.00% | 1 | 100.00%
Total | 28 | 100.00% | 1 | 100.00%

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
        vunmap_page_range(va->va_start, va->va_end);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 22 | 100.00% | 1 | 100.00%
Total | 22 | 100.00% | 1 | 100.00%


static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
        /*
         * Unmap page tables and force a TLB flush immediately if pagealloc
         * debugging is enabled.  This catches use after free bugs similarly to
         * those in linear kernel virtual address space after a page has been
         * freed.
         *
         * All the lazy freeing logic is still retained, in order to minimise
         * intrusiveness of this debugging feature.
         *
         * This is going to be *slow* (linear kernel virtual address debugging
         * doesn't do a broadcast TLB flush so it is a lot faster).
         */
        if (debug_pagealloc_enabled()) {
                vunmap_page_range(start, end);
                flush_tlb_kernel_range(start, end);
        }
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 28 | 77.78% | 1 | 50.00%
joonsoo kim | 8 | 22.22% | 1 | 50.00%
Total | 36 | 100.00% | 2 | 100.00%

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
        unsigned int log;

        log = fls(num_online_cpus());

        return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 34 | 100.00% | 1 | 100.00%
Total | 34 | 100.00% | 1 | 100.00%
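
As a worked example (assuming 4 KiB pages): on an 8-CPU machine fls(8) = 4, so lazy_max_pages() returns 4 * (32 MiB / 4 KiB) = 32768 pages, i.e. roughly 128 MiB of lazily freed kernel virtual address space may accumulate before free_vmap_area_noflush() below triggers a purge and global TLB flush.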

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
        atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
cliff wickman | 18 | 100.00% | 1 | 100.00%
Total | 18 | 100.00% | 1 | 100.00%

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
                                        int sync, int force_flush)
{
        static DEFINE_SPINLOCK(purge_lock);
        struct llist_node *valist;
        struct vmap_area *va;
        struct vmap_area *n_va;
        int nr = 0;

        /*
         * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
         * should not expect such behaviour. This just simplifies locking for
         * the case that isn't actually used at the moment anyway.
         */
        if (!sync && !force_flush) {
                if (!spin_trylock(&purge_lock))
                        return;
        } else
                spin_lock(&purge_lock);

        if (sync)
                purge_fragmented_blocks_allcpus();

        valist = llist_del_all(&vmap_purge_list);
        llist_for_each_entry(va, valist, purge_list) {
                if (va->va_start < *start)
                        *start = va->va_start;
                if (va->va_end > *end)
                        *end = va->va_end;
                nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
        }

        if (nr)
                atomic_sub(nr, &vmap_lazy_nr);

        if (nr || force_flush)
                flush_tlb_kernel_range(*start, *end);

        if (nr) {
                spin_lock(&vmap_area_lock);
                llist_for_each_entry_safe(va, n_va, valist, purge_list)
                        __free_vmap_area(va);
                spin_unlock(&vmap_area_lock);
        }
        spin_unlock(&purge_lock);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 186 | 87.32% | 2 | 40.00%
chris wilson | 16 | 7.51% | 1 | 20.00%
vegard nossum | 7 | 3.29% | 1 | 20.00%
andrew morton | 4 | 1.88% | 1 | 20.00%
Total | 213 | 100.00% | 5 | 100.00%

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
        unsigned long start = ULONG_MAX, end = 0;

        __purge_vmap_area_lazy(&start, &end, 0, 0);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 31 | 100.00% | 1 | 100.00%
Total | 31 | 100.00% | 1 | 100.00%

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
        unsigned long start = ULONG_MAX, end = 0;

        __purge_vmap_area_lazy(&start, &end, 1, 0);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 31 | 100.00% | 2 | 100.00%
Total | 31 | 100.00% | 2 | 100.00%

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
        int nr_lazy;

        nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
                                    &vmap_lazy_nr);

        /* After this point, we may free va at any time */
        llist_add(&va->purge_list, &vmap_purge_list);

        if (unlikely(nr_lazy > lazy_max_pages()))
                try_purge_vmap_area_lazy();
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 39 | 66.10% | 2 | 50.00%
chris wilson | 19 | 32.20% | 1 | 25.00%
jeremy fitzhardinge | 1 | 1.69% | 1 | 25.00%
Total | 59 | 100.00% | 4 | 100.00%

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
        unmap_vmap_area(va);
        free_vmap_area_noflush(va);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
jeremy fitzhardinge | 21 | 100.00% | 1 | 100.00%
Total | 21 | 100.00% | 1 | 100.00%

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
        flush_cache_vunmap(va->va_start, va->va_end);
        free_unmap_vmap_area_noflush(va);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 27 | 100.00% | 1 | 100.00%
Total | 27 | 100.00% | 1 | 100.00%


static struct vmap_area *find_vmap_area(unsigned long addr)
{
        struct vmap_area *va;

        spin_lock(&vmap_area_lock);
        va = __find_vmap_area(addr);
        spin_unlock(&vmap_area_lock);

        return va;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
nick piggin | 39 | 100.00% | 1 | 100.00%
Total | 39 | 100.00% | 1 | 100.00%


static void free_unmap_vmap_area_addr(