Release 4.12: include/linux/vmalloc.h
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
struct notifier_block; /* in notifier.h */
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};
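As an illustration of how these fields fit together, here is a minimal sketch (not from the kernel tree; the demo_inspect name is hypothetical) that looks up the vm_struct behind a vmalloc'ed address via find_vm_area(), declared further below:

/* Hypothetical helper: inspect the vm_struct backing an address. */
static void demo_inspect(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	/* VM_ALLOC means the area came from vmalloc() and friends,
	 * so ->pages and ->nr_pages describe its backing pages. */
	if (area && (area->flags & VM_ALLOC))
		pr_info("vmalloc area at %p: %u pages, %lu bytes\n",
			area->addr, area->nr_pages, area->size);
}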
struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct llist_node purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;
	struct rcu_head rcu_head;
};
/*
 * High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
			int node, pgprot_t prot);
extern void vm_unmap_aliases(void);
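As a usage sketch (hypothetical, not from the tree): vm_map_ram() provides a cheap, transient virtual mapping over an existing page array, and vm_unmap_ram() must be passed the same page count:

/* Hypothetical: transiently map an existing page array. */
static void *demo_map_transient(struct page **pages, unsigned int count)
{
	/* NUMA_NO_NODE: no node preference; PAGE_KERNEL: normal
	 * cacheable kernel mapping */
	return vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);
}

static void demo_unmap_transient(void *mem, unsigned int count)
{
	/* count must match what was passed to vm_map_ram() */
	vm_unmap_ram(mem, count);
}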
#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else
static inline void vmalloc_init(void)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 8 | 100.00% | 1 | 100.00% |
Total | 8 | 100.00% | 1 | 100.00% |
#endif
extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
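A minimal usage sketch (hypothetical names) of the most common pair, vzalloc()/vfree(), for a large allocation that is virtually but not physically contiguous:

/* Hypothetical: allocate a zeroed table too big for kmalloc(). */
static u64 *demo_alloc_table(unsigned long entries)
{
	u64 *table = vzalloc(entries * sizeof(*table));

	if (!table)
		return NULL;	/* caller reports -ENOMEM */
	/* ... memory is virtually contiguous, page-granular ... */
	return table;		/* release later with vfree(table) */
}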
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
#ifndef CONFIG_MMU
extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
static inline void *__vmalloc_node_flags_caller(unsigned long size, int node,
						gfp_t flags, void *caller)
{
	return __vmalloc_node_flags(size, node, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 32 | 100.00% | 2 | 100.00% |
Total | 32 | 100.00% | 2 | 100.00% |
#else
extern void *__vmalloc_node_flags_caller(unsigned long size,
					 int node, gfp_t flags, void *caller);
#endif
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
extern void *vmap(struct page **pages, unsigned int count,
		  unsigned long flags, pgprot_t prot);
extern void vunmap(const void *addr);
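Unlike vm_map_ram(), vmap() creates a long-lived mapping tracked by its own vm_struct. A hypothetical sketch:

/* Hypothetical: build a persistent mapping over caller-owned pages. */
static void *demo_vmap(struct page **pages, unsigned int count)
{
	/* VM_MAP tags the area as a vmap() mapping */
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

/* Tear-down: vunmap(addr) removes the mapping but does not free
 * the underlying pages; the caller still owns them. */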
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			       unsigned long pgoff);
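A typical caller is a driver's mmap handler exposing a buffer allocated with vmalloc_user(), so the area carries VM_USERMAP. A hypothetical sketch; demo_buf is an assumed pre-existing buffer:

/* Hypothetical mmap handler for a vmalloc_user() buffer. */
static void *demo_buf;	/* assumed: demo_buf = vmalloc_user(size); */

static int demo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* fails with -EINVAL unless the buffer is VM_USERMAP and
	 * the requested range fits inside it */
	return remap_vmalloc_range(vma, demo_buf, vma->vm_pgoff);
}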
void vmalloc_sync_all(void);
/*
 * Low-level APIs (not for driver use!)
 */
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 21 | 55.26% | 1 | 50.00% |
Andrey Ryabinin | 17 | 44.74% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
					unsigned long start, unsigned long end);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page **pages);
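For reference, a hypothetical sketch of the low-level pattern: reserve address space with get_vm_area(), then back it with caller-provided pages via map_vm_area():

/* Hypothetical: reserve vmalloc address space and back it with
 * caller-provided pages (size page-aligned, pages covering it). */
static void *demo_lowlevel_map(struct page **pages, unsigned long size)
{
	struct vm_struct *area = get_vm_area(size, VM_MAP);

	if (!area)
		return NULL;
	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		/* unmaps anything partially mapped and frees the area */
		free_vm_area(area);
		return NULL;
	}
	return area->addr;
}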
#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			 pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Graf Yang | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Graf Yang | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Graf Yang | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
#endif
/* Allocate/destroy a 'vmalloc' VM area. */
extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
extern void free_vm_area(struct vm_struct *area);
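alloc_vm_area() reserves address space and populates the page tables, handing back pte pointers so the caller (historically paravirtual backends such as Xen) can install mappings directly. A hypothetical one-page sketch:

/* Hypothetical: reserve one page of address space plus its pte. */
static struct vm_struct *demo_reserve_page(pte_t **ptep)
{
	/* on success, *ptep points at the pte for area->addr;
	 * release the reservation later with free_vm_area() */
	return alloc_vm_area(PAGE_SIZE, ptep);
}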
/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
 * Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		  const size_t *sizes, int nr_vms,
		  size_t align)
{
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Graf Yang | 30 | 100.00% | 1 | 100.00% |
Total | 30 | 100.00% | 1 | 100.00% |
static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Graf Yang | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
# endif
#endif
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
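These let a subsystem hook the lazy-vunmap purge, e.g. to drop its own cached mappings when vmap address space is reclaimed. A hypothetical sketch:

/* Hypothetical notifier invoked when lazily-freed vmap areas are
 * purged; a driver could drop private cached mappings here. */
static int demo_vmap_purge(struct notifier_block *nb,
			   unsigned long event, void *data)
{
	return NOTIFY_DONE;
}

static struct notifier_block demo_vmap_purge_nb = {
	.notifier_call = demo_vmap_purge,
};

/* register_vmap_purge_notifier(&demo_vmap_purge_nb) at init,
 * unregister_vmap_purge_notifier(&demo_vmap_purge_nb) at exit. */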
#endif /* _LINUX_VMALLOC_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Graf Yang | 116 | 11.08% | 1 | 1.64% |
Nicholas Piggin | 115 | 10.98% | 2 | 3.28% |
Tejun Heo | 91 | 8.69% | 5 | 8.20% |
Linus Torvalds (pre-git) | 91 | 8.69% | 8 | 13.11% |
Michal Hocko | 75 | 7.16% | 3 | 4.92% |
Christoph Hellwig | 66 | 6.30% | 2 | 3.28% |
Atsushi Kumagai | 46 | 4.39% | 1 | 1.64% |
Jeremy Fitzhardinge | 42 | 4.01% | 2 | 3.28% |
Andrey Ryabinin | 41 | 3.92% | 4 | 6.56% |
Christoph Lameter | 39 | 3.72% | 3 | 4.92% |
Benjamin Herrenschmidt | 35 | 3.34% | 2 | 3.28% |
Motohiro Kosaki | 35 | 3.34% | 1 | 1.64% |
Nico Pitre | 34 | 3.25% | 2 | 3.28% |
Chris Wilson | 26 | 2.48% | 2 | 3.28% |
David Rientjes | 23 | 2.20% | 1 | 1.64% |
Dave Young | 23 | 2.20% | 1 | 1.64% |
Daisuke Hatayama | 22 | 2.10% | 1 | 1.64% |
Andrew Morton | 21 | 2.01% | 2 | 3.28% |
JoonSoo Kim | 17 | 1.62% | 2 | 3.28% |
Marek Szyprowski | 16 | 1.53% | 2 | 3.28% |
Andi Kleen | 13 | 1.24% | 2 | 3.28% |
Deepak Saxena | 11 | 1.05% | 1 | 1.64% |
Oleg Nesterov | 11 | 1.05% | 1 | 1.64% |
Ingo Molnar | 10 | 0.96% | 1 | 1.64% |
David Vrabel | 5 | 0.48% | 1 | 1.64% |
Eric Dumazet | 5 | 0.48% | 1 | 1.64% |
Tom Rini | 4 | 0.38% | 1 | 1.64% |
Rusty Russell | 4 | 0.38% | 1 | 1.64% |
Mitsuo Hayasaka | 3 | 0.29% | 1 | 1.64% |
Hugh Dickins | 2 | 0.19% | 1 | 1.64% |
Al Viro | 2 | 0.19% | 1 | 1.64% |
Zhang Yanfei | 2 | 0.19% | 1 | 1.64% |
Kenji Kaneshige | 1 | 0.10% | 1 | 1.64% |
Total | 1047 | 100.00% | 61 | 100.00% |