cregit-Linux how code gets into the kernel

Release 4.7 include/linux/vmalloc.h

Directory: include/linux
#ifndef _LINUX_VMALLOC_H

#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by an arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif


/*
 * Descriptor of one kernel virtual mapping (vmalloc/vmap/ioremap area).
 */
struct vm_struct {
	struct vm_struct	*next;		/* link to the next vm_struct — presumably the early boot list; confirm against mm/vmalloc.c */
	void			*addr;		/* base kernel virtual address of the area */
	unsigned long		size;		/* size in bytes; includes the trailing guard page unless VM_NO_GUARD (see get_vm_area_size()) */
	unsigned long		flags;		/* VM_* bits defined above */
	struct page		**pages;	/* array of backing pages — NOTE(review): likely NULL for ioremap areas; verify */
	unsigned int		nr_pages;	/* number of entries in @pages */
	phys_addr_t		phys_addr;	/* physical address — presumably only meaningful for VM_IOREMAP; confirm */
	const void		*caller;	/* call-site cookie recorded by the *_caller() allocators below */
};


/*
 * One allocated range of kernel virtual address space.
 */
struct vmap_area {
	unsigned long va_start;		/* start of the virtual address range */
	unsigned long va_end;		/* end of the virtual address range */
	unsigned long flags;		/* internal state bits — semantics live in mm/vmalloc.c */
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct llist_node purge_list;	/* "lazy purge" list */
	struct vm_struct *vm;		/* associated vm_struct, if any */
	struct rcu_head rcu_head;	/* deferred (RCU) freeing of this descriptor */
};

/*
 *	Highlevel APIs for driver use
 */
/* Map/unmap an array of pages into contiguous kernel virtual space. */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count,
				int node, pgprot_t prot);
extern void vm_unmap_aliases(void);

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
#else

static inline void vmalloc_init(void) { }

Contributors

PersonTokensPropCommitsCommitProp
nick piggin    8    100.00%    1    100.00%
Total          8    100.00%    1    100.00%

#endif extern void *vmalloc(unsigned long size); extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller); extern void vfree(const void *addr); extern void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot); extern void vunmap(const void *addr); extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, unsigned long size); extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); void vmalloc_sync_all(void); /* * Lowlevel-APIs (not for driver use!) */
static inline size_t get_vm_area_size(const struct vm_struct *area) { if (!(area->flags & VM_NO_GUARD)) /* return actual size without guard page */ return area->size - PAGE_SIZE; else return area->size; }

Contributors

PersonTokensPropCommitsCommitProp
jeremy fitzhardingejeremy fitzhardinge2155.26%150.00%
andrey ryabininandrey ryabinin1744.74%150.00%
Total38100.00%2100.00%

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); extern struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller); extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, unsigned long start, unsigned long end); extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, const void *caller); extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); extern int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages); #ifdef CONFIG_MMU extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); #else
static inline int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages) { return size >> PAGE_SHIFT; }

Contributors

PersonTokensPropCommitsCommitProp
graf yanggraf yang29100.00%1100.00%
Total29100.00%1100.00%


static inline void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { }

Contributors

PersonTokensPropCommitsCommitProp
graf yanggraf yang14100.00%1100.00%
Total14100.00%1100.00%


static inline void unmap_kernel_range(unsigned long addr, unsigned long size) { }

Contributors

PersonTokensPropCommitsCommitProp
graf yanggraf yang14100.00%1100.00%
Total14100.00%1100.00%

#endif /* Allocate/destroy a 'vmalloc' VM area. */ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes); extern void free_vm_area(struct vm_struct *area); /* for /dev/kmem */ extern long vread(char *buf, char *addr, unsigned long count); extern long vwrite(char *buf, char *addr, unsigned long count); /* * Internals. Dont't use.. */ extern struct list_head vmap_area_list; extern __init void vm_area_add_early(struct vm_struct *vm); extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); #ifdef CONFIG_SMP # ifdef CONFIG_MMU struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align); void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); # else
static inline struct vm_struct ** pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align) { return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
graf yanggraf yang30100.00%1100.00%
Total30100.00%1100.00%


static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) { }

Contributors

PersonTokensPropCommitsCommitProp
graf yanggraf yang15100.00%1100.00%
Total15100.00%1100.00%

# endif #endif #ifdef CONFIG_MMU #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) #else #define VMALLOC_TOTAL 0UL #endif int register_vmap_purge_notifier(struct notifier_block *nb); int unregister_vmap_purge_notifier(struct notifier_block *nb); #endif /* _LINUX_VMALLOC_H */

Overall Contributors

PersonTokensPropCommitsCommitProp
graf yanggraf yang11612.06%11.75%
nick pigginnick piggin11511.95%23.51%
tejun heotejun heo919.46%58.77%
pre-gitpre-git919.46%814.04%
christoph hellwigchristoph hellwig666.86%23.51%
atsushi kumagaiatsushi kumagai464.78%11.75%
jeremy fitzhardingejeremy fitzhardinge424.37%23.51%
christoph lameterchristoph lameter394.05%35.26%
kosaki motohirokosaki motohiro353.64%11.75%
benjamin herrenschmidtbenjamin herrenschmidt353.64%23.51%
nicolas pitrenicolas pitre343.53%23.51%
andrey ryabininandrey ryabinin313.22%35.26%
chris wilsonchris wilson262.70%23.51%
david rientjesdavid rientjes232.39%11.75%
dave youngdave young232.39%11.75%
daisuke hatayamadaisuke hatayama222.29%11.75%
andrew mortonandrew morton212.18%23.51%
joonsoo kimjoonsoo kim171.77%23.51%
marek szyprowskimarek szyprowski161.66%23.51%
andi kleenandi kleen131.35%23.51%
deepak saxenadeepak saxena111.14%11.75%
oleg nesterovoleg nesterov111.14%11.75%
ingo molnaringo molnar101.04%11.75%
david vrabeldavid vrabel50.52%11.75%
eric dumazeteric dumazet50.52%11.75%
tom rinitom rini40.42%11.75%
rusty russellrusty russell40.42%11.75%
mitsuo hayasakamitsuo hayasaka30.31%11.75%
zhang yanfeizhang yanfei20.21%11.75%
hugh dickinshugh dickins20.21%11.75%
al viroal viro20.21%11.75%
kenji kaneshigekenji kaneshige10.10%11.75%
Total962100.00%57100.00%
Directory: include/linux
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
{% endraw %}