Contributors: 67

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---:|---:|---:|---:|
| Nicholas Piggin | 189 | 12.58% | 7 | 5.88% |
| Linus Torvalds (pre-git) | 142 | 9.45% | 13 | 10.92% |
| Kent Overstreet | 123 | 8.19% | 1 | 0.84% |
| Christoph Hellwig | 99 | 6.59% | 6 | 5.04% |
| Tejun Heo | 80 | 5.33% | 4 | 3.36% |
| Kees Cook | 80 | 5.33% | 2 | 1.68% |
| Paolo Bonzini | 55 | 3.66% | 1 | 0.84% |
| Paul E. McKenney | 53 | 3.53% | 2 | 1.68% |
| Andrew Morton | 51 | 3.40% | 4 | 3.36% |
| Rick Edgecombe | 49 | 3.26% | 2 | 1.68% |
| Jeremy Fitzhardinge | 33 | 2.20% | 2 | 1.68% |
| Andrey Ryabinin | 33 | 2.20% | 4 | 3.36% |
| Andrey Konovalov | 31 | 2.06% | 4 | 3.36% |
| Christoph Lameter | 30 | 2.00% | 3 | 2.52% |
| Christophe Leroy | 29 | 1.93% | 2 | 1.68% |
| Joerg Roedel | 23 | 1.53% | 1 | 0.84% |
| Graf Yang | 23 | 1.53% | 1 | 0.84% |
| Chris Wilson | 20 | 1.33% | 1 | 0.84% |
| Roman Gushchin | 20 | 1.33% | 1 | 0.84% |
| Danilo Krummrich | 19 | 1.26% | 1 | 0.84% |
| Daisuke Hatayama | 18 | 1.20% | 1 | 0.84% |
| David Rientjes | 18 | 1.20% | 1 | 0.84% |
| Andi Kleen | 17 | 1.13% | 2 | 1.68% |
| Håvard Skinnemoen | 16 | 1.07% | 1 | 0.84% |
| Alexei Starovoitov | 15 | 1.00% | 1 | 0.84% |
| Benjamin Herrenschmidt | 14 | 0.93% | 2 | 1.68% |
| Marek Szyprowski | 13 | 0.87% | 2 | 1.68% |
| Claudio Imbrenda | 13 | 0.87% | 1 | 0.84% |
| Nico Pitre | 12 | 0.80% | 2 | 1.68% |
| Mike Rapoport | 12 | 0.80% | 1 | 0.84% |
| Oleg Nesterov | 11 | 0.73% | 1 | 0.84% |
| Motohiro Kosaki | 11 | 0.73% | 1 | 0.84% |
| Kefeng Wang | 10 | 0.67% | 1 | 0.84% |
| Lorenzo Stoakes | 10 | 0.67% | 1 | 0.84% |
| Paul Mundt | 10 | 0.67% | 1 | 0.84% |
| Matthew Wilcox | 10 | 0.67% | 1 | 0.84% |
| Michal Hocko | 9 | 0.60% | 2 | 1.68% |
| David Howells | 8 | 0.53% | 1 | 0.84% |
| Uladzislau Rezki | 7 | 0.47% | 1 | 0.84% |
| Pengfei Li | 6 | 0.40% | 1 | 0.84% |
| Yang Ruirui | 6 | 0.40% | 1 | 0.84% |
| Al Viro | 5 | 0.33% | 2 | 1.68% |
| Song Liu | 5 | 0.33% | 1 | 0.84% |
| James Bottomley | 5 | 0.33% | 2 | 1.68% |
| Suren Baghdasaryan | 5 | 0.33% | 1 | 0.84% |
| Deepak Saxena | 5 | 0.33% | 1 | 0.84% |
| Adrian Bunk | 5 | 0.33% | 2 | 1.68% |
| Ingo Molnar | 4 | 0.27% | 2 | 1.68% |
| Thomas Gleixner | 4 | 0.27% | 2 | 1.68% |
| Rusty Russell | 4 | 0.27% | 1 | 0.84% |
| Jann Horn | 4 | 0.27% | 1 | 0.84% |
| Atsushi Kumagai | 3 | 0.20% | 1 | 0.84% |
| Mitsuo Hayasaka | 3 | 0.20% | 1 | 0.84% |
| Alexander Potapenko | 3 | 0.20% | 1 | 0.84% |
| MinChan Kim | 3 | 0.20% | 1 | 0.84% |
| Richard Henderson | 2 | 0.13% | 1 | 0.84% |
| Zhang Yanfei | 2 | 0.13% | 1 | 0.84% |
| Hugh Dickins | 2 | 0.13% | 1 | 0.84% |
| Tom Rini | 2 | 0.13% | 1 | 0.84% |
| Kenji Kaneshige | 1 | 0.07% | 1 | 0.84% |
| Greg Kroah-Hartman | 1 | 0.07% | 1 | 0.84% |
| Bang Li | 1 | 0.07% | 1 | 0.84% |
| Baoquan He | 1 | 0.07% | 1 | 0.84% |
| Zhen Lei | 1 | 0.07% | 1 | 0.84% |
| Will Deacon | 1 | 0.07% | 1 | 0.84% |
| Peter Zijlstra | 1 | 0.07% | 1 | 0.84% |
| David Hildenbrand | 1 | 0.07% | 1 | 0.84% |
| **Total** | 1502 | | 119 | |
 
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H
#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <asm/vmalloc.h>
struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040      /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080      /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400      /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
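/* e.g. with 4 KiB pages (PAGE_SHIFT == 12) this is order 19, capping ioremap() alignment at 512 KiB (128 pages). */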
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
};
struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	struct rb_node rb_node;         /* address sorted rbtree */
	struct list_head list;          /* address sorted list */
	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size; /* in "free" tree */
		struct vm_struct *vm;           /* in "busy" tree */
	};
	unsigned long flags; /* mark type of vm_map_ram area */
};
/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif
#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif
#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif
#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif
#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif
#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif
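/*
 * Illustrative sketch (not part of this header): an architecture selecting
 * HAVE_ARCH_HUGE_VMAP overrides the #ifndef-guarded fallbacks above from its
 * own <asm/vmalloc.h> by defining the matching macro alongside the function,
 * e.g. (the CPU-feature check used below is hypothetical):
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return cpu_has_pmd_sized_mappings();
 *	}
 */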
/*
 *	Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))
extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))
extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))
extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))
extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))
extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))
extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))
extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))
extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))
void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))
extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))
void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
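/*
 * Minimal usage sketch (illustrative only, not part of this header): a driver
 * allocating a large, virtually contiguous, zeroed buffer and releasing it.
 * The foo_dev structure and helpers are hypothetical.
 *
 *	struct foo_dev {
 *		void	*ring;
 *		size_t	ring_size;
 *	};
 *
 *	static int foo_alloc_ring(struct foo_dev *dev, size_t size)
 *	{
 *		dev->ring = vzalloc(size);	// zeroed, may sleep
 *		if (!dev->ring)
 *			return -ENOMEM;
 *		dev->ring_size = size;
 *		return 0;
 *	}
 *
 *	static void foo_free_ring(struct foo_dev *dev)
 *	{
 *		vfree(dev->ring);		// NULL is a no-op
 *		dev->ring = NULL;
 *	}
 */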
extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
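/*
 * Illustrative sketch: mapping an already-allocated page array into one
 * contiguous kernel virtual range and tearing it down again. "pages" and
 * "nr_pages" are assumed to come from the caller.
 *
 *	void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vunmap(buf);
 */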
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot,
		     struct page **pages, unsigned int page_shift);
/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif
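/*
 * Illustrative example (the values shown are an assumption, not taken from
 * this file): an architecture that needs arch_sync_kernel_mappings() whenever
 * top-level kernel page-table entries change could define, in its own headers:
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	(PGTBL_PGD_MODIFIED | PGTBL_P4D_MODIFIED)
 */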
/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
/*
 *	Lowlevel-APIs (not for driver use!)
 */
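/*
 * Note: area->size includes the trailing guard page when one was added, so
 * with 4 KiB pages a 64 KiB vmalloc() allocation has area->size == 68 KiB
 * and get_vm_area_size() returns the usable 64 KiB.
 */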
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);
static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% certainty whether the area is mapped
	 * with > PAGE_SIZE page table entries: if the architecture indicates
	 * that larger sizes are available but decides not to use them,
	 * nothing prevents that. This only indicates the size of the
	 * physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}
/* for /proc/kcore */
long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
/*
 *	Internals.  Don't use..
 */
__init void vm_area_add_early(struct vm_struct *vm);
__init void vm_area_register_early(struct vm_struct *vm, size_t align);
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
unsigned long vmalloc_nr_pages(void);
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);
	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
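/*
 * Illustrative sketch: code that makes a vmalloc'ed region executable is
 * expected to mark it first, so that vfree() later restores direct-map
 * permissions and flushes the TLB. The permission-change helper shown is
 * only an example of an arch set_memory_*() interface, not defined here:
 *
 *	void *text = vmalloc(size);
 *	if (!text)
 *		return NULL;
 *	set_vm_flush_reset_perms(text);
 *	set_memory_rox((unsigned long)text, size >> PAGE_SHIFT);
 */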
#else  /* !CONFIG_MMU */
#define VMALLOC_TOTAL 0UL
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
static inline void set_vm_flush_reset_perms(void *addr) {}
#endif /* CONFIG_MMU */
#if defined(CONFIG_MMU) && defined(CONFIG_SMP)
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}
static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) {}
#endif
#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif
#endif /* _LINUX_VMALLOC_H */