Contributors: 65
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Nicholas Piggin | 201 | 13.75% | 7 | 6.03% |
| Linus Torvalds (pre-git) | 140 | 9.58% | 11 | 9.48% |
| Kent Overstreet | 123 | 8.41% | 1 | 0.86% |
| Christoph Hellwig | 92 | 6.29% | 6 | 5.17% |
| Tejun Heo | 84 | 5.75% | 4 | 3.45% |
| Kees Cook | 78 | 5.34% | 2 | 1.72% |
| Paul E. McKenney | 53 | 3.63% | 2 | 1.72% |
| Andrew Morton | 51 | 3.49% | 4 | 3.45% |
| Rick Edgecombe | 49 | 3.35% | 2 | 1.72% |
| Paolo Bonzini | 48 | 3.28% | 1 | 0.86% |
| Andrey Ryabinin | 33 | 2.26% | 4 | 3.45% |
| Jeremy Fitzhardinge | 33 | 2.26% | 2 | 1.72% |
| Andrey Konovalov | 31 | 2.12% | 4 | 3.45% |
| Christoph Lameter | 30 | 2.05% | 3 | 2.59% |
| Christophe Leroy | 29 | 1.98% | 2 | 1.72% |
| Joerg Roedel | 23 | 1.57% | 1 | 0.86% |
| Graf Yang | 22 | 1.50% | 1 | 0.86% |
| Andi Kleen | 21 | 1.44% | 3 | 2.59% |
| Roman Gushchin | 21 | 1.44% | 1 | 0.86% |
| Chris Wilson | 20 | 1.37% | 1 | 0.86% |
| Daisuke Hatayama | 18 | 1.23% | 1 | 0.86% |
| David Rientjes | 18 | 1.23% | 1 | 0.86% |
| Alexei Starovoitov | 15 | 1.03% | 1 | 0.86% |
| Benjamin Herrenschmidt | 14 | 0.96% | 2 | 1.72% |
| Claudio Imbrenda | 13 | 0.89% | 1 | 0.86% |
| Marek Szyprowski | 13 | 0.89% | 2 | 1.72% |
| Nico Pitre | 13 | 0.89% | 2 | 1.72% |
| Motohiro Kosaki | 12 | 0.82% | 1 | 0.86% |
| Oleg Nesterov | 11 | 0.75% | 1 | 0.86% |
| David Howells | 10 | 0.68% | 1 | 0.86% |
| Matthew Wilcox | 10 | 0.68% | 1 | 0.86% |
| Kefeng Wang | 10 | 0.68% | 1 | 0.86% |
| Lorenzo Stoakes | 10 | 0.68% | 1 | 0.86% |
| Paul Mundt | 10 | 0.68% | 1 | 0.86% |
| Michal Hocko | 9 | 0.62% | 2 | 1.72% |
| Håvard Skinnemoen | 8 | 0.55% | 1 | 0.86% |
| Pengfei Li | 6 | 0.41% | 1 | 0.86% |
| Yang Ruirui | 6 | 0.41% | 1 | 0.86% |
| Song Liu | 5 | 0.34% | 1 | 0.86% |
| Adrian Bunk | 5 | 0.34% | 2 | 1.72% |
| James Bottomley | 5 | 0.34% | 2 | 1.72% |
| Deepak Saxena | 5 | 0.34% | 1 | 0.86% |
| Al Viro | 5 | 0.34% | 2 | 1.72% |
| Rusty Russell | 4 | 0.27% | 1 | 0.86% |
| Jann Horn | 4 | 0.27% | 1 | 0.86% |
| Ingo Molnar | 4 | 0.27% | 2 | 1.72% |
| Prasanna S. Panchamukhi | 4 | 0.27% | 1 | 0.86% |
| Thomas Gleixner | 4 | 0.27% | 2 | 1.72% |
| Atsushi Kumagai | 3 | 0.21% | 1 | 0.86% |
| MinChan Kim | 3 | 0.21% | 1 | 0.86% |
| Uladzislau Rezki | 3 | 0.21% | 1 | 0.86% |
| Mitsuo Hayasaka | 3 | 0.21% | 1 | 0.86% |
| JoonSoo Kim | 2 | 0.14% | 1 | 0.86% |
| Hugh Dickins | 2 | 0.14% | 1 | 0.86% |
| Zhang Yanfei | 2 | 0.14% | 1 | 0.86% |
| Richard Henderson | 2 | 0.14% | 1 | 0.86% |
| Peter Zijlstra | 1 | 0.07% | 1 | 0.86% |
| Baoquan He | 1 | 0.07% | 1 | 0.86% |
| Zhen Lei | 1 | 0.07% | 1 | 0.86% |
| Tom Rini | 1 | 0.07% | 1 | 0.86% |
| Greg Kroah-Hartman | 1 | 0.07% | 1 | 0.86% |
| Will Deacon | 1 | 0.07% | 1 | 0.86% |
| Bang Li | 1 | 0.07% | 1 | 0.86% |
| David Hildenbrand | 1 | 0.07% | 1 | 0.86% |
| Kenji Kaneshige | 1 | 0.07% | 1 | 0.86% |
| Total | 1462 | | 116 | |
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H
#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <asm/vmalloc.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
struct notifier_block; /* in notifier.h */
struct iov_iter; /* in uio.h */
/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
#define VM_ALLOC 0x00000002 /* vmalloc() */
#define VM_MAP 0x00000004 /* vmap()ed pages */
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* ***DANGEROUS*** don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP 0x00000400 /* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK 0x00000800 /* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK 0
#endif
#define VM_SPARSE 0x00001000 /* sparse vm_area. not all pages are present. */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
* Maximum alignment for ioremap() regions.
* Can be overridden by arch-specific value.
*/
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
#endif
struct vm_struct {
struct vm_struct *next;
void *addr;
unsigned long size;
unsigned long flags;
struct page **pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
unsigned int page_order;
#endif
unsigned int nr_pages;
phys_addr_t phys_addr;
const void *caller;
};
struct vmap_area {
unsigned long va_start;
unsigned long va_end;
struct rb_node rb_node; /* address sorted rbtree */
struct list_head list; /* address sorted list */
/*
 * The following two fields can share storage in a union, because
 * a vmap_area object is only ever in one of:
 * 1) the "free" tree (root is free_vmap_area_root)
 * 2) the "busy" tree (root is vmap_area_root)
 */
union {
unsigned long subtree_max_size; /* in "free" tree */
struct vm_struct *vm; /* in "busy" tree */
};
unsigned long flags; /* mark type of vm_map_ram area */
};
/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
return false;
}
#endif
#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
return false;
}
#endif
#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
return false;
}
#endif
#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
u64 pfn, unsigned int max_page_shift)
{
return PAGE_SIZE;
}
#endif
#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
return PAGE_SHIFT;
}
#endif
#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
return prot;
}
#endif
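/*
 * Example (illustrative, not part of this header): an architecture selecting
 * HAVE_ARCH_HUGE_VMAP typically overrides these hooks in its <asm/vmalloc.h>,
 * defining the macro name so the generic fallback above is skipped. A
 * hypothetical arch that only supports PMD-level huge vmalloc mappings might
 * provide:
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return true;
 *	}
 */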
/*
* Highlevel APIs for driver use
*/
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
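/*
 * Example usage (illustrative sketch, not part of this header): a transient
 * mapping of a caller-owned page array. NUMA_NO_NODE, memset() and -ENOMEM
 * come from <linux/numa.h>, <linux/string.h> and <linux/errno.h>; the helper
 * name is hypothetical.
 */
static inline int example_zero_pages(struct page **pages, unsigned int count)
{
	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!va)
		return -ENOMEM;
	memset(va, 0, (size_t)count * PAGE_SIZE);
	vm_unmap_ram(va, count);	/* count must match the vm_map_ram() call */
	return 0;
}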
#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif
extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...) alloc_hooks(vmalloc_noprof(__VA_ARGS__))
extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...) alloc_hooks(vzalloc_noprof(__VA_ARGS__))
extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...) alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))
extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...) alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))
extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...) alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))
extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...) alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))
extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...) alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))
extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...) alloc_hooks(__vmalloc_noprof(__VA_ARGS__))
extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...) alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))
void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...) alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))
extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))
extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...) alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))
extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...) alloc_hooks(__vcalloc_noprof(__VA_ARGS__))
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
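/*
 * Example usage (illustrative sketch, not part of this header): a large,
 * zeroed, virtually contiguous buffer that need not be physically contiguous.
 * The 1 MiB size and the helper name are arbitrary choices for the example.
 */
static inline void *example_big_buffer(void)
{
	void *buf = vzalloc(1024 * 1024);

	if (!buf)
		return NULL;
	/* ... use buf ..., release it later with vfree(buf) */
	return buf;
}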
extern void *vmap(struct page **pages, unsigned int count,
unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
unsigned long uaddr, void *kaddr,
unsigned long pgoff, unsigned long size);
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
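/*
 * Example usage (illustrative sketch, not part of this header): exposing a
 * buffer to user space from a driver's ->mmap() handler. The buffer is
 * assumed to have been allocated with vmalloc_user(), which sets VM_USERMAP;
 * the full struct vm_area_struct definition comes from <linux/mm_types.h>,
 * and the helper name is hypothetical.
 */
static inline int example_mmap_buf(struct vm_area_struct *vma, void *buf)
{
	return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
}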
/*
* Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
* and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
* needs to be called.
*/
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif
/*
 * There is no default implementation for arch_sync_kernel_mappings(). The
 * compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
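/*
 * Example (illustrative, not part of this header): an architecture whose
 * kernel page-table updates must be propagated to other page tables would
 * define, in its own headers, something like
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PMD_MODIFIED
 *
 * and implement arch_sync_kernel_mappings() to copy the modified range of
 * top-level entries into every page table that maps the kernel.
 */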
/*
* Lowlevel-APIs (not for driver use!)
*/
static inline size_t get_vm_area_size(const struct vm_struct *area)
{
if (!(area->flags & VM_NO_GUARD))
/* return actual size without guard page */
return area->size - PAGE_SIZE;
else
return area->size;
}
extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
unsigned long flags,
unsigned long start, unsigned long end,
const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);
static inline bool is_vm_area_hugepages(const void *addr)
{
/*
 * This does not tell with 100% certainty whether the area is mapped with
 * > PAGE_SIZE page table entries: if the architecture indicates larger
 * sizes are available but decides not to use them, nothing prevents that.
 * This only reflects the size of the physical pages allocated in the
 * vmalloc layer.
 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
return find_vm_area(addr)->page_order > 0;
#else
return false;
#endif
}
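/*
 * Example usage (illustrative sketch, not part of this header): checking
 * whether a vmalloc_huge() allocation actually received huge backing pages.
 * GFP_KERNEL and pr_debug() come from <linux/gfp.h> and <linux/printk.h>;
 * the helper name is hypothetical.
 */
static inline void *example_huge_alloc(unsigned long size)
{
	void *p = vmalloc_huge(size, GFP_KERNEL);

	if (p && is_vm_area_hugepages(p))
		pr_debug("vmalloc area backed by huge pages\n");
	return p;
}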
#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
struct vm_struct *vm = find_vm_area(addr);
if (vm)
vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
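/*
 * Example usage (illustrative sketch, not part of this header): an allocation
 * whose permissions will later be changed, e.g. made read-only or executable
 * with the set_memory_*() helpers from <asm/set_memory.h>. Marking it with
 * VM_FLUSH_RESET_PERMS makes vfree() restore the direct map and flush the
 * TLB. The helper name is hypothetical.
 */
static inline void *example_alloc_exec(unsigned long size)
{
	void *p = __vmalloc(size, GFP_KERNEL);

	if (!p)
		return NULL;
	set_vm_flush_reset_perms(p);
	/* ... write code into p, then flip protections with set_memory_*() ... */
	return p;
}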
/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);
/*
* Internals. Don't use..
*/
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align);
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
const size_t *sizes, int nr_vms,
size_t align)
{
return NULL;
}
static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif
int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
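/*
 * Example usage (illustrative sketch, not part of this header): being
 * notified when vmap areas are purged, which callers such as GPU drivers use
 * as a hint to drop cached vmap()ed ranges. The full struct notifier_block
 * definition and NOTIFY_OK come from <linux/notifier.h>; the names below are
 * hypothetical.
 */
static inline int example_vmap_purge(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	/* release any cached vmap()ed ranges held by the caller here */
	return NOTIFY_OK;
}

static struct notifier_block example_vmap_purge_nb = {
	.notifier_call = example_vmap_purge,
};

/* registered with: register_vmap_purge_notifier(&example_vmap_purge_nb); */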
#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif
#endif /* _LINUX_VMALLOC_H */