Linux release 4.12: include/linux/mm.h
#ifndef _LINUX_MM_H
#define _LINUX_MM_H
#include <linux/errno.h>
#ifdef __KERNEL__
#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
void init_mm_internals(void);
#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
static inline void set_max_mapnr(unsigned long limit)
{
max_mapnr = limit;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
#else
static inline void set_max_mapnr(unsigned long limit) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiang Liu | 10 | 100.00% | 1 | 100.00% |
Total | 10 | 100.00% | 1 | 100.00% |
#endif
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;
#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
#ifndef page_to_virt
#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
#endif
#ifndef lm_alias
#define lm_alias(x) __va(__pa_symbol(x))
#endif
/*
 * Used to prevent common memory management code from establishing
 * a zero-page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X) (0)
#endif
/*
 * Default maximum number of active map areas; this limits the number of VMAs
 * per mm struct. Users can override this number via sysctl, but there is a
 * caveat.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per VMA. In ELF, the number of sections is stored as an unsigned
 * short, so it must stay below 65535 at coredump time. Because the kernel
 * adds some informative sections to the program image when generating a
 * coredump, we need some margin. The number of extra sections is currently
 * 1-3 and depends on the architecture; we use 5 as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools may be
 * surprised by larger counts.
 */
#define MAPCOUNT_ELF_CORE_MARGIN (5)
#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
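As a quick sanity check of the arithmetic above, a standalone userspace sketch (illustrative only, not part of mm.h): USHRT_MAX is 65535, so the default works out to 65530 VMAs per mm.

#include <stdio.h>
#include <limits.h>

#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

int main(void)
{
	/* 65535 - 5 = 65530 */
	printf("DEFAULT_MAX_MAP_COUNT = %d\n", DEFAULT_MAX_MAP_COUNT);
	return 0;
}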
extern int sysctl_max_map_count;
extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;
extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
size_t *, loff_t *);
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
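A standalone sketch of what these two macros compute, assuming a 4 KiB page; the DEMO_* names are userspace stand-ins, not the kernel definitions.

#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define DEMO_PAGE_ALIGN(addr)	DEMO_ALIGN(addr, DEMO_PAGE_SIZE)
#define DEMO_PAGE_ALIGNED(addr)	(((unsigned long)(addr) & (DEMO_PAGE_SIZE - 1)) == 0)

int main(void)
{
	unsigned long addr = 0x1234;

	/* Rounds up to the next page boundary: 0x1234 -> 0x2000 */
	printf("aligned: 0x%lx\n", DEMO_PAGE_ALIGN(addr));
	/* 0x1234 is not page aligned (prints 0), 0x2000 is (prints 1) */
	printf("%d %d\n", DEMO_PAGE_ALIGNED(0x1234UL), DEMO_PAGE_ALIGNED(0x2000UL));
	return 0;
}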
/*
* Linux kernel virtual memory manager primitives.
* The idea being to have a "virtual" mm in the same way
* we have a virtual fs - giving a cleaner interface to the
* mm details, and allowing different kinds of memory mappings
* (from shared memory to executable loading to arbitrary
* mmap() functions).
*/
extern struct kmem_cache *vm_area_cachep;
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;
extern unsigned int kobjsize(const void *objp);
#endif
/*
* vm_flags in vm_area_struct, see mm_types.h.
* When changing, update also include/trace/events/mmflags.h
*/
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO		0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif
#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX VM_ARCH_2
#endif
#ifndef VM_GROWSUP
# define VM_GROWSUP VM_NONE
#endif
/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
#else
#define VM_STACK VM_GROWSDOWN
#endif
#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
/*
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
*/
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
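For illustration, a hedged sketch of how such a mask is typically applied; the function name is hypothetical, and the real clearing happens in the mm/mlock.c paths.

/* Hypothetical helper: drop both mlock-related flags in one step. */
static void demo_clear_mlock_flags(struct vm_area_struct *vma)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;	/* clears VM_LOCKED | VM_LOCKONFAULT */
}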
/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];
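A sketch of the indexing, modelled on what vm_get_page_prot() does; simplified, with arch-specific adjustments omitted.

/* Illustrative only: the low four vm_flags bits select the page protection. */
static pgprot_t demo_vm_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}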
#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION	0x100	/* The fault was during an instruction fetch */
#define FAULT_FLAG_TRACE \
{ FAULT_FLAG_WRITE, "WRITE" }, \
{ FAULT_FLAG_MKWRITE, "MKWRITE" }, \
{ FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
{ FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
{ FAULT_FLAG_KILLABLE, "KILLABLE" }, \
{ FAULT_FLAG_TRIED, "TRIED" }, \
{ FAULT_FLAG_USER, "USER" }, \
{ FAULT_FLAG_REMOTE, "REMOTE" }, \
{ FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
/*
 * vm_fault is filled in by the page fault handler and passed to the vma's
* ->fault function. The vma's ->fault is responsible for returning a bitmask
* of VM_FAULT_xxx flags that give details about how the fault was handled.
*
* MM layer fills up gfp_mask for page allocations but fault handler might
* alter it if its implementation requires a different allocation context.
*
* pgoff should be used in favour of virtual_address, if possible.
*/
struct vm_fault {
struct vm_area_struct *vma; /* Target VMA */
unsigned int flags; /* FAULT_FLAG_xxx flags */
gfp_t gfp_mask; /* gfp mask to be used for allocations */
pgoff_t pgoff; /* Logical page offset based on vma */
unsigned long address; /* Faulting virtual address */
pmd_t *pmd; /* Pointer to pmd entry matching
* the 'address' */
pud_t *pud; /* Pointer to pud entry matching
* the 'address'
*/
pte_t orig_pte; /* Value of PTE at the time of fault */
struct page *cow_page; /* Page handler may use for COW fault */
struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
struct page *page; /* ->fault handlers should return a
* page here, unless VM_FAULT_NOPAGE
* is set (which is also implied by
* VM_FAULT_ERROR).
*/
/* These three entries are valid only while the ptl lock is held */
pte_t *pte; /* Pointer to pte entry matching
* the 'address'. NULL if the page
* table hasn't been allocated.
*/
spinlock_t *ptl; /* Page table lock.
* Protects pte page table if 'pte'
* is not NULL, otherwise pmd.
*/
pgtable_t prealloc_pte; /* Pre-allocated pte page table.
* vm_ops->map_pages() calls
* alloc_set_pte() from atomic context.
* do_fault_around() pre-allocates
* page table to avoid allocation from
* atomic context.
*/
};
/* page entry size for vm->huge_fault() */
enum page_entry_size {
PE_SIZE_PTE = 0,
PE_SIZE_PMD,
PE_SIZE_PUD,
};
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
* to the functions called when a no-page or a wp-page exception occurs.
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_fault *vmf);
int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
void (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
/* notification that a previously read-only page is about to become
 * writable; if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_fault *vmf);
/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
int (*pfn_mkwrite)(struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
*/
int (*access)(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
/* Called by the /proc/PID/maps code to ask the vma whether it
* has a special name. Returning non-NULL will also cause this
* vma to be dumped unconditionally. */
const char *(*name)(struct vm_area_struct *vma);
#ifdef CONFIG_NUMA
/*
* set_policy() op must add a reference to any non-NULL @new mempolicy
* to hold the policy upon return. Caller should pass NULL @new to
* remove a policy and fall back to surrounding context--i.e. do not
* install a MPOL_DEFAULT policy, nor the task or system default
* mempolicy.
*/
int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
/*
* get_policy() op must add reference [mpol_get()] to any policy at
* (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
* in mm/mempolicy.c will do this automatically.
* get_policy() must NOT add a ref if the policy at (vma,addr) is not
* marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
* If no [shared/vma] mempolicy exists at the addr, get_policy() op
* must return NULL--i.e., do not "fallback" to task or system default
* policy.
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
unsigned long addr);
#endif
/*
* Called by vm_normal_page() for special PTEs to find the
* page for @addr. This is useful if the default behavior
* (using pte_page()) would not find the correct page.
*/
struct page *(*find_special_page)(struct vm_area_struct *vma,
unsigned long addr);
};
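To make the hook layout concrete, a minimal hedged sketch of how a driver might wire up ->fault; demo_lookup_page is hypothetical and error handling is abbreviated, so this is illustrative rather than a real mm.h consumer.

/* Illustrative only, not from mm.h. */
static int demo_fault(struct vm_fault *vmf)
{
	struct page *page = demo_lookup_page(vmf->vma, vmf->pgoff); /* hypothetical */

	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);		/* the core fault path expects its own reference */
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct demo_vm_ops = {
	.fault	= demo_fault,
};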
struct mmu_gather;
struct inode;
#define page_private(page) ((page)->private)
#define set_page_private(page, v) ((page)->private = (v))
#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
static inline int pud_devmap(pud_t pud)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Matthew Wilcox | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
static inline int pgd_devmap(pgd_t pgd)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
#endif
/*
* FIXME: take this include out, include page-flags.h in
* files which need it (119 of them)
*/
#include <linux/page-flags.h>
#include <linux/huge_mm.h>
/*
* Methods to modify the page usage count.
*
* What counts for a page usage:
* - cache mapping (page->mapping)
* - private data (page->private)
* - page mapped in a task's page tables, each mapping
* is counted separately
*
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
*/
/*
* Drop a ref, return true if the refcount fell to zero (the page has no users)
*/
static inline int put_page_testzero(struct page *page)
{
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
return page_ref_dec_and_test(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 22 | 73.33% | 2 | 40.00% |
Linus Torvalds (pre-git) | 3 | 10.00% | 1 | 20.00% |
Sasha Levin | 3 | 10.00% | 1 | 20.00% |
JoonSoo Kim | 2 | 6.67% | 1 | 20.00% |
Total | 30 | 100.00% | 5 | 100.00% |
/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.
 * This can be called when the MMU is off, so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
return page_ref_add_unless(page, 1, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nicholas Piggin | 15 | 68.18% | 1 | 33.33% |
JoonSoo Kim | 5 | 22.73% | 1 | 33.33% |
Andrew Morton | 2 | 9.09% | 1 | 33.33% |
Total | 22 | 100.00% | 3 | 100.00% |
extern int page_is_ram(unsigned long pfn);
enum {
REGION_INTERSECTS,
REGION_DISJOINT,
REGION_MIXED,
};
int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
unsigned long desc);
/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);
/*
* Determine if an address is within the vmalloc range
*
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
unsigned long addr = (unsigned long)x;
return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
return false;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Lameter | 29 | 70.73% | 1 | 25.00% |
Paul Mundt | 8 | 19.51% | 1 | 25.00% |
Yaowei Bai | 2 | 4.88% | 1 | 25.00% |
David Howells | 2 | 4.88% | 1 | 25.00% |
Total | 41 | 100.00% | 4 | 100.00% |
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kamezawa Hiroyuki | 14 | 93.33% | 1 | 50.00% |
David Howells | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
#endif
extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
static inline void *kvzalloc(size_t size, gfp_t flags)
{
return kvmalloc(size, flags | __GFP_ZERO);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
if (size != 0 && n > SIZE_MAX / size)
return NULL;
return kvmalloc(n * size, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
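A typical call pattern for the kvmalloc family, sketched under the assumption of a kernel context; struct demo_item and the helper name are illustrative.

/* Illustrative only: allocate a possibly vmalloc-backed array.
 * kvmalloc_array() returns NULL on n * size overflow or allocation failure. */
static struct demo_item *demo_alloc_items(size_t nr)
{
	return kvmalloc_array(nr, sizeof(struct demo_item), GFP_KERNEL);
}
/* Release later with kvfree(), which handles both kmalloc and vmalloc backing. */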
extern void kvfree(const void *addr);
static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
return &page[1].compound_mapcount;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static inline int compound_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
page = compound_head(page);
return atomic_read(compound_mapcount_ptr(page)) + 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 35 | 85.37% | 1 | 50.00% |
Andrea Arcangeli | 6 | 14.63% | 1 | 50.00% |
Total | 41 | 100.00% | 2 | 100.00% |
/*
 * The atomic page->_mapcount starts from -1, so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
atomic_set(&(page)->_mapcount, -1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrea Arcangeli | 24 | 96.00% | 1 | 50.00% |
Mel Gorman | 1 | 4.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
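The -1 start means the first mapping flips _mapcount to 0 and the final unmap flips it back below zero. A standalone userspace sketch of those transitions, with single-threaded stand-ins for the atomic ops:

#include <stdio.h>

static int mapcount = -1;	/* as after page_mapcount_reset() */

static int inc_and_test(int *v)		{ return ++(*v) == 0; }
static int add_negative(int a, int *v)	{ return (*v += a) < 0; }

int main(void)
{
	printf("first map:   %d\n", inc_and_test(&mapcount));	   /* 1 */
	printf("second map:  %d\n", inc_and_test(&mapcount));	   /* 0 */
	printf("unmap:       %d\n", add_negative(-1, &mapcount)); /* 0 */
	printf("final unmap: %d\n", add_negative(-1, &mapcount)); /* 1: back to -1 */
	return 0;
}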
int __page_mapcount(struct page *page);
static inline int page_mapcount(struct page *page)
{
VM_BUG_ON_PAGE(PageSlab(page), page);
if (unlikely(PageCompound(page)))
return __page_mapcount(page);
return atomic_read(&page->_mapcount) + 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 28 | 57.14% | 2 | 50.00% |
Andrea Arcangeli | 11 | 22.45% | 1 | 25.00% |
Yalin Wang | 10 | 20.41% | 1 | 25.00% |
Total | 49 | 100.00% | 4 | 100.00% |
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
return page_mapcount(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill A. Shutemov | 16 | 88.89% | 2 | 66.67% |
Andrea Arcangeli | 2 | 11.11% | 1 | 33.33% |
Total | 18 | 100.00% | 3 | 100.00% |
static inline int page_trans_huge_mapcount(struct page *page,
int *total_mapcount)
{
int mapcount = page_mapcount(page);
if (total_mapcount)
*total_mapcount = mapcount;
return mapcount;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrea Arcangeli | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
#endif
static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
return compound_head(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Lameter | 29 | 96.67% | 1 | 50.00% |
Kirill A. Shutemov | 1 | 3.33% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
void __put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
/*
* Compound pages have a destructor function. Provide a
* prototype for that function and accessor functions.
* These are _only_ valid on the head of a compound page.
*/
typedef void compound_page_dtor(struct page *);
/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
NULL_COMPOUND_DTOR,
COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
TRANSHUGE_PAGE_DTOR,
#endif
NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];
static inline void set_compound_page_dtor(struct page *page,
enum compound_dtor_id compound_dtor)
{
VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
page[1].compound_dtor = compound_dtor;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Whitcroft | 20 | 58.82% | 1 | 33.33% |
Kirill A. Shutemov | 14 | 41.18% | 2 | 66.67% |
Total | 34 | 100.00% | 3 | 100.00% |
static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
return compound_page_dtors[page[1].compound_dtor];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Whitcroft | 20 | 52.63% | 1 | 33.33% |
Kirill A. Shutemov | 18 | 47.37% | 2 | 66.67% |
Total | 38 | 100.00% | 3 | 100.00% |
static inline unsigned int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
return page[1].compound_order;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Lameter | 30 | 93.75% | 2 | 50.00% |
Kirill A. Shutemov | 2 | 6.25% | 2 | 50.00% |
Total | 32 | 100.00% | 4 | 100.00% |
static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Lameter | 23 | 92.00% | 1 | 33.33% |
Kirill A. Shutemov | 2 | 8.00% | 2 | 66.67% |
Total | 25 | 100.00% | 3 | 100.00% |
void free_compound_page(struct page *page);
#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
 * servicing faults for write access. In the normal case we always want
 * pte_mkwrite. But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pte = pte_mkwrite(pte);
return pte;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrea Arcangeli | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page);
int finish_fault(struct vm_fault *vmf);
int finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
* Multiple processes may "see" the same page. E.g. for untouched
* mappings of /dev/null, all processes see the same page full of
* zeroes, and text pages of executables and shared libraries have
* only one copy in memory, at most, normally.
*
* For the non-reserved pages, page_count(page) denotes a reference count.
* page_count() == 0 means the page is free. page->lru is then used for
* freelist management in the buddy allocator.
* page_count() > 0 means the page has been allocated.
*
* Pages are allocated by the slab allocator in order to provide memory
* to kmalloc and kmem_cache_alloc. In this case, the management of the
* page, and the fields in 'struct page' are the responsibility of mm/slab.c
* unless a particular usage is carefully commented. (the responsibility of
* freeing the kmalloc memory is the caller's, of course).
*
* A page may be used by anyone else who does a __get_free_page().
* In this case, page_count still tracks the references, and should only
* be used through the normal accessor functions. The top bits of page->flags
* and page->virtual store page management information, but all other fields
* are unused and could be used privately, carefully. The management of this
* page is the responsibility of the one who allocated it, and those who have
* subsequently been given references to it.
*
* The other pages (we may call them "pagecache pages") are completely
* managed by the Linux memory manager: I/O, buffers, swapping etc.
* The following discussion applies only to them.
*
* A pagecache page contains an opaque `private' member, which belongs to the
* page's address_space. Usually, this is the address of a circular list of
* the page's disk buffers. PG_private must be set to tell the VM to call
* into the filesystem to release these pages.
*
* A page may belong to an inode's memory mapping. In this case, page->mapping
* is the pointer to the inode, and page->index is the file offset of the page,
* in units of PAGE_SIZE.
*
* If pagecache pages are not associated with an inode, they are said to be
* anonymous pages. These may become associated with the swapcache, and in that
* case PG_swapcache is set, and page->private is an offset into the swapcache.
*
* In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
*
* The pagecache pages are stored in a per-mapping radix tree, which is
* rooted at mapping->page_tree, and indexed by offset.
* Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
* lists, we instead now tag pages as dirty/writeback in the radix tree.
*
* All pagecache pages may be subject to I/O:
* - inode pages may need to be read from disk,
* - inode pages which have been modified and are MAP_SHARED may need
* to be written back to the inode on disk,
* - anonymous pages (including MAP_PRIVATE file mappings) which have been
* modified may need to be swapped out to swap space and (later) to be read
* back into memory.
*/
/*
* The zone field is never updated after free_area_init_core()
* sets it, so none of the operations on it need to be atomic.
*/
/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
/*
 * Define the bit shifts to access each section. For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away references to them.
 */
#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
NODES_PGOFF : ZONES_PGOFF)
#endif
#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
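To make the offset math concrete, a standalone sketch with hypothetical widths (64-bit longs, no section bits, 6 node bits, 2 zone bits); the DEMO_* names mirror the macros above but are not the real configuration.

#include <stdio.h>

#define DEMO_BITS_PER_LONG	64
#define DEMO_SECTIONS_WIDTH	0
#define DEMO_NODES_WIDTH	6
#define DEMO_ZONES_WIDTH	2

#define DEMO_SECTIONS_PGOFF	(DEMO_BITS_PER_LONG - DEMO_SECTIONS_WIDTH)	/* 64 */
#define DEMO_NODES_PGOFF	(DEMO_SECTIONS_PGOFF - DEMO_NODES_WIDTH)	/* 58 */
#define DEMO_ZONES_PGOFF	(DEMO_NODES_PGOFF - DEMO_ZONES_WIDTH)		/* 56 */
#define DEMO_NODES_PGSHIFT	(DEMO_NODES_PGOFF * (DEMO_NODES_WIDTH != 0))
#define DEMO_ZONES_PGSHIFT	(DEMO_ZONES_PGOFF * (DEMO_ZONES_WIDTH != 0))
#define DEMO_NODES_MASK		((1UL << DEMO_NODES_WIDTH) - 1)
#define DEMO_ZONES_MASK		((1UL << DEMO_ZONES_WIDTH) - 1)

int main(void)
{
	/* Pack node 3, zone 2 into the top of a flags word, then unpack. */
	unsigned long flags = (3UL << DEMO_NODES_PGSHIFT) | (2UL << DEMO_ZONES_PGSHIFT);

	printf("node = %lu\n", (flags >> DEMO_NODES_PGSHIFT) & DEMO_NODES_MASK); /* 3 */
	printf("zone = %lu\n", (flags >> DEMO_ZONES_PGSHIFT) & DEMO_ZONES_MASK); /* 2 */
	return 0;
}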
static inline enum zone_type page_zonenum(const struct page *page)
{
return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 20 | 80.00% | 1 | 25.00% |
Dave Hansen | 2 | 8.00% | 1 | 25.00% |
Christoph Lameter | 2 | 8.00% | 1 | 25.00% |
Ian Campbell | 1 | 4.00% | 1 | 25.00% |
Total | 25 | 100.00% | 4 | 100.00% |
#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
return page_zonenum(page) == ZONE_DEVICE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
#else
static inline bool is_zone_device_page(const struct page *page)
{
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
#endif
static inline void get_page(struct page *page)
{
page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_refcount.
	 */
VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
page_ref_inc(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 34 | 91.89% | 1 | 33.33% |
JoonSoo Kim | 3 | 8.11% | 2 | 66.67% |
Total | 37 | 100.00% | 3 | 100.00% |
static inline void put_page(struct page *page)
{
page = compound_head(page);
if (put_page_testzero(page))
__put_page(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan J Williams | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
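A common pairing of the two helpers, sketched; the function is hypothetical and assumes the caller's context already guarantees the page holds a reference, as the VM_BUG_ON in get_page() requires.

/* Illustrative only: pin a page across a window where it might otherwise go away. */
static void demo_inspect_page(struct page *page)
{
	get_page(page);		/* refcount n -> n+1; n must be > 0 already */
	/* ... safely examine the page here ... */
	put_page(page);		/* drops our ref; frees the page if it was the last */
}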
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
/*
 * The identification function is mainly used by the buddy allocator to
 * determine whether two pages could be buddies. We are not really
 * identifying the zone, since we could be using the section number id
 * if we do not have the node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andy Whitcroft | 21 | 91.30% | 2 | 66.67% |
Christoph Lameter | 2 | 8.70% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
return zone->node;
#else
return 0;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Lameter | 26 | 96.30% | 2 | 66.67% |
Andy Whitcroft | 1 | 3.70% | 1 | 33.33% |
Total | 27 | 100.00% | 3 | 100.00% |
#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 18 | 75.00% | 2 | 33.33% |
Dave Hansen | 3 | 12.50% | 1 | 16.67% |
Christoph Lameter | 1 | 4.17% | 1 | 16.67% |
Andy Whitcroft | 1 | 4.17% | 1 | 16.67% |
Ian Campbell | 1 | 4.17% | 1 | 16.67% |
Total | 24 | 100.00% | 6 | 100.00% |
#endif
#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 26 | 86.67% | 1 | 50.00% |
Peter Zijlstra | 4 | 13.33% | 1 | 50.00% |
Total | 30 | 100.00% | 2 | 100.00% |
static inline int cpupid_to_pid(int cpupid)
{
return cpupid & LAST__PID_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 12 | 80.00% | 1 | 50.00% |
Peter Zijlstra | 3 | 20.00% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
static inline int cpupid_to_cpu(int cpupid)
{
return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 15 | 78.95% | 1 | 50.00% |
Peter Zijlstra | 4 | 21.05% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static inline int cpupid_to_nid(int cpupid)
{
return cpu_to_node(cpupid_to_cpu(cpupid));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
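A standalone sketch of the packing round trip with hypothetical field sizes (8 pid bits, 8 cpu bits); the real widths come from the LAST__* definitions used above.

#include <stdio.h>

#define DEMO_PID_SHIFT	8
#define DEMO_PID_MASK	((1 << DEMO_PID_SHIFT) - 1)
#define DEMO_CPU_MASK	((1 << 8) - 1)

static int demo_cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & DEMO_CPU_MASK) << DEMO_PID_SHIFT) | (pid & DEMO_PID_MASK);
}

int main(void)
{
	int cpupid = demo_cpu_pid_to_cpupid(5, 42);

	printf("pid = %d\n", cpupid & DEMO_PID_MASK);			  /* 42 */
	printf("cpu = %d\n", (cpupid >> DEMO_PID_SHIFT) & DEMO_CPU_MASK); /* 5 */
	return 0;
}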
static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}