cregit-Linux: how code gets into the kernel

Release 4.12: include/linux/rmap.h
Directory: include/linux
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>
#include <linux/highmem.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */

struct anon_vma {
        struct anon_vma *root;          /* Root of this anon_vma tree */
        struct rw_semaphore rwsem;      /* W: modification, R: walking the list */
        /*
         * The refcount is taken on an anon_vma when there is no
         * guarantee that the vma of page tables will exist for
         * the duration of the operation. A caller that takes
         * the reference is responsible for cleaning up the
         * anon_vma if it is the last user on release.
         */
        atomic_t refcount;

        /*
         * Count of child anon_vmas and VMAs which point to this anon_vma.
         *
         * This counter is used for making a decision about reusing an
         * anon_vma instead of forking a new one. See the comments in
         * anon_vma_clone().
         */
        unsigned degree;

        struct anon_vma *parent;        /* Parent of this anon_vma */

        /*
         * NOTE: the LSB of the rb_root.rb_node is set by
         * mm_take_all_locks() _after_ taking the above lock. So the
         * rb_root must only be read/written after taking the above lock
         * to be sure to see a valid next pointer. The LSB bit itself
         * is serialized by a system wide lock only visible to
         * mm_take_all_locks() (mm_all_locks_mutex).
         */
        struct rb_root rb_root;         /* Interval tree of private "related" vmas */
};
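To make the indirection described above concrete, here is a hedged sketch of how an anonymous page's mapping field resolves to its anon_vma. This is not part of this header: the real helper (page_anon_vma()) lives in the mm/ code, and its check against the full PAGE_MAPPING_FLAGS mask is simplified here to the PAGE_MAPPING_ANON bit alone.

/*
 * Simplified sketch: an anon page's mapping field carries an
 * anon_vma pointer tagged with the PAGE_MAPPING_ANON low bit,
 * rather than a pointer to any single vma.
 */
static inline struct anon_vma *sketch_page_anon_vma(struct page *page)
{
        unsigned long mapping = (unsigned long)READ_ONCE(page->mapping);

        if (!(mapping & PAGE_MAPPING_ANON))
                return NULL;    /* file page: mapping is an address_space */

        return (struct anon_vma *)(mapping & ~PAGE_MAPPING_ANON);
}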

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */

struct anon_vma_chain {
        struct vm_area_struct *vma;
        struct anon_vma *anon_vma;
        struct list_head same_vma;      /* locked by mmap_sem & page_table_lock */
        struct rb_node rb;              /* locked by anon_vma->rwsem */
        unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
        unsigned long cached_vma_start, cached_vma_last;
#endif
};
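To make the "same_vma" direction concrete: each vm_area_struct keeps a list head (anon_vma_chain) threading the same_vma members of its chains. A minimal sketch of walking it, on the model of what unlink_anon_vmas() and anon_vma_clone() iterate over; the helper name is invented.

/*
 * Sketch only: iterate over every anon_vma this VMA is linked to.
 * vma->anon_vma_chain is the list head in vm_area_struct; mmap_sem
 * must be held to keep the list stable.
 */
static inline void sketch_for_each_anon_vma(struct vm_area_struct *vma)
{
        struct anon_vma_chain *avc;

        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) {
                struct anon_vma *anon_vma = avc->anon_vma;

                /* ... lock or inspect anon_vma here ... */
                (void)anon_vma;
        }
}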

enum ttu_flags {
        TTU_MIGRATION           = 0x1,  /* migration mode */
        TTU_MUNLOCK             = 0x2,  /* munlock mode */

        TTU_SPLIT_HUGE_PMD      = 0x4,  /* split huge PMD if any */
        TTU_IGNORE_MLOCK        = 0x8,  /* ignore mlock */
        TTU_IGNORE_ACCESS       = 0x10, /* don't age */
        TTU_IGNORE_HWPOISON     = 0x20, /* corrupted page is recoverable */
        TTU_BATCH_FLUSH         = 0x40, /* Batch TLB flushes where possible
                                         * and caller guarantees they will
                                         * do a final flush if necessary */
        TTU_RMAP_LOCKED         = 0x80  /* do not grab rmap lock:
                                         * caller holds it */
};
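Each TTU_* value occupies its own bit, so callers compose unmap modes with bitwise OR. A hedged sketch of the pattern, modelled on the page-migration path; the wrapper name is invented, and try_to_unmap() is declared further down in this header.

/*
 * Sketch only: unmap a page for migration, ignoring mlock and
 * page ageing, by OR-ing the relevant TTU_* bits together.
 */
static inline bool sketch_unmap_for_migration(struct page *page)
{
        return try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK |
                                  TTU_IGNORE_ACCESS);
}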


#ifdef CONFIG_MMU
static inline void get_anon_vma(struct anon_vma *anon_vma)
{
        atomic_inc(&anon_vma->refcount);
}


Contributors:
  Rik Van Riel      19 lines (95.00%)   1 commit (50.00%)
  Peter Zijlstra     1 line   (5.00%)   1 commit (50.00%)

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}


Contributors:
  Peter Zijlstra    27 lines (100.00%)  1 commit (100.00%)
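The refcount comment in struct anon_vma above describes when a reference must be taken. A hedged sketch of the usual pairing, using page_get_anon_vma() (declared further down in this header) to take the reference and put_anon_vma() to drop it; the wrapper name is invented.

/*
 * Sketch only: pin an anon_vma across an operation that cannot
 * assume the owning VMA stays alive, then drop the reference.
 */
static inline void sketch_with_pinned_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = page_get_anon_vma(page);

        if (!anon_vma)
                return;         /* not an anon page (or already gone) */

        /* ... work with anon_vma; it cannot be freed here ... */

        put_anon_vma(anon_vma); /* frees it if we were the last user */
}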

static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
        down_write(&anon_vma->root->rwsem);
}


Contributors:
  Rik Van Riel      19 lines (86.36%)   2 commits (50.00%)
  Ingo Molnar        3 lines (13.64%)   2 commits (50.00%)

static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
        up_write(&anon_vma->root->rwsem);
}


Contributors:
  Rik Van Riel            19 lines (86.36%)   2 commits (50.00%)
  Ingo Molnar              2 lines  (9.09%)   1 commit  (25.00%)
  Konstantin Khlebnikov    1 line   (4.55%)   1 commit  (25.00%)

static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
        down_read(&anon_vma->root->rwsem);
}


Contributors:
  Ingo Molnar       22 lines (100.00%)  1 commit (100.00%)

static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
        up_read(&anon_vma->root->rwsem);
}


Contributors:
  Ingo Molnar       22 lines (100.00%)  1 commit (100.00%)
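All four helpers above take the rwsem of anon_vma->root, so a single lock serializes the whole tree of related anon_vmas. A minimal sketch of a read-side critical section; the walker body is elided and the function name is invented.

/*
 * Sketch only: readers take the root lock shared while walking the
 * rb_root interval tree; writers use anon_vma_lock_write() instead
 * when linking or unlinking vmas.
 */
static inline void sketch_read_side_walk(struct anon_vma *anon_vma)
{
        anon_vma_lock_read(anon_vma);
        /* ... traverse anon_vma->rb_root safely here ... */
        anon_vma_unlock_read(anon_vma);
}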

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);       /* create anon_vma_cachep */
int  __anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
static inline int anon_vma_prepare(struct vm_area_struct *vma)
{
        if (likely(vma->anon_vma))
                return 0;

        return __anon_vma_prepare(vma);
}


Contributors:
  Vlastimil Babka   30 lines (100.00%)  1 commit (100.00%)
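anon_vma_prepare() is the lazy-allocation entry point: the fast path is just the pointer test above, and only the first anonymous fault in a VMA pays for __anon_vma_prepare(). A hedged sketch of the call-site pattern; the surrounding fault logic is elided and the wrapper name is invented.

/*
 * Sketch only: a fault path makes sure vma->anon_vma exists before
 * the first anonymous page is mapped into the VMA.
 */
static inline int sketch_fault_path(struct vm_area_struct *vma)
{
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM; /* could not allocate the anon_vma */

        /* ... allocate the page and call page_add_new_anon_rmap() ... */
        return 0;
}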

static inline void anon_vma_merge(struct vm_area_struct *vma,
                                  struct vm_area_struct *next)
{
        VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);

        unlink_anon_vmas(next);
}


Contributors:
  Rik Van Riel      32 lines (91.43%)   1 commit (50.00%)
  Sasha Levin        3 lines  (8.57%)   1 commit (50.00%)

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                        unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
                           unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long, bool);
void page_add_file_rmap(struct page *, bool);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
                            unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
                                unsigned long);
static inline void page_dup_rmap(struct page *page, bool compound)
{
        atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}


Contributors:
  Andrew Morton       17 lines (56.67%)   1 commit (33.33%)
  Kirill A. Shutemov  10 lines (33.33%)   1 commit (33.33%)
  Hugh Dickins         3 lines (10.00%)   1 commit (33.33%)
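page_dup_rmap() above is the fork-side counterpart of the add/remove calls: when a PTE is duplicated into a child, only the mapcount needs to go up. A hedged sketch, roughly what the page-table copy in mm/memory.c does for a small page; the wrapper name is invented.

/*
 * Sketch only: after fork copies a PTE into the child, the shared
 * page gains one more mapping; page_remove_rmap() drops it again
 * when either side unmaps.
 */
static inline void sketch_fork_copied_pte(struct page *page)
{
        page_dup_rmap(page, false);     /* false: not a compound page */
}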

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
                    struct mem_cgroup *memcg, unsigned long *vm_flags);

bool try_to_unmap(struct page *, enum ttu_flags flags);

/* Avoid racy checks */
#define PVMW_SYNC               (1 << 0)
/* Look for migration entries rather than present PTEs */
#define PVMW_MIGRATION          (1 << 1)

struct page_vma_mapped_walk {
        struct page *page;
        struct vm_area_struct *vma;
        unsigned long address;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        unsigned int flags;
};
static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
{
        if (pvmw->pte)
                pte_unmap(pvmw->pte);
        if (pvmw->ptl)
                spin_unlock(pvmw->ptl);
}


Contributors:
  Kirill A. Shutemov  38 lines (100.00%)  1 commit (100.00%)
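page_vma_mapped_walk() (declared just below) drives the structure above: each successful iteration leaves pvmw->pte or pvmw->pmd pointing at a mapping of the page with pvmw->ptl held, and a caller that breaks out of the loop early must release those via page_vma_mapped_walk_done(). A hedged sketch modelled on page_mapped_in_vma(); the wrapper name is invented.

/*
 * Sketch only: report whether the page is mapped at @address in
 * @vma, stopping at the first hit.
 */
static inline bool sketch_mapped_here(struct page *page,
                                      struct vm_area_struct *vma,
                                      unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
        };

        while (page_vma_mapped_walk(&pvmw)) {
                /* breaking out early: release the pte map and ptl */
                page_vma_mapped_walk_done(&pvmw);
                return true;
        }

        return false;   /* a finished walk has already cleaned up */
}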

bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas
 * holding the page mlocked.
 */
void try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking traversing termination condition
 * anon_lock: for getting anon_lock by optimized way rather than default
 * invalid_vma: for skipping uninterested vma
 */
struct rmap_walk_control {
        void *arg;
        /*
         * Return false if page table scanning in rmap_walk should be stopped.
         * Otherwise, return true.
         */
        bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                         unsigned long addr, void *arg);
        int (*done)(struct page *page);
        struct anon_vma *(*anon_lock)(struct page *page);
        bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
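The callbacks above are used by packaging them in an rmap_walk_control and handing it to rmap_walk(), which then visits every VMA where the page is mapped. A hedged sketch that counts those mappings; both function names and the counter protocol are invented for illustration, and real callers typically hold the page lock.

/*
 * Sketch only: rmap_one() is invoked for each VMA mapping the page;
 * returning true keeps the walk going.
 */
static inline bool sketch_count_one(struct page *page,
                                    struct vm_area_struct *vma,
                                    unsigned long addr, void *arg)
{
        (*(int *)arg)++;        /* record this mapping */
        return true;            /* continue the traversal */
}

static inline int sketch_count_mappings(struct page *page)
{
        int nr = 0;
        struct rmap_walk_control rwc = {
                .arg = &nr,
                .rmap_one = sketch_count_one,
        };

        rmap_walk(page, &rwc);  /* visits every VMA mapping the page */
        return nr;
}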
#else   /* !CONFIG_MMU */

#define anon_vma_init()         do {} while (0)
#define anon_vma_prepare(vma)   (0)
#define anon_vma_link(vma)      do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
                                  struct mem_cgroup *memcg,
                                  unsigned long *vm_flags)
{
        *vm_flags = 0;
        return 0;
}


Contributors:
  Mike Frysinger    24 lines (72.73%)   1 commit  (14.29%)
  Andrew Morton      4 lines (12.12%)   1 commit  (14.29%)
  Johannes Weiner    2 lines  (6.06%)   2 commits (28.57%)
  Hugh Dickins       1 line   (3.03%)   1 commit  (14.29%)
  Balbir Singh       1 line   (3.03%)   1 commit  (14.29%)
  Fengguang Wu       1 line   (3.03%)   1 commit  (14.29%)

#define try_to_unmap(page, refs) false

static inline int page_mkclean(struct page *page)
{
        return 0;
}


Contributors:
  Peter Zijlstra    15 lines (100.00%)  1 commit (100.00%)

#endif  /* CONFIG_MMU */

#endif  /* _LINUX_RMAP_H */

Overall Contributors:
  Rik Van Riel            166 lines (17.08%)    7 commits (11.86%)
  Andrew Morton           161 lines (16.56%)    5 commits  (8.47%)
  Kirill A. Shutemov      160 lines (16.46%)    8 commits (13.56%)
  Peter Zijlstra           68 lines  (7.00%)    4 commits  (6.78%)
  JoonSoo Kim              64 lines  (6.58%)    2 commits  (3.39%)
  Ingo Molnar              56 lines  (5.76%)    2 commits  (3.39%)
  Hugh Dickins             45 lines  (4.63%)    3 commits  (5.08%)
  Andi Kleen               36 lines  (3.70%)    3 commits  (5.08%)
  MinChan Kim              32 lines  (3.29%)    5 commits  (8.47%)
  Vlastimil Babka          31 lines  (3.19%)    1 commit   (1.69%)
  Naoya Horiguchi          30 lines  (3.09%)    1 commit   (1.69%)
  Nicholas Piggin          25 lines  (2.57%)    2 commits  (3.39%)
  Mike Frysinger           24 lines  (2.47%)    1 commit   (1.69%)
  Michel Lespinasse        22 lines  (2.26%)    2 commits  (3.39%)
  Konstantin Khlebnikov    11 lines  (1.13%)    2 commits  (3.39%)
  Balbir Singh              8 lines  (0.82%)    1 commit   (1.69%)
  Shaohua Li                8 lines  (0.82%)    1 commit   (1.69%)
  Fengguang Wu              6 lines  (0.62%)    1 commit   (1.69%)
  Mel Gorman                6 lines  (0.62%)    2 commits  (3.39%)
  Johannes Weiner           3 lines  (0.31%)    2 commits  (3.39%)
  Sasha Levin               3 lines  (0.31%)    1 commit   (1.69%)
  Chris Wright              3 lines  (0.31%)    1 commit   (1.69%)
  Christoph Lameter         3 lines  (0.31%)    1 commit   (1.69%)
  Richard Kennedy           1 line   (0.10%)    1 commit   (1.69%)
Created with cregit.