cregit-Linux: how code gets into the kernel

Release 4.7: include/linux/rmap.h
Directory: include/linux
#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/rwsem.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */

struct anon_vma {
	struct anon_vma *root;		/* Root of this anon_vma tree */
	struct rw_semaphore rwsem;	/* W: modification, R: walking the list */
	/*
	 * The refcount is taken on an anon_vma when there is no
	 * guarantee that the vma of page tables will exist for
	 * the duration of the operation. A caller that takes
	 * the reference is responsible for clearing up the
	 * anon_vma if they are the last user on release.
	 */
	atomic_t refcount;

	/*
	 * Count of child anon_vmas and VMAs which point to this anon_vma.
	 *
	 * This counter is used for making the decision about reusing an
	 * anon_vma instead of forking a new one. See the comments in
	 * anon_vma_clone().
	 */
	unsigned degree;

	struct anon_vma *parent;	/* Parent of this anon_vma */

	/*
	 * NOTE: the LSB of the rb_root.rb_node is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * rb_root must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct rb_root rb_root;	/* Interval tree of private "related" vmas */
};
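
The refcount rule described in the comment above is easiest to see in code. Below is a minimal, hypothetical sketch (not taken from the kernel source; do_tree_walk() is a made-up placeholder) of a caller that cannot guarantee the VMA outlives its operation, using the get_anon_vma()/put_anon_vma() helpers declared later in this header:

/*
 * Hypothetical sketch: pin the anon_vma so it survives even if the
 * last vma on its list is unlinked while we work.
 */
static void walk_pinned(struct anon_vma *anon_vma)
{
	get_anon_vma(anon_vma);		/* take a reference */
	do_tree_walk(anon_vma);		/* made-up placeholder for real work */
	put_anon_vma(anon_vma);		/* last user frees it via __put_anon_vma() */
}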

/*
 * The copy-on-write semantics of fork mean that an anon_vma
 * can become associated with multiple processes. Furthermore,
 * each child process will have its own anon_vma, where new
 * pages for that process are instantiated.
 *
 * This structure allows us to find the anon_vmas associated
 * with a VMA, or the VMAs associated with an anon_vma.
 * The "same_vma" list contains the anon_vma_chains linking
 * all the anon_vmas associated with this VMA.
 * The "rb" field indexes on an interval tree the anon_vma_chains
 * which link all the VMAs associated with this anon_vma.
 */

struct anon_vma_chain {
	struct vm_area_struct *vma;
	struct anon_vma *anon_vma;
	struct list_head same_vma;	/* locked by mmap_sem & page_table_lock */
	struct rb_node rb;		/* locked by anon_vma->rwsem */
	unsigned long rb_subtree_last;
#ifdef CONFIG_DEBUG_VM_RB
	unsigned long cached_vma_start, cached_vma_last;
#endif
};
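
To make the VMA-to-anon_vma direction concrete, here is a hedged sketch of walking the same_vma list; vma->anon_vma_chain is the real list head in struct vm_area_struct, while use_anon_vma() is a made-up consumer:

/*
 * Hypothetical sketch: visit every anon_vma a VMA is associated with
 * by following the same_vma links (caller holds mmap_sem).
 */
static void for_each_anon_vma_of(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc;

	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
		use_anon_vma(avc->anon_vma);	/* made-up consumer */
}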


enum ttu_flags {
	TTU_UNMAP = 1,			/* unmap mode */
	TTU_MIGRATION = 2,		/* migration mode */
	TTU_MUNLOCK = 4,		/* munlock mode */
	TTU_LZFREE = 8,			/* lazy free mode */
	TTU_SPLIT_HUGE_PMD = 16,	/* split huge PMD if any */

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
	TTU_BATCH_FLUSH = (1 << 11),	/* Batch TLB flushes where possible
					 * and caller guarantees they will
					 * do a final flush if necessary */
	TTU_RMAP_LOCKED = (1 << 12)	/* do not grab rmap lock:
					 * caller holds it */
};
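
The low-valued flags select a mode and the (1 << 8)-and-up flags are modifiers that callers OR in. As an illustration (this combination mirrors the migration path in mm/migrate.c, but is shown here only as an example; try_to_unmap() is declared below):

/*
 * Illustrative only: unmap a page for migration, skipping the mlock
 * and referenced-bit checks.
 */
int ret;

ret = try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
if (ret != SWAP_SUCCESS)
	return ret;		/* SWAP_AGAIN, SWAP_FAIL or SWAP_MLOCK */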

#ifdef CONFIG_MMU

static inline void get_anon_vma(struct anon_vma *anon_vma)
{
	atomic_inc(&anon_vma->refcount);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
rik van riel            19      95.00%   1        50.00%
peter zijlstra          1       5.00%    1        50.00%
Total                   20      100.00%  2        100.00%

void __put_anon_vma(struct anon_vma *anon_vma);

static inline void put_anon_vma(struct anon_vma *anon_vma)
{
	if (atomic_dec_and_test(&anon_vma->refcount))
		__put_anon_vma(anon_vma);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
peter zijlstra          27      100.00%  1        100.00%
Total                   27      100.00%  1        100.00%


static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
{
	down_write(&anon_vma->root->rwsem);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
rik van riel            19      86.36%   2        50.00%
ingo molnar             3       13.64%   2        50.00%
Total                   22      100.00%  4        100.00%


static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
{
	up_write(&anon_vma->root->rwsem);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
rik van riel            19      86.36%   2        50.00%
ingo molnar             2       9.09%    1        25.00%
konstantin khlebnikov   1       4.55%    1        25.00%
Total                   22      100.00%  4        100.00%


static inline void anon_vma_lock_read(struct anon_vma *anon_vma)
{
	down_read(&anon_vma->root->rwsem);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
ingo molnar             22      100.00%  1        100.00%
Total                   22      100.00%  1        100.00%


static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
{
	up_read(&anon_vma->root->rwsem);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
ingo molnar             22      100.00%  1        100.00%
Total                   22      100.00%  1        100.00%
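
All four helpers take the rwsem of the root anon_vma, so a single lock covers the whole tree of related anon_vmas. A hedged sketch of the two sides of that convention:

/*
 * Sketch only: writers exclude walkers across the entire tree.
 */
anon_vma_lock_write(anon_vma);
/* ... link or unlink anon_vma_chains in anon_vma->rb_root ... */
anon_vma_unlock_write(anon_vma);

anon_vma_lock_read(anon_vma);
/* ... walk the interval tree without blocking other readers ... */
anon_vma_unlock_read(anon_vma);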

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void unlink_anon_vmas(struct vm_area_struct *);
int  anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
int  anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);

static inline void anon_vma_merge(struct vm_area_struct *vma,
				  struct vm_area_struct *next)
{
	VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
	unlink_anon_vmas(next);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
rik van riel            32      91.43%   1        50.00%
sasha levin             3       8.57%    1        50.00%
Total                   35      100.00%  2        100.00%

struct anon_vma *page_get_anon_vma(struct page *page);

/* bitflags for do_page_add_anon_rmap() */
#define RMAP_EXCLUSIVE 0x01
#define RMAP_COMPOUND 0x02

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_move_anon_rmap(struct page *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
			unsigned long, bool);
void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
			   unsigned long, int);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long, bool);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, bool);

void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
			    unsigned long);
void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
				unsigned long);

static inline void page_dup_rmap(struct page *page, bool compound)
{
	atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
andrew morton           17      56.67%   1        33.33%
kirill a. shutemov      10      33.33%   1        33.33%
hugh dickins            3       10.00%   1        33.33%
Total                   30      100.00%  3        100.00%

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
		    struct mem_cgroup *memcg, unsigned long *vm_flags);

#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);

/*
 * Used by uprobes to replace a userspace page safely
 */
pte_t *__page_check_address(struct page *, struct mm_struct *,
			    unsigned long, spinlock_t **, int);

static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
					unsigned long address,
					spinlock_t **ptlp, int sync)
{
	pte_t *ptep;

	__cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
						       ptlp, sync));
	return ptep;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
namhyung kim            58      100.00%  1        100.00%
Total                   58      100.00%  1        100.00%
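
On success page_check_address() returns with *ptlp held, as the __cond_lock() annotation tells sparse; the caller is responsible for dropping it. A hedged usage sketch (the surrounding variables are assumed to exist; pte_unmap_unlock() is the standard helper from linux/mm.h):

/*
 * Hypothetical sketch: probe for a mapping of page at address.
 */
spinlock_t *ptl;
pte_t *pte = page_check_address(page, mm, address, &ptl, 0);

if (pte) {
	/* ... inspect or modify the pte under the page table lock ... */
	pte_unmap_unlock(pte, ptl);	/* drop the lock taken for us */
}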

/*
 * Used by idle page tracking to check if a page was referenced via page
 * tables.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
				  unsigned long address, pmd_t **pmdp,
				  pte_t **ptep, spinlock_t **ptlp);
#else
static inline bool page_check_address_transhuge(struct page *page,
				struct mm_struct *mm, unsigned long address,
				pmd_t **pmdp, pte_t **ptep,
				spinlock_t **ptlp)
{
	*ptep = page_check_address(page, mm, address, ptlp, 0);
	*pmdp = NULL;
	return !!*ptep;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
vladimir davydov        63      100.00%  1        100.00%
Total                   63      100.00%  1        100.00%

#endif

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

void remove_migration_ptes(struct page *old, struct page *new, bool locked);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);

/*
 * rmap_walk_control: To control rmap traversing for specific needs
 *
 * arg: passed to rmap_one() and invalid_vma()
 * rmap_one: executed on each vma where page is mapped
 * done: for checking traversing termination condition
 * anon_lock: for getting anon_lock by optimized way rather than default
 * invalid_vma: for skipping uninterested vma
 */
struct rmap_walk_control {
	void *arg;
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
			unsigned long addr, void *arg);
	int (*done)(struct page *page);
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *memcg,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
mike frysinger          24      72.73%   1        14.29%
andrew morton           4       12.12%   1        14.29%
johannes weiner         2       6.06%    2        28.57%
fengguang wu            1       3.03%    1        14.29%
balbir singh            1       3.03%    1        14.29%
hugh dickins            1       3.03%    1        14.29%
Total                   33      100.00%  7        100.00%

#define try_to_unmap(page, refs) SWAP_FAIL
static inline int page_mkclean(struct page *page)
{
	return 0;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
peter zijlstra          15      100.00%  1        100.00%
Total                   15      100.00%  1        100.00%

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
#define SWAP_LZFREE	4

#endif	/* _LINUX_RMAP_H */
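
Putting the walker interface and the SWAP_* return values together, here is a hedged sketch of a minimal rmap_walk() client; count_one() and count_mappings() are made-up names, and real rmap_walk_control users live in mm code such as page idle tracking and migration:

/*
 * Hypothetical sketch: count the VMAs in which a page is mapped.
 * Returning SWAP_AGAIN from rmap_one tells rmap_walk() to continue
 * to the next vma; any other value stops the walk.
 */
static int count_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long addr, void *arg)
{
	(*(int *)arg)++;
	return SWAP_AGAIN;
}

static int count_mappings(struct page *page)
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.arg	  = &count,
		.rmap_one = count_one,
	};

	rmap_walk(page, &rwc);
	return count;
}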

Overall Contributors

Person                  Tokens  Prop     Commits  CommitProp
andrew morton           177     16.28%   5        8.47%
rik van riel            166     15.27%   7        11.86%
vladimir davydov        104     9.57%    1        1.69%
kirill a. shutemov      70      6.44%    6        10.17%
peter zijlstra          68      6.26%    4        6.78%
joonsoo kim             65      5.98%    2        3.39%
namhyung kim            59      5.43%    1        1.69%
ingo molnar             56      5.15%    2        3.39%
minchan kim             53      4.88%    2        3.39%
hugh dickins            50      4.60%    4        6.78%
andi kleen              43      3.96%    3        5.08%
nick piggin             32      2.94%    3        5.08%
naoya horiguchi         30      2.76%    1        1.69%
mike frysinger          24      2.21%    1        1.69%
michel lespinasse       22      2.02%    2        3.39%
carsten otte            15      1.38%    1        1.69%
konstantin khlebnikov   14      1.29%    3        5.08%
mel gorman              11      1.01%    2        3.39%
balbir singh            8       0.74%    1        1.69%
fengguang wu            6       0.55%    1        1.69%
christoph lameter       3       0.28%    1        1.69%
johannes weiner         3       0.28%    2        3.39%
sasha levin             3       0.28%    1        1.69%
chris wright            3       0.28%    1        1.69%
matthew wilcox          1       0.09%    1        1.69%
richard kennedy         1       0.09%    1        1.69%
Total                   1087    100.00%  59       100.00%