cregit-Linux: how code gets into the kernel

Release 4.7: mm/rmap.c

/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex       (while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     mapping->tree_lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */
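
The ordering above is easiest to read as the usual deadlock-avoidance rule: any path that needs more than one of these locks must take them outer-to-inner, in the documented order, and never block on an outer lock while already holding an inner one. Below is a minimal, hypothetical userspace sketch of that discipline using POSIX mutexes; the two lock names are illustrative stand-ins (think mmap_sem and the pte lock), not kernel APIs.

#include <pthread.h>
#include <stdio.h>

/* Illustrative only: a fixed global acquisition order prevents two paths
 * from waiting on each other's lock. */
static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;	/* e.g. mmap_sem */
static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;	/* e.g. pte lock */

static void correct_path(void)
{
	pthread_mutex_lock(&outer_lock);
	pthread_mutex_lock(&inner_lock);
	/* ... touch state protected by both locks ... */
	pthread_mutex_unlock(&inner_lock);
	pthread_mutex_unlock(&outer_lock);
}

static void also_correct_path(void)
{
	/* Taking only the inner lock is fine; what the comment forbids is
	 * taking the inner lock first and then blocking on the outer one. */
	pthread_mutex_lock(&inner_lock);
	/* ... */
	pthread_mutex_unlock(&inner_lock);
}

int main(void)
{
	correct_path();
	also_correct_path();
	printf("lock ordering respected\n");
	return 0;
}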

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"


static struct kmem_cache *anon_vma_cachep;

static struct kmem_cache *anon_vma_chain_cachep;


static inline struct anon_vma *anon_vma_alloc(void) { struct anon_vma *anon_vma; anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); if (anon_vma) { atomic_set(&anon_vma->refcount, 1); anon_vma->degree = 1; /* Reference for first vma */ anon_vma->parent = anon_vma; /* * Initialise the anon_vma root to point to itself. If called * from fork, the root will be reset to the parent's anon_vma. */ anon_vma->root = anon_vma; } return anon_vma; }

Contributors

PersonTokensPropCommitsCommitProp
peter zijlstrapeter zijlstra3351.56%133.33%
adrian bunkadrian bunk1828.12%133.33%
konstantin khlebnikovkonstantin khlebnikov1320.31%133.33%
Total64100.00%3100.00%


static inline void anon_vma_free(struct anon_vma *anon_vma) { VM_BUG_ON(atomic_read(&anon_vma->refcount)); /* * Synchronize against page_lock_anon_vma_read() such that * we can safely hold the lock without the anon_vma getting * freed. * * Relies on the full mb implied by the atomic_dec_and_test() from * put_anon_vma() against the acquire barrier implied by * down_read_trylock() from page_lock_anon_vma_read(). This orders: * * page_lock_anon_vma_read() VS put_anon_vma() * down_read_trylock() atomic_dec_and_test() * LOCK MB * atomic_read() rwsem_is_locked() * * LOCK should suffice since the actual taking of the lock must * happen _before_ what follows. */ might_sleep(); if (rwsem_is_locked(&anon_vma->root->rwsem)) { anon_vma_lock_write(anon_vma); anon_vma_unlock_write(anon_vma); } kmem_cache_free(anon_vma_cachep, anon_vma); }

Contributors

PersonTokensPropCommitsCommitProp
peter zijlstrapeter zijlstra3356.90%228.57%
adrian bunkadrian bunk1729.31%114.29%
ingo molnaringo molnar46.90%228.57%
hugh dickinshugh dickins35.17%114.29%
konstantin khlebnikovkonstantin khlebnikov11.72%114.29%
Total58100.00%7100.00%


static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp) { return kmem_cache_alloc(anon_vma_chain_cachep, gfp); }

Contributors

PersonTokensPropCommitsCommitProp
rik van rielrik van riel1785.00%150.00%
linus torvaldslinus torvalds315.00%150.00%
Total20100.00%2100.00%


static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) { kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain); }

Contributors

PersonTokensPropCommitsCommitProp
rik van rielrik van riel1794.44%150.00%
namhyung kimnamhyung kim15.56%150.00%
Total18100.00%2100.00%


static void anon_vma_chain_link(struct vm_area_struct *vma, struct anon_vma_chain *avc, struct anon_vma *anon_vma) { avc->vma = vma; avc->anon_vma = anon_vma; list_add(&avc->same_vma, &vma->anon_vma_chain); anon_vma_interval_tree_insert(avc, &anon_vma->rb_root); }

Contributors

PersonTokensPropCommitsCommitProp
kautuk consulkautuk consul5496.43%150.00%
michel lespinassemichel lespinasse23.57%150.00%
Total56100.00%2100.00%

/** * anon_vma_prepare - attach an anon_vma to a memory region * @vma: the memory region in question * * This makes sure the memory mapping described by 'vma' has * an 'anon_vma' attached to it, so that we can associate the * anonymous pages mapped into it with that anon_vma. * * The common case will be that we already have one, but if * not we either need to find an adjacent mapping that we * can re-use the anon_vma from (very common when the only * reason for splitting a vma has been mprotect()), or we * allocate a new one. * * Anon-vma allocations are very subtle, because we may have * optimistically looked up an anon_vma in page_lock_anon_vma_read() * and that may actually touch the spinlock even in the newly * allocated vma (it depends on RCU to make sure that the * anon_vma isn't actually destroyed). * * As a result, we need to do proper anon_vma locking even * for the new allocation. At the same time, we do not want * to do any locking for the common case of already having * an anon_vma. * * This must be called with the mmap_sem held for reading. */
int anon_vma_prepare(struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; might_sleep(); if (unlikely(!anon_vma)) { struct mm_struct *mm = vma->vm_mm; struct anon_vma *allocated; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_enomem; anon_vma = find_mergeable_anon_vma(vma); allocated = NULL; if (!anon_vma) { anon_vma = anon_vma_alloc(); if (unlikely(!anon_vma)) goto out_enomem_free_avc; allocated = anon_vma; } anon_vma_lock_write(anon_vma); /* page_table_lock to protect against threads */ spin_lock(&mm->page_table_lock); if (likely(!vma->anon_vma)) { vma->anon_vma = anon_vma; anon_vma_chain_link(vma, avc, anon_vma); /* vma reference or self-parent link for new root */ anon_vma->degree++; allocated = NULL; avc = NULL; } spin_unlock(&mm->page_table_lock); anon_vma_unlock_write(anon_vma); if (unlikely(allocated)) put_anon_vma(allocated); if (unlikely(avc)) anon_vma_chain_free(avc); } return 0; out_enomem_free_avc: anon_vma_chain_free(avc); out_enomem: return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton12759.91%216.67%
rik van rielrik van riel4018.87%18.33%
linus torvaldslinus torvalds125.66%216.67%
oleg nesterovoleg nesterov115.19%18.33%
hugh dickinshugh dickins115.19%18.33%
konstantin khlebnikovkonstantin khlebnikov73.30%216.67%
kautuk consulkautuk consul20.94%18.33%
ingo molnaringo molnar10.47%18.33%
peter zijlstrapeter zijlstra10.47%18.33%
Total212100.00%12100.00%

/* * This is a useful helper function for locking the anon_vma root as * we traverse the vma->anon_vma_chain, looping over anon_vma's that * have the same vma. * * Such anon_vma's should have the same root, so you'd expect to see * just a single mutex_lock for the whole traversal. */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma) { struct anon_vma *new_root = anon_vma->root; if (new_root != root) { if (WARN_ON_ONCE(root)) up_write(&root->rwsem); root = new_root; down_write(&root->rwsem); } return root; }

Contributors

PersonTokensPropCommitsCommitProp
linus torvaldslinus torvalds6293.94%150.00%
ingo molnaringo molnar46.06%150.00%
Total66100.00%2100.00%


static inline void unlock_anon_vma_root(struct anon_vma *root) { if (root) up_write(&root->rwsem); }

Contributors

PersonTokensPropCommitsCommitProp
linus torvaldslinus torvalds2291.67%150.00%
ingo molnaringo molnar28.33%150.00%
Total24100.00%2100.00%

/* * Attach the anon_vmas from src to dst. * Returns 0 on success, -ENOMEM on failure. * * If dst->anon_vma is NULL, this function tries to find and reuse an existing * anon_vma which has no vmas and only one child anon_vma. This prevents the * anon_vma hierarchy from degrading into an endless linear chain under a * constantly forking task. On the other hand, an anon_vma with more than one * child isn't reused even if there is no live vma, so the rmap walker has a * good chance of avoiding a scan of the whole hierarchy when it searches where * the page is mapped. */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { struct anon_vma_chain *avc, *pavc; struct anon_vma *root = NULL; list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { struct anon_vma *anon_vma; avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN); if (unlikely(!avc)) { unlock_anon_vma_root(root); root = NULL; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto enomem_failure; } anon_vma = pavc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_chain_link(dst, avc, anon_vma); /* * Reuse an existing anon_vma if its degree is lower than two, * that means it has no vma and only one anon_vma child. * * Do not choose the parent anon_vma, otherwise the first child * will always reuse it. The root anon_vma is never reused: * it has a self-parent reference and at least one child. */ if (!dst->anon_vma && anon_vma != src->anon_vma && anon_vma->degree < 2) dst->anon_vma = anon_vma; } if (dst->anon_vma) dst->anon_vma->degree++; unlock_anon_vma_root(root); return 0; enomem_failure: /* * dst->anon_vma is dropped here, otherwise its degree can be incorrectly * decremented in unlink_anon_vmas(). * We can safely do this because callers of anon_vma_clone() don't care * about dst->anon_vma if anon_vma_clone() failed. */ dst->anon_vma = NULL; unlink_anon_vmas(dst); return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
linus torvaldslinus torvalds6436.16%342.86%
rik van rielrik van riel5028.25%114.29%
konstantin khlebnikovkonstantin khlebnikov3922.03%114.29%
andrew mortonandrew morton179.60%114.29%
leon yuleon yu73.95%114.29%
Total177100.00%7100.00%

/* * Attach vma to its own anon_vma, as well as to the anon_vmas that * the corresponding VMA in the parent process is attached to. * Returns 0 on success, non-zero on failure. */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) { struct anon_vma_chain *avc; struct anon_vma *anon_vma; int error; /* Don't bother if the parent process has no anon_vma here. */ if (!pvma->anon_vma) return 0; /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ vma->anon_vma = NULL; /* * First, attach the new VMA to the parent VMA's anon_vmas, * so rmap can find non-COWed pages in child processes. */ error = anon_vma_clone(vma, pvma); if (error) return error; /* An existing anon_vma has been reused, all done then. */ if (vma->anon_vma) return 0; /* Then add our own anon_vma. */ anon_vma = anon_vma_alloc(); if (!anon_vma) goto out_error; avc = anon_vma_chain_alloc(GFP_KERNEL); if (!avc) goto out_error_free_anon_vma; /* * The root anon_vma's spinlock is the lock actually used when we * lock any of the anon_vmas in this anon_vma tree. */ anon_vma->root = pvma->anon_vma->root; anon_vma->parent = pvma->anon_vma; /* * With refcounts, an anon_vma can stay around longer than the * process it belongs to. The root anon_vma needs to be pinned until * this anon_vma is freed, because the lock lives in the root. */ get_anon_vma(anon_vma->root); /* Mark this anon_vma as the one where our new (COWed) pages go. */ vma->anon_vma = anon_vma; anon_vma_lock_write(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); anon_vma->parent->degree++; anon_vma_unlock_write(anon_vma); return 0; out_error_free_anon_vma: put_anon_vma(anon_vma); out_error: unlink_anon_vmas(vma); return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
rik van rielrik van riel10054.64%433.33%
konstantin khlebnikovkonstantin khlebnikov3318.03%216.67%
andrew mortonandrew morton2614.21%18.33%
linus torvaldslinus torvalds116.01%216.67%
daniel forrestdaniel forrest105.46%18.33%
peter zijlstrapeter zijlstra21.09%18.33%
ingo molnaringo molnar10.55%18.33%
Total183100.00%12100.00%


void unlink_anon_vmas(struct vm_area_struct *vma) { struct anon_vma_chain *avc, *next; struct anon_vma *root = NULL; /* * Unlink each anon_vma chained to the VMA. This list is ordered * from newest to oldest, ensuring the root anon_vma gets freed last. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_interval_tree_remove(avc, &anon_vma->rb_root); /* * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { anon_vma->parent->degree--; continue; } list_del(&avc->same_vma); anon_vma_chain_free(avc); } if (vma->anon_vma) vma->anon_vma->degree--; unlock_anon_vma_root(root); /* * Iterate the list once more, it now only contains empty and unlinked * anon_vmas, destroy them. Could not do before due to __put_anon_vma() * needing to write-acquire the anon_vma->root->rwsem. */ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; VM_WARN_ON(anon_vma->degree); put_anon_vma(anon_vma); list_del(&avc->same_vma); anon_vma_chain_free(avc); } }

Contributors

PersonTokensPropCommitsCommitProp
peter zijlstrapeter zijlstra6539.39%112.50%
andrew mortonandrew morton3320.00%112.50%
rik van rielrik van riel3018.18%225.00%
konstantin khlebnikovkonstantin khlebnikov2917.58%225.00%
michel lespinassemichel lespinasse74.24%112.50%
ingo molnaringo molnar10.61%112.50%
Total165100.00%8100.00%


static void anon_vma_ctor(void *data) { struct anon_vma *anon_vma = data; init_rwsem(&anon_vma->rwsem); atomic_set(&anon_vma->refcount, 0); anon_vma->rb_root = RB_ROOT; }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton2356.10%116.67%
peter zijlstrapeter zijlstra614.63%116.67%
mel gormanmel gorman49.76%116.67%
michel lespinassemichel lespinasse37.32%116.67%
christoph lameterchristoph lameter37.32%116.67%
ingo molnaringo molnar24.88%116.67%
Total41100.00%6100.00%


void __init anon_vma_init(void) { anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma), 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT, anon_vma_ctor); anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC|SLAB_ACCOUNT); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton2764.29%125.00%
rik van rielrik van riel921.43%125.00%
vladimir davydovvladimir davydov49.52%125.00%
hugh dickinshugh dickins24.76%125.00%
Total42100.00%4100.00%

/* * Getting a lock on a stable anon_vma from a page off the LRU is tricky! * * Since there is no serialization whatsoever against page_remove_rmap(), * the best this function can do is return a locked anon_vma that might * have been relevant to this page. * * The page might have been remapped to a different anon_vma or the anon_vma * returned may already be freed (and even reused). * * In case it was remapped to a different anon_vma, the new anon_vma will be a * child of the old anon_vma, and the anon_vma lifetime rules will therefore * ensure that any anon_vma obtained from the page will still be valid for as * long as we observe page_mapped() [ hence all those page_mapped() tests ]. * * All users of this function must be very careful when walking the anon_vma * chain and verify that the page in question is indeed mapped in it * [ something equivalent to page_mapped_in_vma() ]. * * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap() * that the anon_vma pointer from page->mapping is valid if there is a * mapcount, we can dereference the anon_vma after observing those. */
struct anon_vma *page_get_anon_vma(struct page *page) { struct anon_vma *anon_vma = NULL; unsigned long anon_mapping; rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(page->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!page_mapped(page)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } /* * If this page is still mapped, then its anon_vma cannot have been * freed. But if it has been unmapped, we have no security against the * anon_vma structure being freed and reused (for another anon_vma: * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero() * above cannot corrupt). */ if (!page_mapped(page)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } out: rcu_read_unlock(); return anon_vma; }

Contributors

PersonTokensPropCommitsCommitProp
hugh dickinshugh dickins9776.98%571.43%
peter zijlstrapeter zijlstra2822.22%114.29%
jason lowjason low10.79%114.29%
Total126100.00%7100.00%
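
The comment above page_get_anon_vma() leans on one pattern worth spelling out: an object found under rcu_read_lock() may already be on its way to being freed, so the lookup may only pin it with atomic_inc_not_zero(), which refuses to take a reference once the count has dropped to zero. The following is a small, self-contained C11 sketch of that increment-unless-zero operation; it is a userspace illustration of the idea, not the kernel's implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper modelled on atomic_inc_not_zero(): take a reference
 * only if the object still has one, i.e. never resurrect a zero count. */
static bool ref_get_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* Try old -> old + 1; on failure 'old' is reloaded for us. */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int live = 1;	/* still referenced elsewhere */
	atomic_int dying = 0;	/* last reference already dropped */

	printf("live object pinned:  %d\n", ref_get_unless_zero(&live));	/* 1 */
	printf("dying object pinned: %d\n", ref_get_unless_zero(&dying));	/* 0 */
	return 0;
}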

/* * Similar to page_get_anon_vma() except it locks the anon_vma. * * It's a little more complex as it tries to keep the fast path to a single * atomic op -- the trylock. If we fail the trylock, we fall back to getting a * reference like with page_get_anon_vma() and then block on the mutex. */
struct anon_vma *page_lock_anon_vma_read(struct page *page) { struct anon_vma *anon_vma = NULL; struct anon_vma *root_anon_vma; unsigned long anon_mapping; rcu_read_lock(); anon_mapping = (unsigned long)READ_ONCE(page->mapping); if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) goto out; if (!page_mapped(page)) goto out; anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON); root_anon_vma = READ_ONCE(anon_vma->root); if (down_read_trylock(&root_anon_vma->rwsem)) { /* * If the page is still mapped, then this anon_vma is still * its anon_vma, and holding the mutex ensures that it will * not go away, see anon_vma_free(). */ if (!page_mapped(page)) { up_read(&root_anon_vma->rwsem); anon_vma = NULL; } goto out; } /* trylock failed, we got to sleep */ if (!atomic_inc_not_zero(&anon_vma->refcount)) { anon_vma = NULL; goto out; } if (!page_mapped(page)) { rcu_read_unlock(); put_anon_vma(anon_vma); return NULL; } /* we pinned the anon_vma, its safe to sleep */ rcu_read_unlock(); anon_vma_lock_read(anon_vma); if (atomic_dec_and_test(&anon_vma->refcount)) { /* * Oops, we held the last refcount, release the lock * and bail -- can't simply use put_anon_vma() because * we'll deadlock on the anon_vma_lock_write() recursion. */ anon_vma_unlock_read(anon_vma); __put_anon_vma(anon_vma); anon_vma = NULL; } return anon_vma; out: rcu_read_unlock(); return anon_vma; }

Contributors

PersonTokensPropCommitsCommitProp
peter zijlstrapeter zijlstra18183.41%333.33%
hugh dickinshugh dickins2611.98%333.33%
ingo molnaringo molnar83.69%222.22%
jason lowjason low20.92%111.11%
Total217100.00%9100.00%


void page_unlock_anon_vma_read(struct anon_vma *anon_vma) { anon_vma_unlock_read(anon_vma); }

Contributors

PersonTokensPropCommitsCommitProp
oleg nesterovoleg nesterov1280.00%133.33%
ingo molnaringo molnar213.33%133.33%
peter zijlstrapeter zijlstra16.67%133.33%
Total15100.00%3100.00%

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH /* * Flush TLB entries for recently unmapped pages from remote CPUs. It is * important if a PTE was dirty when it was unmapped that it's flushed * before any IO is initiated on the page to prevent lost writes. Similarly, * it must be flushed before freeing to prevent data leakage. */
void try_to_unmap_flush(void) { struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; int cpu; if (!tlb_ubc->flush_required) return; cpu = get_cpu(); if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) { count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); local_flush_tlb(); trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); } if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL); cpumask_clear(&tlb_ubc->cpumask); tlb_ubc->flush_required = false; tlb_ubc->writable = false; put_cpu(); }

Contributors

PersonTokensPropCommitsCommitProp
mel gormanmel gorman9584.07%266.67%
nadav amitnadav amit1815.93%133.33%
Total113100.00%3100.00%

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void) { struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; if (tlb_ubc->writable) try_to_unmap_flush(); }

Contributors

PersonTokensPropCommitsCommitProp
mel gormanmel gorman26100.00%1100.00%
Total26100.00%1100.00%


static void set_tlb_ubc_flush_pending(struct mm_struct *mm, struct page *page, bool writable) { struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc; cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm)); tlb_ubc->flush_required = true; /* * If the PTE was dirty then it's best to assume it's writable. The * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush() * before the page is queued for IO. */ if (writable) tlb_ubc->writable = true; }

Contributors

PersonTokensPropCommitsCommitProp
mel gormanmel gorman64100.00%2100.00%
Total64100.00%2100.00%

/* * Returns true if the TLB flush should be deferred to the end of a batch of * unmap operations to reduce IPIs. */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { bool should_defer = false; if (!(flags & TTU_BATCH_FLUSH)) return false; /* If remote CPUs need to be flushed then defer batch the flush */ if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) should_defer = true; put_cpu(); return should_defer; }

Contributors

PersonTokensPropCommitsCommitProp
mel gormanmel gorman58100.00%1100.00%
Total58100.00%1100.00%

#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, struct page *page, bool writable) { }

Contributors

PersonTokensPropCommitsCommitProp
mel gormanmel gorman18100.00%2100.00%
Total18100.00%2100.00%


static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags) { return false; }

Contributors

PersonTokensPropCommitsCommitProp
mel gormanmel gorman18100.00%1100.00%
Total18100.00%1100.00%

#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ /* * At what user virtual address is page expected in vma? * Caller should check the page is actually part of the vma. */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) { unsigned long address; if (PageAnon(page)) { struct anon_vma *page__anon_vma = page_anon_vma(page); /* * Note: swapoff's unuse_vma() is more efficient with this * check, and needs it to match anon_vma when KSM is active. */ if (!vma->anon_vma || !page__anon_vma || vma->anon_vma->root != page__anon_vma->root) return -EFAULT; } else if (page->mapping) { if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) return -EFAULT; } else return -EFAULT; address = __vma_address(page, vma); if (unlikely(address < vma->vm_start || address >= vma->vm_end)) return -EFAULT; return address; }

Contributors

PersonTokensPropCommitsCommitProp
hugh dickinshugh dickins8463.64%360.00%
michel lespinassemichel lespinasse3123.48%120.00%
andrea arcangeliandrea arcangeli1712.88%120.00%
Total132100.00%5100.00%
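
page_address_in_vma() above boils down to one piece of arithmetic, done by the __vma_address() helper it calls (defined outside this file): take the page's index in the mapping, subtract the VMA's starting file offset in pages, shift by the page size, and add vm_start; the result is then range-checked against the VMA. A tiny worked example of that calculation follows, assuming 4 KiB pages; all the numeric values are made up for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumes 4 KiB pages */

int main(void)
{
	unsigned long vm_start = 0x400000;	/* user address where the VMA starts */
	unsigned long vm_pgoff = 0x10;		/* file offset of vm_start, in pages */
	unsigned long pgoff    = 0x13;		/* page's index within the mapping */

	/* Sketch of the __vma_address() arithmetic used above. */
	unsigned long address = vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT);

	printf("page expected at user address 0x%lx\n", address);	/* 0x403000 */
	return 0;
}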


pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address) { pgd_t *pgd; pud_t *pud; pmd_t *pmd = NULL; pmd_t pmde; pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) goto out; pud = pud_offset(pgd, address); if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); /* * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() * without holding anon_vma lock for write. So when looking for a * genuine pmde (in which to find pte), test present and !THP together. */ pmde = *pmd; barrier(); if (!pmd_present(pmde) || pmd_trans_huge(pmde)) pmd = NULL; out: return pmd; }

Contributors

PersonTokensPropCommitsCommitProp
bob liubob liu9684.21%125.00%
hugh dickinshugh dickins1412.28%125.00%
christian borntraegerchristian borntraeger32.63%125.00%
aneesh kumaraneesh kumar10.88%125.00%
Total114100.00%4100.00%

/* * Check that @page is mapped at @address into @mm. * * If @sync is false, page_check_address may perform a racy check to avoid * the page table lock when the pte is not present (helpful when reclaiming * highly shared pages). * * On success returns with pte mapped and locked. */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm, unsigned long address, spinlock_t **ptlp, int sync) { pmd_t *pmd; pte_t *pte; spinlock_t *ptl; if (unlikely(PageHuge(page))) { /* when pud is not present, pte will be NULL */ pte = huge_pte_offset(mm, address); if (!pte) return NULL; ptl = huge_pte_lockptr(page_hstate(page), mm, pte); goto check; } pmd = mm_find_pmd(mm, address); if (!pmd) return NULL; pte = pte_offset_map(pmd, address); /* Make a quick check before getting the lock */ if (!sync && !pte_present(*pte)) { pte_unmap(pte); return NULL; } ptl = pte_lockptr(mm, pmd); check: spin_lock(ptl); if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) { *ptlp = ptl; return pte; } pte_unmap_unlock(pte, ptl); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
hugh dickinshugh dickins5729.84%214.29%
andrew mortonandrew morton5729.84%535.71%
naoya horiguchinaoya horiguchi3015.71%17.14%
nikita danilovnikita danilov199.95%17.14%
kirill a. shutemovkirill a. shutemov105.24%17.14%
jianguo wujianguo wu94.71%17.14%
nick pigginnick piggin63.14%17.14%
bob liubob liu21.05%17.14%
namhyung kimnamhyung kim10.52%17.14%
Total191100.00%14100.00%

/** * page_mapped_in_vma - check whether a page is really mapped in a VMA * @page: the page to test * @vma: the VMA to test * * Returns 1 if the page is mapped into the page tables of the VMA, 0 * if the page is not mapped into the page tables of this VMA. Only * valid for normal file or anonymous VMAs. */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) { unsigned long address; pte_t *pte; spinlock_t *ptl; address = __vma_address(page, vma); if (unlikely(address < vma->vm_start || address >= vma->vm_end)) return 0; pte = page_check_address(page, vma->vm_mm, address, &ptl, 1); if (!pte) /* the page is not in this mm */ return 0; pte_unmap_unlock(pte, ptl); return 1; }

Contributors

PersonTokensPropCommitsCommitProp
nick pigginnick piggin7984.95%150.00%
michel lespinassemichel lespinasse1415.05%150.00%
Total93100.00%2100.00%

#ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * Check that @page is mapped at @address into @mm. In contrast to * page_check_address(), this function can handle transparent huge pages. * * On success returns true with pte mapped and locked. For PMD-mapped * transparent huge pages *@ptep is set to NULL. */
bool page_check_address_transhuge(struct page *page, struct mm_struct *mm, unsigned long address, pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; if (unlikely(PageHuge(page))) { /* when pud is not present, pte will be NULL */ pte = huge_pte_offset(mm, address); if (!pte) return false; ptl = huge_pte_lockptr(page_hstate(page), mm, pte); pmd = NULL; goto check_pte; } pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) return false; pud = pud_offset(pgd, address); if (!pud_present(*pud)) return false; pmd = pmd_offset(pud, address); if (pmd_trans_huge(*pmd)) { ptl = pmd_lock(mm, pmd); if (!pmd_present(*pmd)) goto unlock_pmd; if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); goto map_pte; } if (pmd_page(*pmd) != page) goto unlock_pmd; pte = NULL; goto found; unlock_pmd: spin_unlock(ptl); return false; } else { pmd_t pmde = *pmd; barrier(); if (!pmd_present(pmde) || pmd_trans_huge(pmde)) return false; } map_pte: pte = pte_offset_map(pmd, address); if (!pte_present(*pte)) { pte_unmap(pte); return false; } ptl = pte_lockptr(mm, pmd); check_pte: spin_lock(ptl); if (!pte_present(*pte)) { pte_unmap_unlock(pte, ptl); return false; } /* THP can be referenced by any subpage */ if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) { pte_unmap_unlock(pte, ptl); return false; } found: *ptep = pte; *pmdp = pmd; *ptlp = ptl; return true; }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov26067.53%218.18%
vladimir davydovvladimir davydov5414.03%19.09%
andrea arcangeliandrea arcangeli369.35%218.18%
nikita danilovnikita danilov246.23%19.09%
joonsoo kimjoonsoo kim51.30%19.09%
hugh dickinshugh dickins41.04%218.18%
nick pigginnick piggin10.26%19.09%
fengguang wufengguang wu10.26%19.09%
Total385100.00%11100.00%

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ struct page_referenced_arg { int mapcount; int referenced; unsigned long vm_flags; struct mem_cgroup *memcg; }; /* * arg: page_referenced_arg will be passed */
static int page_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; struct page_referenced_arg *pra = arg; pmd_t *pmd; pte_t *pte; spinlock_t *ptl; int referenced = 0; if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl)) return SWAP_AGAIN; if (vma->vm_flags & VM_LOCKED) { if (pte) pte_unmap(pte); spin_unlock(ptl); pra->vm_flags |= VM_LOCKED; return SWAP_FAIL; /* To break the loop */ } if (pte) { if (ptep_clear_flush_young_notify(vma, address, pte)) { /* * Don't treat a reference through a sequentially read * mapping as such. If the page has been used in * another mapping, we will catch it; if this other * mapping is already gone, the unmap path will have * set PG_referenced or activated the page. */ if (likely(!(vma->vm_flags & VM_SEQ_READ))) referenced++; } pte_unmap(pte); } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_flush_young_notify(vma, address, pmd)) referenced++; } else { /* unexpected pmd-mapped page? */ WARN_ON_ONCE(1); } spin_unlock(ptl); if (referenced) clear_page_idle(page); if (test_and_clear_page_young(page)) referenced++; if (referenced) { pra->referenced++; pra->vm_flags |= vma->vm_flags; } pra->mapcount--; if (!pra->mapcount) return SWAP_SUCCESS; /* To break the loop */ return SWAP_AGAIN; }

Contributors

PersonTokensPropCommitsCommitProp
vladimir davydovvladimir davydov15260.32%212.50%
joonsoo kimjoonsoo kim5019.84%16.25%
andrea arcangeliandrea arcangeli207.94%212.50%
andrew mortonandrew morton155.95%531.25%
hugh dickinshugh dickins62.38%212.50%
balbir singhbalbir singh41.59%16.25%
johannes weinerjohannes weiner31.19%16.25%
fengguang wufengguang wu10.40%16.25%
david rientjesdavid rientjes10.40%16.25%
Total252100.00%16100.00%


static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) { struct page_referenced_arg *pra = arg; struct mem_cgroup *memcg = pra->memcg; if (!mm_match_cgroup(vma->vm_mm, memcg)) return true; return false; }

Contributors

PersonTokensPropCommitsCommitProp
joonsoo kimjoonsoo kim4387.76%125.00%
andrew mortonandrew morton612.24%375.00%
Total49100.00%4100.00%

/** * page_referenced - test if the page was referenced * @page: the page to test * @is_locked: caller holds lock on the page * @memcg: target memory cgroup * @vm_flags: collect encountered vma->vm_flags who actually referenced the page * * Quick test_and_clear_referenced for all mappings to a page, * returns the number of ptes which referenced the page. */
int page_referenced(struct page *page, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) { int ret; int we_locked = 0; struct page_referenced_arg pra = { .mapcount = total_mapcount(page), .memcg = memcg, }; struct rmap_walk_control rwc = { .rmap_one = page_referenced_one, .arg = (void *)&pra, .anon_lock = page_lock_anon_vma_read, }; *vm_flags = 0; if (!page_mapped(page)) return 0; if (!page_rmapping(page)) return 0; if (!is_locked && (!PageAnon(page) || PageKsm(page))) { we_locked = trylock_page(page); if (!we_locked) return 1; } /* * If we are reclaiming on behalf of a cgroup, skip * counting on behalf of references from different * cgroups */ if (memcg) { rwc.invalid_vma = invalid_page_referenced_vma; } ret = rmap_walk(page, &rwc); *vm_flags = pra.vm_flags; if (we_locked) unlock_page(page); return pra.referenced; }

Contributors

PersonTokensPropCommitsCommitProp
joonsoo kimjoonsoo kim8043.96%17.14%
hugh dickinshugh dickins5228.57%321.43%
andrew mortonandrew morton3217.58%642.86%
fengguang wufengguang wu116.04%17.14%
balbir singhbalbir singh42.20%17.14%
johannes weinerjohannes weiner21.10%17.14%
kirill a. shutemovkirill a. shutemov10.55%17.14%
Total182100.00%14100.00%


static int page_mkclean_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; pte_t *pte; spinlock_t *ptl; int ret = 0; int *cleaned = arg; pte = page_check_address(page, mm, address, &ptl, 1); if (!pte) goto out; if (pte_dirty(*pte) || pte_write(*pte)) { pte_t entry; flush_cache_page(vma, address, pte_pfn(*pte)); entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); set_pte_at(mm, address, pte, entry); ret = 1; } pte_unmap_unlock(pte, ptl); if (ret) { mmu_notifier_invalidate_page(mm, address); (*cleaned)++; } out: return SWAP_AGAIN; }

Contributors

PersonTokensPropCommitsCommitProp
peter zijlstrapeter zijlstra14178.77%228.57%
joonsoo kimjoonsoo kim1910.61%114.29%
sagi grimbergsagi grimberg126.70%114.29%
hugh dickinshugh dickins42.23%114.29%
nick pigginnick piggin21.12%114.29%
al viroal viro10.56%114.29%
Total179100.00%7100.00%


static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) { if (vma->vm_flags & VM_SHARED) return false; return true; }

Contributors

PersonTokensPropCommitsCommitProp
peter zijlstrapeter zijlstra1965.52%133.33%
joonsoo kimjoonsoo kim827.59%133.33%
fengguang wufengguang wu26.90%133.33%
Total29100.00%3100.00%


int page_mkclean(struct page *page) { int cleaned = 0; struct address_space *mapping; struct rmap_walk_control rwc = { .arg = (void *)&cleaned, .rmap_one = page_mkclean_one, .invalid_vma = invalid_mkclean_vma, }; BUG_ON(!PageLocked(page)); if (!page_mapped(page)) return 0; mapping = page_mapping(page); if (!mapping) return 0; rmap_walk(page, &rwc); return cleaned; }

Contributors

PersonTokensPropCommitsCommitProp
joonsoo kimjoonsoo kim4751.09%150.00%
peter zijlstrapeter zijlstra4548.91%150.00%
Total92100.00%2100.00%

EXPORT_SYMBOL_GPL(page_mkclean); /** * page_move_anon_rmap - move a page to our anon_vma * @page: the page to move to our anon_vma * @vma: the vma the page belongs to * * When a page belongs exclusively to one process after a COW event, * that page can be moved into the anon_vma that belongs to just that * process, so the rmap code will not search the parent or sibling * processes. */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) { struct anon_vma *anon_vma = vma->anon_vma; page = compound_head(page); VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_VMA(!anon_vma, vma); anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; /* * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written * simultaneously, so a concurrent reader (eg page_referenced()'s * PageAnon()) will not see one without the other. */ WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); }

Contributors

PersonTokensPropCommitsCommitProp
rik van rielrik van riel5776.00%120.00%
hugh dickinshugh dickins79.33%120.00%
sasha levinsasha levin68.00%240.00%
vladimir davydovvladimir davydov56.67%120.00%
Total75100.00%5100.00%

/** * __page_set_anon_rmap - set up new anonymous rmap * @page: Page to add to rmap * @vma: VM area to add page to. * @address: User virtual address of the mapping * @exclusive: the page is exclusively owned by the current process */
static void __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); if (PageAnon(page)) return; /* * If the page isn't exclusively mapped into this vma, * we must use the _oldest_ possible anon_vma for the * page mapping! */ if (!exclusive) anon_vma = anon_vma->root; anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; page->index = linear_page_index(vma, address); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton3336.67%433.33%
nick pigginnick piggin1921.11%325.00%
hugh dickinshugh dickins1011.11%18.33%
andrea arcangeliandrea arcangeli88.89%18.33%
linus torvaldslinus torvalds77.78%18.33%
rik van rielrik van riel77.78%18.33%
andi kleenandi kleen66.67%18.33%
Total90100.00%12100.00%

/** * __page_check_anon_rmap - sanity check anonymous rmap addition * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped */
static void __page_check_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { #ifdef CONFIG_DEBUG_VM /* * The page's anon-rmap details (mapping and index) are guaranteed to * be set up correctly at this point. * * We have exclusion against page_add_anon_rmap because the caller * always holds the page locked, except if called from page_dup_rmap, * in which case the page is already known to be setup. * * We have exclusion against page_add_new_anon_rmap because those pages * are initially only visible via the pagetables, and the pte is locked * over the call to page_add_new_anon_rmap. */ BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address)); #endif }

Contributors

PersonTokensPropCommitsCommitProp
nick pigginnick piggin3866.67%133.33%
andrea arcangeliandrea arcangeli1628.07%133.33%
kirill a. shutemovkirill a. shutemov35.26%133.33%
Total57100.00%3100.00%

/** * page_add_anon_rmap - add pte mapping to an anonymous page * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * @compound: charge the page as compound or small page * * The caller needs to hold the pte lock, and the page must be locked in * the anon_vma case: to serialize mapping,index checking after setting, * and to ensure that PageAnon is not being upgraded racily to PageKsm * (but PageKsm is never downgraded to PageAnon). */
void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, bool compound) { do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); }

Contributors

PersonTokensPropCommitsCommitProp
nick pigginnick piggin1848.65%133.33%
rik van rielrik van riel1232.43%133.33%
kirill a. shutemovkirill a. shutemov718.92%133.33%
Total37100.00%3100.00%

/* * Special version of the above for do_swap_page, which often runs * into pages that are exclusively owned by the current process. * Everybody else should continue to use page_add_anon_rmap above. */
void do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int flags) { bool compound = flags & RMAP_COMPOUND; bool first; if (compound) { atomic_t *mapcount; VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageTransHuge(page), page); mapcount = compound_mapcount_ptr(page); first = atomic_inc_and_test(mapcount); } else { first = atomic_inc_and_test(&page->_mapcount); } if (first) { int nr = compound ? hpage_nr_pages(page) : 1; /* * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and * pte lock(a spinlock) is held, which implies preemption * disabled. */ if (compound) { __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); } __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr); } if (unlikely(PageKsm(page))) return; VM_BUG_ON_PAGE(!PageLocked(page), page); /* address might be in next vma when migration races vma_adjust */ if (first) __page_set_anon_rmap(page, vma, address, flags & RMAP_EXCLUSIVE); else __page_check_anon_rmap(page, vma, address); }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov9450.81%428.57%
nick pigginnick piggin3016.22%214.29%
hugh dickinshugh dickins2614.05%214.29%
rik van rielrik van riel2111.35%214.29%
andrea arcangeliandrea arcangeli94.86%17.14%
sasha levinsasha levin31.62%17.14%
jianyu zhanjianyu zhan10.54%17.14%
andrew mortonandrew morton10.54%17.14%
Total185100.00%14100.00%

/** * page_add_new_anon_rmap - add pte mapping to a new anonymous page * @page: the page to add the mapping to * @vma: the vm area in which the mapping is added * @address: the user virtual address mapped * @compound: charge the page as compound or small page * * Same as page_add_anon_rmap but must only be called on *new* pages. * This means the inc-and-test can be bypassed. * Page does not have to be locked. */
void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, bool compound) { int nr = compound ? hpage_nr_pages(page) : 1; VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); __SetPageSwapBacked(page); if (compound) { VM_BUG_ON_PAGE(!PageTransHuge(page), page); /* increment count (starts at -1) */ atomic_set(compound_mapcount_ptr(page), 0); __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); } else { /* Anon THP always mapped first with PMD */ VM_BUG_ON_PAGE(PageTransCompound(page), page); /* increment count (starts at -1) */ atomic_set(&page->_mapcount, 0); } __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, nr); __page_set_anon_rmap(page, vma, address, 1); }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov7050.36%327.27%
nick pigginnick piggin4230.22%218.18%
andrea arcangeliandrea arcangeli117.91%19.09%
hugh dickinshugh dickins117.91%327.27%
sasha levinsasha levin32.16%19.09%
rik van rielrik van riel21.44%19.09%
Total139100.00%11100.00%
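
The "(starts at -1)" comments in page_add_new_anon_rmap() above, and the atomic_add_negative(-1, ...) calls in the removal paths further down, rely on the same bias trick: with _mapcount initialised to -1, the mapper whose increment lands on zero is the first mapping, and the unmapper whose decrement goes negative is the last one. Here is a hedged userspace illustration of those two tests; the helper names are made up, modelled on atomic_inc_and_test() and atomic_add_negative().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns true if the increment brought the counter to exactly zero. */
static bool inc_and_test(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1 == 0;
}

/* Returns true if adding i made the counter negative. */
static bool add_negative(int i, atomic_int *v)
{
	return atomic_fetch_add(v, i) + i < 0;
}

int main(void)
{
	atomic_int mapcount = -1;	/* "no mappings yet" */

	printf("first map:  %d\n", inc_and_test(&mapcount));		/* 1: page became mapped */
	printf("second map: %d\n", inc_and_test(&mapcount));		/* 0 */
	printf("unmap:      %d\n", add_negative(-1, &mapcount));	/* 0 */
	printf("last unmap: %d\n", add_negative(-1, &mapcount));	/* 1: page fully unmapped */
	return 0;
}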

/** * page_add_file_rmap - add pte mapping to a file page * @page: the page to add the mapping to * * The caller needs to hold the pte lock. */
void page_add_file_rmap(struct page *page) { lock_page_memcg(page); if (atomic_inc_and_test(&page->_mapcount)) { __inc_zone_page_state(page, NR_FILE_MAPPED); mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); } unlock_page_memcg(page); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton1839.13%433.33%
kamezawa hiroyukikamezawa hiroyuki715.22%18.33%
balbir singhbalbir singh613.04%18.33%
hugh dickinshugh dickins510.87%18.33%
johannes weinerjohannes weiner48.70%216.67%
christoph lameterchristoph lameter48.70%18.33%
sha zhengjusha zhengju12.17%18.33%
greg thelengreg thelen12.17%18.33%
Total46100.00%12100.00%


static void page_remove_file_rmap(struct page *page) { lock_page_memcg(page); /* Hugepages are not counted in NR_FILE_MAPPED for now. */ if (unlikely(PageHuge(page))) { /* hugetlb pages are always mapped with pmds */ atomic_dec(compound_mapcount_ptr(page)); goto out; } /* page still mapped by someone else? */ if (!atomic_add_negative(-1, &page->_mapcount)) goto out; /* * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and * pte lock(a spinlock) is held, which implies preemption disabled. */ __dec_zone_page_state(page, NR_FILE_MAPPED); mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); if (unlikely(PageMlocked(page))) clear_page_mlock(page); out: unlock_page_memcg(page); }

Contributors

PersonTokensPropCommitsCommitProp
johannes weinerjohannes weiner4142.71%333.33%
kirill a. shutemovkirill a. shutemov2526.04%111.11%
andrew mortonandrew morton1212.50%222.22%
kamezawa hiroyukikamezawa hiroyuki1010.42%111.11%
naoya horiguchinaoya horiguchi66.25%111.11%
hugh dickinshugh dickins22.08%111.11%
Total96100.00%9100.00%


static void page_remove_anon_compound_rmap(struct page *page) { int i, nr; if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) return; /* Hugepages are not counted in NR_ANON_PAGES for now. */ if (unlikely(PageHuge(page))) return; if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return; __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); if (TestClearPageDoubleMap(page)) { /* * Subpages can be mapped with PTEs too. Check how many of * them are still mapped. */ for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) { if (atomic_add_negative(-1, &page[i]._mapcount)) nr++; } } else { nr = HPAGE_PMD_NR; } if (unlikely(PageMlocked(page))) clear_page_mlock(page); if (nr) { __mod_zone_page_state(page_zone(page), NR_ANON_PAGES, -nr); deferred_split_huge_page(page); } }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov11675.82%466.67%
johannes weinerjohannes weiner3422.22%116.67%
kamezawa hiroyukikamezawa hiroyuki31.96%116.67%
Total153100.00%6100.00%

/** * page_remove_rmap - take down pte mapping from a page * @page: page to remove mapping from * @compound: uncharge the page as compound or small page * * The caller needs to hold the pte lock. */
void page_remove_rmap(struct page *page, bool compound) { if (!PageAnon(page)) { VM_BUG_ON_PAGE(compound && !PageHuge(page), page); page_remove_file_rmap(page); return; } if (compound) return page_remove_anon_compound_rmap(page); /* page still mapped by someone else? */ if (!atomic_add_negative(-1, &page->_mapcount)) return; /* * We use the irq-unsafe __{inc|mod}_zone_page_stat because * these counters are not modified in interrupt context, and * pte lock(a spinlock) is held, which implies preemption disabled. */ __dec_zone_page_state(page, NR_ANON_PAGES); if (unlikely(PageMlocked(page))) clear_page_mlock(page); if (PageTransCompound(page)) deferred_split_huge_page(compound_head(page)); /* * It would be tidy to reset the PageAnon mapping here, * but that might overwrite a racing page_add_anon_rmap * which increments mapcount after us but sets mapping * before us: so leave the reset to free_hot_cold_page, * and remember that it's only reliable while mapped. * Leaving it set also helps swapoff to reinstate ptes * faster for those pages still in swapcache. */ }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov7671.03%440.00%
hugh dickinshugh dickins1514.02%110.00%
andrea arcangeliandrea arcangeli109.35%110.00%
kosaki motohirokosaki motohiro32.80%110.00%
andrew mortonandrew morton21.87%220.00%
christoph lameterchristoph lameter10.93%110.00%
Total107100.00%10100.00%

struct rmap_private { enum ttu_flags flags; int lazyfreed; }; /* * @arg: enum ttu_flags will be passed to this argument */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) { struct mm_struct *mm = vma->vm_mm; pte_t *pte; pte_t pteval; spinlock_t *ptl; int ret = SWAP_AGAIN; struct rmap_private *rp = arg; enum ttu_flags flags = rp->flags; /* munlock has nothing to gain from examining un-locked vmas */ if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED)) goto out; if (flags & TTU_SPLIT_HUGE_PMD) { split_huge_pmd_address(vma, address, flags & TTU_MIGRATION, page); /* check if we have anything to do after split */ if (page_mapcount(page) == 0) goto out; } pte = page_check_address(page, mm, address, &ptl, PageTransCompound(page)); if (!pte) goto out; /* * If the page is mlock()d, we cannot swap it out. * If it's recently referenced (perhaps page_referenced * skipped over this mm) then we should reactivate it. */ if (!(flags & TTU_IGNORE_MLOCK)) { if (vma->vm_flags & VM_LOCKED) { /* Holding pte lock, we do *not* need mmap_sem here */ mlock_vma_page(page); ret = SWAP_MLOCK; goto out_unmap; } if (flags & TTU_MUNLOCK) goto out_unmap; } if (!(flags & TTU_IGNORE_ACCESS)) { if (ptep_clear_flush_young_notify(vma, address, pte)) { ret = SWAP_FAIL; goto out_unmap; } } /* Nuke the page table entry. */ flush_cache_page(vma, address, page_to_pfn(page)); if (should_defer_flush(mm, flags)) { /* * We clear the PTE but do not flush so potentially a remote * CPU could still be writing to the page. If the entry was * previously clean then the architecture must guarantee that * a clear->dirty transition on a cached TLB entry is written * through and traps if the PTE is unmapped. */ pteval = ptep_get_and_clear(mm, address, pte); set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval)); } else { pteval = ptep_clear_flush(vma, address, pte); } /* Move the dirty bit to the physical page now the pte is gone. */ if (pte_dirty(pteval)) set_page_dirty(page); /* Update high watermark before we lower rss */ update_hiwater_rss(mm); if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { if (PageHuge(page)) { hugetlb_count_sub(1 << compound_order(page), mm); } else { dec_mm_counter(mm, mm_counter(page)); } set_pte_at(mm, address, pte, swp_entry_to_pte(make_hwpoison_entry(page))); } else if (pte_unused(pteval)) { /* * The guest indicated that the page content is of no * interest anymore. Simply discard the pte, vmscan * will take care of the rest. */ dec_mm_counter(mm, mm_counter(page)); } else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) { swp_entry_t entry; pte_t swp_pte; /* * Store the pfn of the page in a special migration * pte. do_swap_page() will wait until the migration * pte is removed and then restart fault handling. */ entry = make_migration_entry(page, pte_write(pteval)); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); set_pte_at(mm, address, pte, swp_pte); } else if (PageAnon(page)) { swp_entry_t entry = { .val = page_private(page) }; pte_t swp_pte; /* * Store the swap location in the pte. * See handle_pte_fault() ... */ VM_BUG_ON_PAGE(!PageSwapCache(page), page); if (!PageDirty(page) && (flags & TTU_LZFREE)) { /* It's a freeable page by MADV_FREE */ dec_mm_counter(mm, MM_ANONPAGES); rp->lazyfreed++; goto discard; } if (swap_duplicate(entry) < 0) { set_pte_at(mm, address, pte, pteval); ret = SWAP_FAIL; goto out_unmap; } if (list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); if (list_empty(&mm->mmlist)) list_add(&mm->mmlist, &init_mm.mmlist); spin_unlock(&mmlist_lock); } dec_mm_counter(mm, MM_ANONPAGES); inc_mm_counter(mm, MM_SWAPENTS); swp_pte = swp_entry_to_pte(entry); if (pte_soft_dirty(pteval)) swp_pte = pte_swp_mksoft_dirty(swp_pte); set_pte_at(mm, address, pte, swp_pte); } else dec_mm_counter(mm, mm_counter_file(page)); discard: page_remove_rmap(page, PageHuge(page)); put_page(page); out_unmap: pte_unmap_unlock(pte, ptl); if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK)) mmu_notifier_invalidate_page(mm, address); out: return ret; }

Contributors

PersonTokensPropCommitsCommitProp
hugh dickinshugh dickins22531.47%1426.92%
andrew mortonandrew morton14820.70%47.69%
andi kleenandi kleen598.25%23.85%
minchan kimminchan kim446.15%11.92%
kirill a. shutemovkirill a. shutemov415.73%59.62%
mel gormanmel gorman385.31%23.85%
naoya horiguchinaoya horiguchi263.64%35.77%
cyrill gorcunovcyrill gorcunov253.50%11.92%
konstantin weitzkonstantin weitz182.52%11.92%
sagi grimbergsagi grimberg141.96%11.92%
nick pigginnick piggin111.54%23.85%
christoph lameterchristoph lameter111.54%47.69%
david s. millerdavid s. miller101.40%23.85%
jerome marchandjerome marchand101.40%11.92%
konstantin khlebnikovkonstantin khlebnikov81.12%23.85%
joonsoo kimjoonsoo kim81.12%11.92%
kamezawa hiroyukikamezawa hiroyuki81.12%23.85%
nikita danilovnikita danilov50.70%11.92%
kosaki motohirokosaki motohiro40.56%11.92%
bjorn steinbrinkbjorn steinbrink10.14%11.92%
andrea arcangeliandrea arcangeli10.14%11.92%
Total715100.00%52100.00%


bool is_vma_temporary_stack(struct vm_area_struct *vma) { int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP); if (!maybe_stack) return false; if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) == VM_STACK_INCOMPLETE_SETUP) return true; return false; }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov2244.90%125.00%
andrew mortonandrew morton1428.57%250.00%
nick pigginnick piggin1326.53%125.00%
Total49100.00%4100.00%


static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg) { return is_vma_temporary_stack(vma); }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov1780.95%150.00%
nick pigginnick piggin419.05%150.00%
Total21100.00%2100.00%


static int page_mapcount_is_zero(struct page *page) { return !page_mapcount(page); }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov1161.11%240.00%
joonsoo kimjoonsoo kim527.78%120.00%
andrew mortonandrew morton211.11%240.00%
Total18100.00%5100.00%

/** * try_to_unmap - try to remove all page table mappings to a page * @page: the page to get unmapped * @flags: action and flags * * Tries to remove all the page table entries which are mapping this * page, used in the pageout path. Caller must hold the page lock. * Return values are: * * SWAP_SUCCESS - we succeeded in removing all mappings * SWAP_AGAIN - we missed a mapping, try again later * SWAP_FAIL - the page is unswappable * SWAP_MLOCK - page is mlocked. */
int try_to_unmap(struct page *page, enum ttu_flags flags) { int ret; struct rmap_private rp = { .flags = flags, .lazyfreed = 0, }; struct rmap_walk_control rwc = { .rmap_one = try_to_unmap_one, .arg = &rp, .done = page_mapcount_is_zero, .anon_lock = page_lock_anon_vma_read, }; /* * During exec, a temporary VMA is setup and later moved. * The VMA is moved under the anon_vma lock but not the * page tables leading to a race where migration cannot * find the migration ptes. Rather than increasing the * locking requirements of exec(), migration skips * temporary VMAs until after exec() completes. */ if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page)) rwc.invalid_vma = invalid_migration_vma; if (flags & TTU_RMAP_LOCKED) ret = rmap_walk_locked(page, &rwc); else ret = rmap_walk(page, &rwc); if (ret != SWAP_MLOCK && !page_mapcount(page)) { ret = SWAP_SUCCESS; if (rp.lazyfreed && !PageDirty(page)) ret = SWAP_LZFREE; } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton4026.67%430.77%
joonsoo kimjoonsoo kim3825.33%17.69%
minchan kimminchan kim3624.00%17.69%
kirill a. shutemovkirill a. shutemov1912.67%17.69%
hugh dickinshugh dickins42.67%17.69%
nick pigginnick piggin42.67%17.69%
andi kleenandi kleen32.00%17.69%
rusty russellrusty russell21.33%17.69%
christoph lameterchristoph lameter21.33%17.69%
konstantin khlebnikovkonstantin khlebnikov21.33%17.69%
Total150100.00%13100.00%


static int page_not_mapped(struct page *page) { return !page_mapped(page); }

Contributors

PersonTokensPropCommitsCommitProp
kirill a. shutemovkirill a. shutemov18100.00%1100.00%
Total18100.00%1100.00%

/** * try_to_munlock - try to munlock a page * @page: the page to be munlocked * * Called from munlock code. Checks all of the VMAs mapping the page * to make sure nobody else has this page mlocked. The page will be * returned with PG_mlocked cleared if no other vmas have it mlocked. * * Return values are: * * SWAP_AGAIN - no vma is holding page mlocked, or, * SWAP_AGAIN - page mapped in mlocked vma -- couldn't acquire mmap sem * SWAP_FAIL - page cannot be located at present * SWAP_MLOCK - page is now mlocked. */
int try_to_munlock(struct page *page) { int ret; struct rmap_private rp = { .flags = TTU_MUNLOCK, .lazyfreed = 0, }; struct rmap_walk_control rwc = { .rmap_one = try_to_unmap_one, .arg = &rp, .done = page_not_mapped, .anon_lock = page_lock_anon_vma_read, }; VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); ret = rmap_walk(page, &rwc); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
joonsoo kimjoonsoo kim3541.18%125.00%
nick pigginnick piggin2934.12%125.00%
minchan kimminchan kim1821.18%125.00%
sasha levinsasha levin33.53%125.00%
Total85100.00%4100.00%


void __put_anon_vma(struct anon_vma *anon_vma) { struct anon_vma *root = anon_vma->root; anon_vma_free(anon_vma); if (root != anon_vma && atomic_dec_and_test(&root->refcount)) anon_vma_free(root); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
rik van riel | 32 | 74.42% | 1 | 25.00%
peter zijlstra | 6 | 13.95% | 2 | 50.00%
andrey ryabinin | 5 | 11.63% | 1 | 25.00%
Total | 43 | 100.00% | 4 | 100.00%
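__put_anon_vma() is only the slow path of the reference drop. For context, the fast-path wrapper lives in include/linux/rmap.h and, in this release, looks essentially like this:

/* Fast path: only enter mm/rmap.c once the last reference is gone. */
static inline void put_anon_vma(struct anon_vma *anon_vma)
{
        if (atomic_dec_and_test(&anon_vma->refcount))
                __put_anon_vma(anon_vma);
}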


static struct anon_vma *rmap_walk_anon_lock(struct page *page, struct rmap_walk_control *rwc) { struct anon_vma *anon_vma; if (rwc->anon_lock) return rwc->anon_lock(page); /* * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read() * because that depends on page_mapped(); but not all its usages * are holding mmap_sem. Users without mmap_sem are required to * take a reference count to prevent the anon_vma disappearing */ anon_vma = page_anon_vma(page); if (!anon_vma) return NULL; anon_vma_lock_read(anon_vma); return anon_vma; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
joonsoo kim | 61 | 100.00% | 2 | 100.00%
Total | 61 | 100.00% | 2 | 100.00%

/* * rmap_walk_anon - do something to anonymous page using the object-based * rmap method * @page: the page to be handled * @rwc: control variable according to each walk type * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the anon_vma struct it points to. * * When called from try_to_munlock(), the mmap_sem of the mm containing the vma * where the page was found will be held for write. So, we won't recheck * vm_flags for that VMA. That should be OK, because that vma shouldn't be * LOCKED. */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, bool locked) { struct anon_vma *anon_vma; pgoff_t pgoff; struct anon_vma_chain *avc; int ret = SWAP_AGAIN; if (locked) { anon_vma = page_anon_vma(page); /* anon_vma disappear under us? */ VM_BUG_ON_PAGE(!anon_vma, page); } else { anon_vma = rmap_walk_anon_lock(page, rwc); } if (!anon_vma) return ret; pgoff = page_to_pgoff(page); anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { struct vm_area_struct *vma = avc->vma; unsigned long address = vma_address(page, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; ret = rwc->rmap_one(page, vma, address, rwc->arg); if (ret != SWAP_AGAIN) break; if (rwc->done && rwc->done(page)) break; } if (!locked) anon_vma_unlock_read(anon_vma); return ret; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 85 | 44.74% | 1 | 10.00%
joonsoo kim | 41 | 21.58% | 3 | 30.00%
kirill a. shutemov | 33 | 17.37% | 1 | 10.00%
rik van riel | 12 | 6.32% | 1 | 10.00%
michel lespinasse | 8 | 4.21% | 1 | 10.00%
davidlohr bueso | 7 | 3.68% | 1 | 10.00%
andrea arcangeli | 3 | 1.58% | 1 | 10.00%
ingo molnar | 1 | 0.53% | 1 | 10.00%
Total | 190 | 100.00% | 10 | 100.00%
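Each hit in the anon_vma interval tree is converted to a user virtual address with vma_address() before rwc->rmap_one() is called. Assuming the usual helper in mm/internal.h, the arithmetic amounts to the following sketch (the function name here is illustrative):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Sketch of the vma_address() arithmetic: page offset -> user virtual address. */
static inline unsigned long example_vma_address(struct page *page,
                                                struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page);

        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}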

/* * rmap_walk_file - do something to file page using the object-based rmap method * @page: the page to be handled * @rwc: control variable according to each walk type * * Find all the mappings of a page using the mapping pointer and the vma chains * contained in the address_space struct it points to. * * When called from try_to_munlock(), the mmap_sem of the mm containing the vma * where the page was found will be held for write. So, we won't recheck * vm_flags for that VMA. That should be OK, because that vma shouldn't be * LOCKED. */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, bool locked) { struct address_space *mapping = page_mapping(page); pgoff_t pgoff; struct vm_area_struct *vma; int ret = SWAP_AGAIN; /* * The page lock not only makes sure that page->mapping cannot * suddenly be NULLified by truncation, it makes sure that the * structure at mapping cannot be freed and reused yet, * so we can safely take mapping->i_mmap_rwsem. */ VM_BUG_ON_PAGE(!PageLocked(page), page); if (!mapping) return ret; pgoff = page_to_pgoff(page); if (!locked) i_mmap_lock_read(mapping); vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { unsigned long address = vma_address(page, vma); cond_resched(); if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; ret = rwc->rmap_one(page, vma, address, rwc->arg); if (ret != SWAP_AGAIN) goto done; if (rwc->done && rwc->done(page)) goto done; } done: if (!locked) i_mmap_unlock_read(mapping); return ret; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 94 | 52.22% | 1 | 9.09%
joonsoo kim | 53 | 29.44% | 3 | 27.27%
kirill a. shutemov | 16 | 8.89% | 1 | 9.09%
davidlohr bueso | 10 | 5.56% | 3 | 27.27%
andrea arcangeli | 3 | 1.67% | 1 | 9.09%
sasha levin | 3 | 1.67% | 1 | 9.09%
michel lespinasse | 1 | 0.56% | 1 | 9.09%
Total | 180 | 100.00% | 11 | 100.00%


int rmap_walk(struct page *page, struct rmap_walk_control *rwc) { if (unlikely(PageKsm(page))) return rmap_walk_ksm(page, rwc); else if (PageAnon(page)) return rmap_walk_anon(page, rwc, false); else return rmap_walk_file(page, rwc, false); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
hugh dickins | 42 | 67.74% | 1 | 33.33%
kirill a. shutemov | 16 | 25.81% | 1 | 33.33%
joonsoo kim | 4 | 6.45% | 1 | 33.33%
Total | 62 | 100.00% | 3 | 100.00%
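rmap_walk() is the generic entry point: a caller fills in a struct rmap_walk_control and the walker dispatches on whether the page is KSM, anonymous or file-backed. A minimal, hypothetical client that merely counts the VMAs mapping a locked page could look like this (the callback and struct names are invented for the example):

#include <linux/mm.h>
#include <linux/rmap.h>

struct example_count_arg {
        int nr_vmas;
};

/* Invoked once per VMA that maps the page; SWAP_AGAIN keeps the walk going. */
static int example_count_one(struct page *page, struct vm_area_struct *vma,
                             unsigned long address, void *arg)
{
        struct example_count_arg *ca = arg;

        ca->nr_vmas++;
        return SWAP_AGAIN;
}

/* Counts the VMAs mapping a locked page by walking its reverse mappings. */
static int example_count_mappings(struct page *page)
{
        struct example_count_arg ca = { .nr_vmas = 0 };
        struct rmap_walk_control rwc = {
                .rmap_one = example_count_one,
                .arg = &ca,
                .anon_lock = page_lock_anon_vma_read,
        };

        rmap_walk(page, &rwc);
        return ca.nr_vmas;
}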

/* Like rmap_walk, but caller holds relevant rmap lock */
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) { /* no ksm support for now */ VM_BUG_ON_PAGE(PageKsm(page), page); if (PageAnon(page)) return rmap_walk_anon(page, rwc, true); else return rmap_walk_file(page, rwc, true); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
kirill a. shutemov | 42 | 77.78% | 1 | 33.33%
hugh dickins | 11 | 20.37% | 1 | 33.33%
joonsoo kim | 1 | 1.85% | 1 | 33.33%
Total | 54 | 100.00% | 3 | 100.00%

#ifdef CONFIG_HUGETLB_PAGE /* * The following three functions are for anonymous (private mapped) hugepages. * Unlike common anonymous pages, anonymous hugepages have no accounting code * and no lru code, because we handle hugepages differently from common pages. */
static void __hugepage_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) { struct anon_vma *anon_vma = vma->anon_vma; BUG_ON(!anon_vma); if (PageAnon(page)) return; if (!exclusive) anon_vma = anon_vma->root; anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; page->index = linear_page_index(vma, address); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
naoya horiguchi | 89 | 100.00% | 2 | 100.00%
Total | 89 | 100.00% | 2 | 100.00%


void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { struct anon_vma *anon_vma = vma->anon_vma; int first; BUG_ON(!PageLocked(page)); BUG_ON(!anon_vma); /* address might be in next vma when migration races vma_adjust */ first = atomic_inc_and_test(compound_mapcount_ptr(page)); if (first) __hugepage_set_anon_rmap(page, vma, address, 0); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
naoya horiguchi | 68 | 94.44% | 2 | 50.00%
kirill a. shutemov | 3 | 4.17% | 1 | 25.00%
hugh dickins | 1 | 1.39% | 1 | 25.00%
Total | 72 | 100.00% | 4 | 100.00%
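Both hugetlb helpers here key off the compound mapcount, which is stored biased by minus one: a prepared but unmapped hugepage reads -1, so the increment that reaches zero marks the first mapping. A small sketch of that check, with an illustrative name:

#include <linux/mm.h>

/* Sketch only: the -1 -> 0 transition identifies the first mapping. */
static bool example_hugepage_first_mapping(struct page *page)
{
        return atomic_inc_and_test(compound_mapcount_ptr(page));
}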


void hugepage_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { BUG_ON(address < vma->vm_start || address >= vma->vm_end); atomic_set(compound_mapcount_ptr(page), 0); __hugepage_set_anon_rmap(page, vma, address, 1); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
naoya horiguchi | 52 | 94.55% | 1 | 50.00%
kirill a. shutemov | 3 | 5.45% | 1 | 50.00%
Total | 55 | 100.00% | 2 | 100.00%

#endif /* CONFIG_HUGETLB_PAGE */

Overall Contributors

Person | Tokens | Prop | Commits | CommitProp
kirill a. shutemov | 909 | 14.02% | 16 | 7.92%
hugh dickins | 908 | 14.00% | 36 | 17.82%
andrew morton | 657 | 10.13% | 17 | 8.42%
peter zijlstra | 563 | 8.68% | 8 | 3.96%
joonsoo kim | 501 | 7.73% | 7 | 3.47%
rik van riel | 414 | 6.38% | 7 | 3.47%
mel gorman | 335 | 5.17% | 3 | 1.49%
nick piggin | 301 | 4.64% | 6 | 2.97%
naoya horiguchi | 282 | 4.35% | 8 | 3.96%
vladimir davydov | 246 | 3.79% | 4 | 1.98%
linus torvalds | 182 | 2.81% | 5 | 2.48%
andrea arcangeli | 137 | 2.11% | 8 | 3.96%
konstantin khlebnikov | 133 | 2.05% | 5 | 2.48%
minchan kim | 110 | 1.70% | 1 | 0.50%
bob liu | 98 | 1.51% | 1 | 0.50%
johannes weiner | 85 | 1.31% | 5 | 2.48%
andi kleen | 70 | 1.08% | 3 | 1.49%
michel lespinasse | 66 | 1.02% | 3 | 1.49%
kautuk consul | 56 | 0.86% | 1 | 0.50%
nikita danilov | 48 | 0.74% | 1 | 0.50%
adrian bunk | 36 | 0.56% | 1 | 0.50%
kamezawa hiroyuki | 28 | 0.43% | 4 | 1.98%
ingo molnar | 27 | 0.42% | 2 | 0.99%
sagi grimberg | 26 | 0.40% | 1 | 0.50%
cyrill gorcunov | 25 | 0.39% | 1 | 0.50%
oleg nesterov | 23 | 0.35% | 2 | 0.99%
christoph lameter | 23 | 0.35% | 6 | 2.97%
sasha levin | 18 | 0.28% | 2 | 0.99%
nadav amit | 18 | 0.28% | 1 | 0.50%
konstantin weitz | 18 | 0.28% | 1 | 0.50%
balbir singh | 17 | 0.26% | 3 | 1.49%
davidlohr bueso | 17 | 0.26% | 3 | 1.49%
fengguang wu | 15 | 0.23% | 2 | 0.99%
jerome marchand | 10 | 0.15% | 1 | 0.50%
david s. miller | 10 | 0.15% | 2 | 0.99%
daniel forrest | 10 | 0.15% | 1 | 0.50%
kosaki motohiro | 10 | 0.15% | 3 | 1.49%
jianguo wu | 9 | 0.14% | 1 | 0.50%
leon yu | 7 | 0.11% | 1 | 0.50%
jaya kumar | 5 | 0.08% | 1 | 0.50%
andrey ryabinin | 5 | 0.08% | 1 | 0.50%
christoph hellwig | 3 | 0.05% | 1 | 0.50%
jan kara | 3 | 0.05% | 1 | 0.50%
jason low | 3 | 0.05% | 1 | 0.50%
christian borntraeger | 3 | 0.05% | 1 | 0.50%
rusty russell | 2 | 0.03% | 1 | 0.50%
namhyung kim | 2 | 0.03% | 2 | 0.99%
pekka j enberg | 2 | 0.03% | 1 | 0.50%
randy dunlap | 1 | 0.02% | 1 | 0.50%
david rientjes | 1 | 0.02% | 1 | 0.50%
jianyu zhan | 1 | 0.02% | 1 | 0.50%
bjorn steinbrink | 1 | 0.02% | 1 | 0.50%
aneesh kumar | 1 | 0.02% | 1 | 0.50%
greg thelen | 1 | 0.02% | 1 | 0.50%
al viro | 1 | 0.02% | 1 | 0.50%
paul gortmaker | 1 | 0.02% | 1 | 0.50%
sha zhengju | 1 | 0.02% | 1 | 0.50%
Total | 6485 | 100.00% | 202 | 100.00%