
Release 4.15 mm/swap.c

/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"


#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */

int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */

static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
                struct lruvec *lruvec;
                unsigned long flags;

                spin_lock_irqsave(zone_lru_lock(zone), flags);
                lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
                VM_BUG_ON_PAGE(!PageLRU(page), page);
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(zone_lru_lock(zone), flags);
        }
        __ClearPageWaiters(page);
        mem_cgroup_uncharge(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Adrian Bunk                 60      55.56%   1        11.11%
Hugh Dickins                26      24.07%   2        22.22%
Mel Gorman                  8       7.41%    2        22.22%
Johannes Weiner             5       4.63%    1        11.11%
Nicholas Piggin             5       4.63%    1        11.11%
Sasha Levin                 3       2.78%    1        11.11%
Andrea Arcangeli            1       0.93%    1        11.11%
Total                       108     100.00%  9        100.00%


static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        free_unref_page(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrea Arcangeli            15      71.43%   1        33.33%
Adrian Bunk                 5       23.81%   1        33.33%
Mel Gorman                  1       4.76%    1        33.33%
Total                       21      100.00%  3        100.00%


static void __put_compound_page(struct page *page)
{
        compound_page_dtor *dtor;

        /*
         * __page_cache_release() is supposed to be called for thp, not for
         * hugetlb. This is because a hugetlb page never has PageLRU set
         * (it is never placed on any LRU list) and no memcg routines
         * should be called for hugetlb (it has a separate hugetlb_cgroup.)
         */
        if (!PageHuge(page))
                __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrea Arcangeli            20      45.45%   1        20.00%
Andrew Morton               12      27.27%   1        20.00%
Naoya Horiguchi             9       20.45%   1        20.00%
Hugh Dickins                2       4.55%    1        20.00%
Nicholas Piggin             1       2.27%    1        20.00%
Total                       44      100.00%  5        100.00%


void __put_page(struct page *page)
{
        if (is_zone_device_page(page)) {
                put_dev_pagemap(page->pgmap);

                /*
                 * The page belongs to the device that created pgmap. Do
                 * not return it to page allocator.
                 */
                return;
        }

        if (unlikely(PageCompound(page)))
                __put_compound_page(page);
        else
                __put_single_page(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Dan J Williams              18      36.73%   1        20.00%
Nicholas Piggin             18      36.73%   1        20.00%
Pravin B Shelar             5       10.20%   1        20.00%
Andrea Arcangeli            5       10.20%   1        20.00%
Kirill A. Shutemov          3       6.12%    1        20.00%
Total                       49      100.00%  5        100.00%

EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru. Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                put_page(victim);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Alexander Zarochentzev      51      98.08%   1        50.00%
Kirill A. Shutemov          1       1.92%    1        50.00%
Total                       52      100.00%  2        100.00%

EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:       An array of struct kvec structures
 * @nr_segs:    number of segments to pin
 * @write:      pinning for read/write, currently ignored
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
                struct page **pages)
{
        int seg;

        for (seg = 0; seg < nr_segs; seg++) {
                if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
                        return seg;

                pages[seg] = kmap_to_page(kiov[seg].iov_base);
                get_page(pages[seg]);
        }

        return seg;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mel Gorman                  83      98.81%   2        66.67%
Kirill A. Shutemov          1       1.19%    1        33.33%
Total                       84      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:      starting kernel address
 * @write:      pinning for read/write, currently ignored
 * @pages:      array that receives pointer to the page pinned.
 *              Must be at least nr_segs long.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
        const struct kvec kiov = {
                .iov_base = (void *)start,
                .iov_len = PAGE_SIZE
        };

        return get_kernel_pages(&kiov, 1, write, pages);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mel Gorman                  52      100.00%  1        100.00%
Total                       52      100.00%  1        100.00%

EXPORT_SYMBOL_GPL(get_kernel_page);
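
As a usage sketch (hypothetical, not part of mm/swap.c): pin the single
page backing a page-aligned kernel buffer, then release the reference when
done. On success get_kernel_page() returns 1, the number of pages pinned.

static int example_pin_kernel_buffer(unsigned long buf)
{
        struct page *page;
        int ret;

        /* buf must be page-aligned; the kvec built above spans PAGE_SIZE */
        ret = get_kernel_page(buf, 0, &page);   /* write is currently ignored */
        if (ret < 1)
                return ret < 0 ? ret : -EFAULT; /* -EFAULT is this sketch's choice */

        /* ... use page ... */

        put_page(page); /* every pinned page must eventually be released */
        return 0;
}
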
static void pagevec_lru_move_fn(struct pagevec *pvec,
        void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
        void *arg)
{
        int i;
        struct pglist_data *pgdat = NULL;
        struct lruvec *lruvec;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct pglist_data *pagepgdat = page_pgdat(page);

                if (pagepgdat != pgdat) {
                        if (pgdat)
                                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
                        pgdat = pagepgdat;
                        spin_lock_irqsave(&pgdat->lru_lock, flags);
                }

                lruvec = mem_cgroup_page_lruvec(page, pgdat);
                (*move_fn)(page, lruvec, arg);
        }
        if (pgdat)
                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Hisashi Hifumi              72      39.13%   1        25.00%
David Shaohua Li            68      36.96%   1        25.00%
Mel Gorman                  24      13.04%   1        25.00%
Hugh Dickins                20      10.87%   1        25.00%
Total                       184     100.00%  4        100.00%


static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        int *pgmoved = arg;

        if (PageLRU(page) && !PageUnevictable(page)) {
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
                (*pgmoved)++;
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Shaohua Li            25      32.89%   1        14.29%
Johannes Weiner             21      27.63%   2        28.57%
Hisashi Hifumi              18      23.68%   1        14.29%
Hugh Dickins                5       6.58%    1        14.29%
Lee Schermerhorn            4       5.26%    1        14.29%
Rik Van Riel                3       3.95%    1        14.29%
Total                       76      100.00%  7        100.00%

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Shaohua Li            21      63.64%   1        50.00%
Hisashi Hifumi              12      36.36%   1        50.00%
Total                       33      100.00%  2        100.00%

/*
 * Writeback is about to end against a page which has been marked for
 * immediate reclaim. If it still appears to be reclaimable, move it to
 * the tail of the inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                get_page(page);
                local_irq_save(flags);
                pvec = this_cpu_ptr(&lru_rotate_pvecs);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               45      50.56%   2        25.00%
Miklos Szeredi              17      19.10%   1        12.50%
Hisashi Hifumi              14      15.73%   1        12.50%
Lukasz Odzioba              5       5.62%    1        12.50%
Lee Schermerhorn            5       5.62%    1        12.50%
Christoph Lameter           2       2.25%    1        12.50%
Kirill A. Shutemov          1       1.12%    1        12.50%
Total                       89      100.00%  8        100.00%


static void update_page_reclaim_stat(struct lruvec *lruvec,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Motohiro Kosaki             37      78.72%   1        33.33%
Hugh Dickins                10      21.28%   2        66.67%
Total                       47      100.00%  3        100.00%


static void __activate_page(struct page *page, struct lruvec *lruvec,
                            void *arg)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);

                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(page, lruvec, lru);
                trace_mm_lru_activate(page);

                __count_vm_event(PGACTIVATE);
                update_page_reclaim_stat(lruvec, file, 1);
        }
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)2221.36%17.69%
Linus Torvalds2221.36%215.38%
Rik Van Riel1514.56%17.69%
Hugh Dickins109.71%17.69%
Andrew Morton87.77%215.38%
David Shaohua Li76.80%17.69%
Motohiro Kosaki65.83%17.69%
Mel Gorman54.85%17.69%
Lee Schermerhorn32.91%17.69%
Johannes Weiner32.91%17.69%
Christoph Lameter21.94%17.69%
Total103100.00%13100.00%

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Shaohua Li            38      100.00%  1        100.00%
Total                       38      100.00%  1        100.00%


static bool need_activate_page_drain(int cpu)
{
        return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Chris Metcalf               23      100.00%  1        100.00%
Total                       23      100.00%  1        100.00%


void activate_page(struct page *page)
{
        page = compound_head(page);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Shaohua Li            70      84.34%   1        25.00%
Kirill A. Shutemov          8       9.64%    2        50.00%
Lukasz Odzioba              5       6.02%    1        25.00%
Total                       83      100.00%  4        100.00%

#else
static inline void activate_page_drain(int cpu)
{
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Shaohua Li            9       100.00%  1        100.00%
Total                       9       100.00%  1        100.00%


static bool need_activate_page_drain(int cpu)
{
        return false;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Chris Metcalf               12      100.00%  1        100.00%
Total                       12      100.00%  1        100.00%


void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        page = compound_head(page);
        spin_lock_irq(zone_lru_lock(zone));
        __activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
        spin_unlock_irq(zone_lru_lock(zone));
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Shaohua Li            31      52.54%   1        12.50%
Mel Gorman                  8       13.56%   2        25.00%
Hugh Dickins                7       11.86%   1        12.50%
Kirill A. Shutemov          7       11.86%   1        12.50%
Linus Torvalds (pre-git)    4       6.78%    1        12.50%
Andrew Morton               2       3.39%    2        25.00%
Total                       59      100.00%  8        100.00%

#endif
static void __lru_cache_activate_page(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
        int i;

        /*
         * Search backwards on the optimistic assumption that the page being
         * activated has just been added to this pagevec. Note that only
         * the local pagevec is examined as a !PageLRU page could be in the
         * process of being released, reclaimed, migrated or on a remote
         * pagevec that is currently being drained. Furthermore, marking
         * a remote pagevec's page PageActive potentially hits a race where
         * a page is marked PageActive just after it is added to the inactive
         * list causing accounting errors and BUG_ON checks to trigger.
         */
        for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
                struct page *pagevec_page = pvec->pages[i];

                if (pagevec_page == page) {
                        SetPageActive(page);
                        break;
                }
        }

        put_cpu_var(lru_add_pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mel Gorman                  77      100.00%  1        100.00%
Total                       77      100.00%  1        100.00%

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
        page = compound_head(page);
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page)) {

                /*
                 * If the page is on the LRU, queue it for activation via
                 * activate_page_pvecs. Otherwise, assume the page is on a
                 * pagevec, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
                if (PageLRU(page))
                        activate_page(page);
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
                if (page_is_file_cache(page))
                        workingset_activation(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               54      52.94%   1        16.67%
Vladimir Davydov            12      11.76%   1        16.67%
Johannes Weiner             12      11.76%   1        16.67%
Mel Gorman                  11      10.78%   1        16.67%
Kirill A. Shutemov          7       6.86%    1        16.67%
Lee Schermerhorn            6       5.88%    1        16.67%
Total                       102     100.00%  6        100.00%

EXPORT_SYMBOL(mark_page_accessed);
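
A caller sketch (hypothetical, not in this file) showing both variants the
comment above describes: the atomic call for a page that is already visible,
and the non-atomic __SetPageReferenced() for a newly allocated page nobody
else can see yet.

static void example_report_page_use(struct page *cached_page,
                                    struct page *new_private_page)
{
        /* Page already on the LRU (or a pagevec): walk the state machine. */
        mark_page_accessed(cached_page);

        /* Newly allocated and not yet visible: the non-atomic set is safe. */
        __SetPageReferenced(new_private_page);
}
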
static void __lru_cache_add(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

        get_page(page);
        if (!pagevec_add(pvec, page) || PageCompound(page))
                __pagevec_lru_add(pvec);
        put_cpu_var(lru_add_pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               16      30.77%   2        20.00%
Linus Torvalds (pre-git)    15      28.85%   1        10.00%
Lukasz Odzioba              8       15.38%   1        10.00%
Robin Dong                  4       7.69%    1        10.00%
Rusty Russell               4       7.69%    1        10.00%
Mel Gorman                  2       3.85%    1        10.00%
Motohiro Kosaki             1       1.92%    1        10.00%
Jianyu Zhan                 1       1.92%    1        10.00%
Kirill A. Shutemov          1       1.92%    1        10.00%
Total                       52      100.00%  10       100.00%

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
        if (PageActive(page))
                ClearPageActive(page);
        __lru_cache_add(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jianyu Zhan                 20      74.07%   1        50.00%
Mel Gorman                  7       25.93%   1        50.00%
Total                       27      100.00%  2        100.00%


void lru_cache_add_file(struct page *page)
{
        if (PageActive(page))
                ClearPageActive(page);
        __lru_cache_add(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jianyu Zhan                 20      74.07%   1        50.00%
Mel Gorman                  7       25.93%   1        50.00%
Total                       27      100.00%  2        100.00%

EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
        VM_BUG_ON_PAGE(PageLRU(page), page);
        __lru_cache_add(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               15      37.50%   1        14.29%
Lee Schermerhorn            10      25.00%   1        14.29%
Sasha Levin                 6       15.00%   1        14.29%
Motohiro Kosaki             4       10.00%   1        14.29%
Rusty Russell               2       5.00%    1        14.29%
Naoya Horiguchi             2       5.00%    1        14.29%
Mel Gorman                  1       2.50%    1        14.29%
Total                       40      100.00%  7        100.00%
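
A minimal sketch of the interplay documented above (hypothetical, not part of
mm/swap.c): because list placement is deferred until the pagevec drains, a
caller can still promote a just-added page to the active list.

static void example_add_then_activate(struct page *page)
{
        lru_cache_add(page);            /* queued on the per-cpu pagevec */
        mark_page_accessed(page);       /* may mark it active before the drain */
}
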

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page: the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list. To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks. This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct pglist_data *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        spin_lock_irq(&pgdat->lru_lock);
        lruvec = mem_cgroup_page_lruvec(page, pgdat);
        ClearPageActive(page);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
        spin_unlock_irq(&pgdat->lru_lock);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Lee Schermerhorn            42      56.76%   1        25.00%
Hugh Dickins                15      20.27%   1        25.00%
Mel Gorman                  12      16.22%   1        25.00%
Naoya Horiguchi             5       6.76%    1        25.00%
Total                       74      100.00%  4        100.00%

/**
 * lru_cache_add_active_or_unevictable
 * @page: the page to be added to LRU
 * @vma: vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability. Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
                                         struct vm_area_struct *vma)
{
        VM_BUG_ON_PAGE(PageLRU(page), page);

        if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
                SetPageActive(page);
                lru_cache_add(page);
                return;
        }

        if (!TestSetPageMlocked(page)) {
                /*
                 * We use the irq-unsafe __mod_zone_page_stat because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held(spinlock), which implies preemption disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK,
                                    hpage_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
        add_page_to_unevictable_list(page);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Johannes Weiner             93      100.00%  1        100.00%
Total                       93      100.00%  1        100.00%

/*
 * If the page can not be invalidated, it is moved to the
 * inactive list to speed up its reclaim. It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the inactive head because the VM
 * expects it to be written out by flusher threads, as this is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                              void *arg)
{
        int lru, file;
        bool active;

        if (!PageLRU(page))
                return;

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);
        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);

        del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
        add_page_to_lru_list(page, lruvec, lru);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * PG_reclaim could be raced with end_page_writeback
                 * It can make readahead confusing. But race window
                 * is _really_ small and it's non-critical problem.
                 */
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback has ended while it sat in the
                 * pagevec, so move the page to the tail of the inactive list.
                 */
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }

        if (active)
                __count_vm_event(PGDEACTIVATE);
        update_page_reclaim_stat(lruvec, file, 0);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
MinChan Kim                 153     91.07%   4        50.00%
Hugh Dickins                10      5.95%    1        12.50%
Johannes Weiner             3       1.79%    2        25.00%
David Shaohua Li            2       1.19%    1        12.50%
Total                       168     100.00%  8        100.00%


static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
                            void *arg)
{
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);

                del_page_from_lru_list(page, lruvec,
                                       LRU_INACTIVE_ANON + active);
                ClearPageActive(page);
                ClearPageReferenced(page);
                /*
                 * lazyfree pages are clean anonymous pages. They have
                 * SwapBacked flag cleared to distinguish normal anonymous
                 * pages
                 */
                ClearPageSwapBacked(page);
                add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

                __count_vm_events(PGLAZYFREE, hpage_nr_pages(page));
                count_memcg_page_event(page, PGLAZYFREE);
                update_page_reclaim_stat(lruvec, 1, 0);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
MinChan Kim                 81      66.94%   1        25.00%
Shaohua Li                  33      27.27%   2        50.00%
Roman Gushchin              7       5.79%    1        25.00%
Total                       121     100.00%  4        100.00%

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }

        pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

        pvec = &per_cpu(lru_lazyfree_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

        activate_page_drain(cpu);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
MinChan Kim                 47      36.43%   3        21.43%
Hisashi Hifumi              39      30.23%   1        7.14%
Andrew Morton               25      19.38%   3        21.43%
David Shaohua Li            9       6.98%    2        14.29%
Mel Gorman                  3       2.33%    1        7.14%
Shaohua Li                  2       1.55%    1        7.14%
Motohiro Kosaki             2       1.55%    1        7.14%
Hugh Dickins                1       0.78%    1        7.14%
Konstantin Khlebnikov       1       0.78%    1        7.14%
Total                       129     100.00%  14       100.00%

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (such as mprotect),
         * unevictable page deactivation for accelerating reclaim is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
                put_cpu_var(lru_deactivate_file_pvecs);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
MinChan Kim                 61      85.92%   3        50.00%
Lukasz Odzioba              5       7.04%    1        16.67%
David Shaohua Li            4       5.63%    1        16.67%
Andrew Morton               1       1.41%    1        16.67%
Total                       71      100.00%  6        100.00%
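
A caller sketch (hypothetical, not in this file): an invalidation path that
could not toss a dirty or writeback page hints that the page should be
reclaimed soon, which is how invalidate_mapping_pages() uses this helper.

static void example_failed_invalidate(struct page *page)
{
        deactivate_file_page(page);     /* queue for move to the inactive list */
}
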

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs);

                get_page(page);
                if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
                put_cpu_var(lru_lazyfree_pvecs);
        }
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
MinChan Kim                 64      74.42%   1        20.00%
Shaohua Li                  16      18.60%   2        40.00%
Lukasz Odzioba              5       5.81%    1        20.00%
Kirill A. Shutemov          1       1.16%    1        20.00%
Total                       86      100.00%  5        100.00%


void lru_add_drain(void)
{
        lru_add_drain_cpu(get_cpu());
        put_cpu();
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               11      68.75%   1        25.00%
Rusty Russell               2       12.50%   1        25.00%
Linus Torvalds (pre-git)    2       12.50%   1        25.00%
Konstantin Khlebnikov       1       6.25%    1        25.00%
Total                       16      100.00%  4        100.00%


static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Nicholas Piggin             12      85.71%   1        50.00%
David Howells               2       14.29%   1        50.00%
Total                       14      100.00%  2        100.00%

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
void lru_add_drain_all_cpuslocked(void)
{
        static DEFINE_MUTEX(lock);
        static struct cpumask has_work;
        int cpu;

        /*
         * Make sure nobody triggers this path before mm_percpu_wq is fully
         * initialized.
         */
        if (WARN_ON(!mm_percpu_wq))
                return;

        mutex_lock(&lock);
        cpumask_clear(&has_work);

        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

                if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
                    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
                    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
                    pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
                        cpumask_set_cpu(cpu, &has_work);
                }
        }

        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));

        mutex_unlock(&lock);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Chris Metcalf               128     79.01%   1        12.50%
MinChan Kim                 11      6.79%    2        25.00%
Michal Hocko                11      6.79%    1        12.50%
Nicholas Piggin             8       4.94%    1        12.50%
Wang Sheng-Hui              2       1.23%    1        12.50%
Shaohua Li                  1       0.62%    1        12.50%
Thomas Gleixner             1       0.62%    1        12.50%
Total                       162     100.00%  8        100.00%


void lru_add_drain_all(void)
{
        get_online_cpus();
        lru_add_drain_all_cpuslocked();
        put_online_cpus();
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Thomas Gleixner             16      100.00%  1        100.00%
Total                       16      100.00%  1        100.00%

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages. If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct pglist_data *locked_pgdat = NULL;
        struct lruvec *lruvec;
        unsigned long uninitialized_var(flags);
        unsigned int uninitialized_var(lock_batch);

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                /*
                 * Make sure the IRQ-safe lock-holding time does not get
                 * excessive with a continuous string of pages from the
                 * same pgdat. The lock is held only if pgdat != NULL.
                 */
                if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
                        spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
                        locked_pgdat = NULL;
                }

                if (is_huge_zero_page(page))
                        continue;

                /* Device public page can not be huge page */
                if (is_device_public_page(page)) {
                        if (locked_pgdat) {
                                spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                       flags);
                                locked_pgdat = NULL;
                        }
                        put_zone_device_private_or_public_page(page);
                        continue;
                }

                page = compound_head(page);
                if (!put_page_testzero(page))
                        continue;

                if (PageCompound(page)) {
                        if (locked_pgdat) {
                                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
                                locked_pgdat = NULL;
                        }
                        __put_compound_page(page);
                        continue;
                }

                if (PageLRU(page)) {
                        struct pglist_data *pgdat = page_pgdat(page);

                        if (pgdat != locked_pgdat) {
                                if (locked_pgdat)
                                        spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                               flags);
                                lock_batch = 0;
                                locked_pgdat = pgdat;
                                spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
                        }

                        lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
                        VM_BUG_ON_PAGE(!PageLRU(page), page);
                        __ClearPageLRU(page);
                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }

                /* Clear Active bit in case of parallel mark_page_accessed */
                __ClearPageActive(page);
                __ClearPageWaiters(page);

                list_add(&page->lru, &pages_to_free);
        }
        if (locked_pgdat)
                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

        mem_cgroup_uncharge_list(&pages_to_free);
        free_unref_page_list(&pages_to_free);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               98      28.99%   3        13.04%
Mel Gorman                  44      13.02%   4        17.39%
Nicholas Piggin             44      13.02%   5        21.74%
Jérôme Glisse               36      10.65%   1        4.35%
Kirill A. Shutemov          35      10.36%   2        8.70%
Michal Hocko                25      7.40%    1        4.35%
Hugh Dickins                21      6.21%    2        8.70%
Hisashi Hifumi              20      5.92%    1        4.35%
Johannes Weiner             6       1.78%    2        8.70%
Konstantin Khlebnikov       6       1.78%    1        4.35%
Sasha Levin                 3       0.89%    1        4.35%
Total                       338     100.00%  23       100.00%
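
A caller sketch (hypothetical, not part of this file): dropping one reference
on each page of a batch in a single call instead of a per-page put_page()
loop, so the LRU lock is taken per run of pages rather than once per page and
is released at least every SWAP_CLUSTER_MAX pages.

static void example_drop_batch(struct page **pages, int nr)
{
        /* Equivalent to: while (nr--) put_page(pages[nr]); but batched. */
        release_pages(pages, nr);
}
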

EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues. That would prevent them from really being freed right now. That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here. __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        if (!pvec->percpu_pvec_drained) {
                lru_add_drain();
                pvec->percpu_pvec_drained = true;
        }
        release_pages(pvec->pages, pagevec_count(pvec));
        pagevec_reinit(pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               30      66.67%   4        66.67%
Mel Gorman                  15      33.33%   2        33.33%
Total                       45      100.00%  6        100.00%

EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
                       struct lruvec *lruvec, struct list_head *list)
{
        const int file = 0;

        VM_BUG_ON_PAGE(!PageHead(page), page);
        VM_BUG_ON_PAGE(PageCompound(page_tail), page);
        VM_BUG_ON_PAGE(PageLRU(page_tail), page);
        VM_BUG_ON(NR_CPUS != 1 &&
                  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));

        if (!list)
                SetPageLRU(page_tail);

        if (likely(PageLRU(page)))
                list_add_tail(&page_tail->lru, &page->lru);
        else if (list) {
                /* page reclaim is reclaiming a huge page */
                get_page(page_tail);
                list_add_tail(&page_tail->lru, list);
        } else {
                struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
                 * Use the standard add function to put page_tail on the list,
                 * but then correct its position so they all end up in order.
                 */
                add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }

        if (!PageUnevictable(page))
                update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrea Arcangeli            81      41.33%   1        9.09%
Hugh Dickins                52      26.53%   4        36.36%
Shaohua Li                  33      16.84%   1        9.09%
Sasha Levin                 9       4.59%    1        9.09%
Johannes Weiner             9       4.59%    2        18.18%
Kirill A. Shutemov          8       4.08%    1        9.09%
Mel Gorman                  4       2.04%    1        9.09%
Total                       196     100.00%  11       100.00%

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        int file = page_is_file_cache(page);
        int active = PageActive(page);
        enum lru_list lru = page_lru(page);

        VM_BUG_ON_PAGE(PageLRU(page), page);

        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, lru);
        update_page_reclaim_stat(lruvec, file, active);
        trace_mm_lru_insertion(page, lru);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Mel Gorman                  17      20.00%   2        14.29%
Hugh Dickins                15      17.65%   3        21.43%
Andrew Morton               12      14.12%   1        7.14%
David Shaohua Li            11      12.94%   1        7.14%
Motohiro Kosaki             8       9.41%    2        14.29%
Nicholas Piggin             7       8.24%    1        7.14%
Rik Van Riel                4       4.71%    1        7.14%
Lee Schermerhorn            4       4.71%    1        7.14%
Linus Torvalds              4       4.71%    1        7.14%
Sasha Levin                 3       3.53%    1        7.14%
Total                       85      100.00%  14       100.00%

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them. Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
David Shaohua Li            9       47.37%   1        20.00%
Andrew Morton               7       36.84%   2        40.00%
Hugh Dickins                2       10.53%   1        20.00%
Mel Gorman                  1       5.26%    1        20.00%
Total                       19      100.00%  5        100.00%

EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec: Where the resulting entries are placed
 * @mapping: The address_space to search
 * @start: The starting entry index
 * @nr_pages: The maximum number of entries
 * @indices: The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_pages pages and shadow entries in the mapping. All
 * entries are placed in @pvec. pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes. There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
                                struct address_space *mapping,
                                pgoff_t start, unsigned nr_pages,
                                pgoff_t *indices)
{
        pvec->nr = find_get_entries(mapping, start, nr_pages,
                                    pvec->pages, indices);
        return pagevec_count(pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Johannes Weiner             50      100.00%  1        100.00%
Total                       50      100.00%  1        100.00%

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec: The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec. This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
        int i, j;

        for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                if (!radix_tree_exceptional_entry(page))
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Johannes Weiner             73      100.00%  1        100.00%
Total                       73      100.00%  1        100.00%

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @end: The final page index
 *
 * pagevec_lookup_range() will search for and return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and upto
 * index @end (inclusive). The pages are placed in @pvec.
 * pagevec_lookup_range() takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes. There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
        pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
                                        pvec->pages);
        return pagevec_count(pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               39      82.98%   2        40.00%
Jan Kara                    8       17.02%   3        60.00%
Total                       47      100.00%  5        100.00%

EXPORT_SYMBOL(pagevec_lookup_range);
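
A typical iteration sketch (hypothetical, not part of mm/swap.c): walk every
cached page of a mapping in [start, end] in PAGEVEC_SIZE batches. The lookup
advances @start itself, and each batch's page references are dropped before
the next lookup.

static void example_walk_mapping(struct address_space *mapping,
                                 pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec);
        while (pagevec_lookup_range(&pvec, mapping, &start, end)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* ... inspect page; a reference is held ... */
                }
                pagevec_release(&pvec); /* drop the references taken above */
        }
}
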
unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                int tag)
{
        pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
                                        PAGEVEC_SIZE, pvec->pages);
        return pagevec_count(pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               44      84.62%   2        50.00%
Jan Kara                    8       15.38%   2        50.00%
Total                       52      100.00%  4        100.00%

EXPORT_SYMBOL(pagevec_lookup_range_tag);
unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                int tag, unsigned max_pages)
{
        pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
                min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
        return pagevec_count(pvec);
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Jan Kara                    62      100.00%  1        100.00%
Total                       62      100.00%  1        100.00%

EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}

Contributors

Person                      Tokens  Prop     Commits  CommitProp
Linus Torvalds              20      54.05%   2        50.00%
Linus Torvalds (pre-git)    16      43.24%   1        25.00%
Jan Beulich                 1       2.70%    1        25.00%
Total                       37      100.00%  4        100.00%
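
A worked note on the values chosen above (an assumption based on how swapin
readahead consumes page_cluster): the setting is used as a power of two, so
the defaults correspond to

        1 << 3 == 8 pages == 32KB with 4KB pages   (machines with >= 16MB RAM)
        1 << 2 == 4 pages == 16KB with 4KB pages   (smaller machines)

and the value remains tunable at runtime through the vm.page-cluster sysctl
described in Documentation/sysctl/vm.txt.
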


Overall Contributors

Person                      Tokens  Prop     Commits  CommitProp
Andrew Morton               437     11.88%   19       12.84%
MinChan Kim                 436     11.85%   6        4.05%
Mel Gorman                  401     10.90%   15       10.14%
David Shaohua Li            313     8.51%    2        1.35%
Johannes Weiner             278     7.56%    9        6.08%
Hugh Dickins                204     5.55%    9        6.08%
Hisashi Hifumi              193     5.25%    1        0.68%
Chris Metcalf               172     4.68%    1        0.68%
Andrea Arcangeli            127     3.45%    3        2.03%
Nicholas Piggin             95      2.58%    6        4.05%
Shaohua Li                  87      2.37%    3        2.03%
Jan Kara                    86      2.34%    6        4.05%
Linus Torvalds (pre-git)    83      2.26%    10       6.76%
Lee Schermerhorn            78      2.12%    2        1.35%
Kirill A. Shutemov          75      2.04%    7        4.73%
Adrian Bunk                 66      1.79%    1        0.68%
Motohiro Kosaki             62      1.69%    2        1.35%
Alexander Zarochentzev      56      1.52%    1        0.68%
Linus Torvalds              46      1.25%    5        3.38%
Jianyu Zhan                 44      1.20%    1        0.68%
Jérôme Glisse               36      0.98%    1        0.68%
Michal Hocko                36      0.98%    2        1.35%
Lukasz Odzioba              28      0.76%    1        0.68%
Miklos Szeredi              27      0.73%    3        2.03%
Sasha Levin                 24      0.65%    1        0.68%
Rik Van Riel                22      0.60%    2        1.35%
Dan J Williams              21      0.57%    2        1.35%
Naoya Horiguchi             19      0.52%    2        1.35%
Thomas Gleixner             17      0.46%    1        0.68%
Vladimir Davydov            15      0.41%    1        0.68%
Ming Li                     14      0.38%    1        0.68%
Rusty Russell               14      0.38%    2        1.35%
Steve French                9       0.24%    1        0.68%
Konstantin Khlebnikov       8       0.22%    2        1.35%
Roman Gushchin              7       0.19%    1        0.68%
Pravin B Shelar             5       0.14%    1        0.68%
Arnaldo Carvalho de Melo    5       0.14%    1        0.68%
Robin Dong                  4       0.11%    1        0.68%
Christoph Hellwig           4       0.11%    1        0.68%
Christoph Lameter           4       0.11%    2        1.35%
Tejun Heo                   3       0.08%    1        0.68%
Balbir Singh                3       0.08%    1        0.68%
Peter Zijlstra              3       0.08%    1        0.68%
Kent Overstreet             3       0.08%    1        0.68%
David Howells               2       0.05%    1        0.68%
Wang Sheng-Hui              2       0.05%    1        0.68%
Simon Arlott                1       0.03%    1        0.68%
Randy Dunlap                1       0.03%    1        0.68%
Jan Beulich                 1       0.03%    1        0.68%
Paul Gortmaker              1       0.03%    1        0.68%
Total                       3678    100.00%  148      100.00%
Created with cregit.