Release 4.18 mm/swap_state.c
// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/swap_state.c
*
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
* Swap reorganised 29.12.95, Stephen Tweedie
*
* Rewritten to use page cache, (C) 1998 Stephen Tweedie
*/
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <asm/pgtable.h>
/*
* swapper_space is a fiction, retained to simplify the path through
* vmscan's shrink_page_list.
*/
static const struct address_space_operations swap_aops = {
.writepage = swap_writepage,
.set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
.migratepage = migrate_page,
#endif
};
struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;
#define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK (~PAGE_MASK & ~SWAP_RA_HITS_MASK)
#define SWAP_RA_HITS(v) ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v) (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v) ((v) & PAGE_MASK)
#define SWAP_RA_VAL(addr, win, hits) \
(((addr) & PAGE_MASK) | \
(((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) | \
((hits) & SWAP_RA_HITS_MASK))
/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma) \
(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
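These macros pack the previously faulting address, the current readahead window and the recent hit count into the single unsigned long stored in vma->swap_readahead_info, so the whole state can be read and updated with one atomic_long operation. A minimal userspace sketch of the same encoding (hypothetical RA_* names, assuming 4 KiB pages, so the window field is the 6 bits between the hit count and the page-aligned address):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assuming 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#define RA_WIN_SHIFT	(PAGE_SHIFT / 2)		/* 6 */
#define RA_HITS_MASK	((1UL << RA_WIN_SHIFT) - 1)	/* 0x3f */
#define RA_WIN_MASK	(~PAGE_MASK & ~RA_HITS_MASK)	/* 0xfc0 */

/* Pack address, window and hits the same way SWAP_RA_VAL() does. */
static unsigned long ra_val(unsigned long addr, unsigned long win,
			    unsigned long hits)
{
	return (addr & PAGE_MASK) |
	       ((win << RA_WIN_SHIFT) & RA_WIN_MASK) |
	       (hits & RA_HITS_MASK);
}

int main(void)
{
	unsigned long v = ra_val(0x7f123456UL, 8, 3);

	printf("addr=%#lx win=%lu hits=%lu\n",
	       v & PAGE_MASK,				/* SWAP_RA_ADDR() */
	       (v & RA_WIN_MASK) >> RA_WIN_SHIFT,	/* SWAP_RA_WIN()  */
	       v & RA_HITS_MASK);			/* SWAP_RA_HITS() */
	return 0;
}

GET_SWAP_RA_VAL() returning 4 for a fresh VMA therefore means address 0, window 0 and four initial hits, which seeds __swapin_nr_pages() with a small starting window.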
#define INC_CACHE_INFO(x) do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr) do { swap_cache_info.x += (nr); } while (0)
static struct {
unsigned long add_total;
unsigned long del_total;
unsigned long find_success;
unsigned long find_total;
} swap_cache_info;
unsigned long total_swapcache_pages(void)
{
unsigned int i, j, nr;
unsigned long ret = 0;
struct address_space *spaces;
rcu_read_lock();
for (i = 0; i < MAX_SWAPFILES; i++) {
/*
* The corresponding entries in nr_swapper_spaces and
* swapper_spaces will be reused only after at least
* one grace period. So it is impossible for them
* to belong to different usages.
*/
nr = nr_swapper_spaces[i];
spaces = rcu_dereference(swapper_spaces[i]);
if (!nr || !spaces)
continue;
for (j = 0; j < nr; j++)
ret += spaces[j].nrpages;
}
rcu_read_unlock();
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 60 | 60.00% | 1 | 50.00% |
Shaohua Li | 40 | 40.00% | 1 | 50.00% |
Total | 100 | 100.00% | 2 | 100.00% |
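total_swapcache_pages() is what the kernel reports (scaled to kilobytes) on the SwapCached line of /proc/meminfo, so the size of the swap cache can be watched from userspace. A minimal sketch that just echoes that line, assuming the usual meminfo format:

#include <stdio.h>
#include <string.h>

/* Print the SwapCached line from /proc/meminfo, which reflects
 * total_swapcache_pages() converted to kilobytes. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f) {
		perror("meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "SwapCached:", 11))
			fputs(line, stdout);
	fclose(f);
	return 0;
}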
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
swap_cache_info.add_total, swap_cache_info.del_total,
swap_cache_info.find_success, swap_cache_info.find_total);
printk("Free swap = %ldkB\n",
get_nr_swap_pages() << (PAGE_SHIFT - 10));
printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrea Arcangeli | 24 | 38.10% | 1 | 14.29% |
Linus Torvalds | 14 | 22.22% | 1 | 14.29% |
Linus Torvalds (pre-git) | 13 | 20.63% | 1 | 14.29% |
Johannes Weiner | 8 | 12.70% | 1 | 14.29% |
Shaohua Li | 3 | 4.76% | 2 | 28.57% |
Hugh Dickins | 1 | 1.59% | 1 | 14.29% |
Total | 63 | 100.00% | 7 | 100.00% |
/*
* __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
* but sets SwapCache flag and private instead of mapping and index.
*/
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
int error, i, nr = hpage_nr_pages(page);
struct address_space *address_space;
pgoff_t idx = swp_offset(entry);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(PageSwapCache(page), page);
VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
page_ref_add(page, nr);
SetPageSwapCache(page);
address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
for (i = 0; i < nr; i++) {
set_page_private(page + i, entry.val + i);
error = radix_tree_insert(&address_space->i_pages,
idx + i, page + i);
if (unlikely(error))
break;
}
if (likely(!error)) {
address_space->nrpages += nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
ADD_CACHE_INFO(add_total, nr);
} else {
/*
* Only the context which has set SWAP_HAS_CACHE flag
* would call add_to_swap_cache().
* So add_to_swap_cache() doesn't return -EEXIST.
*/
VM_BUG_ON(error == -EEXIST);
set_page_private(page + i, 0UL);
while (i--) {
radix_tree_delete(&address_space->i_pages, idx + i);
set_page_private(page + i, 0UL);
}
ClearPageSwapCache(page);
page_ref_sub(page, nr);
}
xa_unlock_irq(&address_space->i_pages);
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 111 | 43.02% | 1 | 5.56% |
Nicholas Piggin | 38 | 14.73% | 2 | 11.11% |
Andrew Morton | 34 | 13.18% | 2 | 11.11% |
Shaohua Li | 19 | 7.36% | 1 | 5.56% |
Daisuke Nishimura | 14 | 5.43% | 2 | 11.11% |
Linus Torvalds (pre-git) | 11 | 4.26% | 4 | 22.22% |
Sasha Levin | 9 | 3.49% | 1 | 5.56% |
Rik Van Riel | 8 | 3.10% | 1 | 5.56% |
Matthew Wilcox | 6 | 2.33% | 1 | 5.56% |
Hugh Dickins | 4 | 1.55% | 1 | 5.56% |
Christoph Lameter | 3 | 1.16% | 1 | 5.56% |
Linus Torvalds | 1 | 0.39% | 1 | 5.56% |
Total | 258 | 100.00% | 18 | 100.00% |
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
int error;
error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
if (!error) {
error = __add_to_swap_cache(page, entry);
radix_tree_preload_end();
}
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daisuke Nishimura | 42 | 79.25% | 1 | 33.33% |
Huang Ying | 6 | 11.32% | 1 | 33.33% |
Andrew Morton | 5 | 9.43% | 1 | 33.33% |
Total | 53 | 100.00% | 3 | 100.00% |
/*
* This must be called only on pages that have
* been verified to be in the swap cache.
*/
void __delete_from_swap_cache(struct page *page)
{
struct address_space *address_space;
int i, nr = hpage_nr_pages(page);
swp_entry_t entry;
pgoff_t idx;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageSwapCache(page), page);
VM_BUG_ON_PAGE(PageWriteback(page), page);
entry.val = page_private(page);
address_space = swap_address_space(entry);
idx = swp_offset(entry);
for (i = 0; i < nr; i++) {
radix_tree_delete(&address_space->i_pages, idx + i);
set_page_private(page + i, 0);
}
ClearPageSwapCache(page);
address_space->nrpages -= nr;
__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
ADD_CACHE_INFO(del_total, nr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 55 | 35.95% | 1 | 7.14% |
Shaohua Li | 26 | 16.99% | 1 | 7.14% |
Andrew Morton | 25 | 16.34% | 2 | 14.29% |
Linus Torvalds (pre-git) | 19 | 12.42% | 3 | 21.43% |
Linus Torvalds | 11 | 7.19% | 3 | 21.43% |
Sasha Levin | 9 | 5.88% | 1 | 7.14% |
Hugh Dickins | 4 | 2.61% | 1 | 7.14% |
Christoph Lameter | 3 | 1.96% | 1 | 7.14% |
Matthew Wilcox | 1 | 0.65% | 1 | 7.14% |
Total | 153 | 100.00% | 14 | 100.00% |
/**
* add_to_swap - allocate swap space for a page
* @page: page we want to move to swap
*
* Allocate swap space for the page and add the page to the
* swap cache. Caller needs to hold the page lock.
*/
int add_to_swap(struct page *page)
{
swp_entry_t entry;
int err;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageUptodate(page), page);
entry = get_swap_page(page);
if (!entry.val)
return 0;
/*
* Radix-tree node allocations from PF_MEMALLOC contexts could
* completely exhaust the page allocator. __GFP_NOMEMALLOC
* stops emergency reserves from being allocated.
*
* TODO: this could cause a theoretical memory reclaim
* deadlock in the swap out path.
*/
/*
* Add it to the swap cache.
*/
err = add_to_swap_cache(page, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
/* -ENOMEM radix-tree allocation failure */
if (err)
/*
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
* clear SWAP_HAS_CACHE flag.
*/
goto fail;
/*
* Normally the page will be dirtied in unmap because its pte should be
* dirty. A special case is an MADV_FREE page. The page's pte could have
* the dirty bit cleared while the page's SwapBacked bit is still set,
* because clearing the dirty bit and the SwapBacked bit is not protected
* by a common lock. For such a page, unmap will not set the dirty bit, so
* page reclaim will not write the page out. This can cause data corruption
* when the page is swapped in later. Always setting the dirty bit for the
* page solves the problem.
*/
set_page_dirty(page);
return 1;
fail:
put_swap_page(page, entry);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 37 | 36.27% | 2 | 18.18% |
Huang Ying | 28 | 27.45% | 1 | 9.09% |
Nicholas Piggin | 8 | 7.84% | 1 | 9.09% |
MinChan Kim | 7 | 6.86% | 2 | 18.18% |
Shaohua Li | 6 | 5.88% | 1 | 9.09% |
Sasha Levin | 6 | 5.88% | 1 | 9.09% |
Hugh Dickins | 4 | 3.92% | 1 | 9.09% |
Vladimir Davydov | 4 | 3.92% | 1 | 9.09% |
Eric Sesterhenn / Snakebyte | 2 | 1.96% | 1 | 9.09% |
Total | 102 | 100.00% | 11 | 100.00% |
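The MADV_FREE case described in the comment above arises when userspace marks dirty anonymous memory as lazily freeable: the PTE dirty bit may then be clear even though the page is still swap backed. A minimal userspace sketch of how such pages are created (MADV_FREE needs kernel 4.5+ and a libc that defines it, hence the fallback define):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MADV_FREE
#define MADV_FREE 8	/* Linux value, in case the libc headers lack it */
#endif

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0xab, len);		/* dirty the anonymous page */
	if (madvise(p, len, MADV_FREE))	/* mark it lazily freeable */
		perror("madvise");
	munmap(p, len);
	return 0;
}

If such a page later reaches add_to_swap() without having been redirtied through its PTE, the set_page_dirty() call above is what guarantees reclaim still writes it out.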
/*
* This must be called only on pages that have
* been verified to be in the swap cache and locked.
* It will never put the page into the free list,
* the caller has a reference on the page.
*/
void delete_from_swap_cache(struct page *page)
{
swp_entry_t entry;
struct address_space *address_space;
entry.val = page_private(page);
address_space = swap_address_space(entry);
xa_lock_irq(&address_space->i_pages);
__delete_from_swap_cache(page);
xa_unlock_irq(&address_space->i_pages);
put_swap_page(page, entry);
page_ref_sub(page, hpage_nr_pages(page));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 28 | 38.89% | 6 | 50.00% |
Shaohua Li | 16 | 22.22% | 1 | 8.33% |
Linus Torvalds | 11 | 15.28% | 1 | 8.33% |
Huang Ying | 8 | 11.11% | 1 | 8.33% |
Matthew Wilcox | 4 | 5.56% | 1 | 8.33% |
Hugh Dickins | 3 | 4.17% | 1 | 8.33% |
MinChan Kim | 2 | 2.78% | 1 | 8.33% |
Total | 72 | 100.00% | 12 | 100.00% |
/*
* If we are the only user, then try to free up the swap cache.
*
* It's ok to check for PageSwapCache without the page lock
* here because we are going to recheck again inside
* try_to_free_swap() _with_ the lock.
* - Marcelo
*/
static inline void free_swap_cache(struct page *page)
{
if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
try_to_free_swap(page);
unlock_page(page);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 29 | 69.05% | 1 | 20.00% |
Hugh Dickins | 7 | 16.67% | 1 | 20.00% |
Andrew Morton | 5 | 11.90% | 2 | 40.00% |
Nicholas Piggin | 1 | 2.38% | 1 | 20.00% |
Total | 42 | 100.00% | 5 | 100.00% |
/*
* Perform a free_page(), also freeing any swap cache associated with
* this page if it is the last user of the page.
*/
void free_page_and_swap_cache(struct page *page)
{
free_swap_cache(page);
if (!is_huge_zero_page(page))
put_page(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 14 | 50.00% | 1 | 20.00% |
Gerald Schaefer | 7 | 25.00% | 1 | 20.00% |
Linus Torvalds | 5 | 17.86% | 1 | 20.00% |
Aaron Lu | 1 | 3.57% | 1 | 20.00% |
Kirill A. Shutemov | 1 | 3.57% | 1 | 20.00% |
Total | 28 | 100.00% | 5 | 100.00% |
/*
* Passed an array of pages, drop them all from swapcache and then release
* them. They are removed from the LRU and freed if this is their last use.
*/
void free_pages_and_swap_cache(struct page **pages, int nr)
{
struct page **pagep = pages;
int i;
lru_add_drain();
for (i = 0; i < nr; i++)
free_swap_cache(pagep[i]);
release_pages(pagep, nr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 51 | 91.07% | 1 | 50.00% |
Michal Hocko | 5 | 8.93% | 1 | 50.00% |
Total | 56 | 100.00% | 2 | 100.00% |
static inline bool swap_use_vma_readahead(void)
{
return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
MinChan Kim | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
/*
* Lookup a swap entry in the swap cache. A found page will be returned
* unlocked and with its refcount incremented - we rely on the kernel
* lock to keep page table operations atomic even if we drop the page
* lock before returning.
*/
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
unsigned long addr)
{
struct page *page;
page = find_get_page(swap_address_space(entry), swp_offset(entry));
INC_CACHE_INFO(find_total);
if (page) {
bool vma_ra = swap_use_vma_readahead();
bool readahead;
INC_CACHE_INFO(find_success);
/*
* At the moment, we don't support PG_readahead for anon THP
* so let's bail out rather than confusing the readahead stat.
*/
if (unlikely(PageTransCompound(page)))
return page;
readahead = TestClearPageReadahead(page);
if (vma && vma_ra) {
unsigned long ra_val;
int win, hits;
ra_val = GET_SWAP_RA_VAL(vma);
win = SWAP_RA_WIN(ra_val);
hits = SWAP_RA_HITS(ra_val);
if (readahead)
hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
atomic_long_set(&vma->swap_readahead_info,
SWAP_RA_VAL(addr, win, hits));
}
if (readahead) {
count_vm_event(SWAP_RA_HIT);
if (!vma || !vma_ra)
atomic_inc(&swapin_readahead_hits);
}
}
return page;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 104 | 56.22% | 3 | 21.43% |
MinChan Kim | 27 | 14.59% | 1 | 7.14% |
Linus Torvalds (pre-git) | 26 | 14.05% | 5 | 35.71% |
Shaohua Li | 16 | 8.65% | 2 | 14.29% |
Andrew Morton | 7 | 3.78% | 1 | 7.14% |
Linus Torvalds | 4 | 2.16% | 1 | 7.14% |
Marcelo Tosatti | 1 | 0.54% | 1 | 7.14% |
Total | 185 | 100.00% | 14 | 100.00% |
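The SWAP_RA_HIT event counted here, together with the SWAP_RA event counted by the readahead paths further down, is exported via /proc/vmstat (as swap_ra_hit and swap_ra on kernels that carry these counters), which makes it easy to judge how effective swap readahead is. A minimal sketch that prints both counters:

#include <stdio.h>
#include <string.h>

/* Dump the swap readahead counters fed by SWAP_RA / SWAP_RA_HIT. */
int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "swap_ra", 7))	/* swap_ra and swap_ra_hit */
			fputs(line, stdout);
	fclose(f);
	return 0;
}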
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr,
bool *new_page_allocated)
{
struct page *found_page, *new_page = NULL;
struct address_space *swapper_space = swap_address_space(entry);
int err;
*new_page_allocated = false;
do {
/*
* First check the swap cache. Since this is normally
* called after lookup_swap_cache() failed, re-calling
* that would confuse statistics.
*/
found_page = find_get_page(swapper_space, swp_offset(entry));
if (found_page)
break;
/*
* Just skip readahead for an unused swap slot.
* During swapoff, when swap_slot_cache is disabled,
* we have to handle the race between putting a
* swap entry into the swap cache and marking the swap
* slot as SWAP_HAS_CACHE. That's done later in this code,
* or else swapoff would be aborted if we returned NULL.
*/
if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
break;
/*
* Get a new page to read into from swap.
*/
if (!new_page) {
new_page = alloc_page_vma(gfp_mask, vma, addr);
if (!new_page)
break; /* Out of memory */
}
/*
* call radix_tree_preload() while we can wait.
*/
err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
if (err)
break;
/*
* Swap entry may have been freed since our caller observed it.
*/
err = swapcache_prepare(entry);
if (err == -EEXIST) {
radix_tree_preload_end();
/*
* We might race against get_swap_page() and stumble
* across a SWAP_HAS_CACHE swap_map entry whose page
* has not been brought into the swapcache yet.
*/
cond_resched();
continue;
}
if (err) { /* swp entry is obsolete ? */
radix_tree_preload_end();
break;
}
/* May fail (-ENOMEM) if radix-tree node allocation failed. */
__SetPageLocked(new_page);
__SetPageSwapBacked(new_page);
err = __add_to_swap_cache(new_page, entry);
if (likely(!err)) {
radix_tree_preload_end();
/*
* Initiate read into locked page and return.
*/
lru_cache_add_anon(new_page);
*new_page_allocated = true;
return new_page;
}
radix_tree_preload_end();
__ClearPageLocked(new_page);
/*
* add_to_swap_cache() doesn't return -EEXIST, so we can safely
* clear SWAP_HAS_CACHE flag.
*/
put_swap_page(new_page, entry);
} while (err != -ENOMEM);
if (new_page)
put_page(new_page);
return found_page;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 54 | 21.86% | 3 | 9.38% |
Linus Torvalds | 40 | 16.19% | 4 | 12.50% |
Daisuke Nishimura | 33 | 13.36% | 2 | 6.25% |
Dmitry Safonov | 25 | 10.12% | 1 | 3.12% |
Hugh Dickins | 23 | 9.31% | 3 | 9.38% |
Andrew Morton | 21 | 8.50% | 4 | 12.50% |
Kamezawa Hiroyuki | 16 | 6.48% | 2 | 6.25% |
Tim Chen | 8 | 3.24% | 1 | 3.12% |
Huang Ying | 8 | 3.24% | 3 | 9.38% |
Rik Van Riel | 5 | 2.02% | 2 | 6.25% |
Nicholas Piggin | 3 | 1.21% | 1 | 3.12% |
Rafael Aquini | 3 | 1.21% | 1 | 3.12% |
MinChan Kim | 3 | 1.21% | 1 | 3.12% |
Kirill A. Shutemov | 3 | 1.21% | 2 | 6.25% |
Marcelo Tosatti | 1 | 0.40% | 1 | 3.12% |
Jan Kara | 1 | 0.40% | 1 | 3.12% |
Total | 247 | 100.00% | 32 | 100.00% |
/*
* Locate a page of swap in physical memory, reserving swap cache space
* and reading the disk if it is not already cached.
* A failure return means that either the page allocation failed or that
* the swap entry is no longer in use.
*/
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
bool page_was_allocated;
struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
vma, addr, &page_was_allocated);
if (page_was_allocated)
swap_readpage(retpage, do_poll);
return retpage;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dmitry Safonov | 56 | 91.80% | 1 | 50.00% |
Shaohua Li | 5 | 8.20% | 1 | 50.00% |
Total | 61 | 100.00% | 2 | 100.00% |
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
unsigned long offset,
int hits,
int max_pages,
int prev_win)
{
unsigned int pages, last_ra;
/*
* This heuristic has been found to work well on both sequential and
* random loads, swapping to hard disk or to SSD: please don't ask
* what the "+ 2" means, it just happens to work well, that's all.
*/
pages = hits + 2;
if (pages == 2) {
/*
* We can have no readahead hits to judge by: but must not get
* stuck here forever, so check for an adjacent offset instead
* (and don't even bother to check whether swap type is same).
*/
if (offset != prev_offset + 1 && offset != prev_offset - 1)
pages = 1;
} else {
unsigned int roundup = 4;
while (roundup < pages)
roundup <<= 1;
pages = roundup;
}
if (pages > max_pages)
pages = max_pages;
/* Don't shrink readahead too fast */
last_ra = prev_win / 2;
if (pages < last_ra)
pages = last_ra;
return pages;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shaohua Li | 100 | 85.47% | 1 | 50.00% |
Huang Ying | 17 | 14.53% | 1 | 50.00% |
Total | 117 | 100.00% | 2 | 100.00% |
static unsigned long swapin_nr_pages(unsigned long offset)
{
static unsigned long prev_offset;
unsigned int hits, pages, max_pages;
static atomic_t last_readahead_pages;
max_pages = 1 << READ_ONCE(page_cluster);
if (max_pages <= 1)
return 1;
hits = atomic_xchg(&swapin_readahead_hits, 0);
pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
atomic_read(&last_readahead_pages));
if (!hits)
prev_offset = offset;
atomic_set(&last_readahead_pages, pages);
return pages;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 95 | 100.00% | 1 | 100.00% |
Total | 95 | 100.00% | 1 | 100.00% |
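__swapin_nr_pages() and swapin_nr_pages() size the next readahead window from the recent hit count: hits + 2 rounded up to a power of two, collapsed to a single page when there were no hits and the fault is not adjacent to the previous offset, capped at 1 << page_cluster, and never less than half the previous window. A small userspace re-implementation (with hypothetical inputs) that prints the window for a few hit counts:

#include <stdio.h>

/* Userspace copy of __swapin_nr_pages(): window from recent hit count. */
static unsigned int swapin_nr_pages(unsigned long prev_offset, unsigned long offset,
				    int hits, int max_pages, int prev_win)
{
	unsigned int pages = hits + 2, last_ra;

	if (pages == 2) {
		/* No hits: only read ahead if the fault is adjacent. */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;

		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}
	if (pages > max_pages)
		pages = max_pages;
	last_ra = prev_win / 2;		/* don't shrink too fast */
	if (pages < last_ra)
		pages = last_ra;
	return pages;
}

int main(void)
{
	int hits, max_pages = 8;	/* i.e. page_cluster = 3 */

	for (hits = 0; hits <= 6; hits++)
		printf("hits=%d -> window=%u pages\n", hits,
		       swapin_nr_pages(100, 200, hits, max_pages, 0));
	return 0;
}

The cap, max_pages, corresponds to the vm.page-cluster sysctl; its default of 3 gives a maximum window of 8 pages.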
/**
* swap_cluster_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
* @gfp_mask: memory allocation flags
* @vmf: fault information
*
* Returns the struct page for entry and addr, after queueing swapin.
*
* Primitive swap readahead code. We simply read an aligned block of
* (1 << page_cluster) entries in the swap area. This method is chosen
* because it doesn't cost us any seek time. We also make sure to queue
* the 'original' request together with the readahead ones...
*
* This has been extended to use the NUMA policies from the mm triggering
* the readahead.
*
* Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
*/
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct page *page;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
unsigned long mask;
struct swap_info_struct *si = swp_swap_info(entry);
struct blk_plug plug;
bool do_poll = true, page_allocated;
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
mask = swapin_nr_pages(offset) - 1;
if (!mask)
goto skip;
do_poll = false;
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
end_offset = offset | mask;
if (!start_offset) /* First page is swap header. */
start_offset++;
if (end_offset >= si->max)
end_offset = si->max - 1;
blk_start_plug(&plug);
for (offset = start_offset; offset <= end_offset ; offset++) {
/* Ok, do the async read-ahead now */
page = __read_swap_cache_async(
swp_entry(swp_type(entry), offset),
gfp_mask, vma, addr, &page_allocated);
if (!page)
continue;
if (page_allocated) {
swap_readpage(page, false);
if (offset != entry_offset) {
SetPageReadahead(page);
count_vm_event(SWAP_RA);
}
}
put_page(page);
}
blk_finish_plug(&plug);
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hugh Dickins | 87 | 33.46% | 2 | 16.67% |
Huang Ying | 53 | 20.38% | 4 | 33.33% |
Shaohua Li | 49 | 18.85% | 2 | 16.67% |
Rik Van Riel | 34 | 13.08% | 1 | 8.33% |
MinChan Kim | 20 | 7.69% | 1 | 8.33% |
Christian Ehrhardt | 16 | 6.15% | 1 | 8.33% |
Kirill A. Shutemov | 1 | 0.38% | 1 | 8.33% |
Total | 260 | 100.00% | 12 | 100.00% |
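The cluster readahead above reads an aligned, power-of-two block of swap slots around the faulting offset: with mask = window - 1, the block runs from offset & ~mask to offset | mask, skipping slot 0 which holds the swap header. A tiny sketch with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned long offset = 1234562;	/* hypothetical faulting swap offset */
	unsigned long win = 8;		/* window from swapin_nr_pages() */
	unsigned long mask = win - 1;
	unsigned long start = offset & ~mask;	/* aligned down */
	unsigned long end = offset | mask;	/* aligned up   */

	if (!start)			/* slot 0 is the swap header */
		start++;
	printf("read slots %lu..%lu around %lu\n", start, end, offset);
	return 0;
}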
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
struct address_space *spaces, *space;
unsigned int i, nr;
nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
if (!spaces)
return -ENOMEM;
for (i = 0; i < nr; i++) {
space = spaces + i;
INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
atomic_set(&space->i_mmap_writable, 0);
space->a_ops = &swap_aops;
/* swap cache doesn't use writeback related tags */
mapping_set_no_writeback_tags(space);
}
nr_swapper_spaces[type] = nr;
rcu_assign_pointer(swapper_spaces[type], spaces);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 132 | 97.06% | 2 | 50.00% |
Kees Cook | 3 | 2.21% | 1 | 25.00% |
Matthew Wilcox | 1 | 0.74% | 1 | 25.00% |
Total | 136 | 100.00% | 4 | 100.00% |
void exit_swap_address_space(unsigned int type)
{
struct address_space *spaces;
spaces = swapper_spaces[type];
nr_swapper_spaces[type] = 0;
rcu_assign_pointer(swapper_spaces[type], NULL);
synchronize_rcu();
kvfree(spaces);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 46 | 100.00% | 1 | 100.00% |
Total | 46 | 100.00% | 1 | 100.00% |
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
unsigned long faddr,
unsigned long lpfn,
unsigned long rpfn,
unsigned long *start,
unsigned long *end)
{
*start = max3(lpfn, PFN_DOWN(vma->vm_start),
PFN_DOWN(faddr & PMD_MASK));
*end = min3(rpfn, PFN_DOWN(vma->vm_end),
PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 82 | 100.00% | 1 | 100.00% |
Total | 82 | 100.00% | 1 | 100.00% |
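swap_ra_clamp_pfn() clips the VMA readahead window so it stays inside both the VMA and the single PMD (page-table page) covering the faulting address; the PTE walk in swap_ra_info() can then proceed without crossing a page-table boundary. A small userspace sketch of the same clamping, with hypothetical virtual addresses and an x86-64 style 2 MiB PMD:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* 2 MiB PMD */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;
	return m > c ? m : c;
}

static unsigned long min3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* Hypothetical VMA and faulting address. */
	unsigned long vm_start = 0x00100000UL, vm_end = 0x00400000UL;
	unsigned long faddr = 0x003ff000UL;
	unsigned long lpfn = PFN_DOWN(faddr) - 4, rpfn = PFN_DOWN(faddr) + 4;

	unsigned long start = max3(lpfn, PFN_DOWN(vm_start),
				   PFN_DOWN(faddr & PMD_MASK));
	unsigned long end = min3(rpfn, PFN_DOWN(vm_end),
				 PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));

	printf("window clamped to page numbers [%#lx, %#lx)\n", start, end);
	return 0;
}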
static void swap_ra_info(struct vm_fault *vmf,
struct vma_swap_readahead *ra_info)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long ra_val;
swp_entry_t entry;
unsigned long faddr, pfn, fpfn;
unsigned long start, end;
pte_t *pte, *orig_pte;
unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
pte_t *tpte;
#endif
max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
SWAP_RA_ORDER_CEILING);
if (max_win == 1) {
ra_info->win = 1;
return;
}
faddr = vmf->address;
orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
entry = pte_to_swp_entry(*pte);
if ((unlikely(non_swap_entry(entry)))) {
pte_unmap(orig_pte);
return;
}
fpfn = PFN_DOWN(faddr);
ra_val = GET_SWAP_RA_VAL(vma);
pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
prev_win = SWAP_RA_WIN(ra_val);
hits = SWAP_RA_HITS(ra_val);
ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
max_win, prev_win);
atomic_long_set(&vma->swap_readahead_info,
SWAP_RA_VAL(faddr, win, 0));
if (win == 1) {
pte_unmap(orig_pte);
return;
}
/* Copy the PTEs because the page table may be unmapped */
if (fpfn == pfn + 1)
swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
else if (pfn == fpfn + 1)
swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
&start, &end);
else {
left = (win - 1) / 2;
swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
&start, &end);
}
ra_info->nr_pte = end - start;
ra_info->offset = fpfn - start;
pte -= ra_info->offset;
#ifdef CONFIG_64BIT
ra_info->ptes = pte;
#else
tpte = ra_info->ptes;
for (pfn = start; pfn != end; pfn++)
*tpte++ = *pte++;
#endif
pte_unmap(orig_pte);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 346 | 86.50% | 2 | 66.67% |
MinChan Kim | 54 | 13.50% | 1 | 33.33% |
Total | 400 | 100.00% | 3 | 100.00% |
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct blk_plug plug;
struct vm_area_struct *vma = vmf->vma;
struct page *page;
pte_t *pte, pentry;
swp_entry_t entry;
unsigned int i;
bool page_allocated;
struct vma_swap_readahead ra_info = {0,};
swap_ra_info(vmf, &ra_info);
if (ra_info.win == 1)
goto skip;
blk_start_plug(&plug);
for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
i++, pte++) {
pentry = *pte;
if (pte_none(pentry))
continue;
if (pte_present(pentry))
continue;
entry = pte_to_swp_entry(pentry);
if (unlikely(non_swap_entry(entry)))
continue;
page = __read_swap_cache_async(entry, gfp_mask, vma,
vmf->address, &page_allocated);
if (!page)
continue;
if (page_allocated) {
swap_readpage(page, false);
if (i != ra_info.offset) {
SetPageReadahead(page);
count_vm_event(SWAP_RA);
}
}
put_page(page);
}
blk_finish_plug(&plug);
lru_add_drain();
skip:
return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
ra_info.win == 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 216 | 88.52% | 1 | 25.00% |
MinChan Kim | 27 | 11.07% | 2 | 50.00% |
Colin Ian King | 1 | 0.41% | 1 | 25.00% |
Total | 244 | 100.00% | 4 | 100.00% |
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
* @gfp_mask: memory allocation flags
* @vmf: fault information
*
* Returns the struct page for entry and addr, after queueing swapin.
*
* This is the main entry point for swap readahead. Depending on the
* configuration, it reads ahead blocks using either cluster-based (i.e.
* physical, disk-offset based) or VMA-based (i.e. virtual address based,
* around the faulting address) readahead.
*/
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
return swap_use_vma_readahead() ?
swap_vma_readahead(entry, gfp_mask, vmf) :
swap_cluster_readahead(entry, gfp_mask, vmf);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
MinChan Kim | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 33 | 97.06% | 1 | 50.00% |
MinChan Kim | 1 | 2.94% | 1 | 50.00% |
Total | 34 | 100.00% | 2 | 100.00% |
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
enable_vma_readahead = true;
else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
enable_vma_readahead = false;
else
return -EINVAL;
return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 83 | 97.65% | 1 | 50.00% |
MinChan Kim | 2 | 2.35% | 1 | 50.00% |
Total | 85 | 100.00% | 2 | 100.00% |
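vma_ra_enabled_store() accepts "true"/"1" and "false"/"0", and the attribute is created below under the mm kobject, so VMA-based readahead can be toggled at run time. A minimal sketch, assuming the usual /sys/kernel/mm/swap/vma_ra_enabled path:

#include <stdio.h>

/* Toggle VMA-based swap readahead (needs root). */
int main(void)
{
	const char *path = "/sys/kernel/mm/swap/vma_ra_enabled";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("false\n", f);	/* "true" or "1" re-enables it */
	fclose(f);
	return 0;
}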
static struct kobj_attribute vma_ra_enabled_attr =
__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
vma_ra_enabled_store);
static struct attribute *swap_attrs[] = {
&vma_ra_enabled_attr.attr,
NULL,
};
static struct attribute_group swap_attr_group = {
.attrs = swap_attrs,
};
static int __init swap_init_sysfs(void)
{
int err;
struct kobject *swap_kobj;
swap_kobj = kobject_create_and_add("swap", mm_kobj);
if (!swap_kobj) {
pr_err("failed to create swap kobject\n");
return -ENOMEM;
}
err = sysfs_create_group(swap_kobj, &swap_attr_group);
if (err) {
pr_err("failed to register swap group\n");
goto delete_obj;
}
return 0;
delete_obj:
kobject_put(swap_kobj);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 79 | 100.00% | 1 | 100.00% |
Total | 79 | 100.00% | 1 | 100.00% |
subsys_initcall(swap_init_sysfs);
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Huang Ying | 1699 | 50.99% | 12 | 11.01% |
Shaohua Li | 296 | 8.88% | 5 | 4.59% |
Andrew Morton | 226 | 6.78% | 13 | 11.93% |
MinChan Kim | 209 | 6.27% | 5 | 4.59% |
Linus Torvalds (pre-git) | 186 | 5.58% | 18 | 16.51% |
Hugh Dickins | 141 | 4.23% | 11 | 10.09% |
Linus Torvalds | 133 | 3.99% | 5 | 4.59% |
Daisuke Nishimura | 90 | 2.70% | 2 | 1.83% |
Dmitry Safonov | 82 | 2.46% | 1 | 0.92% |
Nicholas Piggin | 50 | 1.50% | 4 | 3.67% |
Rik Van Riel | 47 | 1.41% | 3 | 2.75% |
Sasha Levin | 24 | 0.72% | 1 | 0.92% |
Andrea Arcangeli | 24 | 0.72% | 1 | 0.92% |
Christian Ehrhardt | 19 | 0.57% | 1 | 0.92% |
Kamezawa Hiroyuki | 16 | 0.48% | 2 | 1.83% |
Christoph Lameter | 14 | 0.42% | 3 | 2.75% |
Matthew Wilcox | 12 | 0.36% | 1 | 0.92% |
Tim Chen | 11 | 0.33% | 2 | 1.83% |
Johannes Weiner | 8 | 0.24% | 1 | 0.92% |
Gerald Schaefer | 7 | 0.21% | 1 | 0.92% |
Michal Hocko | 5 | 0.15% | 1 | 0.92% |
Kirill A. Shutemov | 5 | 0.15% | 2 | 1.83% |
Vladimir Davydov | 4 | 0.12% | 1 | 0.92% |
Kees Cook | 3 | 0.09% | 1 | 0.92% |
Tejun Heo | 3 | 0.09% | 1 | 0.92% |
Changbin Du | 3 | 0.09% | 1 | 0.92% |
Rafael Aquini | 3 | 0.09% | 1 | 0.92% |
Colin Ian King | 2 | 0.06% | 1 | 0.92% |
Eric Sesterhenn / Snakebyte | 2 | 0.06% | 1 | 0.92% |
Marcelo Tosatti | 2 | 0.06% | 1 | 0.92% |
Mel Gorman | 1 | 0.03% | 1 | 0.92% |
Greg Kroah-Hartman | 1 | 0.03% | 1 | 0.92% |
Christoph Hellwig | 1 | 0.03% | 1 | 0.92% |
Aaron Lu | 1 | 0.03% | 1 | 0.92% |
Jan Kara | 1 | 0.03% | 1 | 0.92% |
Jens Axboe | 1 | 0.03% | 1 | 0.92% |
Total | 3332 | 100.00% | 109 | 100.00% |