cregit-Linux how code gets into the kernel

Release 4.18 mm/swap_state.c

Directory: mm
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */

static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};


struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;

static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;

static bool enable_vma_readahead __read_mostly = true;


#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)

#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)

#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK

#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)


#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)

#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)

#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)


#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */

#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
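
As an aside, the macros above pack the last fault address, the readahead window and the hit count into a single long. A minimal userspace sketch of that packing and unpacking follows; PAGE_SHIFT = 12 (4 KiB pages) and the sample address, window and hit values are illustrative assumptions, not taken from a real kernel build:

#include <stdio.h>

/* Illustrative restatement of the packing above, assuming 4 KiB pages. */
#define SKETCH_PAGE_SHIFT   12
#define SKETCH_PAGE_MASK    (~((1UL << SKETCH_PAGE_SHIFT) - 1))
#define SKETCH_WIN_SHIFT    (SKETCH_PAGE_SHIFT / 2)                 /* 6 */
#define SKETCH_HITS_MASK    ((1UL << SKETCH_WIN_SHIFT) - 1)         /* low 6 bits: hits */
#define SKETCH_WIN_MASK     (~SKETCH_PAGE_MASK & ~SKETCH_HITS_MASK) /* next 6 bits: window */

int main(void)
{
        unsigned long addr = 0x7f1234567000UL;  /* page-aligned fault address */
        unsigned long win = 8, hits = 3;        /* made-up window and hit count */

        /* Pack: address in the page-aligned bits, window and hits below it. */
        unsigned long v = (addr & SKETCH_PAGE_MASK) |
                          ((win << SKETCH_WIN_SHIFT) & SKETCH_WIN_MASK) |
                          (hits & SKETCH_HITS_MASK);

        /* Unpack the three fields again. */
        printf("addr=%#lx win=%lu hits=%lu\n",
               v & SKETCH_PAGE_MASK,
               (v & SKETCH_WIN_MASK) >> SKETCH_WIN_SHIFT,
               v & SKETCH_HITS_MASK);
        return 0;
}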


#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)


static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;


unsigned long total_swapcache_pages(void)
{
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;

        rcu_read_lock();
        for (i = 0; i < MAX_SWAPFILES; i++) {
                /*
                 * The corresponding entries in nr_swapper_spaces and
                 * swapper_spaces will be reused only after at least
                 * one grace period.  So it is impossible for them
                 * to belong to different usages.
                 */
                nr = nr_swapper_spaces[i];
                spaces = rcu_dereference(swapper_spaces[i]);
                if (!nr || !spaces)
                        continue;
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
        }
        rcu_read_unlock();
        return ret;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    60       60.00%    1         50.00%
Shaohua Li                    40       40.00%    1         50.00%
Total                         100      100.00%   2         100.00%

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Andrea Arcangeli              24       38.10%    1         14.29%
Linus Torvalds                14       22.22%    1         14.29%
Linus Torvalds (pre-git)      13       20.63%    1         14.29%
Johannes Weiner               8        12.70%    1         14.29%
Shaohua Li                    3        4.76%     2         28.57%
Hugh Dickins                  1        1.59%     1         14.29%
Total                         63       100.00%   7         100.00%

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error, i, nr = hpage_nr_pages(page);
        struct address_space *address_space;
        pgoff_t idx = swp_offset(entry);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        address_space = swap_address_space(entry);
        xa_lock_irq(&address_space->i_pages);
        for (i = 0; i < nr; i++) {
                set_page_private(page + i, entry.val + i);
                error = radix_tree_insert(&address_space->i_pages,
                                          idx + i, page + i);
                if (unlikely(error))
                        break;
        }
        if (likely(!error)) {
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
        } else {
                /*
                 * Only the context which has set the SWAP_HAS_CACHE flag
                 * would call add_to_swap_cache().
                 * So add_to_swap_cache() doesn't return -EEXIST.
                 */
                VM_BUG_ON(error == -EEXIST);
                set_page_private(page + i, 0UL);
                while (i--) {
                        radix_tree_delete(&address_space->i_pages, idx + i);
                        set_page_private(page + i, 0UL);
                }
                ClearPageSwapCache(page);
                page_ref_sub(page, nr);
        }
        xa_unlock_irq(&address_space->i_pages);

        return error;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    111      43.02%    1         5.56%
Nicholas Piggin               38       14.73%    2         11.11%
Andrew Morton                 34       13.18%    2         11.11%
Shaohua Li                    19       7.36%     1         5.56%
Daisuke Nishimura             14       5.43%     2         11.11%
Linus Torvalds (pre-git)      11       4.26%     4         22.22%
Sasha Levin                   9        3.49%     1         5.56%
Rik Van Riel                  8        3.10%     1         5.56%
Matthew Wilcox                6        2.33%     1         5.56%
Hugh Dickins                  4        1.55%     1         5.56%
Christoph Lameter             3        1.16%     1         5.56%
Linus Torvalds                1        0.39%     1         5.56%
Total                         258      100.00%   18        100.00%


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
        if (!error) {
                error = __add_to_swap_cache(page, entry);
                radix_tree_preload_end();
        }
        return error;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Daisuke Nishimura             42       79.25%    1         33.33%
Huang Ying                    6        11.32%    1         33.33%
Andrew Morton                 5        9.43%     1         33.33%
Total                         53       100.00%   3         100.00%

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        struct address_space *address_space;
        int i, nr = hpage_nr_pages(page);
        swp_entry_t entry;
        pgoff_t idx;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        entry.val = page_private(page);
        address_space = swap_address_space(entry);
        idx = swp_offset(entry);
        for (i = 0; i < nr; i++) {
                radix_tree_delete(&address_space->i_pages, idx + i);
                set_page_private(page + i, 0);
        }
        ClearPageSwapCache(page);
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    55       35.95%    1         7.14%
Shaohua Li                    26       16.99%    1         7.14%
Andrew Morton                 25       16.34%    2         14.29%
Linus Torvalds (pre-git)      19       12.42%    3         21.43%
Linus Torvalds                11       7.19%     3         21.43%
Sasha Levin                   9        5.88%     1         7.14%
Hugh Dickins                  4        2.61%     1         7.14%
Christoph Lameter             3        1.96%     1         7.14%
Matthew Wilcox                1        0.65%     1         7.14%
Total                         153      100.00%   14        100.00%

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        /*
         * Radix-tree node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
        /* -ENOMEM radix-tree allocation failure */
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should be
         * dirty. A special case is MADV_FREE page. The page's pte could have
         * the dirty bit cleared but the page's SwapBacked bit still set,
         * because clearing the dirty bit and the SwapBacked bit is not
         * protected by a lock. For such a page, unmap will not set the dirty
         * bit, so page reclaim will not write the page out. This can cause
         * data corruption when the page is swapped in later. Always setting
         * the dirty bit for the page solves the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Andrew Morton                 37       36.27%    2         18.18%
Huang Ying                    28       27.45%    1         9.09%
Nicholas Piggin               8        7.84%     1         9.09%
MinChan Kim                   7        6.86%     2         18.18%
Shaohua Li                    6        5.88%     1         9.09%
Sasha Levin                   6        5.88%     1         9.09%
Hugh Dickins                  4        3.92%     1         9.09%
Vladimir Davydov              4        3.92%     1         9.09%
Eric Sesterhenn / Snakebyte   2        1.96%     1         9.09%
Total                         102      100.00%   11        100.00%

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;
        struct address_space *address_space;

        entry.val = page_private(page);

        address_space = swap_address_space(entry);
        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);
        page_ref_sub(page, hpage_nr_pages(page));
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Linus Torvalds (pre-git)      28       38.89%    6         50.00%
Shaohua Li                    16       22.22%    1         8.33%
Linus Torvalds                11       15.28%    1         8.33%
Huang Ying                    8        11.11%    1         8.33%
Matthew Wilcox                4        5.56%     1         8.33%
Hugh Dickins                  3        4.17%     1         8.33%
MinChan Kim                   2        2.78%     1         8.33%
Total                         72       100.00%   12        100.00%

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Linus Torvalds                29       69.05%    1         20.00%
Hugh Dickins                  7        16.67%    1         20.00%
Andrew Morton                 5        11.90%    2         40.00%
Nicholas Piggin               1        2.38%     1         20.00%
Total                         42       100.00%   5         100.00%

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Andrew Morton                 14       50.00%    1         20.00%
Gerald Schaefer               7        25.00%    1         20.00%
Linus Torvalds                5        17.86%    1         20.00%
Aaron Lu                      1        3.57%     1         20.00%
Kirill A. Shutemov            1        3.57%     1         20.00%
Total                         28       100.00%   5         100.00%

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Andrew Morton                 51       91.07%    1         50.00%
Michal Hocko                  5        8.93%     1         50.00%
Total                         56       100.00%   2         100.00%


static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
MinChan Kim                   22       100.00%   1         100.00%
Total                         22       100.00%   1         100.00%

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
{
        struct page *page;

        page = find_get_page(swap_address_space(entry), swp_offset(entry));

        INC_CACHE_INFO(find_total);
        if (page) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                INC_CACHE_INFO(find_success);
                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(PageTransCompound(page)))
                        return page;

                readahead = TestClearPageReadahead(page);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return page;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    104      56.22%    3         21.43%
MinChan Kim                   27       14.59%    1         7.14%
Linus Torvalds (pre-git)      26       14.05%    5         35.71%
Shaohua Li                    16       8.65%     2         14.29%
Andrew Morton                 7        3.78%     1         7.14%
Linus Torvalds                4        2.16%     1         7.14%
Marcelo Tosatti               1        0.54%     1         7.14%
Total                         185      100.00%   14        100.00%


struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct page *found_page, *new_page = NULL;
        struct address_space *swapper_space = swap_address_space(entry);
        int err;
        *new_page_allocated = false;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(swapper_space, swp_offset(entry));
                if (found_page)
                        break;

                /*
                 * Just skip read ahead for unused swap slot.
                 * During swap_off when swap_slot_cache is disabled,
                 * we have to handle the race between putting
                 * swap entry in swap cache and marking swap slot
                 * as SWAP_HAS_CACHE.  That's done in later part of code or
                 * else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * call radix_tree_preload() while we can wait.
                 */
                err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
                if (err)
                        break;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (err == -EEXIST) {
                        radix_tree_preload_end();
                        /*
                         * We might race against get_swap_page() and stumble
                         * across a SWAP_HAS_CACHE swap_map entry whose page
                         * has not been brought into the swapcache yet.
                         */
                        cond_resched();
                        continue;
                }
                if (err) {              /* swp entry is obsolete ? */
                        radix_tree_preload_end();
                        break;
                }

                /* May fail (-ENOMEM) if radix-tree node allocation failed. */
                __SetPageLocked(new_page);
                __SetPageSwapBacked(new_page);
                err = __add_to_swap_cache(new_page, entry);
                if (likely(!err)) {
                        radix_tree_preload_end();
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_anon(new_page);
                        *new_page_allocated = true;
                        return new_page;
                }
                radix_tree_preload_end();
                __ClearPageLocked(new_page);
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                put_swap_page(new_page, entry);
        } while (err != -ENOMEM);

        if (new_page)
                put_page(new_page);
        return found_page;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Linus Torvalds (pre-git)      54       21.86%    3         9.38%
Linus Torvalds                40       16.19%    4         12.50%
Daisuke Nishimura             33       13.36%    2         6.25%
Dmitry Safonov                25       10.12%    1         3.12%
Hugh Dickins                  23       9.31%     3         9.38%
Andrew Morton                 21       8.50%     4         12.50%
Kamezawa Hiroyuki             16       6.48%     2         6.25%
Tim Chen                      8        3.24%     1         3.12%
Huang Ying                    8        3.24%     3         9.38%
Rik Van Riel                  5        2.02%     2         6.25%
Nicholas Piggin               3        1.21%     1         3.12%
Rafael Aquini                 3        1.21%     1         3.12%
MinChan Kim                   3        1.21%     1         3.12%
Kirill A. Shutemov            3        1.21%     2         6.25%
Marcelo Tosatti               1        0.40%     1         3.12%
Jan Kara                      1        0.40%     1         3.12%
Total                         247      100.00%   32        100.00%

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Dmitry Safonov                56       91.80%    1         50.00%
Shaohua Li                    5        8.20%     1         50.00%
Total                         61       100.00%   2         100.00%


static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Shaohua Li                    100      85.47%    1         50.00%
Huang Ying                    17       14.53%    1         50.00%
Total                         117      100.00%   2         100.00%
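
As a rough illustration of how the window heuristic above behaves, here is a standalone sketch that simply restates the same logic and feeds it a few made-up hit counts (the hit sequence, max_pages value and offsets are illustrative assumptions):

#include <stdio.h>

/* Standalone restatement of the __swapin_nr_pages() window heuristic. */
static unsigned int swapin_window(unsigned long prev_offset, unsigned long offset,
                                  int hits, int max_pages, int prev_win)
{
        unsigned int pages = hits + 2, roundup = 4, last_ra;

        if (pages == 2) {
                /* No hits: read ahead only if the fault looks sequential. */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                while (roundup < pages)         /* round up to a power of two */
                        roundup <<= 1;
                pages = roundup;
        }
        if (pages > max_pages)
                pages = max_pages;
        last_ra = prev_win / 2;                 /* don't shrink faster than half */
        if (pages < last_ra)
                pages = last_ra;
        return pages;
}

int main(void)
{
        int hits[] = { 0, 1, 3, 7, 0 };         /* made-up hit counts per fault */
        unsigned int win = 0;

        for (int i = 0; i < 5; i++) {
                win = swapin_window(99, 100, hits[i], 8, win);
                printf("hits=%d -> window=%u pages\n", hits[i], win);
        }
        return 0;
}

With these inputs the window grows 2, 4, 8, 8 and then decays to 4 pages, showing both the power-of-two round-up and the "shrink by at most half" rule.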


static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                prev_offset = offset;
        atomic_set(&last_readahead_pages, pages);

        return pages;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    95       100.00%   1         100.00%
Total                         95       100.00%   1         100.00%

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Hugh Dickins                  87       33.46%    2         16.67%
Huang Ying                    53       20.38%    4         33.33%
Shaohua Li                    49       18.85%    2         16.67%
Rik Van Riel                  34       13.08%    1         8.33%
MinChan Kim                   20       7.69%     1         8.33%
Christian Ehrhardt            16       6.15%     1         8.33%
Kirill A. Shutemov            1        0.38%     1         8.33%
Total                         260      100.00%   12        100.00%


int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                INIT_RADIX_TREE(&space->i_pages, GFP_ATOMIC|__GFP_NOWARN);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        rcu_assign_pointer(swapper_spaces[type], spaces);

        return 0;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    132      97.06%    2         50.00%
Kees Cook                     3        2.21%     1         25.00%
Matthew Wilcox                1        0.74%     1         25.00%
Total                         136      100.00%   4         100.00%
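
For context, each swap device's cache is split into several address_spaces of SWAP_ADDRESS_SPACE_PAGES slots, and swap_address_space() picks the right one by shifting the swap offset. The following is a restatement from memory of the helper as it appears in <linux/swap.h> for this era; treat the exact definition and the constant 14 as assumptions to be checked against the header:

/*
 * Restated from <linux/swap.h> (not part of this file): with a shift of 14,
 * offsets 0..16383 of a device share one address_space, 16384..32767 the
 * next, and so on.
 */
#define SWAP_ADDRESS_SPACE_SHIFT        14
#define SWAP_ADDRESS_SPACE_PAGES        (1 << SWAP_ADDRESS_SPACE_SHIFT)
#define swap_address_space(entry)                           \
        (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
                >> SWAP_ADDRESS_SPACE_SHIFT])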


void exit_swap_address_space(unsigned int type)
{
        struct address_space *spaces;

        spaces = swapper_spaces[type];
        nr_swapper_spaces[type] = 0;
        rcu_assign_pointer(swapper_spaces[type], NULL);
        synchronize_rcu();
        kvfree(spaces);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    46       100.00%   1         100.00%
Total                         46       100.00%   1         100.00%


static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
                                     unsigned long faddr,
                                     unsigned long lpfn,
                                     unsigned long rpfn,
                                     unsigned long *start,
                                     unsigned long *end)
{
        *start = max3(lpfn, PFN_DOWN(vma->vm_start),
                      PFN_DOWN(faddr & PMD_MASK));
        *end = min3(rpfn, PFN_DOWN(vma->vm_end),
                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    82       100.00%   1         100.00%
Total                         82       100.00%   1         100.00%


static void swap_ra_info(struct vm_fault *vmf,
                        struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        swp_entry_t entry;
        unsigned long faddr, pfn, fpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        entry = pte_to_swp_entry(*pte);
        if ((unlikely(non_swap_entry(entry)))) {
                pte_unmap(orig_pte);
                return;
        }

        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1) {
                pte_unmap(orig_pte);
                return;
        }

        /* Copy the PTEs because the page table may be unmapped */
        if (fpfn == pfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
        else if (pfn == fpfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
                                  &start, &end);
        else {
                left = (win - 1) / 2;
                swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
                                  &start, &end);
        }
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    346      86.50%    2         66.67%
MinChan Kim                   54       13.50%    1         33.33%
Total                         400      100.00%   3         100.00%


static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {0,};

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (pte_none(pentry))
                        continue;
                if (pte_present(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        lru_add_drain();
skip:
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    216      88.52%    1         25.00%
MinChan Kim                   27       11.07%    2         50.00%
Colin Ian King                1        0.41%     1         25.00%
Total                         244      100.00%   4         100.00%

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. physical,
 * disk-offset based) or VMA-based (i.e. based on the faulting virtual
 * address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
                        swap_vma_readahead(entry, gfp_mask, vmf) :
                        swap_cluster_readahead(entry, gfp_mask, vmf);
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
MinChan Kim                   40       100.00%   1         100.00%
Total                         40       100.00%   1         100.00%

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    33       97.06%    1         50.00%
MinChan Kim                   1        2.94%     1         50.00%
Total                         34       100.00%   2         100.00%


static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
                enable_vma_readahead = true;
        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
                enable_vma_readahead = false;
        else
                return -EINVAL;

        return count;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    83       97.65%    1         50.00%
MinChan Kim                   2        2.35%     1         50.00%
Total                         85       100.00%   2         100.00%

static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}

Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    79       100.00%   1         100.00%
Total                         79       100.00%   1         100.00%

subsys_initcall(swap_init_sysfs);
#endif
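
(For reference: swap_init_sysfs() registers the attribute group under the mm kobject, so with CONFIG_SYSFS enabled the knob appears as /sys/kernel/mm/swap/vma_ra_enabled; writing "true"/"1" or "false"/"0" to it toggles enable_vma_readahead, and hence VMA-based readahead, at run time.)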

Overall Contributors

Person                        Tokens   Prop      Commits   CommitProp
Huang Ying                    1699     50.99%    12        11.01%
Shaohua Li                    296      8.88%     5         4.59%
Andrew Morton                 226      6.78%     13        11.93%
MinChan Kim                   209      6.27%     5         4.59%
Linus Torvalds (pre-git)      186      5.58%     18        16.51%
Hugh Dickins                  141      4.23%     11        10.09%
Linus Torvalds                133      3.99%     5         4.59%
Daisuke Nishimura             90       2.70%     2         1.83%
Dmitry Safonov                82       2.46%     1         0.92%
Nicholas Piggin               50       1.50%     4         3.67%
Rik Van Riel                  47       1.41%     3         2.75%
Sasha Levin                   24       0.72%     1         0.92%
Andrea Arcangeli              24       0.72%     1         0.92%
Christian Ehrhardt            19       0.57%     1         0.92%
Kamezawa Hiroyuki             16       0.48%     2         1.83%
Christoph Lameter             14       0.42%     3         2.75%
Matthew Wilcox                12       0.36%     1         0.92%
Tim Chen                      11       0.33%     2         1.83%
Johannes Weiner               8        0.24%     1         0.92%
Gerald Schaefer               7        0.21%     1         0.92%
Michal Hocko                  5        0.15%     1         0.92%
Kirill A. Shutemov            5        0.15%     2         1.83%
Vladimir Davydov              4        0.12%     1         0.92%
Kees Cook                     3        0.09%     1         0.92%
Tejun Heo                     3        0.09%     1         0.92%
Changbin Du                   3        0.09%     1         0.92%
Rafael Aquini                 3        0.09%     1         0.92%
Colin Ian King                2        0.06%     1         0.92%
Eric Sesterhenn / Snakebyte   2        0.06%     1         0.92%
Marcelo Tosatti               2        0.06%     1         0.92%
Mel Gorman                    1        0.03%     1         0.92%
Greg Kroah-Hartman            1        0.03%     1         0.92%
Christoph Hellwig             1        0.03%     1         0.92%
Aaron Lu                      1        0.03%     1         0.92%
Jan Kara                      1        0.03%     1         0.92%
Jens Axboe                    1        0.03%     1         0.92%
Total                         3332     100.00%   109       100.00%