cregit-Linux: how code gets into the kernel

Release 4.14 mm/swap_state.c

Directory: mm
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */

static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};


struct address_space *swapper_spaces[MAX_SWAPFILES];

static unsigned int nr_swapper_spaces[MAX_SWAPFILES];

bool swap_vma_readahead = true;


#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)

#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)

#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK

#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)


#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)

#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)

#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)


#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */

#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
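
These three fields are packed into a single long so that lookup_swap_cache() and swap_readahead_detect() below can read and update vma->swap_readahead_info with one atomic_long_read()/atomic_long_set(). The following standalone sketch (illustration only, not part of the kernel source; it assumes PAGE_SHIFT == 12, so the hit count occupies bits 0-5, the window bits 6-11, and the page-aligned address the remaining high bits) replays the packing and unpacking in userspace:

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_MASK		(~((1UL << PAGE_SHIFT) - 1))
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

int main(void)
{
	/* hypothetical fault address, 8-page window, 3 readahead hits */
	unsigned long v = SWAP_RA_VAL(0x7f0000123456UL, 8UL, 3UL);

	/* prints: addr=0x7f0000123000 win=8 hits=3 */
	printf("addr=%#lx win=%lu hits=%lu\n",
	       SWAP_RA_ADDR(v), SWAP_RA_WIN(v), SWAP_RA_HITS(v));
	return 0;
}

With these definitions, SWAP_RA_ADDR(), SWAP_RA_WIN() and SWAP_RA_HITS() recover exactly the values passed to SWAP_RA_VAL(), provided the window fits in SWAP_RA_WIN_SHIFT bits (0-63 here).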


#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;


unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one grace period.  So it is impossible for them
		 * to belong to different usages.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  60      60.00%    1        50.00%
Shaohua Li                  40      40.00%    1        50.00%
Total                       100     100.00%   2        100.00%

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n",
		total_swap_pages << (PAGE_SHIFT - 10));
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Andrea Arcangeli            24      38.10%    1        14.29%
Linus Torvalds              14      22.22%    1        14.29%
Linus Torvalds (pre-git)    13      20.63%    1        14.29%
Johannes Weiner             8       12.70%    1        14.29%
Shaohua Li                  3       4.76%     2        28.57%
Hugh Dickins                1       1.59%     1        14.29%
Total                       63      100.00%   7        100.00%

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error, i, nr = hpage_nr_pages(page);
	struct address_space *address_space;
	pgoff_t idx = swp_offset(entry);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	for (i = 0; i < nr; i++) {
		set_page_private(page + i, entry.val + i);
		error = radix_tree_insert(&address_space->page_tree,
					  idx + i, page + i);
		if (unlikely(error))
			break;
	}
	if (likely(!error)) {
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
	} else {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page + i, 0UL);
		while (i--) {
			radix_tree_delete(&address_space->page_tree, idx + i);
			set_page_private(page + i, 0UL);
		}
		ClearPageSwapCache(page);
		page_ref_sub(page, nr);
	}
	spin_unlock_irq(&address_space->tree_lock);

	return error;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  114     44.19%    1        5.56%
Nicholas Piggin             39      15.12%    3        16.67%
Andrew Morton               36      13.95%    2        11.11%
Shaohua Li                  19      7.36%     1        5.56%
Daisuke Nishimura           14      5.43%     2        11.11%
Linus Torvalds (pre-git)    11      4.26%     4        22.22%
Sasha Levin                 9       3.49%     1        5.56%
Rik Van Riel                8       3.10%     1        5.56%
Hugh Dickins                4       1.55%     1        5.56%
Christoph Lameter           3       1.16%     1        5.56%
Linus Torvalds              1       0.39%     1        5.56%
Total                       258     100.00%   18       100.00%


int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload_order(gfp_mask, compound_order(page));
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Daisuke Nishimura           42      79.25%    1        33.33%
Huang Ying                  6       11.32%    1        33.33%
Andrew Morton               5       9.43%     1        33.33%
Total                       53      100.00%   3        100.00%

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  55      35.95%    1        7.69%
Andrew Morton               26      16.99%    2        15.38%
Shaohua Li                  26      16.99%    1        7.69%
Linus Torvalds (pre-git)    19      12.42%    3        23.08%
Linus Torvalds              11      7.19%     3        23.08%
Sasha Levin                 9       5.88%     1        7.69%
Hugh Dickins                4       2.61%     1        7.69%
Christoph Lameter           3       1.96%     1        7.69%
Total                       153     100.00%   13       100.00%

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry))
		goto fail;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
	/* -ENOMEM radix-tree allocation failure */
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is MADV_FREE page. The page's pte could have
	 * dirty bit cleared but the page's SwapBacked bit is still set because
	 * clearing the dirty bit and SwapBacked bit is not lock protected. For
	 * such page, unmap will not set dirty bit for it, so page reclaim will
	 * not write the page out. This can cause data corruption when the page
	 * is swapped in later. Always setting the dirty bit for the page solves
	 * the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Andrew Morton               37      32.46%    2        16.67%
Huang Ying                  30      26.32%    1        8.33%
Vladimir Davydov            11      9.65%     1        8.33%
MinChan Kim                 8       7.02%     2        16.67%
Nicholas Piggin             8       7.02%     1        8.33%
Shaohua Li                  6       5.26%     1        8.33%
Sasha Levin                 6       5.26%     1        8.33%
Hugh Dickins                4       3.51%     1        8.33%
Eric Sesterhenn / Snakebyte 2       1.75%     1        8.33%
Andrea Arcangeli            2       1.75%     1        8.33%
Total                       114     100.00%   12       100.00%

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Linus Torvalds (pre-git)    28      38.89%    6        46.15%
Shaohua Li                  16      22.22%    1        7.69%
Linus Torvalds              11      15.28%    1        7.69%
Huang Ying                  8       11.11%    1        7.69%
Hugh Dickins                3       4.17%     1        7.69%
MinChan Kim                 2       2.78%     1        7.69%
Andrew Morton               2       2.78%     1        7.69%
Nicholas Piggin             2       2.78%     1        7.69%
Total                       72      100.00%   13       100.00%

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Linus Torvalds              29      69.05%    1        20.00%
Hugh Dickins                7       16.67%    1        20.00%
Andrew Morton               5       11.90%    2        40.00%
Nicholas Piggin             1       2.38%     1        20.00%
Total                       42      100.00%   5        100.00%

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Andrew Morton               14      50.00%    1        20.00%
Gerald Schaefer             7       25.00%    1        20.00%
Linus Torvalds              5       17.86%    1        20.00%
Aaron Lu                    1       3.57%     1        20.00%
Kirill A. Shutemov          1       3.57%     1        20.00%
Total                       28      100.00%   5        100.00%

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Andrew Morton               52      89.66%    2        50.00%
Michal Hocko                5       8.62%     1        25.00%
Mel Gorman                  1       1.72%     1        25.00%
Total                       58      100.00%   4        100.00%

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	unsigned long ra_info;
	int win, hits, readahead;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	INC_CACHE_INFO(find_total);
	if (page) {
		INC_CACHE_INFO(find_success);
		if (unlikely(PageTransCompound(page)))
			return page;
		readahead = TestClearPageReadahead(page);
		if (vma) {
			ra_info = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_info);
			hits = SWAP_RA_HITS(ra_info);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}
		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma)
				atomic_inc(&swapin_readahead_hits);
		}
	}
	return page;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  118     68.60%    3        23.08%
Linus Torvalds (pre-git)    26      15.12%    5        38.46%
Shaohua Li                  16      9.30%     2        15.38%
Andrew Morton               7       4.07%     1        7.69%
Linus Torvalds              4       2.33%     1        7.69%
Marcelo Tosatti             1       0.58%     1        7.69%
Total                       172     100.00%   13       100.00%


struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in later part of code or
		 * else swap_off will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		put_swap_page(new_page, entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Linus Torvalds (pre-git)    54      21.86%    3        9.38%
Linus Torvalds              41      16.60%    4        12.50%
Daisuke Nishimura           33      13.36%    2        6.25%
Dmitry Safonov              25      10.12%    1        3.12%
Hugh Dickins                23      9.31%     3        9.38%
Andrew Morton               20      8.10%     4        12.50%
Kamezawa Hiroyuki           16      6.48%     2        6.25%
Huang Ying                  8       3.24%     3        9.38%
Tim Chen                    8       3.24%     1        3.12%
Rik Van Riel                5       2.02%     2        6.25%
MinChan Kim                 3       1.21%     1        3.12%
Rafael Aquini               3       1.21%     1        3.12%
Nicholas Piggin             3       1.21%     1        3.12%
Kirill A. Shutemov          3       1.21%     2        6.25%
Jan Kara                    1       0.40%     1        3.12%
Marcelo Tosatti             1       0.40%     1        3.12%
Total                       247     100.00%   32       100.00%

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Dmitry Safonov              56      91.80%    1        50.00%
Shaohua Li                  5       8.20%     1        50.00%
Total                       61      100.00%   2        100.00%


static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
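
As a worked example of the heuristic: with hits == 5 and max_pages == 8 (page_cluster == 3), pages starts at 5 + 2 = 7, is rounded up to the next power of two (8), and already equals max_pages, so an 8-page window is used. With hits == 0, pages is 2; unless the faulting offset is adjacent to prev_offset the window collapses to a single page, and even then the prev_win / 2 floor keeps it from shrinking faster than by half per step (a previous window of 8 still yields at least 4).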

Contributors

Person                      Tokens  Token %   Commits  Commit %
Shaohua Li                  100     85.47%    1        50.00%
Huang Ying                  17      14.53%    1        50.00%
Total                       117     100.00%   2        100.00%


static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		prev_offset = offset;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  95      100.00%   1        100.00%
Total                       95      100.00%   1        100.00%

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;
	bool do_poll = true, page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
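
For example, if swapin_nr_pages() returns 8, mask is 7; a fault on swap offset 0x1234 gives start_offset = 0x1234 & ~7 = 0x1230 and end_offset = 0x1234 | 7 = 0x1237, so the loop kicks off asynchronous reads for the eight swap slots 0x1230-0x1237 (the cluster is aligned to the window size rather than centred on the fault). The trailing read_swap_cache_async() then returns the page for the faulting slot itself, which by that point is normally already in the swap cache.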

Contributors

Person                      Tokens  Token %   Commits  Commit %
Hugh Dickins                94      40.87%    2        18.18%
Shaohua Li                  49      21.30%    2        18.18%
Huang Ying                  36      15.65%    4        36.36%
Rik Van Riel                34      14.78%    1        9.09%
Christian Ehrhardt          16      6.96%     1        9.09%
Kirill A. Shutemov          1       0.43%     1        9.09%
Total                       230     100.00%   11       100.00%


int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}
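
The swap cache for one swap device is thus split across several address_space structures instead of a single one, spreading tree_lock contention. Assuming SWAP_ADDRESS_SPACE_PAGES is 1 << 14 (its definition in include/linux/swap.h for this release), each address_space covers 16384 swap slots, or 64 MB with 4 KB pages; a 4 GB swap device (nr_pages = 1048576) therefore gets nr = DIV_ROUND_UP(1048576, 16384) = 64 independent radix trees and locks, and swap_address_space() selects one by shifting the swap offset right by SWAP_ADDRESS_SPACE_SHIFT.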

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  144     100.00%   2        100.00%
Total                       144     100.00%   2        100.00%


void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  46      100.00%   1        100.00%
Total                       46      100.00%   1        100.00%


static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
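
As a concrete illustration, assume 4 KB pages so that one PMD (one page-table page) covers 512 PTEs. A requested window of [fpfn - 3, fpfn + 5) is clamped so that it neither extends below PFN_DOWN(vma->vm_start) nor beyond PFN_DOWN(vma->vm_end), and never crosses the PMD-aligned block containing faddr: a fault in the middle of the block keeps the full window, while a fault in the block's first or last few pages loses the part of the window that would spill into a neighbouring page table, which swap_readahead_detect() must not touch when it walks the PTEs.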

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  82      100.00%   1        100.00%
Total                       82      100.00%   1        100.00%


struct page *swap_readahead_detect(struct vm_fault *vmf,
				   struct vma_swap_readahead *swap_ra)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long swap_ra_info;
	struct page *page;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		swap_ra->win = 1;
		return NULL;
	}

	faddr = vmf->address;
	entry = pte_to_swp_entry(vmf->orig_pte);
	if ((unlikely(non_swap_entry(entry))))
		return NULL;
	page = lookup_swap_cache(entry, vma, faddr);
	if (page)
		return page;

	fpfn = PFN_DOWN(faddr);
	swap_ra_info = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
	prev_win = SWAP_RA_WIN(swap_ra_info);
	hits = SWAP_RA_HITS(swap_ra_info);
	swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1)
		return NULL;

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	swap_ra->nr_pte = end - start;
	swap_ra->offset = fpfn - start;
	pte = vmf->pte - swap_ra->offset;
#ifdef CONFIG_64BIT
	swap_ra->ptes = pte;
#else
	tpte = swap_ra->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif

	return NULL;
}
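
To make the three placement cases concrete, take win = 8: a fault exactly one page after the previously recorded address (fpfn == pfn + 1) is treated as a forward sequential stream and the window is placed ahead of the fault, [fpfn, fpfn + 8); a fault one page before it (pfn == fpfn + 1) is treated as a backward stream, [fpfn - 7, fpfn + 1); any other pattern is treated as random and the window is roughly centred, left = (8 - 1) / 2 = 3, giving [fpfn - 3, fpfn + 5). Each range is then clamped by swap_ra_clamp_pfn() before the PTE range is recorded in swap_ra.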

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  403     100.00%   2        100.00%
Total                       403     100.00%   2        100.00%


struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				    struct vm_fault *vmf,
				    struct vma_swap_readahead *swap_ra)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;

	if (swap_ra->win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = swap_ra->ptes; i < swap_ra->nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != swap_ra->offset &&
			    likely(!PageTransCompound(page))) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     swap_ra->win == 1);
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  241     100.00%   1        100.00%
Total                       241     100.00%   1        100.00%

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  34      100.00%   1        100.00%
Total                       34      100.00%   1        100.00%


static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		swap_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		swap_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  85      100.00%   1        100.00%
Total                       85      100.00%   1        100.00%

static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};
static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}

Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  79      100.00%   1        100.00%
Total                       79      100.00%   1        100.00%

subsys_initcall(swap_init_sysfs);
#endif
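
Together with swap_init_sysfs() above, this exposes one writable knob for VMA-based swap readahead. A minimal userspace sketch, assuming the attribute shows up as /sys/kernel/mm/swap/vma_ra_enabled (the "swap" kobject is created under mm_kobj, i.e. /sys/kernel/mm), would be:

#include <stdio.h>

int main(void)
{
	/* path assumed from kobject_create_and_add("swap", mm_kobj) above */
	FILE *f = fopen("/sys/kernel/mm/swap/vma_ra_enabled", "w");

	if (!f) {
		perror("vma_ra_enabled");
		return 1;
	}
	/* vma_ra_enabled_store() also accepts "true", "1" and "0" */
	fputs("false", f);
	return fclose(f) ? 1 : 0;
}

Reading the same file back returns "true" or "false", as produced by vma_ra_enabled_show().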

Overall Contributors

Person                      Tokens  Token %   Commits  Commit %
Huang Ying                  1800    55.49%    11       10.38%
Shaohua Li                  296     9.12%     5        4.72%
Andrew Morton               231     7.12%     15       14.15%
Linus Torvalds (pre-git)    186     5.73%     17       16.04%
Hugh Dickins                148     4.56%     11       10.38%
Linus Torvalds              134     4.13%     5        4.72%
Daisuke Nishimura           90      2.77%     2        1.89%
Dmitry Safonov              82      2.53%     1        0.94%
Nicholas Piggin             53      1.63%     5        4.72%
Rik Van Riel                47      1.45%     3        2.83%
Andrea Arcangeli            26      0.80%     2        1.89%
Sasha Levin                 24      0.74%     1        0.94%
Christian Ehrhardt          19      0.59%     1        0.94%
Kamezawa Hiroyuki           16      0.49%     2        1.89%
MinChan Kim                 14      0.43%     3        2.83%
Christoph Lameter           14      0.43%     3        2.83%
Vladimir Davydov            11      0.34%     1        0.94%
Tim Chen                    11      0.34%     2        1.89%
Johannes Weiner             8       0.25%     1        0.94%
Gerald Schaefer             7       0.22%     1        0.94%
Kirill A. Shutemov          5       0.15%     2        1.89%
Michal Hocko                5       0.15%     1        0.94%
Rafael Aquini               3       0.09%     1        0.94%
Tejun Heo                   3       0.09%     1        0.94%
Marcelo Tosatti             2       0.06%     1        0.94%
Eric Sesterhenn / Snakebyte 2       0.06%     1        0.94%
Mel Gorman                  2       0.06%     2        1.89%
Aaron Lu                    1       0.03%     1        0.94%
Christoph Hellwig           1       0.03%     1        0.94%
Jan Kara                    1       0.03%     1        0.94%
Jens Axboe                  1       0.03%     1        0.94%
Greg Kroah-Hartman          1       0.03%     1        0.94%
Total                       3244    100.00%   106      100.00%
Created with cregit.