cregit-Linux: how code gets into the kernel

Release 4.8: mm/shmem.c
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>


static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"


#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
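As a quick aside on the arithmetic: VM_ACCT() rounds a byte count up to whole pages, so even a one-byte object is charged a full page. Below is a minimal standalone sketch of that rounding, assuming the common 4 KiB page size; PAGE_SHIFT, PAGE_SIZE and PAGE_ALIGN are redefined locally for illustration and are not taken from kernel headers.

/*
 * Standalone userspace sketch of the VM_ACCT() rounding, assuming 4 KiB pages.
 * Not kernel code: the macros below are local stand-ins.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)	/* 4096 bytes */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VM_ACCT(size)	(PAGE_ALIGN(size) >> PAGE_SHIFT)	/* pages charged */

int main(void)
{
	assert(VM_ACCT(1) == 1);	/* one byte still charges a whole page */
	assert(VM_ACCT(4096) == 1);
	assert(VM_ACCT(4097) == 2);	/* crossing a page boundary adds a page */
	printf("VM_ACCT(5000) = %lu pages\n", VM_ACCT(5000));	/* prints 2 */
	return 0;
}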

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */

struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS

static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
andrew morton               14  100.00%        1     100.00%
Total                       14  100.00%        1     100.00%


static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
andrew morton               21  100.00%        1     100.00%
Total                       21  100.00%        1     100.00%

#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
hugh dickins                43   93.48%        1      50.00%
andres lagar-cavilla         3    6.52%        1      50.00%
Total                       46  100.00%        2     100.00%


static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
brian gerst                 15   78.95%        2      66.67%
linus torvalds               4   21.05%        1      33.33%
Total                       19  100.00%        3     100.00%

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
andrew morton               27   77.14%        1      33.33%
al viro                      5   14.29%        1      33.33%
hugh dickins                 3    8.57%        1      33.33%
Total                       35  100.00%        3     100.00%


static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
andrew morton               27   87.10%        1      50.00%
hugh dickins                 4   12.90%        1      50.00%
Total                       31  100.00%        2     100.00%


static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
konstantin khlebnikov       87  100.00%        1     100.00%
Total                       87  100.00%        1     100.00%

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
andrew morton               21   51.22%        1      20.00%
kirill a. shutemov          14   34.15%        2      40.00%
al viro                      5   12.20%        1      20.00%
hugh dickins                 1    2.44%        1      20.00%
Total                       41  100.00%        5     100.00%


static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
andrew morton               28   93.33%        1      33.33%
kirill a. shutemov           1    3.33%        1      33.33%
hugh dickins                 1    3.33%        1      33.33%
Total                       30  100.00%        3     100.00%

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
pavel emelianov             74  100.00%        1     100.00%
Total                       74  100.00%        1     100.00%


static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
pavel emelianov             50  100.00%        1     100.00%
Total                       50  100.00%        1     100.00%

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
hugh dickins                67   67.00%        3      75.00%
linus torvalds              33   33.00%        1      25.00%
Total                      100  100.00%        4     100.00%

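To make the bookkeeping in the kerneldoc above concrete, here is a tiny standalone sketch of the freed-pages calculation with made-up numbers; the variable names only mirror the shmem_inode_info fields and are purely illustrative.

/* Standalone illustration of the shmem_recalc_inode() arithmetic; numbers are invented. */
#include <stdio.h>

int main(void)
{
	long alloced = 10;	/* pages ever accounted to the inode */
	long swapped = 2;	/* pages currently out on swap */
	long nrpages = 6;	/* pages currently in the page cache */

	/* Normally alloced == nrpages + swapped; any excess was freed by the mm. */
	long freed = alloced - swapped - nrpages;

	printf("freed = %ld page(s)\n", freed);	/* prints 2 */
	return 0;
}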

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	if (shmem_acct_block(info->flags, pages))
		return false;
	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	if (!sbinfo->max_blocks)
		return true;
	if (percpu_counter_compare(&sbinfo->used_blocks,
				sbinfo->max_blocks - pages) > 0) {
		inode->i_mapping->nrpages -= pages;
		spin_lock_irqsave(&info->lock, flags);
		info->alloced -= pages;
		shmem_recalc_inode(inode);
		spin_unlock_irqrestore(&info->lock, flags);
		shmem_unacct_blocks(info->flags, pages);
		return false;
	}
	percpu_counter_add(&sbinfo->used_blocks, pages);
	return true;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov         143   73.71%        2      40.00%
hugh dickins                45   23.20%        2      40.00%
johannes weiner              6    3.09%        1      20.00%
Total                      194  100.00%        5     100.00%


void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          83   80.58%        2      50.00%
hugh dickins                20   19.42%        2      50.00%
Total                      103  100.00%        4     100.00%

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (!pslot)
		return -ENOENT;
	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	radix_tree_replace_slot(pslot, replacement);
	return 0;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          96  100.00%        1     100.00%
Total                       96  100.00%        1     100.00%

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          37   78.72%        1      50.00%
hugh dickins                10   21.28%        1      50.00%
Total                       47  100.00%        2     100.00%

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */
#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          93  100.00%        1     100.00%
Total                       93  100.00%        1     100.00%


static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          62  100.00%        1     100.00%
Total                       62  100.00%        1     100.00%

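For reference, the sketch below mirrors the string-to-constant mapping that shmem_parse_huge() and shmem_format_huge() implement, as a standalone userspace table lookup; the constants are copied from the defines above, while the lookup code itself is illustrative and not the kernel's.

/* Userspace mirror of the huge= option mapping; illustrative only. */
#include <stdio.h>
#include <string.h>

#define SHMEM_HUGE_NEVER	 0
#define SHMEM_HUGE_ALWAYS	 1
#define SHMEM_HUGE_WITHIN_SIZE	 2
#define SHMEM_HUGE_ADVISE	 3
#define SHMEM_HUGE_DENY		(-1)	/* sysfs only */
#define SHMEM_HUGE_FORCE	(-2)	/* sysfs only */

static const struct { const char *name; int val; } huge_opts[] = {
	{ "never", SHMEM_HUGE_NEVER },			{ "always", SHMEM_HUGE_ALWAYS },
	{ "within_size", SHMEM_HUGE_WITHIN_SIZE },	{ "advise", SHMEM_HUGE_ADVISE },
	{ "deny", SHMEM_HUGE_DENY },			{ "force", SHMEM_HUGE_FORCE },
};

static int parse_huge(const char *str)	/* same mapping as shmem_parse_huge() */
{
	for (size_t i = 0; i < sizeof(huge_opts) / sizeof(huge_opts[0]); i++)
		if (!strcmp(str, huge_opts[i].name))
			return huge_opts[i].val;
	return -22;	/* stands in for -EINVAL */
}

int main(void)
{
	printf("within_size -> %d\n", parse_huge("within_size"));	/* 2 */
	printf("bogus       -> %d\n", parse_huge("bogus"));		/* -22 */
	return 0;
}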

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_del_init(&info->shrinklist);
			removed++;
			iput(inode);
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split) {
			iput(inode);
			continue;
		}

		page = find_lock_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		if (!PageTransHuge(page)) {
			unlock_page(page);
			put_page(page);
			goto drop;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		if (ret) {
			/* split failed: leave it on the list */
			iput(inode);
			continue;
		}

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov         399  100.00%        1     100.00%
Total                      399  100.00%        1     100.00%


static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          49  100.00%        1     100.00%
Total                       49  100.00%        1     100.00%


static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          34  100.00%        1     100.00%
Total                       34  100.00%        1     100.00%

#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov          24  100.00%        1     100.00%
Total                       24  100.00%        1     100.00%

#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->page_tree,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->page_tree,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->page_tree, index, page);
	} else {
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_ref_sub(page, nr);
	}
	return error;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
kirill a. shutemov         202   58.05%        1      25.00%
hugh dickins               135   38.79%        1      25.00%
sasha levin                  6    1.72%        1      25.00%
mel gorman                   5    1.44%        1      25.00%
Total                      348  100.00%        4     100.00%

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	put_page(page);
	BUG_ON(error);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
hugh dickins                90   87.38%        1      25.00%
kirill a. shutemov          11   10.68%        2      50.00%
mel gorman                   2    1.94%        1      25.00%
Total                      103  100.00%        4     100.00%

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
hugh dickins                57   78.08%        1      50.00%
johannes weiner             16   21.92%        1      50.00%
Total                       73  100.00%        2     100.00%

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exceptional_entry(page))
			swapped++;

		if (need_resched()) {
			cond_resched_rcu();
			slot = radix_tree_iter_next(&iter);
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
vlastimil babka            106   86.89%        2      50.00%
matthew wilcox              16   13.11%        2      50.00%
Total                      122  100.00%        4     100.00%

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
vlastimil babka            114  100.00%        1     100.00%
Total                      114  100.00%        1     100.00%

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1]