cregit-Linux how code gets into the kernel

Release 4.7 mm/swapfile.c

Directory: mm
/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);


DEFINE_SPINLOCK(swap_lock);

static unsigned int nr_swapfiles;

atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */

EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */

long total_swap_pages;

static int least_priority;


static const char Bad_file[] = "Bad swap file entry ";

static const char Unused_file[] = "Unused swap file entry ";

static const char Bad_offset[] = "Bad swap offset entry ";

static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */

PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_page() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_page() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static PLIST_HEAD(swap_avail_head);
static DEFINE_SPINLOCK(swap_avail_lock);
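
The constraint described above is the whole reason swap_avail_head carries its own spinlock: a swap_info_struct flips between full and not-full while its own ->lock is already held, and at that point swap_lock may not be taken because the established order is swap_lock first, then swap_info_struct->lock. The following minimal user-space sketch models that rule with POSIX mutexes; the names outer/inner/avail are invented stand-ins for swap_lock, a swap_info_struct->lock and swap_avail_lock.

/* Illustrative sketch only (not part of mm/swapfile.c): a user-space model
 * of the lock-ordering rule described in the comment above. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;	/* "swap_lock" */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;	/* "si->lock" */
static pthread_mutex_t avail = PTHREAD_MUTEX_INITIALIZER;	/* "swap_avail_lock" */

static void scan_path(void)
{
	/* Allowed order: outer -> inner. */
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);
	/* ... work on the swap device ... */
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}

static void full_transition_path(void)
{
	/*
	 * With only "inner" held we may NOT take "outer" (that would invert
	 * the order), but taking the independent "avail" lock is fine --
	 * which is exactly why the avail list gets its own lock.
	 */
	pthread_mutex_lock(&inner);
	pthread_mutex_lock(&avail);
	/* ... add/remove this device on the avail list ... */
	pthread_mutex_unlock(&avail);
	pthread_mutex_unlock(&inner);
}

int main(void)
{
	scan_path();
	full_transition_path();
	puts("lock ordering respected");
	return 0;
}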


struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */

static atomic_t proc_poll_event = ATOMIC_INIT(0);


static inline unsigned char swap_count(unsigned char ent) { return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ }

Contributors

Person              Tokens  Prop     Commits  CommitProp
kamezawa hiroyuki   14      73.68%   1        25.00%
hugh dickins        5       26.32%   3        75.00%
Total               19      100.00%  4        100.00%
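
swap_count() strips SWAP_HAS_CACHE from a swap_map byte so that only the reference count (plus, possibly, the continuation flag) remains. The stand-alone sketch below reproduces that masking; the constant values are copied by hand from include/linux/swap.h of this release (SWAP_HAS_CACHE 0x40, COUNT_CONTINUED 0x80) and should be treated as assumptions of the sketch.

/* Illustrative sketch only: the swap_count() masking in isolation.
 * Constants are assumed copies of the swap.h definitions. */
#include <stdio.h>

#define SWAP_HAS_CACHE	0x40	/* assumed: entry also has a swap-cache page */
#define COUNT_CONTINUED	0x80	/* assumed: count continued in another page */

static unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* continuation flag may remain set */
}

int main(void)
{
	unsigned char ent = 3 | SWAP_HAS_CACHE;	/* 3 references + cached */

	printf("raw=0x%02x count=%u\n", ent, swap_count(ent));	/* count=3 */
	return 0;
}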

/* returns 1 if swap entry is freed */
static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) { swp_entry_t entry = swp_entry(si->type, offset); struct page *page; int ret = 0; page = find_get_page(swap_address_space(entry), entry.val); if (!page) return 0; /* * This function is called from scan_swap_map() and it's called * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here. * We have to use trylock for avoiding deadlock. This is a special * case and you should use try_to_free_swap() with explicit lock_page() * in usual operations. */ if (trylock_page(page)) { ret = try_to_free_swap(page); unlock_page(page); } put_page(page); return ret; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
kamezawa hiroyuki   82      92.13%   1        25.00%
shaohua li          4       4.49%    1        25.00%
hugh dickins        2       2.25%    1        25.00%
kirill a. shutemov  1       1.12%    1        25.00%
Total               89      100.00%  4        100.00%

/*
 * swapon tell device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si) { struct swap_extent *se; sector_t start_block; sector_t nr_blocks; int err = 0; /* Do not discard the swap header page! */ se = &si->first_swap_extent; start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); if (nr_blocks) { err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL, 0); if (err) return err; cond_resched(); } list_for_each_entry(se, &si->first_swap_extent.list, list) { start_block = se->start_block << (PAGE_SHIFT - 9); nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL, 0); if (err) break; cond_resched(); } return err; /* That will often be -EOPNOTSUPP */ }

Contributors

Person              Tokens  Prop     Commits  CommitProp
hugh dickins        166     98.22%   3        60.00%
christoph hellwig   3       1.78%    2        40.00%
Total               169     100.00%  5        100.00%
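
The `<< (PAGE_SHIFT - 9)` shifts in discard_swap() (and in discard_swap_cluster() below) convert page counts and page indices into 512-byte sectors, the unit blkdev_issue_discard() works in. A stand-alone check of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT of 12):

/* Illustrative sketch only: pages -> 512-byte sectors via PAGE_SHIFT - 9.
 * PAGE_SHIFT is hard-coded to 12 (4 KiB pages) for this example. */
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long long nr_pages = 1000;
	unsigned long long nr_sectors = nr_pages << (PAGE_SHIFT - 9);

	/* 4096 / 512 == 8 sectors per page, so 1000 pages == 8000 sectors. */
	printf("%llu pages -> %llu sectors\n", nr_pages, nr_sectors);
	return 0;
}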

/*
 * swap allocation tell device that a cluster of swap can now be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si, pgoff_t start_page, pgoff_t nr_pages) { struct swap_extent *se = si->curr_swap_extent; int found_extent = 0; while (nr_pages) { if (se->start_page <= start_page && start_page < se->start_page + se->nr_pages) { pgoff_t offset = start_page - se->start_page; sector_t start_block = se->start_block + offset; sector_t nr_blocks = se->nr_pages - offset; if (nr_blocks > nr_pages) nr_blocks = nr_pages; start_page += nr_blocks; nr_pages -= nr_blocks; if (!found_extent++) si->curr_swap_extent = se; start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_NOIO, 0)) break; } se = list_next_entry(se, list); } }

Contributors

Person              Tokens  Prop     Commits  CommitProp
hugh dickins        149     97.39%   2        40.00%
geliang tang        2       1.31%    1        20.00%
christoph hellwig   2       1.31%    2        40.00%
Total               153     100.00%  5        100.00%

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256
static inline void cluster_set_flag(struct swap_cluster_info *info, unsigned int flag) { info->flags = flag; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          22      100.00%  1        100.00%
Total               22      100.00%  1        100.00%


static inline unsigned int cluster_count(struct swap_cluster_info *info) { return info->data; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          18      100.00%  1        100.00%
Total               18      100.00%  1        100.00%


static inline void cluster_set_count(struct swap_cluster_info *info, unsigned int c) { info->data = c; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          22      100.00%  1        100.00%
Total               22      100.00%  1        100.00%


static inline void cluster_set_count_flag(struct swap_cluster_info *info, unsigned int c, unsigned int f) { info->flags = f; info->data = c; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          32      100.00%  1        100.00%
Total               32      100.00%  1        100.00%


static inline unsigned int cluster_next(struct swap_cluster_info *info) { return info->data; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          18      100.00%  1        100.00%
Total               18      100.00%  1        100.00%


static inline void cluster_set_next(struct swap_cluster_info *info, unsigned int n) { info->data = n; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          22      100.00%  1        100.00%
Total               22      100.00%  1        100.00%


static inline void cluster_set_next_flag(struct swap_cluster_info *info, unsigned int n, unsigned int f) { info->flags = f; info->data = n; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          32      100.00%  1        100.00%
Total               32      100.00%  1        100.00%


static inline bool cluster_is_free(struct swap_cluster_info *info) { return info->flags & CLUSTER_FLAG_FREE; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          19      100.00%  1        100.00%
Total               19      100.00%  1        100.00%


static inline bool cluster_is_null(struct swap_cluster_info *info) { return info->flags & CLUSTER_FLAG_NEXT_NULL; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          19      100.00%  1        100.00%
Total               19      100.00%  1        100.00%


static inline void cluster_set_null(struct swap_cluster_info *info) { info->flags = CLUSTER_FLAG_NEXT_NULL; info->data = 0; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          24      100.00%  1        100.00%
Total               24      100.00%  1        100.00%
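
All of the cluster_* helpers above operate on one small per-cluster record whose data field is overloaded: it holds a usage count for an allocated cluster and a next-cluster index when the cluster sits on the free or discard list, with a few flag bits beside it. The toy model below sketches that packing; the 24-bit data / 8-bit flags split mirrors struct swap_cluster_info in include/linux/swap.h, but the exact layout and flag values should be treated as assumptions of the sketch.

/* Illustrative sketch only: a toy model of the flags/data packing that the
 * cluster_* helpers manipulate. The real layout lives in include/linux/swap.h. */
#include <stdio.h>

#define CLUSTER_FLAG_FREE	1	/* assumed flag values for the sketch */
#define CLUSTER_FLAG_NEXT_NULL	2

struct toy_cluster_info {
	unsigned int data:24;	/* usage count, or index of the next cluster */
	unsigned int flags:8;
};

static void toy_cluster_set_count_flag(struct toy_cluster_info *info,
				       unsigned int c, unsigned int f)
{
	info->flags = f;
	info->data = c;
}

int main(void)
{
	struct toy_cluster_info ci;

	toy_cluster_set_count_flag(&ci, 0, CLUSTER_FLAG_FREE);
	printf("free=%d count=%u\n",
	       !!(ci.flags & CLUSTER_FLAG_FREE), (unsigned)ci.data);
	return 0;
}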

/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si, unsigned int idx) { /* * If scan_swap_map() can't find a free cluster, it will check * si->swap_map directly. To make sure the discarding cluster isn't * taken by scan_swap_map(), mark the swap entries bad (occupied). It * will be cleared after discard */ memset(si->swap_map + idx * SWAPFILE_CLUSTER, SWAP_MAP_BAD, SWAPFILE_CLUSTER); if (cluster_is_null(&si->discard_cluster_head)) { cluster_set_next_flag(&si->discard_cluster_head, idx, 0); cluster_set_next_flag(&si->discard_cluster_tail, idx, 0); } else { unsigned int tail = cluster_next(&si->discard_cluster_tail); cluster_set_next(&si->cluster_info[tail], idx); cluster_set_next_flag(&si->discard_cluster_tail, idx, 0); } schedule_work(&si->discard_work); }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          115     100.00%  1        100.00%
Total               115     100.00%  1        100.00%

/*
 * Doing discard actually. After a cluster discard is finished, the cluster
 * will be added to free cluster list. caller should hold si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si) { struct swap_cluster_info *info; unsigned int idx; info = si->cluster_info; while (!cluster_is_null(&si->discard_cluster_head)) { idx = cluster_next(&si->discard_cluster_head); cluster_set_next_flag(&si->discard_cluster_head, cluster_next(&info[idx]), 0); if (cluster_next(&si->discard_cluster_tail) == idx) { cluster_set_null(&si->discard_cluster_head); cluster_set_null(&si->discard_cluster_tail); } spin_unlock(&si->lock); discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, SWAPFILE_CLUSTER); spin_lock(&si->lock); cluster_set_flag(&info[idx], CLUSTER_FLAG_FREE); if (cluster_is_null(&si->free_cluster_head)) { cluster_set_next_flag(&si->free_cluster_head, idx, 0); cluster_set_next_flag(&si->free_cluster_tail, idx, 0); } else { unsigned int tail; tail = cluster_next(&si->free_cluster_tail); cluster_set_next(&info[tail], idx); cluster_set_next_flag(&si->free_cluster_tail, idx, 0); } memset(si->swap_map + idx * SWAPFILE_CLUSTER, 0, SWAPFILE_CLUSTER); } }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          227     100.00%  1        100.00%
Total               227     100.00%  1        100.00%
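
Both the discard list used by swap_cluster_schedule_discard()/swap_do_scheduled_discard() and the free list are threaded through the cluster_info array itself: the head and tail record a cluster index, and each cluster's data field stores the index of the next cluster. The following stand-alone sketch shows the same index-threaded list idea with invented names:

/* Illustrative sketch only: an index-threaded list like the free/discard
 * cluster lists, where "next" pointers are array indices kept in the
 * elements themselves. */
#include <stdio.h>

#define NCLUSTERS	8
#define NIL		0xffffffffu	/* stands in for the NEXT_NULL flag */

static unsigned int next_idx[NCLUSTERS];	/* per-cluster "data" field */
static unsigned int head = NIL, tail = NIL;

static void list_add_tail_idx(unsigned int idx)
{
	next_idx[idx] = NIL;
	if (head == NIL)
		head = tail = idx;
	else {
		next_idx[tail] = idx;	/* cluster_set_next(&info[tail], idx) */
		tail = idx;
	}
}

static unsigned int list_pop_head_idx(void)
{
	unsigned int idx = head;

	head = next_idx[idx];
	if (head == NIL)
		tail = NIL;
	return idx;
}

int main(void)
{
	list_add_tail_idx(3);
	list_add_tail_idx(5);
	list_add_tail_idx(1);
	while (head != NIL)
		printf("cluster %u\n", list_pop_head_idx());
	return 0;
}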


static void swap_discard_work(struct work_struct *work) { struct swap_info_struct *si; si = container_of(work, struct swap_info_struct, discard_work); spin_lock(&si->lock); swap_do_scheduled_discard(si); spin_unlock(&si->lock); }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          49      100.00%  1        100.00%
Total               49      100.00%  1        100.00%

/*
 * The cluster corresponding to page_nr will be used. The cluster will be
 * removed from free cluster list and its usage counter will be increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p, struct swap_cluster_info *cluster_info, unsigned long page_nr) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; if (!cluster_info) return; if (cluster_is_free(&cluster_info[idx])) { VM_BUG_ON(cluster_next(&p->free_cluster_head) != idx); cluster_set_next_flag(&p->free_cluster_head, cluster_next(&cluster_info[idx]), 0); if (cluster_next(&p->free_cluster_tail) == idx) { cluster_set_null(&p->free_cluster_tail); cluster_set_null(&p->free_cluster_head); } cluster_set_count_flag(&cluster_info[idx], 0, 0); } VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER); cluster_set_count(&cluster_info[idx], cluster_count(&cluster_info[idx]) + 1); }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          156     100.00%  1        100.00%
Total               156     100.00%  1        100.00%

/*
 * The cluster corresponding to page_nr decreases one usage. If the usage
 * counter becomes 0, which means no page in the cluster is in using, we can
 * optionally discard the cluster and add it to free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *p, struct swap_cluster_info *cluster_info, unsigned long page_nr) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; if (!cluster_info) return; VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0); cluster_set_count(&cluster_info[idx], cluster_count(&cluster_info[idx]) - 1); if (cluster_count(&cluster_info[idx]) == 0) { /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed * after discard. */ if ((p->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == (SWP_WRITEOK | SWP_PAGE_DISCARD)) { swap_cluster_schedule_discard(p, idx); return; } cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE); if (cluster_is_null(&p->free_cluster_head)) { cluster_set_next_flag(&p->free_cluster_head, idx, 0); cluster_set_next_flag(&p->free_cluster_tail, idx, 0); } else { unsigned int tail = cluster_next(&p->free_cluster_tail); cluster_set_next(&cluster_info[tail], idx); cluster_set_next_flag(&p->free_cluster_tail, idx, 0); } } }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          199     100.00%  3        100.00%
Total               199     100.00%  3        100.00%

/*
 * It's possible scan_swap_map() uses a free cluster in the middle of free
 * cluster list. Avoiding such abuse to avoid list corruption.
 */
static bool scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, unsigned long offset) { struct percpu_cluster *percpu_cluster; bool conflict; offset /= SWAPFILE_CLUSTER; conflict = !cluster_is_null(&si->free_cluster_head) && offset != cluster_next(&si->free_cluster_head) && cluster_is_free(&si->cluster_info[offset]); if (!conflict) return false; percpu_cluster = this_cpu_ptr(si->percpu_cluster); cluster_set_null(&percpu_cluster->index); return true; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          87      100.00%  2        100.00%
Total               87      100.00%  2        100.00%

/*
 * Try to get a swap entry from current cpu's swap entry pool (a cluster). This
 * might involve allocating a new cluster for current CPU too.
 */
static void scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, unsigned long *offset, unsigned long *scan_base) { struct percpu_cluster *cluster; bool found_free; unsigned long tmp; new_cluster: cluster = this_cpu_ptr(si->percpu_cluster); if (cluster_is_null(&cluster->index)) { if (!cluster_is_null(&si->free_cluster_head)) { cluster->index = si->free_cluster_head; cluster->next = cluster_next(&cluster->index) * SWAPFILE_CLUSTER; } else if (!cluster_is_null(&si->discard_cluster_head)) { /* * we don't have free cluster but have some clusters in * discarding, do discard now and reclaim them */ swap_do_scheduled_discard(si); *scan_base = *offset = si->cluster_next; goto new_cluster; } else return; } found_free = false; /* * Other CPUs can use our cluster if they can't find a free cluster, * check if there is still free entry in the cluster */ tmp = cluster->next; while (tmp < si->max && tmp < (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER) { if (!si->swap_map[tmp]) { found_free = true; break; } tmp++; } if (!found_free) { cluster_set_null(&cluster->index); goto new_cluster; } cluster->next = tmp + 1; *offset = tmp; *scan_base = tmp; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
shaohua li          182     83.11%   3        25.00%
hugh dickins        18      8.22%    5        41.67%
pre-git             18      8.22%    3        25.00%
kamezawa hiroyuki   1       0.46%    1        8.33%
Total               219     100.00%  12       100.00%
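
The core of scan_swap_map_try_ssd_cluster() is a linear search for a zero byte inside the 256-entry window owned by the current CPU's cluster. In isolation, with SWAPFILE_CLUSTER fixed at 256 as in this file and an invented swap_map, that scan looks like this:

/* Illustrative sketch only: "find a free entry inside my cluster" in
 * isolation. swap_map and the cluster index are invented test data. */
#include <stdio.h>
#include <string.h>

#define SWAPFILE_CLUSTER	256

static unsigned char swap_map[4 * SWAPFILE_CLUSTER];

/* Return the first free offset in cluster 'idx', or -1 if it is full. */
static long scan_cluster(unsigned long idx, unsigned long max)
{
	unsigned long tmp = idx * SWAPFILE_CLUSTER;

	while (tmp < max && tmp < (idx + 1) * SWAPFILE_CLUSTER) {
		if (!swap_map[tmp])
			return tmp;
		tmp++;
	}
	return -1;
}

int main(void)
{
	memset(swap_map, 1, sizeof(swap_map));	/* everything in use... */
	swap_map[2 * SWAPFILE_CLUSTER + 7] = 0;	/* ...except one slot */

	printf("free offset: %ld\n", scan_cluster(2, sizeof(swap_map)));
	return 0;
}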


static unsigned long scan_swap_map(struct swap_info_struct *si, unsigned char usage) { unsigned long offset; unsigned long scan_base; unsigned long last_in_cluster = 0; int latency_ration = LATENCY_LIMIT; /* * We try to cluster swap pages by allocating them sequentially * in swap. Once we've allocated SWAPFILE_CLUSTER pages this * way, however, we resort to first-free allocation, starting * a new cluster. This prevents us from scattering swap pages * all over the entire swap partition, so that we reduce * overall disk seek times between swap pages. -- sct * But we do now try to find an empty cluster. -Andrea * And we let swap pages go all over an SSD partition. Hugh */ si->flags += SWP_SCANNING; scan_base = offset = si->cluster_next; /* SSD algorithm */ if (si->cluster_info) { scan_swap_map_try_ssd_cluster(si, &offset, &scan_base); goto checks; } if (unlikely(!si->cluster_nr--)) { if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { si->cluster_nr = SWAPFILE_CLUSTER - 1; goto checks; } spin_unlock(&si->lock); /* * If seek is expensive, start searching for new cluster from * start of partition, to minimize the span of allocated swap. * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info * case, just handled by scan_swap_map_try_ssd_cluster() above. */ scan_base = offset = si->lowest_bit; last_in_cluster = offset + SWAPFILE_CLUSTER - 1; /* Locate the first empty (unaligned) cluster */ for (; last_in_cluster <= si->highest_bit; offset++) { if (si->swap_map[offset]) last_in_cluster = offset + SWAPFILE_CLUSTER; else if (offset == last_in_cluster) { spin_lock(&si->lock); offset -= SWAPFILE_CLUSTER - 1; si->cluster_next = offset; si->cluster_nr = SWAPFILE_CLUSTER - 1; goto checks; } if (unlikely(--latency_ration < 0)) { cond_resched(); latency_ration = LATENCY_LIMIT; } } offset = scan_base; spin_lock(&si->lock); si->cluster_nr = SWAPFILE_CLUSTER - 1; } checks: if (si->cluster_info) { while (scan_swap_map_ssd_cluster_conflict(si, offset)) scan_swap_map_try_ssd_cluster(si, &offset, &scan_base); } if (!(si->flags & SWP_WRITEOK)) goto no_page; if (!si->highest_bit) goto no_page; if (offset > si->highest_bit) scan_base = offset = si->lowest_bit; /* reuse swap entry of cache-only swap if not busy. */
if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { int swap_was_freed; spin_unlock(&si->lock); swap_was_freed = __try_to_reclaim_swap(si, offset); spin_lock(&si->lock); /* entry was freed successfully, try to use this again */ if (swap_was_freed) goto checks; goto scan; /* check next one */ } if (si->swap_map[offset]) goto scan; if (offset == si->lowest_bit) si->lowest_bit++; if (offset == si->highest_bit) si->highest_bit--; si->inuse_pages++; if (si->inuse_pages == si->pages) { si->lowest_bit = si->max; si->highest_bit = 0; spin_lock(&swap_avail_lock); plist_del(&si->avail_list, &swap_avail_head); spin_unlock(&swap_avail_lock); } si->swap_map[offset] = usage; inc_cluster_info_page(si, si->cluster_info, offset); si->cluster_next = offset + 1; si->flags -= SWP_SCANNING; return offset; scan: spin_unlock(&si->lock); while (++offset <= si->highest_bit) { if (!si->swap_map[offset]) { spin_lock(&si->lock); goto checks; } if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { spin_lock(&si->lock); goto checks; } if (unlikely(--latency_ration < 0)) { cond_resched(); latency_ration = LATENCY_LIMIT; } } offset = si->lowest_bit; while (offset < scan_base) { if (!si->swap_map[offset]) { spin_lock(&si->lock); goto checks; } if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { spin_lock(&si->lock); goto checks; } if (unlikely(--latency_ration < 0)) { cond_resched(); latency_ration = LATENCY_LIMIT; } offset++; } spin_lock(&si->lock); no_page: si->flags -= SWP_SCANNING; return 0; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
hugh dickins        271     38.94%   8        38.10%
shaohua li          181     26.01%   4        19.05%
kamezawa hiroyuki   98      14.08%   1        4.76%
pre-git             90      12.93%   3        14.29%
linus torvalds      27      3.88%    1        4.76%
dan streetman       23      3.30%    1        4.76%
jamie liu           3       0.43%    1        4.76%
andrew morton       2       0.29%    1        4.76%
chen yucong         1       0.14%    1        4.76%
Total               696     100.00%  21       100.00%


swp_entry_t get_swap_page(void) { struct swap_info_struct *si, *next; pgoff_t offset; if (atomic_long_read(&nr_swap_pages) <= 0) goto noswap; atomic_long_dec(&nr_swap_pages); spin_lock(&swap_avail_lock); start_over: plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) { /* requeue si to after same-priority siblings */ plist_requeue(&si->avail_list, &swap_avail_head); spin_unlock(&swap_avail_lock); spin_lock(&si->lock); if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { spin_lock(&swap_avail_lock); if (plist_node_empty(&si->avail_list)) { spin_unlock(&si->lock); goto nextsi; } WARN(!si->highest_bit, "swap_info %d in list but !highest_bit\n", si->type); WARN(!(si->flags & SWP_WRITEOK), "swap_info %d in list but !SWP_WRITEOK\n", si->type); plist_del(&si->avail_list, &swap_avail_head); spin_unlock(&si->lock); goto nextsi; } /* This is called for allocating swap entry for cache */ offset = scan_swap_map(si, SWAP_HAS_CACHE); spin_unlock(&si->lock); if (offset) return swp_entry(si->type, offset); pr_debug("scan_swap_map of si %d failed to find offset\n", si->type); spin_lock(&swap_avail_lock); nextsi: /* * if we got here, it's likely that si was almost full before, * and since scan_swap_map() can drop the si->lock, multiple * callers probably all tried to get a page from the same si * and it filled up before we could get one; or, the si filled * up between us dropping swap_avail_lock and taking si->lock. * Since we dropped the swap_avail_lock, the swap_avail_head * list may have been modified; so if next is still in the * swap_avail_head list then try it, otherwise start over. */ if (plist_node_empty(&next->avail_list)) goto start_over; } spin_unlock(&swap_avail_lock); atomic_long_inc(&nr_swap_pages); noswap: return (swp_entry_t) {0}; }

Contributors

Person              Tokens  Prop     Commits  CommitProp
dan streetman       140     52.04%   2        14.29%
shaohua li          54      20.07%   1        7.14%
hugh dickins        34      12.64%   3        21.43%
pre-git             25      9.29%    5        35.71%
rafael j. wysocki   11      4.09%    1        7.14%
linus torvalds      3       1.12%    1        7.14%
kamezawa hiroyuki   2       0.74%    1        7.14%
Total               269     100.00%  14       100.00%
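
Before get_swap_page() drops swap_avail_lock it requeues the chosen entry behind its same-priority siblings with plist_requeue(), so that swap devices configured at equal priority are filled round-robin rather than one after the other. A small model of that effect, using a plain array instead of the kernel's plist (all names and data below are invented):

/* Illustrative sketch only: round-robin over same-priority swap devices,
 * mimicking the effect of plist_requeue() in get_swap_page(). */
#include <stdio.h>

#define NDEV 3

static int devs[NDEV] = { 0, 1, 2 };	/* all at the same priority */

/* Pick the first device, then move it behind its same-priority siblings. */
static int pick_and_requeue(void)
{
	int chosen = devs[0];
	int i;

	for (i = 0; i < NDEV - 1; i++)
		devs[i] = devs[i + 1];
	devs[NDEV - 1] = chosen;
	return chosen;
}

int main(void)
{
	int i;

	/* Six allocations spread evenly: 0 1 2 0 1 2 */
	for (i = 0; i < 6; i++)
		printf("allocate from swap device %d\n", pick_and_requeue());
	return 0;
}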

/* The only caller of this function is now suspend routine */
swp_entry_t get_swap_page_of_type(int type) { struct swap_info_struct *si; pgoff_t offset; si = swap_info[type]; spin_lock(&si->lock); if (si && (si->flags & SWP_WRITEOK)) { atomic_long_dec(