cregit-Linux: how code gets into the kernel
Release 4.17, mm/swapfile.c (directory: mm)

/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);

DEFINE_SPINLOCK(swap_lock);

static unsigned int nr_swapfiles;

atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
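
As a hedged illustration of the check described above, a shrinker's scan
callback (entirely hypothetical; my_shrinker_scan is not part of this file)
might bail out when no swap space remains:

static unsigned long my_shrinker_scan(struct shrinker *s,
				      struct shrink_control *sc)
{
        /* No swap space left: swapping objects out cannot succeed. */
        if (atomic_long_read(&nr_swap_pages) <= 0)
                return SHRINK_STOP;
        /* ... otherwise swap objects out and return the number freed ... */
        return 0;
}
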
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */

long total_swap_pages;

static int least_priority = -1;


static const char Bad_file[] = "Bad swap file entry ";

static const char Unused_file[] = "Unused swap file entry ";

static const char Bad_offset[] = "Bad swap offset entry ";

static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_page() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_page() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */

static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);
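
A condensed sketch of the constraint described above (hedged: this is not a
verbatim call site, and nid stands for an arbitrary node id). Because the
documented order is swap_lock before any swap_info_struct->lock, code that
already holds si->lock cannot take swap_lock, but may take swap_avail_lock:

        spin_lock(&si->lock);                   /* per-device lock held */
        /* ... the device just became full ... */
        spin_lock(&swap_avail_lock);            /* legal: independent of swap_lock */
        plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
        spin_unlock(&swap_avail_lock);
        spin_unlock(&si->lock);
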


struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */

static atomic_t proc_poll_event = ATOMIC_INIT(0);


atomic_t nr_rotate_swap = ATOMIC_INIT(0);


static inline unsigned char swap_count(unsigned char ent)
{
        return ent & ~SWAP_HAS_CACHE;   /* may include SWAP_HAS_CONT flag */
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Kamezawa Hiroyuki         14      73.68%   1        25.00%
Hugh Dickins              5       26.32%   3        75.00%
Total                     19      100.00%  4        100.00%
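
For concreteness, a minimal userspace sketch of the encoding swap_count()
strips; the SWAP_HAS_CACHE value is copied from <linux/swap.h> in this
release, and the rest is illustrative:

#include <stdio.h>

#define SWAP_HAS_CACHE  0x40    /* value from <linux/swap.h> */

int main(void)
{
        unsigned char ent = SWAP_HAS_CACHE | 3; /* cached, count of 3 */

        printf("raw 0x%02x -> count %u\n",
               ent, ent & (unsigned char)~SWAP_HAS_CACHE);
        return 0;               /* prints: raw 0x43 -> count 3 */
}
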

/* returns 1 if swap entry is freed */
static int __try_to_reclaim_swap(struct swap_info_struct *si,
                                 unsigned long offset)
{
        swp_entry_t entry = swp_entry(si->type, offset);
        struct page *page;
        int ret = 0;

        page = find_get_page(swap_address_space(entry), swp_offset(entry));
        if (!page)
                return 0;
        /*
         * This function is called from scan_swap_map() and it's called
         * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
         * We have to use trylock for avoiding deadlock. This is a special
         * case and you should use try_to_free_swap() with explicit lock_page()
         * in usual operations.
         */
        if (trylock_page(page)) {
                ret = try_to_free_swap(page);
                unlock_page(page);
        }
        put_page(page);
        return ret;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Kamezawa Hiroyuki         80      88.89%   1        20.00%
Shaohua Li                4       4.44%    1        20.00%
Huang Ying                3       3.33%    1        20.00%
Hugh Dickins              2       2.22%    1        20.00%
Kirill A. Shutemov        1       1.11%    1        20.00%
Total                     90      100.00%  5        100.00%

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
        struct swap_extent *se;
        sector_t start_block;
        sector_t nr_blocks;
        int err = 0;

        /* Do not discard the swap header page! */
        se = &si->first_swap_extent;
        start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
                                nr_blocks, GFP_KERNEL, 0);
                if (err)
                        return err;
                cond_resched();
        }

        list_for_each_entry(se, &si->first_swap_extent.list, list) {
                start_block = se->start_block << (PAGE_SHIFT - 9);
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

                err = blkdev_issue_discard(si->bdev, start_block,
                                nr_blocks, GFP_KERNEL, 0);
                if (err)
                        break;

                cond_resched();
        }
        return err;             /* That will often be -EOPNOTSUPP */
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Hugh Dickins              166     98.22%   3        60.00%
Christoph Hellwig         3       1.78%    2        40.00%
Total                     169     100.00%  5        100.00%
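
The << (PAGE_SHIFT - 9) shifts above convert page numbers into 512-byte
sector numbers, the unit blkdev_issue_discard() works in. A runnable
userspace sketch of that arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed page size: 4 KiB, i.e. 8 sectors per page */

int main(void)
{
        unsigned long long start_page = 1;      /* skip the swap header page */
        unsigned long long nr_pages = 1024;
        unsigned long long start_block = start_page << (PAGE_SHIFT - 9);
        unsigned long long nr_blocks = nr_pages << (PAGE_SHIFT - 9);

        printf("pages [%llu, +%llu) -> sectors [%llu, +%llu)\n",
               start_page, nr_pages, start_block, nr_blocks);
        return 0;       /* pages [1, +1024) -> sectors [8, +8192) */
}
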

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
                                 pgoff_t start_page, pgoff_t nr_pages)
{
        struct swap_extent *se = si->curr_swap_extent;
        int found_extent = 0;

        while (nr_pages) {
                if (se->start_page <= start_page &&
                    start_page < se->start_page + se->nr_pages) {
                        pgoff_t offset = start_page - se->start_page;
                        sector_t start_block = se->start_block + offset;
                        sector_t nr_blocks = se->nr_pages - offset;

                        if (nr_blocks > nr_pages)
                                nr_blocks = nr_pages;
                        start_page += nr_blocks;
                        nr_pages -= nr_blocks;

                        if (!found_extent++)
                                si->curr_swap_extent = se;

                        start_block <<= PAGE_SHIFT - 9;
                        nr_blocks <<= PAGE_SHIFT - 9;
                        if (blkdev_issue_discard(si->bdev, start_block,
                                    nr_blocks, GFP_NOIO, 0))
                                break;
                }

                se = list_next_entry(se, list);
        }
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Hugh Dickins              149     97.39%   2        40.00%
Geliang Tang              2       1.31%    1        20.00%
Christoph Hellwig         2       1.31%    2        40.00%
Total                     153     100.00%  5        100.00%

#ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER        HPAGE_PMD_NR
#else
#define SWAPFILE_CLUSTER        256
#endif
#define LATENCY_LIMIT           256
static inline void cluster_set_flag(struct swap_cluster_info *info,
        unsigned int flag)
{
        info->flags = flag;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                22      100.00%  1        100.00%
Total                     22      100.00%  1        100.00%


static inline unsigned int cluster_count(struct swap_cluster_info *info)
{
        return info->data;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                18      100.00%  1        100.00%
Total                     18      100.00%  1        100.00%


static inline void cluster_set_count(struct swap_cluster_info *info,
                                     unsigned int c)
{
        info->data = c;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                22      100.00%  1        100.00%
Total                     22      100.00%  1        100.00%


static inline void cluster_set_count_flag(struct swap_cluster_info *info,
                                          unsigned int c, unsigned int f)
{
        info->flags = f;
        info->data = c;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                32      100.00%  1        100.00%
Total                     32      100.00%  1        100.00%


static inline unsigned int cluster_next(struct swap_cluster_info *info)
{
        return info->data;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                18      100.00%  1        100.00%
Total                     18      100.00%  1        100.00%


static inline void cluster_set_next(struct swap_cluster_info *info,
                                    unsigned int n)
{
        info->data = n;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                22      100.00%  1        100.00%
Total                     22      100.00%  1        100.00%


static inline void cluster_set_next_flag(struct swap_cluster_info *info,
                                         unsigned int n, unsigned int f)
{
        info->flags = f;
        info->data = n;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                32      100.00%  1        100.00%
Total                     32      100.00%  1        100.00%


static inline bool cluster_is_free(struct swap_cluster_info *info)
{
        return info->flags & CLUSTER_FLAG_FREE;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                19      100.00%  1        100.00%
Total                     19      100.00%  1        100.00%


static inline bool cluster_is_null(struct swap_cluster_info *info)
{
        return info->flags & CLUSTER_FLAG_NEXT_NULL;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                19      100.00%  1        100.00%
Total                     19      100.00%  1        100.00%


static inline void cluster_set_null(struct swap_cluster_info *info)
{
        info->flags = CLUSTER_FLAG_NEXT_NULL;
        info->data = 0;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                24      100.00%  1        100.00%
Total                     24      100.00%  1        100.00%


static inline bool cluster_is_huge(struct swap_cluster_info *info)
{
        return info->flags & CLUSTER_FLAG_HUGE;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                19      100.00%  1        100.00%
Total                     19      100.00%  1        100.00%


static inline void cluster_clear_huge(struct swap_cluster_info *info)
{
        info->flags &= ~CLUSTER_FLAG_HUGE;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                19      100.00%  1        100.00%
Total                     19      100.00%  1        100.00%


static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
                                                     unsigned long offset)
{
        struct swap_cluster_info *ci;

        ci = si->cluster_info;
        if (ci) {
                ci += offset / SWAPFILE_CLUSTER;
                spin_lock(&ci->lock);
        }
        return ci;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                52      100.00%  1        100.00%
Total                     52      100.00%  1        100.00%


static inline void unlock_cluster(struct swap_cluster_info *ci)
{
        if (ci)
                spin_unlock(&ci->lock);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                24      100.00%  1        100.00%
Total                     24      100.00%  1        100.00%


static inline struct swap_cluster_info *lock_cluster_or_swap_info(
        struct swap_info_struct *si,
        unsigned long offset)
{
        struct swap_cluster_info *ci;

        ci = lock_cluster(si, offset);
        if (!ci)
                spin_lock(&si->lock);

        return ci;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                48      100.00%  1        100.00%
Total                     48      100.00%  1        100.00%


static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
                                               struct swap_cluster_info *ci)
{
        if (ci)
                unlock_cluster(ci);
        else
                spin_unlock(&si->lock);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                35      100.00%  1        100.00%
Total                     35      100.00%  1        100.00%
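
lock_cluster_or_swap_info() and unlock_cluster_or_swap_info() implement a
conditional-locking pattern: take the fine-grained per-cluster lock when
cluster_info exists (the SSD case), otherwise fall back to the coarse
per-device lock. A userspace model of the same pattern, using pthread
mutexes as a stand-in for spinlocks (all names here are illustrative, not
kernel API):

#include <pthread.h>
#include <stdio.h>

struct cluster {
        pthread_mutex_t lock;
};

struct swapdev {
        pthread_mutex_t lock;           /* coarse per-device lock */
        struct cluster *clusters;       /* NULL models a rotational device */
};

/* Take the per-cluster lock if cluster data exists, else the device lock. */
static struct cluster *lock_cluster_or_dev(struct swapdev *dev,
                                           unsigned long idx)
{
        if (dev->clusters) {
                pthread_mutex_lock(&dev->clusters[idx].lock);
                return &dev->clusters[idx];
        }
        pthread_mutex_lock(&dev->lock);
        return NULL;
}

/* Release whichever lock lock_cluster_or_dev() took. */
static void unlock_cluster_or_dev(struct swapdev *dev, struct cluster *ci)
{
        if (ci)
                pthread_mutex_unlock(&ci->lock);
        else
                pthread_mutex_unlock(&dev->lock);
}

int main(void)
{
        struct swapdev dev = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .clusters = NULL,       /* no cluster array on this device */
        };
        struct cluster *ci = lock_cluster_or_dev(&dev, 0);

        /* ... per-entry state would be accessed here ... */
        unlock_cluster_or_dev(&dev, ci);
        puts("coarse device lock path taken (no cluster array)");
        return 0;
}
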


static inline bool cluster_list_empty(struct swap_cluster_list *list)
{
        return cluster_is_null(&list->head);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                17      80.95%   1        50.00%
Shaohua Li                4       19.05%   1        50.00%
Total                     21      100.00%  2        100.00%


static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
{
        return cluster_next(&list->head);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                16      72.73%   1        50.00%
Shaohua Li                6       27.27%   1        50.00%
Total                     22      100.00%  2        100.00%


static void cluster_list_init(struct swap_cluster_list *list)
{
        cluster_set_null(&list->head);
        cluster_set_null(&list->tail);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                22      81.48%   1        50.00%
Shaohua Li                5       18.52%   1        50.00%
Total                     27      100.00%  2        100.00%


static void cluster_list_add_tail(struct swap_cluster_list *list,
                                  struct swap_cluster_info *ci,
                                  unsigned int idx)
{
        if (cluster_list_empty(list)) {
                cluster_set_next_flag(&list->head, idx, 0);
                cluster_set_next_flag(&list->tail, idx, 0);
        } else {
                struct swap_cluster_info *ci_tail;
                unsigned int tail = cluster_next(&list->tail);

                /*
                 * Nested cluster lock, but both cluster locks are
                 * only acquired when we held swap_info_struct->lock
                 */
                ci_tail = ci + tail;
                spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
                cluster_set_next(ci_tail, idx);
                spin_unlock(&ci_tail->lock);
                cluster_set_next_flag(&list->tail, idx, 0);
        }
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                64      54.70%   3        75.00%
Shaohua Li                53      45.30%   1        25.00%
Total                     117     100.00%  4        100.00%


static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
                                           struct swap_cluster_info *ci)
{
        unsigned int idx;

        idx = cluster_next(&list->head);
        if (cluster_next(&list->tail) == idx) {
                cluster_set_null(&list->head);
                cluster_set_null(&list->tail);
        } else
                cluster_set_next_flag(&list->head,
                                      cluster_next(&ci[idx]), 0);

        return idx;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                42      50.00%   1        50.00%
Huang Ying                42      50.00%   1        50.00%
Total                     84      100.00%  2        100.00%
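
cluster_list_add_tail() and cluster_list_del_first() thread a singly linked
list through the cluster array itself: head and tail hold cluster indices,
and each cluster's data field holds the index of its successor, so no
pointers are stored. A runnable userspace model of that index-threaded list
(simplified by assumption: no flags, no locking):

#include <stdio.h>
#include <stdbool.h>

#define NR_CLUSTERS 8
#define NIL ((unsigned int)-1)

struct cluster { unsigned int next; };            /* models the "data" field */
struct cluster_list { unsigned int head, tail; }; /* indices, not pointers */

static void list_init(struct cluster_list *l)
{
        l->head = l->tail = NIL;                  /* models CLUSTER_FLAG_NEXT_NULL */
}

static bool list_empty(struct cluster_list *l)
{
        return l->head == NIL;
}

static void add_tail(struct cluster_list *l, struct cluster *ci,
                     unsigned int idx)
{
        if (list_empty(l)) {
                l->head = l->tail = idx;
        } else {
                ci[l->tail].next = idx;           /* link old tail to new tail */
                l->tail = idx;
        }
}

static unsigned int del_first(struct cluster_list *l, struct cluster *ci)
{
        unsigned int idx = l->head;

        if (l->tail == idx)
                list_init(l);                     /* removed the only element */
        else
                l->head = ci[idx].next;
        return idx;
}

int main(void)
{
        struct cluster ci[NR_CLUSTERS];
        struct cluster_list free_list;
        unsigned int i;

        list_init(&free_list);
        for (i = 0; i < NR_CLUSTERS; i++)
                add_tail(&free_list, ci, i);
        while (!list_empty(&free_list))
                printf("%u ", del_first(&free_list, ci)); /* prints 0 1 ... 7 */
        printf("\n");
        return 0;
}
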

/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
                unsigned int idx)
{
        /*
         * If scan_swap_map() can't find a free cluster, it will check
         * si->swap_map directly. To make sure the discarding cluster isn't
         * taken by scan_swap_map(), mark the swap entries bad (occupied). It
         * will be cleared after discard
         */
        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);

        cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

        schedule_work(&si->discard_work);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                53      100.00%  1        100.00%
Total                     53      100.00%  1        100.00%


static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
{
        struct swap_cluster_info *ci = si->cluster_info;

        cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
        cluster_list_add_tail(&si->free_clusters, ci, idx);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                45      100.00%  1        100.00%
Total                     45      100.00%  1        100.00%

/*
 * Do the discard work. After a cluster discard is finished, the cluster
 * will be added to the free cluster list. The caller should hold si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
        struct swap_cluster_info *info, *ci;
        unsigned int idx;

        info = si->cluster_info;

        while (!cluster_list_empty(&si->discard_clusters)) {
                idx = cluster_list_del_first(&si->discard_clusters, info);
                spin_unlock(&si->lock);

                discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
                                SWAPFILE_CLUSTER);

                spin_lock(&si->lock);
                ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
                __free_cluster(si, idx);
                memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                                0, SWAPFILE_CLUSTER);
                unlock_cluster(ci);
        }
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                60      50.42%   1        25.00%
Huang Ying                59      49.58%   3        75.00%
Total                     119     100.00%  4        100.00%


static void swap_discard_work(struct work_struct *work)
{
        struct swap_info_struct *si;

        si = container_of(work, struct swap_info_struct, discard_work);

        spin_lock(&si->lock);
        swap_do_scheduled_discard(si);
        spin_unlock(&si->lock);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                49      100.00%  1        100.00%
Total                     49      100.00%  1        100.00%


static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
{
        struct swap_cluster_info *ci = si->cluster_info;

        VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
        cluster_list_del_first(&si->free_clusters, ci);
        cluster_set_count_flag(ci + idx, 0, 0);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                58      100.00%  1        100.00%
Total                     58      100.00%  1        100.00%


static void free_cluster(struct swap_info_struct *si, unsigned long idx)
{
        struct swap_cluster_info *ci = si->cluster_info + idx;

        VM_BUG_ON(cluster_count(ci) != 0);
        /*
         * If the swap is discardable, prepare to discard the cluster
         * instead of freeing it immediately. The cluster will be freed
         * after discard.
         */
        if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
            (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
                swap_cluster_schedule_discard(si, idx);
                return;
        }

        __free_cluster(si, idx);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                74      100.00%  1        100.00%
Total                     74      100.00%  1        100.00%

/*
 * The cluster corresponding to page_nr will be used. The cluster will be
 * removed from the free cluster list and its usage counter will be increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p,
        struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
        unsigned long idx = page_nr / SWAPFILE_CLUSTER;

        if (!cluster_info)
                return;
        if (cluster_is_free(&cluster_info[idx]))
                alloc_cluster(p, idx);

        VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
        cluster_set_count(&cluster_info[idx],
                cluster_count(&cluster_info[idx]) + 1);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                85      98.84%   1        50.00%
Huang Ying                1       1.16%    1        50.00%
Total                     86      100.00%  2        100.00%

/*
 * The cluster corresponding to page_nr loses one usage. If the usage
 * counter becomes 0, which means no page in the cluster is in use, we can
 * optionally discard the cluster and add it to the free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *p,
        struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
        unsigned long idx = page_nr / SWAPFILE_CLUSTER;

        if (!cluster_info)
                return;

        VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
        cluster_set_count(&cluster_info[idx],
                cluster_count(&cluster_info[idx]) - 1);

        if (cluster_count(&cluster_info[idx]) == 0)
                free_cluster(p, idx);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                87      98.86%   1        50.00%
Huang Ying                1       1.14%    1        50.00%
Total                     88      100.00%  2        100.00%

/*
 * It's possible that scan_swap_map() uses a free cluster in the middle of the
 * free cluster list. Avoid such abuse to prevent list corruption.
 */
static bool scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
        unsigned long offset)
{
        struct percpu_cluster *percpu_cluster;
        bool conflict;

        offset /= SWAPFILE_CLUSTER;
        conflict = !cluster_list_empty(&si->free_clusters) &&
                offset != cluster_list_first(&si->free_clusters) &&
                cluster_is_free(&si->cluster_info[offset]);

        if (!conflict)
                return false;

        percpu_cluster = this_cpu_ptr(si->percpu_cluster);
        cluster_set_null(&percpu_cluster->index);
        return true;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                83      95.40%   2        66.67%
Huang Ying                4       4.60%    1        33.33%
Total                     87      100.00%  3        100.00%

/*
 * Try to get a swap entry from the current CPU's swap entry pool (a cluster).
 * This might involve allocating a new cluster for the current CPU too.
 */
static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
        unsigned long *offset, unsigned long *scan_base)
{
        struct percpu_cluster *cluster;
        struct swap_cluster_info *ci;
        bool found_free;
        unsigned long tmp, max;

new_cluster:
        cluster = this_cpu_ptr(si->percpu_cluster);
        if (cluster_is_null(&cluster->index)) {
                if (!cluster_list_empty(&si->free_clusters)) {
                        cluster->index = si->free_clusters.head;
                        cluster->next = cluster_next(&cluster->index) *
                                        SWAPFILE_CLUSTER;
                } else if (!cluster_list_empty(&si->discard_clusters)) {
                        /*
                         * we don't have a free cluster but have some clusters
                         * being discarded; do the discard now and reclaim them
                         */
                        swap_do_scheduled_discard(si);
                        *scan_base = *offset = si->cluster_next;
                        goto new_cluster;
                } else
                        return false;
        }

        found_free = false;

        /*
         * Other CPUs can use our cluster if they can't find a free cluster;
         * check if there is still a free entry in the cluster
         */
        tmp = cluster->next;
        max = min_t(unsigned long, si->max,
                    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
        if (tmp >= max) {
                cluster_set_null(&cluster->index);
                goto new_cluster;
        }
        ci = lock_cluster(si, tmp);
        while (tmp < max) {
                if (!si->swap_map[tmp]) {
                        found_free = true;
                        break;
                }
                tmp++;
        }
        unlock_cluster(ci);
        if (!found_free) {
                cluster_set_null(&cluster->index);
                goto new_cluster;
        }
        cluster->next = tmp + 1;
        *offset = tmp;
        *scan_base = tmp;
        return found_free;
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Shaohua Li                161     61.69%   3        20.00%
Huang Ying                56      21.46%   2        13.33%
Hugh Dickins              18      6.90%    5        33.33%
Linus Torvalds (pre-git)  18      6.90%    3        20.00%
Tim Chen                  7       2.68%    1        6.67%
Kamezawa Hiroyuki         1       0.38%    1        6.67%
Total                     261     100.00%  15       100.00%
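
The heart of scan_swap_map_try_ssd_cluster() is a bounded window scan: the
per-CPU cursor walks [tmp, max) inside its claimed cluster looking for a
zero byte in swap_map. A runnable userspace model of just that loop
(cluster claiming, discard handling and locking are omitted; the names are
illustrative):

#include <stdio.h>
#include <stdbool.h>

#define CLUSTER_SIZE 256

static unsigned char swap_map[CLUSTER_SIZE * 4];  /* 0 means the slot is free */

/* One CPU's claimed window: [next, end) inside its current cluster. */
struct percpu_cluster {
        unsigned long next;
        unsigned long end;
        bool valid;
};

static bool try_ssd_cluster(struct percpu_cluster *pc, unsigned long *offset)
{
        unsigned long tmp;

        if (!pc->valid)
                return false;   /* the kernel would claim a new free cluster here */
        for (tmp = pc->next; tmp < pc->end; tmp++) {
                if (!swap_map[tmp]) {   /* free slot found inside the window */
                        swap_map[tmp] = 1;
                        pc->next = tmp + 1;
                        *offset = tmp;
                        return true;
                }
        }
        pc->valid = false;      /* window exhausted */
        return false;
}

int main(void)
{
        struct percpu_cluster pc = {
                .next = CLUSTER_SIZE,           /* this CPU owns cluster 1 */
                .end = 2 * CLUSTER_SIZE,
                .valid = true,
        };
        unsigned long off = 0;

        while (try_ssd_cluster(&pc, &off))
                ;                               /* allocates offsets 256..511 */
        printf("last allocated offset: %lu\n", off);    /* prints 511 */
        return 0;
}
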


static void __del_from_avail_list(struct swap_info_struct *p)
{
        int nid;

        for_each_node(nid)
                plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Aaron Lu                  35      100.00%  1        100.00%
Total                     35      100.00%  1        100.00%


static void del_from_avail_list(struct swap_info_struct *p)
{
        spin_lock(&swap_avail_lock);
        __del_from_avail_list(p);
        spin_unlock(&swap_avail_lock);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Aaron Lu                  28      100.00%  1        100.00%
Total                     28      100.00%  1        100.00%


static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
                             unsigned int nr_entries)
{
        unsigned int end = offset + nr_entries - 1;

        if (offset == si->lowest_bit)
                si->lowest_bit += nr_entries;
        if (end == si->highest_bit)
                si->highest_bit -= nr_entries;
        si->inuse_pages += nr_entries;
        if (si->inuse_pages == si->pages) {
                si->lowest_bit = si->max;
                si->highest_bit = 0;
                del_from_avail_list(si);
        }
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                37      39.36%   1        12.50%
Shaohua Li                34      36.17%   3        37.50%
Aaron Lu                  7       7.45%    1        12.50%
Linus Torvalds (pre-git)  7       7.45%    1        12.50%
Tim Chen                  7       7.45%    1        12.50%
Hugh Dickins              2       2.13%    1        12.50%
Total                     94      100.00%  8        100.00%


static void add_to_avail_list(struct swap_info_struct *p)
{
        int nid;

        spin_lock(&swap_avail_lock);
        for_each_node(nid) {
                WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
                plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
        }
        spin_unlock(&swap_avail_lock);
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Aaron Lu                  44      68.75%   1        20.00%
Huang Ying                13      20.31%   1        20.00%
Hugh Dickins              7       10.94%   3        60.00%
Total                     64      100.00%  5        100.00%


static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
                            unsigned int nr_entries)
{
        unsigned long end = offset + nr_entries - 1;
        void (*swap_slot_free_notify)(struct block_device *, unsigned long);

        if (offset < si->lowest_bit)
                si->lowest_bit = offset;
        if (end > si->highest_bit) {
                bool was_full = !si->highest_bit;

                si->highest_bit = end;
                if (was_full && (si->flags & SWP_WRITEOK))
                        add_to_avail_list(si);
        }
        atomic_long_add(nr_entries, &nr_swap_pages);
        si->inuse_pages -= nr_entries;
        if (si->flags & SWP_BLKDEV)
                swap_slot_free_notify =
                        si->bdev->bd_disk->fops->swap_slot_free_notify;
        else
                swap_slot_free_notify = NULL;
        while (offset <= end) {
                frontswap_invalidate_page(si->type, offset);
                if (swap_slot_free_notify)
                        swap_slot_free_notify(si->bdev, offset);
                offset++;
        }
}

Contributors

Person                    Tokens  Prop     Commits  Commit Prop
Huang Ying                169     99.41%   1        50.00%
Aaron Lu                  1       0.59%    1        50.00%
Total                     170     100.00%  2        100.00%


static int scan_swap_map_slots(struct swap_info_struct *si,
                               unsigned char usage, int nr,
                               swp_entry_t slots[])
{
        struct swap_cluster_info *ci;
        unsigned long offset;
        unsigned long scan_base;
        unsigned long last_in_cluster = 0;
        int latency_ration = LATENCY_LIMIT;
        int n_ret = 0;

        if (nr > SWAP_BATCH)
                nr = SWAP_BATCH;

        /*
         * We try to cluster swap pages by allocating them sequentially
         * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
         * way, however, we resort to first-free allocation, starting
         * a new cluster.  This prevents us from scattering swap pages
         * all over the entire swap partition, so that we reduce
         * overall disk seek times between swap pages.  -- sct
         * But we do now try to find an empty cluster.  -Andrea
         * And we let swap pages go all over an SSD partition.  Hugh
         */

        si->flags += SWP_SCANNING;
        scan_base = offset = si->cluster_next;

        /* SSD algorithm */
        if (si