cregit-Linux: how code gets into the kernel

Release 4.8: mm/page_io.c

Directory: mm
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <asm/pgtable.h>

static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_end_io = end_io;

		bio_add_page(bio, page, PAGE_SIZE, 0);
		BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE);
	}
	return bio;
}


Contributor            Tokens  Token %  Commits  Commit %
andrew morton              45   46.88%        2    15.38%
kent overstreet            19   19.79%        2    15.38%
hugh dickins               10   10.42%        1     7.69%
al viro                     1    1.04%        1     7.69%
linus torvalds              1    1.04%        1     7.69%
lee schermerhorn            1    1.04%        1     7.69%
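
get_swap_bio() pairs a single-page bio with a caller-supplied completion routine; the caller still chooses the request direction and submits. A condensed sketch of that pattern, modeled on the swap_readpage() path later in this file (error handling elided):

	/* Sketch of the call pattern used by swap_readpage() below. */
	struct bio *bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL)
		return -ENOMEM;			/* real callers unlock the page first */
	bio_set_op_attrs(bio, REQ_OP_READ, 0);	/* pick direction and flags */
	submit_bio(bio);			/* end_swap_bio_read() runs on completion */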

void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
			 imajor(bio->bi_bdev->bd_inode),
			 iminor(bio->bi_bdev->bd_inode),
			 (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}


Contributor            Tokens  Token %  Commits  Commit %
peter zijlstra             43   46.74%        1    12.50%
andrew morton              36   39.13%        1    12.50%
christoph hellwig           3    3.26%        1    12.50%
joe perches                 2    2.17%        1    12.50%
kent overstreet             2    2.17%        1    12.50%
neil brown                  1    1.09%        1    12.50%

static void swap_slot_free_notify(struct page *page)
{
	struct swap_info_struct *sis;
	struct gendisk *disk;

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (unlikely(!PageSwapCache(page)))
		return;

	sis = page_swap_info(page);
	if (!(sis->flags & SWP_BLKDEV))
		return;

	/*
	 * The swap subsystem performs lazy swap slot freeing,
	 * expecting that the page will be swapped out again.
	 * So we can avoid an unnecessary write if the page
	 * isn't redirtied.
	 * This is good for real swap storage because we can
	 * reduce unnecessary I/O and enhance wear-leveling
	 * if an SSD is used as the swap device.
	 * But if an in-memory swap device (e.g. zram) is used,
	 * this causes a duplicated copy between uncompressed
	 * data in VM-owned memory and compressed data in
	 * zram-owned memory.  So let's free zram-owned memory
	 * and make the VM-owned decompressed page *dirty*,
	 * so the page should be swapped out somewhere again if
	 * we again wish to reclaim it.
	 */
	disk = sis->bdev->bd_disk;
	if (disk->fops->swap_slot_free_notify) {
		swp_entry_t entry;
		unsigned long offset;

		entry.val = page_private(page);
		offset = swp_offset(entry);

		SetPageDirty(page);
		disk->fops->swap_slot_free_notify(sis->bdev, offset);
	}
}


Contributor            Tokens  Token %  Commits  Commit %
minchan kim               103   91.15%        2    40.00%
andrew morton               8    7.08%        1    20.00%
christoph hellwig           1    0.88%        1    20.00%
neil brown                  1    0.88%        1    20.00%
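
Whether the notification fires depends on the backing driver: swap_slot_free_notify() only calls through disk->fops if the driver filled in that hook. A hedged sketch (not from this file) of how an in-memory driver such as zram might wire it up; the mydrv_* names are hypothetical:

	/* Hypothetical driver-side hook; invoked when a swap slot is freed. */
	static void mydrv_slot_free_notify(struct block_device *bdev,
					   unsigned long index)
	{
		/* drop the driver's copy of the now-unused slot's data */
	}

	static const struct block_device_operations mydrv_fops = {
		.owner			= THIS_MODULE,
		.swap_slot_free_notify	= mydrv_slot_free_notify,
	};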

static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (bio->bi_error) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
			 imajor(bio->bi_bdev->bd_inode),
			 iminor(bio->bi_bdev->bd_inode),
			 (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
	swap_slot_free_notify(page);
out:
	unlock_page(page);
	bio_put(bio);
}


Contributor            Tokens  Token %  Commits  Commit %
minchan kim                90   88.24%        2    40.00%
andrew morton               6    5.88%        1    20.00%

int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent list.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = bmap(inode, probe_block);
		if (first_block == 0)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = bmap(inode, probe_block + block_in_page);
			if (block == 0)
				goto bad_bmap;
			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}


Contributor            Tokens  Token %  Commits  Commit %
mel gorman                355   98.89%        1    33.33%
mikulas patocka             3    0.84%        1    33.33%
joe perches                 1    0.28%        1    33.33%
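
generic_swapfile_activate() is what swapon falls back to when a filesystem does not provide its own ->swap_activate method (swap-over-NFS being the case that does supply one). A filesystem whose bmap-visible blocks are stable could, in principle, point the hook straight at the generic helper; a hedged sketch with a hypothetical myfs_aops:

	/* Hypothetical: delegating swap activation to the generic helper. */
	static const struct address_space_operations myfs_aops = {
		/* ... readpage, writepage, bmap, ... */
		.swap_activate	= generic_swapfile_activate,
	};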

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = 0;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	ret = __swap_writepage(page, wbc, end_swap_bio_write);
out:
	return ret;
}


Contributor            Tokens  Token %  Commits  Commit %
dan magenheimer            28   34.15%        1     9.09%
seth jennings              17   20.73%        2    18.18%
andrew morton              17   20.73%        2    18.18%
konrad rzeszutek wilk       1    1.22%        1     9.09%
mel gorman                  1    1.22%        1     9.09%
hugh dickins                1    1.22%        1     9.09%

static sector_t swap_page_sector(struct page *page)
{
	return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9);
}


Contributor            Tokens  Token %  Commits  Commit %
matthew wilcox             25   96.15%        1    50.00%
kirill a. shutemov          1    3.85%        1    50.00%
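
The shift converts a page-granular index into 512-byte sector units. With the common PAGE_SHIFT of 12 (4 KiB pages), PAGE_SHIFT - 9 = 3, so each page spans eight 512-byte sectors; a quick worked example (values assume 4 KiB pages):

	/* Assuming PAGE_SHIFT == 12: one 4 KiB page = 8 x 512 B sectors. */
	sector_t s = (sector_t)3 << (12 - 9);	/* page index 3 -> sector 24 */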

int __swap_writepage(struct page *page, struct writeback_control *wbc,
		bio_end_io_t end_write_func)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	BUG_ON(!PageSwapCache(page));
	if (sis->flags & SWP_FILE) {
		struct kiocb kiocb;
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;
		struct bio_vec bv = {
			.bv_page = page,
			.bv_len  = PAGE_SIZE,
			.bv_offset = 0
		};
		struct iov_iter from;

		iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
		init_sync_kiocb(&kiocb, swap_file);
		kiocb.ki_pos = page_file_offset(page);

		set_page_writeback(page);
		unlock_page(page);
		ret = mapping->a_ops->direct_IO(&kiocb, &from);
		if (ret == PAGE_SIZE) {
			count_vm_event(PSWPOUT);
			ret = 0;
		} else {
			/*
			 * In the case of swap-over-nfs, this can be a
			 * temporary failure if the system has limited
			 * memory for allocating transmit buffers.
			 * Mark the page dirty and avoid
			 * rotate_reclaimable_page, and rate-limit the
			 * messages; do not flag PageError like the
			 * normal direct-to-bio case, as the failure
			 * could be temporary.
			 */
			set_page_dirty(page);
			ClearPageReclaim(page);
			pr_err_ratelimited("Write error on dio swapfile (%llu)\n",
					   page_file_offset(page));
		}
		end_page_writeback(page);
		return ret;
	}

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_vm_event(PSWPOUT);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_NOIO, page, end_write_func);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC);
	else
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);
out:
	return ret;
}


Contributor            Tokens  Token %  Commits  Commit %
mel gorman                124   36.90%        3    11.11%
andrew morton              59   17.56%        7    25.93%
matthew wilcox             37   11.01%        1     3.70%
seth jennings              35   10.42%        2     7.41%
al viro                    32    9.52%        4    14.81%
jens axboe                 16    4.76%        1     3.70%
santosh shilimkar           9    2.68%        1     3.70%
jerome marchand             8    2.38%        1     3.70%
christoph hellwig           2    0.60%        2     7.41%
christoph lameter           2    0.60%        1     3.70%
michael christie            1    0.30%        1     3.70%
joe perches                 1    0.30%        1     3.70%

int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);

	BUG_ON(!PageSwapCache(page));
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (sis->flags & SWP_FILE) {
		struct file *swap_file = sis->swap_file;
		struct address_space *mapping = swap_file->f_mapping;

		ret = mapping->a_ops->readpage(swap_file, page);
		if (!ret)
			count_vm_event(PSWPIN);
		return ret;
	}

	ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
	if (!ret) {
		if (trylock_page(page)) {
			swap_slot_free_notify(page);
			unlock_page(page);
		}

		count_vm_event(PSWPIN);
		return 0;
	}

	ret = 0;
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
out:
	return ret;
}


Contributor            Tokens  Token %  Commits  Commit %
andrew morton              74   29.84%        5    26.32%
mel gorman                 64   25.81%        1     5.26%
matthew wilcox             35   14.11%        1     5.26%
dan magenheimer            23    9.27%        1     5.26%
minchan kim                19    7.66%        2    10.53%
santosh shilimkar           9    3.63%        1     5.26%
michael christie            9    3.63%        2    10.53%
sasha levin                 6    2.42%        1     5.26%
nick piggin                 3    1.21%        1     5.26%
christoph lameter           2    0.81%        1     5.26%
konrad rzeszutek wilk       1    0.40%        1     5.26%

int swap_set_page_dirty(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);

	if (sis->flags & SWP_FILE) {
		struct address_space *mapping = sis->swap_file->f_mapping;

		BUG_ON(!PageSwapCache(page));
		return mapping->a_ops->set_page_dirty(page);
	} else {
		return __set_page_dirty_no_writeback(page);
	}
}


Contributor            Tokens  Token %  Commits  Commit %
mel gorman                 60   86.96%        1    50.00%
santosh shilimkar           9   13.04%        1    50.00%
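
None of these entry points are wired up in this file itself. For context, mm/swap_state.c installs the write-side hooks in the swap address space's operations (condensed below from that file), while swap_readpage() is invoked directly by the swap-in path rather than through ->readpage:

	/* Condensed from mm/swap_state.c: the swap cache address_space ops. */
	static const struct address_space_operations swap_aops = {
		.writepage	= swap_writepage,
		.set_page_dirty	= swap_set_page_dirty,
		/* ... */
	};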

Overall Contributors

Contributor            Tokens  Token %  Commits  Commit %
mel gorman                607   38.81%        4     6.45%
andrew morton             252   16.11%       10    16.13%
minchan kim               214   13.68%        3     4.84%
matthew wilcox             97    6.20%        1     1.61%
dan magenheimer            54    3.45%        1     1.61%
seth jennings              52    3.32%        2     3.23%
peter zijlstra             43    2.75%        1     1.61%
al viro                    33    2.11%        5     8.06%
santosh shilimkar          27    1.73%        1     1.61%
kent overstreet            23    1.47%        3     4.84%
jens axboe                 16    1.02%        1     1.61%
christoph hellwig          12    0.77%        6     9.68%
hugh dickins               11    0.70%        2     3.23%
michael christie           10    0.64%        2     3.23%
jerome marchand             8    0.51%        1     1.61%
sasha levin                 6    0.38%        1     1.61%
joe perches                 4    0.26%        1     1.61%
christoph lameter           4    0.26%        1     1.61%
tejun heo                   3    0.19%        1     1.61%
mikulas patocka             3    0.19%        1     1.61%
nick piggin                 3    0.19%        1     1.61%
neil brown                  2    0.13%        1     1.61%
konrad rzeszutek wilk       2    0.13%        1     1.61%
lee schermerhorn            1    0.06%        1     1.61%
linus torvalds              1    0.06%        1     1.61%
kirill a. shutemov          1    0.06%        1     1.61%