Release 4.15 kernel/power/swap.c
/*
* linux/kernel/power/swap.c
*
* This file provides functions for reading the suspend image from
* and writing it to a swap partition.
*
* Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
* Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
*
* This file is released under the GPLv2.
*
*/
#define pr_fmt(fmt) "PM: " fmt
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>
#include "power.h"
#define HIBERNATE_SIG "S1SUSPEND"
/*
* When reading an {un,}compressed image, we may restore pages in place,
* in which case some architectures need these pages cleaned before they
* can be executed. We don't know which pages these may be, so clean the lot.
*/
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;
/*
* The swap map is a data structure used for keeping track of each page
* written to a swap partition. It consists of many swap_map_page
* structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
* These structures are stored on the swap and linked together with the
* help of the .next_swap member.
*
* The swap map is created during suspend. The swap map pages are
* allocated and populated one at a time, so we only need one memory
* page to set up the entire structure.
*
* During resume, all swap_map_page structures are read back into a list.
*/
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
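As a worked example of the arithmetic above, assume 4 KiB pages and an 8-byte sector_t (typical on 64-bit builds; both are configuration-dependent). Each map page then holds 511 data sectors, with the last slot reserved for the link. A minimal userspace check:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL		/* assumed; configuration-dependent */
typedef uint64_t sector_t;		/* assumed 8 bytes, as on 64-bit */
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)

int main(void)
{
	/* 4096 / 8 - 1 = 511 entries; the remaining slot is .next_swap */
	printf("entries per map page: %lu\n",
	       (unsigned long)MAP_PAGE_ENTRIES);
	return 0;
}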
/*
* Number of free pages that are not in high memory.
*/
static inline unsigned long low_free_pages(void)
{
return nr_free_pages() - nr_free_highpages();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bojan Smojver | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
/*
* Number of pages required to be kept free while writing the image. Always
* half of all available low pages before the writing starts.
*/
static inline unsigned long reqd_free_pages(void)
{
return low_free_pages() / 2;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bojan Smojver | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
struct swap_map_page {
sector_t entries[MAP_PAGE_ENTRIES];
sector_t next_swap;
};
struct swap_map_page_list {
struct swap_map_page *map;
struct swap_map_page_list *next;
};
/**
* The swap_map_handle structure is used for handling swap in
* a file-like way.
*/
struct swap_map_handle {
struct swap_map_page *cur;
struct swap_map_page_list *maps;
sector_t cur_swap;
sector_t first_sector;
unsigned int k;
unsigned long reqd_free_pages;
u32 crc32;
};
struct swsusp_header {
char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
sizeof(u32)];
u32 crc32;
sector_t image;
unsigned int flags; /* Flags to pass to the "boot" kernel */
char orig_sig[10];
char sig[10];
} __packed;
static struct swsusp_header *swsusp_header;
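The reserved[] sizing makes the packed header fill exactly one page, with the live fields pushed to the end so that sig overlays the on-disk swap signature. A minimal userspace sanity check, assuming 4 KiB pages, an 8-byte sector_t and 4-byte int/u32 (all configuration-dependent):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
typedef uint64_t sector_t;
typedef uint32_t u32;

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
	              sizeof(u32)];
	u32 crc32;
	sector_t image;
	unsigned int flags;
	char orig_sig[10];	/* saved original swap signature */
	char sig[10];		/* "S1SUSPEND" while an image is present */
} __attribute__((packed));

int main(void)
{
	/* 4060 + 4 + 8 + 4 + 10 + 10 == 4096 */
	printf("header size: %zu\n", sizeof(struct swsusp_header));
	return 0;
}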
/**
* The following functions are used for tracking the allocated
* swap pages, so that they can be freed in case of an error.
*/
struct swsusp_extent {
struct rb_node node;
unsigned long start;
unsigned long end;
};
static struct rb_root swsusp_extents = RB_ROOT;
static int swsusp_extents_insert(unsigned long swap_offset)
{
struct rb_node **new = &(swsusp_extents.rb_node);
struct rb_node *parent = NULL;
struct swsusp_extent *ext;
/* Figure out where to put the new node */
while (*new) {
ext = rb_entry(*new, struct swsusp_extent, node);
parent = *new;
if (swap_offset < ext->start) {
/* Try to merge */
if (swap_offset == ext->start - 1) {
ext->start--;
return 0;
}
new = &((*new)->rb_left);
} else if (swap_offset > ext->end) {
/* Try to merge */
if (swap_offset == ext->end + 1) {
ext->end++;
return 0;
}
new = &((*new)->rb_right);
} else {
/* It already is in the tree */
return -EINVAL;
}
}
/* Add the new node and rebalance the tree. */
ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
if (!ext)
return -ENOMEM;
ext->start = swap_offset;
ext->end = swap_offset;
rb_link_node(&ext->node, parent, new);
rb_insert_color(&ext->node, &swsusp_extents);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nigel Cunningham | 216 | 99.54% | 1 | 50.00% |
Davidlohr Bueso A | 1 | 0.46% | 1 | 50.00% |
Total | 217 | 100.00% | 2 | 100.00% |
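The effect of the three branches is easiest to see with concrete offsets. Below is a userspace model of the same merge logic over a sorted singly linked list instead of an rbtree; the list, the helper names and the offsets are all made up for illustration:

#include <stdio.h>
#include <stdlib.h>

struct extent {
	unsigned long start, end;
	struct extent *next;
};

static struct extent *extents;

static int extents_insert(unsigned long off)
{
	struct extent **p = &extents, *ext;

	while ((ext = *p)) {
		if (off + 1 == ext->start) {	/* merge on the left */
			ext->start--;
			return 0;
		}
		if (off == ext->end + 1) {	/* merge on the right */
			ext->end++;
			return 0;
		}
		if (off >= ext->start && off <= ext->end)
			return -1;		/* already tracked (-EINVAL) */
		if (off < ext->start)
			break;
		p = &ext->next;
	}
	ext = calloc(1, sizeof(*ext));		/* new single-offset extent */
	if (!ext)
		return -1;			/* -ENOMEM in the kernel */
	ext->start = ext->end = off;
	ext->next = *p;
	*p = ext;
	return 0;
}

int main(void)
{
	unsigned long offs[] = { 10, 12, 11, 11 };	/* last is a dup */

	for (int i = 0; i < 4; i++)
		printf("insert %lu -> %d\n", offs[i], extents_insert(offs[i]));
	/* prints [10, 11] and [12, 12]: like the kernel, only one
	 * neighbour is merged per insert */
	for (struct extent *e = extents; e; e = e->next)
		printf("extent [%lu, %lu]\n", e->start, e->end);
	return 0;
}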
/**
* alloc_swapdev_block - allocate a swap page and register that it has
* been allocated, so that it can be freed in case of an error.
*/
sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nigel Cunningham | 55 | 96.49% | 1 | 50.00% |
Hugh Dickins | 2 | 3.51% | 1 | 50.00% |
Total | 57 | 100.00% | 2 | 100.00% |
/**
* free_all_swap_pages - free swap pages allocated for saving image data.
* It also frees the extents used to register which swap entries had been
* allocated.
*/
void free_all_swap_pages(int swap)
{
struct rb_node *node;
while ((node = swsusp_extents.rb_node)) {
struct swsusp_extent *ext;
unsigned long offset;
ext = rb_entry(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
swap_free(swp_entry(swap, offset));
kfree(ext);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nigel Cunningham | 84 | 97.67% | 1 | 33.33% |
Hugh Dickins | 1 | 1.16% | 1 | 33.33% |
Geliang Tang | 1 | 1.16% | 1 | 33.33% |
Total | 86 | 100.00% | 3 | 100.00% |
int swsusp_swap_in_use(void)
{
return (swsusp_extents.rb_node != NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Nigel Cunningham | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
/*
* General things
*/
static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;
struct hib_bio_batch {
atomic_t count;
wait_queue_head_t wait;
blk_status_t error;
};
static void hib_init_batch(struct hib_bio_batch *hb)
{
atomic_set(&hb->count, 0);
init_waitqueue_head(&hb->wait);
hb->error = BLK_STS_OK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 35 | 100.00% | 2 | 100.00% |
Total | 35 | 100.00% | 2 | 100.00% |
static void hib_end_io(struct bio *bio)
{
struct hib_bio_batch *hb = bio->bi_private;
struct page *page = bio->bi_io_vec[0].bv_page;
if (bio->bi_status) {
pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
(unsigned long long)bio->bi_iter.bi_sector);
}
if (bio_data_dir(bio) == WRITE)
put_page(page);
else if (clean_pages_on_read)
flush_icache_range((unsigned long)page_address(page),
(unsigned long)page_address(page) + PAGE_SIZE);
if (bio->bi_status && !hb->error)
hb->error = bio->bi_status;
if (atomic_dec_and_test(&hb->count))
wake_up(&hb->wait);
bio_put(bio);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 129 | 81.65% | 4 | 66.67% |
James Morse | 28 | 17.72% | 1 | 16.67% |
Joe Perches | 1 | 0.63% | 1 | 16.67% |
Total | 158 | 100.00% | 6 | 100.00% |
static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
struct hib_bio_batch *hb)
{
struct page *page = virt_to_page(addr);
struct bio *bio;
int error = 0;
bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
bio_set_dev(bio, hib_resume_bdev);
bio_set_op_attrs(bio, op, op_flags);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
pr_err("Adding page to bio failed at %llu\n",
(unsigned long long)bio->bi_iter.bi_sector);
bio_put(bio);
return -EFAULT;
}
if (hb) {
bio->bi_end_io = hib_end_io;
bio->bi_private = hb;
atomic_inc(&hb->count);
submit_bio(bio);
} else {
error = submit_bio_wait(bio);
bio_put(bio);
}
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 160 | 90.91% | 2 | 33.33% |
Michael Christie | 13 | 7.39% | 2 | 33.33% |
Joe Perches | 2 | 1.14% | 1 | 16.67% |
Mel Gorman | 1 | 0.57% | 1 | 16.67% |
Total | 176 | 100.00% | 6 | 100.00% |
static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
{
wait_event(hb->wait, atomic_read(&hb->count) == 0);
return blk_status_to_errno(hb->error);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 36 | 100.00% | 2 | 100.00% |
Total | 36 | 100.00% | 2 | 100.00% |
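hib_init_batch(), hib_end_io() and hib_wait_io() together implement a counted-completion pattern: every submitted bio bumps count, every completion drops it, the first error is latched, and the waiter sleeps until the count drains to zero. A userspace sketch of the same pattern, using pthreads in place of atomics, wait queues and bio callbacks (illustrative only):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct batch {
	int count;			/* in-flight operations */
	int error;			/* first error seen, if any */
	pthread_mutex_t lock;
	pthread_cond_t done;
};

static void batch_init(struct batch *b)
{
	b->count = 0;
	b->error = 0;
	pthread_mutex_init(&b->lock, NULL);
	pthread_cond_init(&b->done, NULL);
}

/* One increment per submission, as hib_submit_io() does with atomic_inc(). */
static void batch_add(struct batch *b)
{
	pthread_mutex_lock(&b->lock);
	b->count++;
	pthread_mutex_unlock(&b->lock);
}

/* Completion side, as in hib_end_io(): latch the first error and wake
 * the waiter when the last operation finishes. */
static void batch_complete(struct batch *b, int error)
{
	pthread_mutex_lock(&b->lock);
	if (error && !b->error)
		b->error = error;
	if (--b->count == 0)
		pthread_cond_broadcast(&b->done);
	pthread_mutex_unlock(&b->lock);
}

/* hib_wait_io() analogue: sleep until all submitted work has completed. */
static int batch_wait(struct batch *b)
{
	pthread_mutex_lock(&b->lock);
	while (b->count > 0)
		pthread_cond_wait(&b->done, &b->lock);
	pthread_mutex_unlock(&b->lock);
	return b->error;
}

static void *fake_io(void *arg)
{
	usleep(1000);			/* pretend the I/O takes a while */
	batch_complete(arg, 0);
	return NULL;
}

int main(void)
{
	struct batch b;
	pthread_t thr[4];

	batch_init(&b);
	for (int i = 0; i < 4; i++) {
		batch_add(&b);
		pthread_create(&thr[i], NULL, fake_io, &b);
	}
	printf("all done, error=%d\n", batch_wait(&b));
	for (int i = 0; i < 4; i++)
		pthread_join(thr[i], NULL);
	return 0;
}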
/*
* Saving part
*/
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
int error;
hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
swsusp_header, NULL);
if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
!memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
swsusp_header->image = handle->first_sector;
swsusp_header->flags = flags;
if (flags & SF_CRC32_MODE)
swsusp_header->crc32 = handle->crc32;
error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
swsusp_resume_block, swsusp_header, NULL);
} else {
pr_err("Swap header not found!\n");
error = -ENODEV;
}
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 99 | 69.72% | 4 | 33.33% |
Bojan Smojver | 14 | 9.86% | 1 | 8.33% |
Jiri Slaby | 7 | 4.93% | 1 | 8.33% |
Vivek Goyal | 6 | 4.23% | 1 | 8.33% |
Christoph Hellwig | 6 | 4.23% | 2 | 16.67% |
Michael Christie | 4 | 2.82% | 1 | 8.33% |
Andrew Morton | 4 | 2.82% | 1 | 8.33% |
Joe Perches | 2 | 1.41% | 1 | 8.33% |
Total | 142 | 100.00% | 12 | 100.00% |
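The header dance above is just two copies: the original swap signature ("SWAP-SPACE" or "SWAPSPACE2") moves into orig_sig, and sig takes HIBERNATE_SIG, which is how the resume path later recognizes the image and can restore the original signature. The two copies in isolation, with illustrative local buffers standing in for the on-disk header page:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char orig_sig[10];
	char sig[10] = "SWAPSPACE2";	/* 10 bytes, no terminator kept */

	memcpy(orig_sig, sig, 10);	/* stash the swap signature */
	memcpy(sig, "S1SUSPEND", 10);	/* claim the device: HIBERNATE_SIG */
	printf("on disk: %.10s (was %.10s)\n", sig, orig_sig);
	return 0;
}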
/**
* swsusp_swap_check - check if the resume device is a swap device
* and get its index (if so)
*
* This is called before saving the image.
*/
static int swsusp_swap_check(void)
{
int res;
res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
&hib_resume_bdev);
if (res < 0)
return res;
root_swap = res;
res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
if (res)
return res;
res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
if (res < 0)
blkdev_put(hib_resume_bdev, FMODE_WRITE);
/*
* Update the resume device to the one actually used,
* so the test_resume mode can use it in case it is
* invoked from hibernate() to test the snapshot.
*/
swsusp_resume_device = hib_resume_bdev->bd_dev;
return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 71 | 82.56% | 5 | 55.56% |
Chen Yu | 7 | 8.14% | 1 | 11.11% |
Jiri Slaby | 4 | 4.65% | 1 | 11.11% |
Al Viro | 2 | 2.33% | 1 | 11.11% |
Tejun Heo | 2 | 2.33% | 1 | 11.11% |
Total | 86 | 100.00% | 9 | 100.00% |
/**
* write_page - Write one page to given swap location.
* @buf: Address we're writing.
* @offset: Offset of the swap page we're writing to.
* @hb: bio completion batch
*/
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
void *src;
int ret;
if (!offset)
return -ENOSPC;
if (hb) {
src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
__GFP_NORETRY);
if (src) {
copy_page(src, buf);
} else {
ret = hib_wait_io(hb); /* Free pages */
if (ret)
return ret;
src = (void *)__get_free_page(__GFP_RECLAIM |
__GFP_NOWARN |
__GFP_NORETRY);
if (src) {
copy_page(src, buf);
} else {
WARN_ON_ONCE(1);
hb = NULL; /* Go synchronous */
src = buf;
}
}
} else {
src = buf;
}
return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 56 | 36.84% | 3 | 27.27% |
Bojan Smojver | 49 | 32.24% | 2 | 18.18% |
Andrew Morton | 32 | 21.05% | 1 | 9.09% |
Christoph Hellwig | 10 | 6.58% | 2 | 18.18% |
Michael Christie | 2 | 1.32% | 1 | 9.09% |
Mel Gorman | 2 | 1.32% | 1 | 9.09% |
Jan Beulich | 1 | 0.66% | 1 | 9.09% |
Total | 152 | 100.00% | 11 | 100.00% |
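The allocation ladder here is worth spelling out: for asynchronous writes the data is first copied to a bounce page (freed later by hib_end_io()) so the caller's buffer can be reused immediately; if no page is available, the batch is drained to free some and the allocation retried; if that also fails, the write degrades to a synchronous one on the caller's buffer. A userspace sketch of the same ladder, where try_alloc() and drain_batch() are hypothetical stand-ins for __get_free_page() and hib_wait_io():

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

/* Hypothetical stand-ins; try_alloc() fails randomly for demonstration. */
static void *try_alloc(void) { return rand() % 2 ? malloc(4096) : NULL; }
static int drain_batch(void) { return 0; }

static int write_one(void *buf, bool async)
{
	void *src;

	if (async) {
		src = try_alloc();		/* bounce page; may fail */
		if (!src) {
			if (drain_batch())	/* wait for I/O, freeing pages */
				return -1;
			src = try_alloc();	/* one retry */
		}
		if (!src) {			/* still nothing: degrade */
			async = false;
			src = buf;
		}
	} else {
		src = buf;
	}
	printf("%s write from %p\n", async ? "async" : "sync", src);
	if (src != buf)
		free(src);	/* the real code frees this in hib_end_io() */
	return 0;
}

int main(void)
{
	char page[4096];

	for (int i = 0; i < 4; i++)
		write_one(page, true);
	return 0;
}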
static void release_swap_writer(struct swap_map_handle *handle)
{
if (handle->cur)
free_page((unsigned long)handle->cur);
handle->cur = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 27 | 79.41% | 2 | 66.67% |
Andrew Morton | 7 | 20.59% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
static int get_swap_writer(struct swap_map_handle *handle)
{
int ret;
ret = swsusp_swap_check();
if (ret) {
if (ret != -ENOSPC)
pr_err("Cannot find swap device, try swapon -a\n");
return ret;
}
handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
if (!handle->cur) {
ret = -ENOMEM;
goto err_close;
}
handle->cur_swap = alloc_swapdev_block(root_swap);
if (!handle->cur_swap) {
ret = -ENOSPC;
goto err_rel;
}
handle->k = 0;
handle->reqd_free_pages = reqd_free_pages();
handle->first_sector = handle->cur_swap;
return 0;
err_rel:
release_swap_writer(handle);
err_close:
swsusp_close(FMODE_WRITE);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Slaby | 66 | 47.83% | 2 | 22.22% |
Rafael J. Wysocki | 53 | 38.41% | 3 | 33.33% |
Andrew Morton | 10 | 7.25% | 1 | 11.11% |
Bojan Smojver | 7 | 5.07% | 2 | 22.22% |
Joe Perches | 2 | 1.45% | 1 | 11.11% |
Total | 138 | 100.00% | 9 | 100.00% |
static int swap_write_page(struct swap_map_handle *handle, void *buf,
struct hib_bio_batch *hb)
{
int error = 0;
sector_t offset;
if (!handle->cur)
return -EINVAL;
offset = alloc_swapdev_block(root_swap);
error = write_page(buf, offset, hb);
if (error)
return error;
handle->cur->entries[handle->k++] = offset;
if (handle->k >= MAP_PAGE_ENTRIES) {
offset = alloc_swapdev_block(root_swap);
if (!offset)
return -ENOSPC;
handle->cur->next_swap = offset;
error = write_page(handle->cur, handle->cur_swap, hb);
if (error)
goto out;
clear_page(handle->cur);
handle->cur_swap = offset;
handle->k = 0;
if (hb && low_free_pages() <= handle->reqd_free_pages) {
error = hib_wait_io(hb);
if (error)
goto out;
/*
* Recalculate the number of required free pages, to
* make sure we never take more than half.
*/
handle->reqd_free_pages = reqd_free_pages();
}
}
out:
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 139 | 72.02% | 2 | 25.00% |
Bojan Smojver | 33 | 17.10% | 3 | 37.50% |
Andrew Morton | 13 | 6.74% | 1 | 12.50% |
Christoph Hellwig | 7 | 3.63% | 1 | 12.50% |
Jan Beulich | 1 | 0.52% | 1 | 12.50% |
Total | 193 | 100.00% | 8 | 100.00% |
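How the map pages end up chained is easier to see with tiny numbers. Below is a userspace model with three entries per map page instead of 511, where a plain counter stands in for alloc_swapdev_block(); the real code also writes each full map page out to its own sector (cur_swap), which this sketch omits:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t sector_t;
#define MAP_PAGE_ENTRIES 3		/* 511 on a real 4 KiB page */

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;		/* sector of the next map page, 0 = end */
};

int main(void)
{
	struct swap_map_page maps[3];
	sector_t next_free = 100;	/* stand-in for alloc_swapdev_block() */
	unsigned int m = 0, k = 0;

	memset(maps, 0, sizeof(maps));
	for (int page = 0; page < 7; page++) {
		maps[m].entries[k++] = next_free++;	/* one data page */
		if (k >= MAP_PAGE_ENTRIES) {
			/* map page full: link to the next one and restart */
			maps[m].next_swap = next_free++;
			m++;
			k = 0;
		}
	}
	for (unsigned int i = 0; i <= m; i++) {
		for (unsigned int j = 0; j < MAP_PAGE_ENTRIES; j++)
			printf("%3llu ", (unsigned long long)maps[i].entries[j]);
		printf("-> next_swap %llu\n",
		       (unsigned long long)maps[i].next_swap);
	}
	return 0;
}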
static int flush_swap_writer(struct swap_map_handle *handle)
{
if (handle->cur && handle->cur_swap)
return write_page(handle->cur, handle->cur_swap, NULL);
else
return -EINVAL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 38 | 95.00% | 1 | 50.00% |
Andrew Morton | 2 | 5.00% | 1 | 50.00% |
Total | 40 | 100.00% | 2 | 100.00% |
static int swap_writer_finish(struct swap_map_handle *handle,
unsigned int flags, int error)
{
if (!error) {
flush_swap_writer(handle);
pr_info("S");
error = mark_swapfiles(handle, flags);
pr_cont("|\n");
}
if (error)
free_all_swap_pages(root_swap);
release_swap_writer(handle);
swsusp_close(FMODE_WRITE);
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Slaby | 68 | 95.77% | 1 | 50.00% |
Joe Perches | 3 | 4.23% | 1 | 50.00% |
Total | 71 | 100.00% | 2 | 100.00% |
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER sizeof(size_t)
/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES 32
#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS 3
/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES 1024
#define LZO_MAX_RD_PAGES 8192
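Plugging in typical values makes the sizing concrete. Assuming 4 KiB pages, an 8-byte size_t and the stock lzo1x_worst_compress() bound of x + x/16 + 64 + 3, a 32-page (128 KiB) uncompressed block needs at most 35 pages of compressed buffer:

#include <stdio.h>

#define PAGE_SIZE 4096UL		/* assumed */
#define LZO_HEADER sizeof(size_t)	/* 8 on 64-bit builds */
#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define LZO_UNC_PAGES 32
#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)	/* 131072 bytes */
#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
                                   LZO_HEADER, PAGE_SIZE)

int main(void)
{
	/* 131072 + 8192 + 67 + 8 = 139339 bytes -> 35 pages worst case */
	printf("unc: %lu bytes, cmp: %lu pages\n", LZO_UNC_SIZE,
	       (unsigned long)LZO_CMP_PAGES);
	return 0;
}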
/**
* save_image - save the suspend image data
*/
static int save_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
{
unsigned int m;
int ret;
int nr_pages;
int err2;
struct hib_bio_batch hb;
ktime_t start;
ktime_t stop;
hib_init_batch(&hb);
pr_info("Saving image data pages (%u pages)...\n",
nr_to_write);
m = nr_to_write / 10;
if (!m)
m = 1;
nr_pages = 0;
start = ktime_get();
while (1) {
ret = snapshot_read_next(snapshot);
if (ret <= 0)
break;
ret = swap_write_page(handle, data_of(*snapshot), &hb);
if (ret)
break;
if (!(nr_pages % m))
pr_info("Image saving progress: %3d%%\n",
nr_pages / m * 10);
nr_pages++;
}
err2 = hib_wait_io(&hb);
stop = ktime_get();
if (!ret)
ret = err2;
if (!ret)
pr_info("Image saving done\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 105 | 55.26% | 2 | 22.22% |
Andrew Morton | 43 | 22.63% | 2 | 22.22% |
Jiri Slaby | 14 | 7.37% | 1 | 11.11% |
Christoph Hellwig | 11 | 5.79% | 1 | 11.11% |
Tina Ruchandani | 8 | 4.21% | 1 | 11.11% |
Joe Perches | 6 | 3.16% | 1 | 11.11% |
Bojan Smojver | 3 | 1.58% | 1 | 11.11% |
Total | 190 | 100.00% | 9 | 100.00% |
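The progress arithmetic prints roughly once per tenth of the image; because the step m comes from integer division, the reported percentage is coarse. The same computation in isolation, with an arbitrary small page count:

#include <stdio.h>

int main(void)
{
	unsigned int nr_to_write = 42;	/* tiny image for demonstration */
	unsigned int m = nr_to_write / 10;

	if (!m)
		m = 1;
	/* prints at pages 0, 4, 8, ..., 40: 0%, 10%, ..., 100% */
	for (unsigned int nr_pages = 0; nr_pages < nr_to_write; nr_pages++)
		if (!(nr_pages % m))
			printf("Image saving progress: %3u%%\n",
			       nr_pages / m * 10);
	return 0;
}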
/**
* Structure used for CRC32.
*/
struct crc_data {
struct task_struct *thr; /* thread */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
unsigned run_threads; /* nr current threads */
wait_queue_head_t go; /* start crc update */
wait_queue_head_t done; /* crc update done */
u32 *crc32; /* points to handle's crc32 */
size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
unsigned char *unc[LZO_THREADS]; /* uncompressed data */
};
/**
* CRC32 update function that runs in its own thread.
*/
static int crc32_threadfn(void *data)
{
struct crc_data *d = data;
unsigned i;
while (1) {
wait_event(d->go, atomic_read(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
atomic_set(&d->stop, 1);
wake_up(&d->done);
break;
}
atomic_set(&d->ready, 0);
for (i = 0; i < d->run_threads; i++)
*d->crc32 = crc32_le(*d->crc32,
d->unc[i], *d->unc_len[i]);
atomic_set(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bojan Smojver | 129 | 86.00% | 2 | 33.33% |
Rafael J. Wysocki | 14 | 9.33% | 3 | 50.00% |
Jiri Slaby | 7 | 4.67% | 1 | 16.67% |
Total | 150 | 100.00% | 6 | 100.00% |
/**
* Structure used for LZO data compression.
*/
struct cmp_data {
struct task_struct *thr; /* thread */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
int ret; /* return code */
wait_queue_head_t go; /* start compression */
wait_queue_head_t done; /* compression done */
size_t unc_len; /* uncompressed length */
size_t cmp_len; /* compressed length */
unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
};
/**
* Compression function that runs in its own thread.
*/
static int lzo_compress_threadfn(void *data)
{
struct cmp_data *d = data;
while (1) {
wait_event(d->go, atomic_read(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
atomic_set(&d->stop, 1);
wake_up(&d->done);
break;
}
atomic_set(&d->ready, 0);
d->ret = lzo1x_1_compress(d->unc, d->unc_len,
d->cmp + LZO_HEADER, &d->cmp_len,
d->wrk);
atomic_set(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Bojan Smojver | 135 | 95.74% | 2 | 66.67% |
Rafael J. Wysocki | 6 | 4.26% | 1 | 33.33% |
Total | 141 | 100.00% | 3 | 100.00% |
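Both crc32_threadfn() and lzo_compress_threadfn() follow the same handshake: the worker sleeps on go until ready is set (or it is asked to stop), clears ready, does one unit of work, then sets stop and wakes done for the coordinator. A userspace sketch of this handshake, with pthreads standing in for kthreads and wait queues (illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ready, stop, quit;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t go = PTHREAD_COND_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;

static void *workfn(void *unused)
{
	for (;;) {
		/* wait_event(d->go, ...): sleep until told to run or exit */
		pthread_mutex_lock(&lock);
		while (!atomic_load(&ready) && !atomic_load(&quit))
			pthread_cond_wait(&go, &lock);
		pthread_mutex_unlock(&lock);
		if (atomic_load(&quit))		/* kthread_should_stop() */
			break;
		atomic_store(&ready, 0);

		/* ... one unit of work (compress or CRC) would go here ... */

		atomic_store(&stop, 1);		/* signal completion */
		pthread_mutex_lock(&lock);
		pthread_cond_signal(&done);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, workfn, NULL);

	/* Coordinator side: hand work to the thread, then wait for it. */
	atomic_store(&ready, 1);
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&go);
	while (!atomic_load(&stop))
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);
	atomic_store(&stop, 0);
	printf("one work unit completed\n");

	/* ask the worker to exit, as kthread_stop() would */
	atomic_store(&quit, 1);
	pthread_mutex_lock(&lock);
	pthread_cond_signal(&go);
	pthread_mutex_unlock(&lock);
	pthread_join(thr, NULL);
	return 0;
}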
/**
* save_image_lzo - Save the suspend image data compressed with LZO.
* @handle: Swap map handle to use for saving the image.
* @snapshot: Image to read data from.
* @nr_to_write: Number of pages to save.
*/
static int save_image_lzo(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
{
unsigned int m;
int ret = 0;
int nr_pages;
int err2;
struct hib_bio_batch hb;
ktime_t start;
ktime_t stop;
size_t off;
unsigned thr, run_threads, nr_threads;
unsigned char *page = NULL;
struct cmp_data *data = NULL;
struct crc_data *crc = NULL;
hib_init_batch(&hb);
/*
* We'll limit the number of compression threads to bound the memory
* footprint.
*/
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1