Contributors: 8
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Vivek Goyal |
103 |
83.74% |
2 |
20.00% |
Akinobu Mita |
9 |
7.32% |
1 |
10.00% |
Cliff Wickman |
4 |
3.25% |
2 |
20.00% |
Gustavo Fernando Padovan |
2 |
1.63% |
1 |
10.00% |
Michael Ellerman |
2 |
1.63% |
1 |
10.00% |
Dave Jones |
1 |
0.81% |
1 |
10.00% |
Randy Dunlap |
1 |
0.81% |
1 |
10.00% |
Greg Kroah-Hartman |
1 |
0.81% |
1 |
10.00% |
Total |
123 |
100.00% |
10 |
100.00%
// SPDX-License-Identifier: GPL-2.0
/*
* Memory preserving reboot related code.
*
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: destination buffer; either a kernel-space or a user-space
 *	address, as selected by @userbuf
 * @csize: number of bytes to copy
 * @offset: byte offset within the page (identified by @pfn) at which
 *	the copy starts
 * @userbuf: non-zero means @buf is a user-space address and
 *	copy_to_user() is used; zero means @buf is in kernel space and
 *	plain memcpy() suffices.
 *
 * The old kernel's memory has no mapping in the current kernel, so a
 * temporary cached mapping is created for the page (much like
 * kmap_atomic), the requested bytes are copied out, and the mapping is
 * torn down again before returning.
 *
 * Returns the number of bytes copied, 0 for a zero-length request,
 * -ENOMEM if the page could not be mapped, or -EFAULT if the copy to
 * user space faulted.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
		size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;
	ssize_t ret = csize;

	/* Nothing to do for an empty request. */
	if (csize == 0)
		return 0;

	vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	if (userbuf) {
		if (copy_to_user(buf, vaddr + offset, csize))
			ret = -EFAULT;
	} else {
		memcpy(buf, vaddr + offset, csize);
	}

	/*
	 * On success, force the unmap to take effect immediately rather
	 * than lazily; the fault path skips this, matching the original
	 * error handling.
	 */
	if (ret >= 0)
		set_iounmap_nonlazy();
	iounmap(vaddr);
	return ret;
}