Contributors: 6

Author                Tokens  Token Proportion  Commits  Commit Proportion
Ralf Baechle             182          95.79%          1           16.67%
Eunbong Song 송은봉         3           1.58%          1           16.67%
Joe Perches                2           1.05%          1           16.67%
Linus Torvalds             1           0.53%          1           16.67%
Greg Kroah-Hartman         1           0.53%          1           16.67%
Mike Rapoport              1           0.53%          1           16.67%
Total                    190                           6


// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

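/*
 * One-page bounce buffer, allocated at boot by kdump_buf_page_init().
 * copy_oldmem_page() stages data here so that copy_to_user() never runs
 * while the atomic (kmap_atomic) mapping is live.
 */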
static void *kdump_buf_page;

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem", the memory image of the crashed kernel. No
 * pte is mapped for this page in the current (capture) kernel.
 *
 * Calling copy_to_user() in atomic context is not desirable, so the data
 * is first copied to a pre-allocated kernel page and then copied to user
 * space in non-atomic context.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

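	/* Map the old kernel's page; it has no pte in this kernel. */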
	vaddr = kmap_atomic_pfn(pfn);

	if (!userbuf) {
		memcpy(buf, (vaddr + offset), csize);
		kunmap_atomic(vaddr);
	} else {
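		/*
		 * Destination is user space: bounce through the
		 * pre-allocated kernel page so copy_to_user() runs
		 * only after the atomic mapping is dropped.
		 */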
		if (!kdump_buf_page) {
			pr_warn("Kdump: Kdump buffer page not allocated\n");
			kunmap_atomic(vaddr);
			return -EFAULT;
		}
		copy_page(kdump_buf_page, vaddr);
		kunmap_atomic(vaddr);
		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
			return -EFAULT;
	}

	return csize;
}
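
/*
 * Example (not part of the original file): a minimal sketch of how a
 * caller such as the /proc/vmcore read path might drive
 * copy_oldmem_page(), copying an arbitrary byte range out of the old
 * kernel's memory one page at a time. The function name and the @ppos
 * convention are hypothetical; the real loop lives in fs/proc/vmcore.c
 * (read_from_oldmem()). Kept under #if 0 so it serves as illustration
 * only.
 */
#if 0
static ssize_t example_read_oldmem(char *buf, size_t count, u64 *ppos,
				   int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t tmp, read = 0;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Never cross a page boundary in a single call. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
#endif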

static int __init kdump_buf_page_init(void)
{
	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!kdump_buf_page) {
		pr_warn("Kdump: Failed to allocate kdump buffer page\n");
		return -ENOMEM;
	}

	return 0;
}
arch_initcall(kdump_buf_page_init);
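
/*
 * Note: arch_initcall() runs this during early boot, long before user
 * space can open /proc/vmcore, so the bounce buffer is in place by the
 * time of the first read.
 */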