Contributors: 7

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Dan J Williams | 161 | 73.18% | 8 | 57.14% |
| Ross Zwisler | 30 | 13.64% | 1 | 7.14% |
| Huaisheng Ye | 16 | 7.27% | 1 | 7.14% |
| Jane Chu | 4 | 1.82% | 1 | 7.14% |
| Alistair Popple | 4 | 1.82% | 1 | 7.14% |
| Stefan Hajnoczi | 3 | 1.36% | 1 | 7.14% |
| Thomas Gleixner | 2 | 0.91% | 1 | 7.14% |
| Total | 220 | | 14 | |

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, Intel Corporation.
 */
#include "test/nfit_test.h"
#include <linux/blkdev.h>
#include <linux/dax.h>
#include "pmem.h"
#include "nd.h"

long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode mode, void **kaddr,
		unsigned long *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	/* Refuse DAX access to any range that overlaps a known-bad sector */
	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
				PFN_PHYS(nr_pages))))
		return -EIO;

	/*
	 * Limit DAX to a single page at a time in the nfit_test case,
	 * since the backing memory is vmalloc()-allocated and not
	 * guaranteed to be physically contiguous.
	 */
	if (get_nfit_res(pmem->phys_addr + offset)) {
		struct page *page;

		if (kaddr)
			*kaddr = pmem->virt_addr + offset;
		page = vmalloc_to_page(pmem->virt_addr + offset);
		if (pfn)
			*pfn = page_to_pfn(page);
		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
				__func__, pmem, pgoff, page_to_pfn(page));

		return 1;
	}

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = PHYS_PFN(pmem->phys_addr + offset);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}
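
For context, `__pmem_direct_access()` is not called by filesystems directly; it is reached through the `direct_access` callback of the pmem driver's `struct dax_operations`. The following is a minimal sketch of that wiring, modeled on drivers/nvdimm/pmem.c. The names `pmem_dax_direct_access` and `pmem_dax_ops` match the mainline driver to the best of my knowledge, but treat this as an illustrative sketch rather than the authoritative source; other callbacks in the ops table are omitted.

/*
 * Sketch of the caller-side wiring, modeled on drivers/nvdimm/pmem.c.
 * Callbacks other than direct_access are omitted for brevity.
 */
static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
		void **kaddr, unsigned long *pfn)
{
	/* Recover the pmem_device stashed as private data at alloc_dax() time */
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, mode, kaddr, pfn);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
};

Note the two return conventions visible in the function above: the vmalloc()-backed nfit_test branch reports exactly one valid page per call, while the real-pmem path reports the entire physically contiguous run remaining in the device (clamped to the requested range when badblocks are present), which lets callers such as dax_direct_access() map larger extents in a single call.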