Release 4.14 arch/x86/xen/mmu.c
#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>
#include "multicalls.h"
#include "mmu.h"
/*
* Protects atomic reservation decrease/increase against concurrent increases.
* Also protects non-atomic updates of current_pages and balloon lists.
*/
DEFINE_SPINLOCK(xen_reservation_lock);
/*
 * Translate any mapped kernel virtual address to the machine frame
 * number backing it.  Unlike virt_to_mfn() this also works outside the
 * linear mapping, via arbitrary_virt_to_machine().
 */
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t machine_addr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(machine_addr.maddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
/*
 * Translate a kernel virtual address into a machine address.
 *
 * Linearly-mapped addresses are converted with the quick
 * virt_to_machine() p2m lookup; anything else (vmalloc, fixmap, ...)
 * needs a full page-table walk.  The mapping must exist: an absent
 * PTE is a caller bug and triggers BUG_ON().
 */
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long virt = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;

	/* Fast path for the linear mapping. */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Slow path: walk the page tables down to the leaf entry. */
	pte = lookup_address(virt, &level);
	BUG_ON(pte == NULL);

	/* Recombine the frame's machine address with the in-page offset. */
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) +
		      (virt & ~PAGE_MASK));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 52 | 59.09% | 3 | 50.00% |
Chris Lalancette | 29 | 32.95% | 1 | 16.67% |
Ingo Molnar | 6 | 6.82% | 1 | 16.67% |
Harvey Harrison | 1 | 1.14% | 1 | 16.67% |
Total | 88 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
static void xen_flush_tlb_all(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb_all(0);
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
op = mcs.args;
op->cmd = MMUEXT_TLB_FLUSH_ALL;
MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
xen_mc_issue(PARAVIRT_LAZY_MMU);
preempt_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 49 | 69.01% | 2 | 40.00% |
Vitaly Kuznetsov | 20 | 28.17% | 1 | 20.00% |
Ingo Molnar | 1 | 1.41% | 1 | 20.00% |
Juergen Gross | 1 | 1.41% | 1 | 20.00% |
Total | 71 | 100.00% | 5 | 100.00% |
/* Number of PTE updates batched into a single mmu_update hypercall. */
#define REMAP_BATCH_SIZE 16

/* State shared between do_remap_gfn() and its per-PTE callback. */
struct remap_data {
	/* Frame cursor; interpretation depends on @contiguous below. */
	xen_pfn_t *mfn;
	/*
	 * true: *mfn is a single base frame, incremented per page;
	 * false: mfn points into an array, advanced one entry per page.
	 */
	bool contiguous;
	/* Page protections applied to every new PTE. */
	pgprot_t prot;
	/* Next free slot in the caller's mmu_update batch array. */
	struct mmu_update *mmu_update;
};
/*
 * apply_to_page_range() callback: queue one MMU_NORMAL_PT_UPDATE entry
 * that maps the current frame at @ptep, then advance the remap cursor
 * in @data (a struct remap_data).  Always succeeds.
 */
static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	struct mmu_update *u = rmd->mmu_update;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/*
	 * Contiguous range: bump the single base frame in place.
	 * Discontiguous: step to the next entry of the frame array.
	 */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	u->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u->val = pte_val_ma(pte);
	rmd->mmu_update = u + 1;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 75 | 74.26% | 1 | 16.67% |
David Vrabel | 22 | 21.78% | 2 | 33.33% |
Wei Liu | 2 | 1.98% | 1 | 16.67% |
Jeremy Fitzhardinge | 1 | 0.99% | 1 | 16.67% |
Adam Buchbinder | 1 | 0.99% | 1 | 16.67% |
Total | 101 | 100.00% | 6 | 100.00% |
/*
 * Map @nr foreign (domain @domid) frames into @vma starting at @addr.
 *
 * @gfn:     if @err_ptr is NULL, pointer to a single base frame that is
 *           mapped contiguously over @nr pages; otherwise an array of
 *           @nr frames, one per page.
 * @err_ptr: optional per-page status array.  May alias @gfn.  Entries
 *           are set to 0 for frames that mapped, or to the negative
 *           errno the hypervisor returned for frames that failed.
 *
 * Returns the number of pages successfully mapped, or a negative errno
 * (only possible when @err_ptr is NULL, or when the page-table walk
 * itself fails).
 */
static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	/* Caller must have marked the VMA as a raw PFN/IO mapping. */
	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.mfn = gfn;
	rmd.prot = prot;
	/* We use the err_ptr to indicate if there we are doing a contiguous
	 * mapping or a discontiguous mapping. */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		/* Fill mmu_update[] with one PTE update per page in the batch. */
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/* We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done */
		do {
			int i;

			/* On partial failure @done is the count of updates
			 * applied before the failing one. */
			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				/* After the loop above, i == index + done,
				 * i.e. the slot of the frame that failed. */
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	/* The hypervisor updates are not guaranteed to be TLB-coherent. */
	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 149 | 47.91% | 3 | 30.00% |
David Vrabel | 138 | 44.37% | 3 | 30.00% |
Stefano Stabellini | 18 | 5.79% | 1 | 10.00% |
Julien Grall | 4 | 1.29% | 1 | 10.00% |
Konrad Rzeszutek Wilk | 1 | 0.32% | 1 | 10.00% |
Adam Buchbinder | 1 | 0.32% | 1 | 10.00% |
Total | 311 | 100.00% | 10 | 100.00% |
/*
 * Map @nr machine frames starting at @gfn contiguously into @vma at
 * @addr.  Returns the number of pages mapped, or a negative errno.
 */
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	xen_pfn_t base = gfn;

	/* A NULL err_ptr selects contiguous-range mode in do_remap_gfn(). */
	return do_remap_gfn(vma, addr, &base, nr, NULL, prot, domid, pages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Vrabel | 47 | 88.68% | 1 | 33.33% |
Julien Grall | 4 | 7.55% | 1 | 33.33% |
Ian Campbell | 2 | 3.77% | 1 | 33.33% |
Total | 53 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
/*
 * Map the @nr frames listed in @gfn[] into @vma at @addr, recording a
 * per-page status in @err_ptr[] (0 or a negative errno).  Returns the
 * number of pages mapped, or a negative errno.
 */
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	/*
	 * A NULL err_ptr is a programmer error: do_remap_gfn() would
	 * silently fall into contiguous mode and map the wrong memory,
	 * which is very hard to diagnose after the fact.  Fail loudly.
	 */
	BUG_ON(!err_ptr);

	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Vrabel | 61 | 93.85% | 1 | 50.00% |
Julien Grall | 4 | 6.15% | 1 | 50.00% |
Total | 65 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages)
{
	/*
	 * With auto-translated physmap and a pages array there would be
	 * real teardown work, which this (PV) implementation does not
	 * support; reject that combination.  Everything else is a no-op.
	 */
	if (pages && xen_feature(XENFEAT_auto_translated_physmap))
		return -EINVAL;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 36 | 97.30% | 1 | 50.00% |
Julien Grall | 1 | 2.70% | 1 | 50.00% |
Total | 37 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 290 | 35.32% | 3 | 10.00% |
David Vrabel | 277 | 33.74% | 4 | 13.33% |
Jeremy Fitzhardinge | 142 | 17.30% | 9 | 30.00% |
Chris Lalancette | 29 | 3.53% | 1 | 3.33% |
Vitaly Kuznetsov | 22 | 2.68% | 1 | 3.33% |
Stefano Stabellini | 18 | 2.19% | 1 | 3.33% |
Julien Grall | 16 | 1.95% | 1 | 3.33% |
Ingo Molnar | 7 | 0.85% | 1 | 3.33% |
Alex Nixon | 5 | 0.61% | 1 | 3.33% |
Stephen Rothwell | 5 | 0.61% | 1 | 3.33% |
Adam Buchbinder | 2 | 0.24% | 1 | 3.33% |
Wei Liu | 2 | 0.24% | 1 | 3.33% |
Andrew Jones | 2 | 0.24% | 1 | 3.33% |
Juergen Gross | 1 | 0.12% | 1 | 3.33% |
Harvey Harrison | 1 | 0.12% | 1 | 3.33% |
Daniel Kiper | 1 | 0.12% | 1 | 3.33% |
Konrad Rzeszutek Wilk | 1 | 0.12% | 1 | 3.33% |
Total | 821 | 100.00% | 30 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.