cregit-Linux: how code gets into the kernel

Release 4.14: arch/x86/xen/mmu.c

Directory: arch/x86/xen
#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */

DEFINE_SPINLOCK(xen_reservation_lock);


unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
        xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

        return PFN_DOWN(maddr.maddr);
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
Jeremy Fitzhardinge     26      100.00%   1        100.00%
Total                   26      100.00%   1        100.00%


xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;
        pte_t *pte;
        unsigned offset;

        /*
         * if the PFN is in the linear mapped vaddr range, we can just use
         * the (quick) virt_to_machine() p2m lookup
         */
        if (virt_addr_valid(vaddr))
                return virt_to_machine(vaddr);

        /* otherwise we have to do a (slower) full page-table walk */

        pte = lookup_address(address, &level);
        BUG_ON(pte == NULL);
        offset = address & ~PAGE_MASK;
        return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
Jeremy Fitzhardinge     52      59.09%    3        50.00%
Chris Lalancette        29      32.95%    1        16.67%
Ingo Molnar             6       6.82%     1        16.67%
Harvey Harrison         1       1.14%     1        16.67%
Total                   88      100.00%   6        100.00%

EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
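
As a usage sketch only (not part of mmu.c): a caller holding a kernel virtual address that may fall outside the linear mapping can use these two exported helpers to obtain the machine address and machine frame number before handing them to the hypervisor. The function name, the buffer argument and the pr_info() output below are hypothetical; only arbitrary_virt_to_machine(), arbitrary_virt_to_mfn() and the xmaddr_t/.maddr accessor come from the code above, and the helpers are assumed to be declared via <asm/xen/page.h>.

#include <linux/printk.h>
#include <asm/xen/page.h>

/* Hypothetical caller sketch: report the machine address backing a vaddr. */
static void example_show_machine_address(void *buf)
{
        xmaddr_t maddr = arbitrary_virt_to_machine(buf);
        unsigned long mfn = arbitrary_virt_to_mfn(buf);

        pr_info("vaddr %p -> maddr 0x%llx (mfn 0x%lx)\n",
                buf, (unsigned long long)maddr.maddr, mfn);
}
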
static void xen_flush_tlb_all(void)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb_all(0);

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_ALL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
Jeremy Fitzhardinge     49      69.01%    2        40.00%
Vitaly Kuznetsov        20      28.17%    1        20.00%
Ingo Molnar             1       1.41%     1        20.00%
Juergen Gross           1       1.41%     1        20.00%
Total                   71      100.00%   5        100.00%
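
The multicall pattern in xen_flush_tlb_all() generalizes to other MMUEXT operations: reserve a slot with xen_mc_entry(), fill in an mmuext_op, queue it with MULTI_mmuext_op(), then let xen_mc_issue() flush the batch unless lazy MMU mode is active. The sketch below is illustrative, written as if it sat next to the code above and reused the same headers and multicall helpers; it flushes a single linear address instead of the whole TLB. The function name is hypothetical, while MMUEXT_INVLPG_LOCAL and the arg1.linear_addr field belong to the standard mmuext_op interface.

/* Illustrative sketch (not part of mmu.c): flush one address via a multicall. */
static void example_flush_tlb_one(unsigned long addr)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = MMUEXT_INVLPG_LOCAL;
        op->arg1.linear_addr = addr & PAGE_MASK;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}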

#define REMAP_BATCH_SIZE 16

struct remap_data {
        xen_pfn_t *mfn;
        bool contiguous;
        pgprot_t prot;
        struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
                                 unsigned long addr, void *data)
{
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

        /* If we have a contiguous range, just update the mfn itself,
           else update pointer to be "next mfn". */
        if (rmd->contiguous)
                (*rmd->mfn)++;
        else
                rmd->mfn++;

        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;

        return 0;
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
Ian Campbell            75      74.26%    1        16.67%
David Vrabel            22      21.78%    2        33.33%
Wei Liu                 2       1.98%     1        16.67%
Jeremy Fitzhardinge     1       0.99%     1        16.67%
Adam Buchbinder         1       0.99%     1        16.67%
Total                   101     100.00%   6        100.00%


static int do_remap_gfn(struct vm_area_struct *vma,
                        unsigned long addr,
                        xen_pfn_t *gfn, int nr,
                        int *err_ptr, pgprot_t prot,
                        unsigned domid,
                        struct page **pages)
{
        int err = 0;
        struct remap_data rmd;
        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
        unsigned long range;
        int mapped = 0;

        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

        rmd.mfn = gfn;
        rmd.prot = prot;
        /* We use the err_ptr to indicate if there we are doing a contiguous
         * mapping or a discontigious mapping. */
        rmd.contiguous = !err_ptr;

        while (nr) {
                int index = 0;
                int done = 0;
                int batch = min(REMAP_BATCH_SIZE, nr);
                int batch_left = batch;
                range = (unsigned long)batch << PAGE_SHIFT;

                rmd.mmu_update = mmu_update;
                err = apply_to_page_range(vma->vm_mm, addr, range,
                                          remap_area_mfn_pte_fn, &rmd);
                if (err)
                        goto out;

                /* We record the error for each page that gives an error, but
                 * continue mapping until the whole set is done */
                do {
                        int i;

                        err = HYPERVISOR_mmu_update(&mmu_update[index],
                                                    batch_left, &done, domid);

                        /*
                         * @err_ptr may be the same buffer as @gfn, so
                         * only clear it after each chunk of @gfn is
                         * used.
                         */
                        if (err_ptr) {
                                for (i = index; i < index + done; i++)
                                        err_ptr[i] = 0;
                        }
                        if (err < 0) {
                                if (!err_ptr)
                                        goto out;
                                err_ptr[i] = err;
                                done++; /* Skip failed frame. */
                        } else
                                mapped += done;
                        batch_left -= done;
                        index += done;
                } while (batch_left);

                nr -= batch;
                addr += range;
                if (err_ptr)
                        err_ptr += batch;
                cond_resched();
        }
out:

        xen_flush_tlb_all();

        return err < 0 ? err : mapped;
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
Ian Campbell            149     47.91%    3        30.00%
David Vrabel            138     44.37%    3        30.00%
Stefano Stabellini      18      5.79%     1        10.00%
Julien Grall            4       1.29%     1        10.00%
Konrad Rzeszutek Wilk   1       0.32%     1        10.00%
Adam Buchbinder         1       0.32%     1        10.00%
Total                   311     100.00%   10       100.00%


int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
{
        return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
David Vrabel            47      88.68%    1        33.33%
Julien Grall            4       7.55%     1        33.33%
Ian Campbell            2       3.77%     1        33.33%
Total                   53      100.00%   3        100.00%

EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
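
As a hedged usage sketch (not from mmu.c): a privcmd-style driver might call xen_remap_domain_gfn_range() from its mmap handler to map a contiguous run of foreign frames into a userspace VMA. The surrounding function and its first_gfn/nr/domid parameters are hypothetical; the call signature and the VM_IO | VM_PFNMAP requirement enforced by do_remap_gfn() come from the code above, and the declaration is assumed to live in <xen/xen-ops.h>.

#include <linux/mm.h>
#include <xen/xen-ops.h>

/*
 * Hypothetical mmap-path sketch: map 'nr' consecutive foreign frames of
 * domain 'domid', starting at 'first_gfn', over the start of the VMA.
 * The VMA must carry VM_IO | VM_PFNMAP or do_remap_gfn() will BUG().
 */
static int example_map_foreign_range(struct vm_area_struct *vma,
                                     xen_pfn_t first_gfn, int nr,
                                     unsigned int domid)
{
        int mapped;

        vma->vm_flags |= VM_IO | VM_PFNMAP;

        mapped = xen_remap_domain_gfn_range(vma, vma->vm_start, first_gfn, nr,
                                            vma->vm_page_prot, domid, NULL);

        /* A negative value is an error; otherwise it is the count mapped. */
        return mapped < 0 ? mapped : 0;
}
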
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid, struct page **pages)
{
        /* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
         * and the consequences later is quite hard to detect what the actual
         * cause of "wrong memory was mapped in". */
        BUG_ON(err_ptr == NULL);
        return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
David Vrabel            61      93.85%    1        50.00%
Julien Grall            4       6.15%     1        50.00%
Total                   65      100.00%   2        100.00%

EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
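
A matching sketch for the array variant, again hypothetical apart from the exported signature: frames are passed as a list, err_ptr must be non-NULL, each err_ptr entry reports the per-frame outcome, and the return value is the number of frames actually mapped.

/*
 * Hypothetical sketch: map a discontiguous list of foreign frames.  The
 * gfns/errs buffers and the surrounding function are illustrative; errs[i]
 * receives 0 on success or a negative error for frame i.
 */
static int example_map_foreign_array(struct vm_area_struct *vma,
                                     xen_pfn_t *gfns, int *errs, int nr,
                                     unsigned int domid)
{
        vma->vm_flags |= VM_IO | VM_PFNMAP;

        return xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
                                          errs, vma->vm_page_prot, domid,
                                          NULL);
}
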
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int numpgs, struct page **pages)
{
        if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        return -EINVAL;
}

Contributors

Person                  Tokens  Prop      Commits  CommitProp
Ian Campbell            36      97.30%    1        50.00%
Julien Grall            1       2.70%     1        50.00%
Total                   37      100.00%   2        100.00%

EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);

Overall Contributors

Person                  Tokens  Prop      Commits  CommitProp
Ian Campbell            290     35.32%    3        10.00%
David Vrabel            277     33.74%    4        13.33%
Jeremy Fitzhardinge     142     17.30%    9        30.00%
Chris Lalancette        29      3.53%     1        3.33%
Vitaly Kuznetsov        22      2.68%     1        3.33%
Stefano Stabellini      18      2.19%     1        3.33%
Julien Grall            16      1.95%     1        3.33%
Ingo Molnar             7       0.85%     1        3.33%
Alex Nixon              5       0.61%     1        3.33%
Stephen Rothwell        5       0.61%     1        3.33%
Adam Buchbinder         2       0.24%     1        3.33%
Wei Liu                 2       0.24%     1        3.33%
Andrew Jones            2       0.24%     1        3.33%
Juergen Gross           1       0.12%     1        3.33%
Harvey Harrison         1       0.12%     1        3.33%
Daniel Kiper            1       0.12%     1        3.33%
Konrad Rzeszutek Wilk   1       0.12%     1        3.33%
Total                   821     100.00%   30       100.00%
Created with cregit.