cregit-Linux: how code gets into the kernel

Release 4.10 arch/x86/kvm/iommu.c

Directory: arch/x86/kvm
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/iommu.h>
#include "assigned-dev.h"


static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
		   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);


static kvm_pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
			       unsigned long npages)
{
	gfn_t end_gfn;
	kvm_pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(slot, gfn);
	end_gfn = gfn + npages;
	gfn    += 1;

	if (is_error_noslot_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(slot, gfn++);

	return pfn;
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
joerg roedel             65   92.86%        1       25.00%
quentin casasnovas        2    2.86%        1       25.00%
dan williams              2    2.86%        1       25.00%
xiao guangrong            1    1.43%        1       25.00%
Total                    70  100.00%        4      100.00%


static void kvm_unpin_pages(struct kvm *kvm, kvm_pfn_t pfn,
			    unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
michael s. tsirkin       41   97.62%        1       50.00%
dan williams              1    2.38%        1       50.00%
Total                    42  100.00%        2      100.00%


int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	kvm_pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ;
	if (!(slot->flags & KVM_MEM_READONLY))
		flags |= IOMMU_WRITE;
	if (!kvm->arch.iommu_noncoherent)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/* Make sure hva is aligned to the page size we want to map */
		while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4kb steps later.
		 */
		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
		if (is_error_noslot_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address:"
			       "iommu failed to map pfn=%llx\n", pfn);
			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;

		cond_resched();
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
	return r;
}
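For illustration only, the following standalone snippet (not part of iommu.c) walks through the alignment arithmetic used above. The gfn, end_gfn, and the initial 2 MiB candidate size are hypothetical, and PAGE_SHIFT is assumed to be 12 (4 KiB pages).

/*
 * Standalone illustration of how kvm_iommu_map_pages() shrinks the
 * candidate mapping size until it fits the memslot and is aligned.
 * All values below are hypothetical; PAGE_SHIFT is assumed to be 12.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long long gfn = 0x201;            /* not 2 MiB aligned */
	unsigned long long end_gfn = 0x400;        /* end of the memslot */
	unsigned long long page_size = 2ULL << 20; /* 2 MiB candidate */

	/* Make sure the mapping does not run past the memslot */
	while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
		page_size >>= 1;

	/* Make sure the guest-physical address is aligned to page_size */
	while ((gfn << PAGE_SHIFT) & (page_size - 1))
		page_size >>= 1;

	/* Prints "map 4 KiB at gfn 0x201": the unaligned gfn forces 4 KiB */
	printf("map %llu KiB at gfn 0x%llx\n", page_size >> 10, gfn);
	return 0;
}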

Contributors

Person               Tokens     Prop  Commits  Commit Prop
joerg roedel            112   36.36%        4       25.00%
ben-ami yassour         106   34.42%        1        6.25%
sheng yang               20    6.49%        1        6.25%
greg edwards             20    6.49%        1        6.25%
alex williamson          16    5.19%        2       12.50%
michael s. tsirkin       13    4.22%        1        6.25%
marcelo tosatti           9    2.92%        1        6.25%
weidong han               6    1.95%        2       12.50%
quentin casasnovas        4    1.30%        1        6.25%
xiao guangrong            1    0.32%        1        6.25%
dan williams              1    0.32%        1        6.25%
Total                   308  100.00%       16      100.00%


static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_register_noncoherent_dma(kvm);

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
ben-ami yassour          32   34.41%        1       12.50%
sheng yang               22   23.66%        1       12.50%
alex williamson          13   13.98%        1       12.50%
xiao guangrong           12   12.90%        1       12.50%
marcelo tosatti          11   11.83%        2       25.00%
joerg roedel              2    2.15%        1       12.50%
lai jiangshan             1    1.08%        1       12.50%
Total                    93  100.00%        8      100.00%


int kvm_assign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r;
	bool noncoherent;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
		return r;
	}

	noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);

	/* Check if need to update IOMMU page table for guest memory */
	if (noncoherent != kvm->arch.iommu_noncoherent) {
		kvm_iommu_unmap_memslots(kvm);
		kvm->arch.iommu_noncoherent = noncoherent;
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	kvm_arch_start_assignment(kvm);
	pci_set_dev_assigned(pdev);

	dev_info(&pdev->dev, "kvm assign device\n");

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
weidong han              48   28.57%        2       15.38%
sheng yang               45   26.79%        1        7.69%
ben-ami yassour          28   16.67%        1        7.69%
alex williamson          16    9.52%        1        7.69%
joerg roedel              9    5.36%        2       15.38%
paolo bonzini             7    4.17%        2       15.38%
andre richter             5    2.98%        1        7.69%
shuah khan                5    2.98%        1        7.69%
ethan zhao                3    1.79%        1        7.69%
greg rose                 2    1.19%        1        7.69%
Total                   168  100.00%       13      100.00%


int kvm_deassign_device(struct kvm *kvm, struct pci_dev *pdev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	pci_clear_dev_assigned(pdev);
	kvm_arch_end_assignment(kvm);

	dev_info(&pdev->dev, "kvm deassign device\n");

	return 0;
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
weidong han              53   67.95%        2       22.22%
paolo bonzini             7    8.97%        2       22.22%
joerg roedel              6    7.69%        1       11.11%
andre richter             5    6.41%        1       11.11%
ethan zhao                3    3.85%        1       11.11%
greg rose                 2    2.56%        1       11.11%
ben-ami yassour           2    2.56%        1       11.11%
Total                    78  100.00%        9      100.00%


int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_present(&pci_bus_type)) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&kvm->slots_lock);

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain) {
		r = -ENOMEM;
		goto out_unlock;
	}

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support,"
		       " disallowing device assignment."
		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
		       " module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		r = -EPERM;
		goto out_unlock;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		kvm_iommu_unmap_memslots(kvm);

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
alex williamson          74   47.44%        2       22.22%
ben-ami yassour          48   30.77%        1       11.11%
weidong han              17   10.90%        1       11.11%
joerg roedel             16   10.26%        4       44.44%
masanari iida             1    0.64%        1       11.11%
Total                   156  100.00%        9      100.00%


static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	kvm_pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* check if iommu exists and in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		size_t size;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));

		if (!phys) {
			gfn++;
			continue;
		}

		pfn  = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		size        = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
		unmap_pages = 1ULL << get_order(size);

		/* Unpin all pages we just unmapped to not leak any memory */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;

		cond_resched();
	}
}
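As a rough, standalone sketch (not kernel code) of the size-to-page-count conversion above: iommu_unmap() may report that a page larger than 4 KiB was torn down, and "1ULL << get_order(size)" turns that byte count into the number of 4 KiB pages to unpin. The get_order() below is a simplified stand-in for the kernel helper, and the 2 MiB input is hypothetical; PAGE_SHIFT is assumed to be 12.

/*
 * Standalone illustration: converting the byte count returned by
 * iommu_unmap() into a number of 4 KiB pages to unpin, mirroring
 * "unmap_pages = 1ULL << get_order(size)". This get_order() is a
 * simplified userspace stand-in for the kernel helper.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 2UL << 20;  /* hypothetical: a 2 MiB mapping was unmapped */
	unsigned long long unmap_pages = 1ULL << get_order(size);

	/* Prints "512 pages to unpin": 2 MiB / 4 KiB = 512 */
	printf("%llu pages to unpin\n", unmap_pages);
	return 0;
}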

Contributors

Person               Tokens     Prop  Commits  Commit Prop
joerg roedel             62   42.76%        3       37.50%
ben-ami yassour          40   27.59%        1       12.50%
weidong han              23   15.86%        1       12.50%
xiao guangrong           11    7.59%        1       12.50%
ohad ben-cohen            8    5.52%        1       12.50%
dan williams              1    0.69%        1       12.50%
Total                   145  100.00%        8      100.00%


void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
alex williamson          28  100.00%        1      100.00%
Total                    28  100.00%        1      100.00%


static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int idx;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		kvm_iommu_unmap_pages(kvm, memslot);

	srcu_read_unlock(&kvm->srcu, idx);

	if (kvm->arch.iommu_noncoherent)
		kvm_arch_unregister_noncoherent_dma(kvm);

	return 0;
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
sheng yang               21   26.25%        1       12.50%
ben-ami yassour          21   26.25%        1       12.50%
alex williamson          14   17.50%        2       25.00%
xiao guangrong           12   15.00%        1       12.50%
marcelo tosatti          11   13.75%        2       25.00%
lai jiangshan             1    1.25%        1       12.50%
Total                    80  100.00%        8      100.00%


int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	mutex_lock(&kvm->slots_lock);
	kvm_iommu_unmap_memslots(kvm);
	kvm->arch.iommu_domain = NULL;
	kvm->arch.iommu_noncoherent = false;
	mutex_unlock(&kvm->slots_lock);

	iommu_domain_free(domain);
	return 0;
}

Contributors

Person               Tokens     Prop  Commits  Commit Prop
ben-ami yassour          40   53.33%        1       25.00%
alex williamson          32   42.67%        2       50.00%
joerg roedel              3    4.00%        1       25.00%
Total                    75  100.00%        4      100.00%


Overall Contributors

Person               Tokens     Prop  Commits  Commit Prop
ben-ami yassour         355   26.98%        1        2.27%
joerg roedel            278   21.12%        8       18.18%
alex williamson         216   16.41%        6       13.64%
weidong han             147   11.17%        3        6.82%
sheng yang              108    8.21%        2        4.55%
michael s. tsirkin       54    4.10%        1        2.27%
xiao guangrong           37    2.81%        3        6.82%
marcelo tosatti          31    2.36%        3        6.82%
greg edwards             20    1.52%        1        2.27%
paolo bonzini            15    1.14%        2        4.55%
andre richter            10    0.76%        1        2.27%
ohad ben-cohen            8    0.61%        1        2.27%
ethan zhao                6    0.46%        1        2.27%
quentin casasnovas        6    0.46%        1        2.27%
paul gortmaker            6    0.46%        3        6.82%
shuah khan                5    0.38%        1        2.27%
dan williams              5    0.38%        1        2.27%
greg rose                 4    0.30%        1        2.27%
lai jiangshan             2    0.15%        1        2.27%
avi kivity                1    0.08%        1        2.27%
rusty russell             1    0.08%        1        2.27%
masanari iida             1    0.08%        1        2.27%
Total                  1316  100.00%       44      100.00%