cregit-Linux: how code gets into the kernel

Release 4.14 drivers/iommu/intel-iommu.c

/*
 * Copyright © 2006-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>,
 *          Ashok Raj <ashok.raj@intel.com>,
 *          Shaohua Li <shaohua.li@intel.com>,
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
 *          Fenghua Yu <fenghua.yu@intel.com>
 *          Joerg Roedel <jroedel@suse.de>
 */


#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/init.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/crash_dump.h>
#include <asm/irq_remapping.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>

#include "irq_remapping.h"


#define ROOT_SIZE		VTD_PAGE_SIZE

#define CONTEXT_SIZE		VTD_PAGE_SIZE


#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)

#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)

#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)

#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)


#define IOAPIC_RANGE_START	(0xfee00000)

#define IOAPIC_RANGE_END	(0xfeefffff)

#define IOVA_START_ADDR		(0x1000)


#define DEFAULT_DOMAIN_ADDRESS_WIDTH 48


#define MAX_AGAW_WIDTH 64

#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)


#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)

#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)

/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
   to match. That way, we can use 'unsigned long' for PFNs with impunity. */

#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
                                __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))

#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
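
Illustration (not part of the driver): a stand-alone user-space sketch of the arithmetic these macros perform for the default 48-bit guest address width, assuming VTD_PAGE_SHIFT is 12 as in the driver:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12

int main(void)
{
	int gaw = 48;	/* DEFAULT_DOMAIN_ADDRESS_WIDTH */
	uint64_t max_pfn  = (((uint64_t)1) << (gaw - VTD_PAGE_SHIFT)) - 1;
	uint64_t max_addr = max_pfn << VTD_PAGE_SHIFT;

	/* prints 0xfffffffff and 0xfffffffff000 */
	printf("max pfn  = %#llx\n", (unsigned long long)max_pfn);
	printf("max addr = %#llx\n", (unsigned long long)max_addr);
	return 0;
}

On a 64-bit kernel the min_t() clamp in DOMAIN_MAX_PFN() is a no-op; on 32-bit it caps the result at ULONG_MAX so PFNs always fit in an unsigned long.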

/* IO virtual address start page frame number */

#define IOVA_START_PFN		(1)


#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)

#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))

#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

/* page table handling */

#define LEVEL_STRIDE		(9)

#define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)

/*
 * This bitmap is used to advertise the page sizes our hardware supports
 * to the IOMMU core, which will then use this information to split
 * physically contiguous memory regions it is mapping into page sizes
 * that we support.
 *
 * Traditionally the IOMMU core just handed us the mappings directly,
 * after making sure the size is an order of a 4KiB page and that the
 * mapping has natural alignment.
 *
 * To retain this behavior, we currently advertise that we support
 * all page sizes that are an order of 4KiB.
 *
 * If at some point we'd like to utilize the IOMMU core's new behavior,
 * we could change this to advertise the real page sizes we support.
 */

#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
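
Illustration (not part of the driver): each set bit k in this bitmap advertises a 2^k byte page size to the IOMMU core, so ~0xFFFUL covers every power-of-two size from 4KiB upwards. A stand-alone sketch that decodes it:

#include <stdio.h>

#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)

int main(void)
{
	unsigned long pgsizes = INTEL_IOMMU_PGSIZES;
	int k;

	for (k = 0; k < 22; k++)	/* print only the first few sizes */
		if (pgsizes & (1UL << k))
			printf("supported: %lu KiB\n", (1UL << k) >> 10);
	return 0;
}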


static inline int agaw_to_level(int agaw) { return agaw + 2; }

Contributors: Andrew Morton 15 tokens (100.00%), 1 commit (100.00%). Total: 15 tokens, 1 commit.


static inline int agaw_to_width(int agaw) { return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH); }

Contributors: Andrew Morton 17 tokens (70.83%), 1 commit (50.00%); Jiang Liu 7 tokens (29.17%), 1 commit (50.00%). Total: 24 tokens, 2 commits.


static inline int width_to_agaw(int width) { return DIV_ROUND_UP(width - 30, LEVEL_STRIDE); }

Contributors: Andrew Morton 16 tokens (80.00%), 1 commit (50.00%); Jiang Liu 4 tokens (20.00%), 1 commit (50.00%). Total: 20 tokens, 2 commits.


static inline unsigned int level_to_offset_bits(int level) { return (level - 1) * LEVEL_STRIDE; }

Contributors: Andrew Morton 20 tokens (100.00%), 1 commit (100.00%). Total: 20 tokens, 1 commit.


static inline int pfn_level_offset(unsigned long pfn, int level) { return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; }

Contributors: Andrew Morton 26 tokens (100.00%), 1 commit (100.00%). Total: 26 tokens, 1 commit.
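
Illustration (not part of the driver): a stand-alone sketch of how a DMA PFN decomposes into per-level 9-bit table indices, mirroring the two helpers above; the example PFN is arbitrary:

#include <stdio.h>

#define LEVEL_STRIDE	9
#define LEVEL_MASK	(((unsigned long)1 << LEVEL_STRIDE) - 1)

static unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

static int pfn_level_offset(unsigned long pfn, int level)
{
	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}

int main(void)
{
	unsigned long pfn = 0x12345;	/* arbitrary example PFN */
	int level;

	/* a 4-level walk (agaw 2, 48-bit address width) uses levels 4 down to 1 */
	for (level = 4; level >= 1; level--)
		printf("level %d index: %#x\n", level, pfn_level_offset(pfn, level));
	return 0;
}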


static inline unsigned long level_mask(int level) { return -1UL << level_to_offset_bits(level); }

Contributors: Andrew Morton 20 tokens (100.00%), 1 commit (100.00%). Total: 20 tokens, 1 commit.


static inline unsigned long level_size(int level) { return 1UL << level_to_offset_bits(level); }

Contributors: Andrew Morton 19 tokens (100.00%), 1 commit (100.00%). Total: 19 tokens, 1 commit.


static inline unsigned long align_to_level(unsigned long pfn, int level) { return (pfn + level_size(level) - 1) & level_mask(level); }

Contributors: Andrew Morton 32 tokens (100.00%), 1 commit (100.00%). Total: 32 tokens, 1 commit.


static inline unsigned long lvl_to_nr_pages(unsigned int lvl) { return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); }

Contributors: Youquan Song 23 tokens (76.67%), 1 commit (50.00%); Jiang Liu 7 tokens (23.33%), 1 commit (50.00%). Total: 30 tokens, 2 commits.

/* VT-d pages must always be _smaller_ than MM pages. Otherwise things are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn) { return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT); }

Contributors: David Woodhouse 21 tokens (100.00%), 1 commit (100.00%). Total: 21 tokens, 1 commit.


static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn) { return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT); }

Contributors: David Woodhouse 21 tokens (100.00%), 1 commit (100.00%). Total: 21 tokens, 1 commit.
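
Illustration (not part of the driver): a stand-alone sketch of the MM-PFN/DMA-PFN conversion above. A 64KiB MM page size is assumed purely so the two PFN spaces differ; on x86 both shifts are 12 and the conversion is the identity:

#include <stdio.h>

#define PAGE_SHIFT	16	/* assumed 64KiB MM pages, for illustration only */
#define VTD_PAGE_SHIFT	12	/* 4KiB VT-d pages */

static unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	/* one 64KiB MM page spans 16 VT-d pages, so MM page 3 starts at DMA page 48 */
	printf("mm 3   -> dma %lu\n", mm_to_dma_pfn(3));
	printf("dma 48 -> mm %lu\n", dma_to_mm_pfn(48));
	return 0;
}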


static inline unsigned long page_to_dma_pfn(struct page *pg) { return mm_to_dma_pfn(page_to_pfn(pg)); }

Contributors: David Woodhouse 22 tokens (100.00%), 1 commit (100.00%). Total: 22 tokens, 1 commit.


static inline unsigned long virt_to_dma_pfn(void *p) { return page_to_dma_pfn(virt_to_page(p)); }

Contributors: David Woodhouse 21 tokens (100.00%), 1 commit (100.00%). Total: 21 tokens, 1 commit.

/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;

static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

/*
 * set to 1 to panic kernel if can't successfully enable VT-d
 * (used when kernel is launched w/ TXT)
 */
static int force_on = 0;
int intel_iommu_tboot_noforce;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64	lo;
	u64	hi;
};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))

/*
 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
 * if marked present.
 */
static phys_addr_t root_entry_lctp(struct root_entry *re) { if (!(re->lo & 1)) return 0; return re->lo & VTD_PAGE_MASK; }

Contributors: Joerg Roedel 32 tokens (100.00%), 1 commit (100.00%). Total: 32 tokens, 1 commit.

/* * Take a root_entry and return the Upper Context Table Pointer (UCTP) * if marked present. */
static phys_addr_t root_entry_uctp(struct root_entry *re) { if (!(re->hi & 1)) return 0; return re->hi & VTD_PAGE_MASK; }

Contributors: Joerg Roedel 32 tokens (100.00%), 1 commit (100.00%). Total: 32 tokens, 1 commit.
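
Illustration (not part of the driver): a stand-alone sketch of the root-entry accessors; bit 0 is the present bit and bits 12-63 hold the context-table pointer. The entry value is made up:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_MASK	(~((uint64_t)0xfff))

struct root_entry {
	uint64_t lo;
	uint64_t hi;
};

static uint64_t root_entry_lctp(struct root_entry *re)
{
	if (!(re->lo & 1))		/* not present */
		return 0;
	return re->lo & VTD_PAGE_MASK;	/* strip the flag bits, keep the pointer */
}

int main(void)
{
	struct root_entry re = { .lo = 0x123456000ULL | 1, .hi = 0 };

	printf("lctp = %#llx\n", (unsigned long long)root_entry_lctp(&re));	/* 0x123456000 */
	return 0;
}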

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};
static inline void context_clear_pasid_enable(struct context_entry *context) { context->lo &= ~(1ULL << 11); }

Contributors: Joerg Roedel 23 tokens (100.00%), 1 commit (100.00%). Total: 23 tokens, 1 commit.


static inline bool context_pasid_enabled(struct context_entry *context) { return !!(context->lo & (1ULL << 11)); }

Contributors: Joerg Roedel 24 tokens (88.89%), 1 commit (50.00%); Mark McLoughlin 3 tokens (11.11%), 1 commit (50.00%). Total: 27 tokens, 2 commits.


static inline void context_set_copied(struct context_entry *context) { context->hi |= (1ull << 3); }

Contributors: Joerg Roedel 22 tokens (100.00%), 1 commit (100.00%). Total: 22 tokens, 1 commit.


static inline bool context_copied(struct context_entry *context) { return !!(context->hi & (1ULL << 3)); }

Contributors: Joerg Roedel 27 tokens (100.00%), 1 commit (100.00%). Total: 27 tokens, 1 commit.


static inline bool __context_present(struct context_entry *context) { return (context->lo & 1); }

Contributors: Mark McLoughlin 17 tokens (80.95%), 2 commits (66.67%); Joerg Roedel 4 tokens (19.05%), 1 commit (33.33%). Total: 21 tokens, 3 commits.


static inline bool context_present(struct context_entry *context) { return context_pasid_enabled(context) ? __context_present(context) : __context_present(context) && !context_copied(context); }

Contributors: Joerg Roedel 34 tokens (100.00%), 1 commit (100.00%). Total: 34 tokens, 1 commit.


static inline void context_set_present(struct context_entry *context) { context->lo |= 1; }

Contributors: Mark McLoughlin 18 tokens (100.00%), 2 commits (100.00%). Total: 18 tokens, 2 commits.


static inline void context_set_fault_enable(struct context_entry *context) { context->lo &= (((u64)-1) << 2) | 1; }

Contributors: Mark McLoughlin 30 tokens (100.00%), 2 commits (100.00%). Total: 30 tokens, 2 commits.


static inline void context_set_translation_type(struct context_entry *context, unsigned long value) { context->lo &= (((u64)-1) << 4) | 3; context->lo |= (value & 3) << 2; }

Contributors: Mark McLoughlin 46 tokens (100.00%), 1 commit (100.00%). Total: 46 tokens, 1 commit.


static inline void context_set_address_root(struct context_entry *context, unsigned long value) { context->lo &= ~VTD_PAGE_MASK; context->lo |= value & VTD_PAGE_MASK; }

Contributors: Mark McLoughlin 24 tokens (77.42%), 2 commits (66.67%); Li, Zhen-Hua 7 tokens (22.58%), 1 commit (33.33%). Total: 31 tokens, 3 commits.


static inline void context_set_address_width(struct context_entry *context, unsigned long value) { context->hi |= value & 7; }

Contributors: Mark McLoughlin 24 tokens (100.00%), 2 commits (100.00%). Total: 24 tokens, 2 commits.


static inline void context_set_domain_id(struct context_entry *context, unsigned long value) { context->hi |= (value & ((1 << 16) - 1)) << 8; }

Contributors: Mark McLoughlin 36 tokens (100.00%), 2 commits (100.00%). Total: 36 tokens, 2 commits.


static inline int context_domain_id(struct context_entry *c) { return((c->hi >> 8) & 0xffff); }

Contributors: Joerg Roedel 25 tokens (100.00%), 1 commit (100.00%). Total: 25 tokens, 1 commit.
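
Illustration (not part of the driver): a minimal round-trip sketch of the domain-id accessors above; the 16-bit domain id lives in bits 8-23 of the high word, and the value 42 is arbitrary:

#include <stdint.h>
#include <stdio.h>

struct context_entry {
	uint64_t lo;
	uint64_t hi;
};

static void context_set_domain_id(struct context_entry *context, unsigned long value)
{
	context->hi |= (value & ((1 << 16) - 1)) << 8;
}

static int context_domain_id(struct context_entry *c)
{
	return (c->hi >> 8) & 0xffff;
}

int main(void)
{
	struct context_entry ce = { 0, 0 };

	context_set_domain_id(&ce, 42);
	printf("domain id = %d\n", context_domain_id(&ce));	/* prints 42 */
	return 0;
}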


static inline void context_clear_entry(struct context_entry *context) { context->lo = 0; context->hi = 0; }

Contributors: Mark McLoughlin 24 tokens (100.00%), 2 commits (100.00%). Total: 24 tokens, 2 commits.

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};
static inline void dma_clear_pte(struct dma_pte *pte) { pte->val = 0; }

Contributors: Mark McLoughlin 18 tokens (100.00%), 2 commits (100.00%). Total: 18 tokens, 2 commits.


static inline u64 dma_pte_addr(struct dma_pte *pte) { #ifdef CONFIG_64BIT return pte->val & VTD_PAGE_MASK; #else /* Must have a full atomic 64-bit read */ return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; #endif }

Contributors: David Woodhouse 23 tokens (54.76%), 2 commits (50.00%); Mark McLoughlin 19 tokens (45.24%), 2 commits (50.00%). Total: 42 tokens, 4 commits.


static inline bool dma_pte_present(struct dma_pte *pte) { return (pte->val & 3) != 0; }

Contributors: Mark McLoughlin 23 tokens (100.00%), 2 commits (100.00%). Total: 23 tokens, 2 commits.


static inline bool dma_pte_superpage(struct dma_pte *pte) { return (pte->val & DMA_PTE_LARGE_PAGE); }

Contributors: Allen M Kay 20 tokens (95.24%), 1 commit (50.00%); Joerg Roedel 1 token (4.76%), 1 commit (50.00%). Total: 21 tokens, 2 commits.


static inline int first_pte_in_page(struct dma_pte *pte) { return !((unsigned long)pte & ~VTD_PAGE_MASK); }

Contributors: David Woodhouse 25 tokens (100.00%), 1 commit (100.00%). Total: 25 tokens, 1 commit.
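
Illustration (not part of the driver): a stand-alone sketch of the dma_pte helpers; the bit positions mirror the layout comment above (bits 0-1 are read/write, bit 7 marks a superpage) and the PTE value is invented:

#include <stdint.h>
#include <stdio.h>

#define DMA_PTE_READ		(1ULL << 0)
#define DMA_PTE_WRITE		(1ULL << 1)
#define DMA_PTE_LARGE_PAGE	(1ULL << 7)

struct dma_pte {
	uint64_t val;
};

static int dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static int dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE) != 0;
}

int main(void)
{
	/* a readable/writable 2MiB superpage mapping at 0x200000 */
	struct dma_pte pte = { .val = 0x200000ULL | DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_LARGE_PAGE };

	printf("present: %d, superpage: %d\n", dma_pte_present(&pte), dma_pte_superpage(&pte));
	return 0;
}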

/*
 * This domain is a static identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
static struct dmar_domain *si_domain;
static int hw_pass_through = 1;

/*
 * Domain represents a virtual machine; more than one device
 * across iommus may be owned in one domain, e.g. kvm guest.
 */
#define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)

/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY	(1 << 1)

#define for_each_domain_iommu(idx, domain)			\
	for (idx = 0; idx < g_num_of_iommus; idx++)		\
		if (domain->iommu_refcnt[idx])

struct dmar_domain {
	int	nid;			/* node id */

	unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
					/* Refcount of devices per iommu */
	u16	iommu_did[DMAR_UNITS_SUPPORTED];
					/* Domain ids per IOMMU. Use u16 since
					 * domain ids are 16 bit wide according
					 * to VT-d spec, section 9.3 */
	bool has_iotlb_device;
	struct list_head devices;	/* all devices' list */
	struct iova_domain iovad;	/* iova's that belong to this domain */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;

	int		flags;		/* flags to find out type of domain */

	int		iommu_coherency;/* indicate coherency of iommu access */
	int		iommu_snooping; /* indicate snooping control feature */
	int		iommu_count;	/* reference count of iommu */
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	struct list_head global; /* link to global list */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep;
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
};

struct dmar_rmrr_unit {
	struct list_head list;		/* list of rmrr units	*/
	struct acpi_dmar_header *hdr;	/* ACPI header		*/
	u64	base_address;		/* reserved base address*/
	u64	end_address;		/* reserved end address */
	struct dmar_dev_scope *devices;	/* target devices */
	int	devices_cnt;		/* target device count */
	struct iommu_resv_region *resv;	/* reserved region handle */
};

struct dmar_atsr_unit {
	struct list_head list;		/* list of ATSR units */
	struct acpi_dmar_header *hdr;	/* ACPI header */
	struct dmar_dev_scope *devices;	/* target devices */
	int devices_cnt;		/* target device count */
	u8 include_all:1;		/* include all ports */
};

static LIST_HEAD(dmar_atsr_units);
static LIST_HEAD(dmar_rmrr_units);

#define for_each_rmrr_units(rmrr) \
	list_for_each_entry(rmrr, &dmar_rmrr_units, list)

/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;

static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
				     struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
static void domain_context_clear(struct intel_iommu *iommu,
				 struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
			       struct intel_iommu *iommu);

#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
int dmar_disabled = 0;
#else
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/

int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);

static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
static int intel_iommu_pasid28;
static int iommu_identity_mapping;

#define IDENTMAP_ALL		1
#define IDENTMAP_GFX		2
#define IDENTMAP_AZALIA		4

/* Broadwell and Skylake have broken ECS support: normal so-called "second
 * level" translation of DMA requests-without-PASID doesn't actually happen
 * unless you also set the NESTE bit in an extended context-entry. Which of
 * course means that SVM doesn't work because it's trying to do nested
 * translation of the physical addresses it finds in the process page tables,
 * through the IOVA->phys mapping found in the "second level" page tables.
 *
 * The VT-d specification was retroactively changed to change the definition
 * of the capability bits and pretend that Broadwell/Skylake never happened...
 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
 * for some reason it was the PASID capability bit which was redefined (from
 * bit 28 on BDW/SKL to bit 40 in future).
 *
 * So our test for ECS needs to eschew those implementations which set the old
 * PASID capability bit 28, since those are the ones on which ECS is broken.
 * Unless we are working around the 'pasid28' limitations, that is, by putting
 * the device into passthrough mode for normal DMA and thus masking the bug.
 */
#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
/* PASID support is thus enabled if ECS is enabled and *either* of the old
 * or new capability bits are set. */
#define pasid_enabled(iommu)	(ecs_enabled(iommu) &&			\
				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))

int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);

#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);

const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu) { return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); }

Contributors: Joerg Roedel 20 tokens (100.00%), 1 commit (100.00%). Total: 20 tokens, 1 commit.


static void clear_translation_pre_enabled(struct intel_iommu *iommu) { iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; }

Contributors: Joerg Roedel 18 tokens (100.00%), 1 commit (100.00%). Total: 18 tokens, 1 commit.


static void init_translation_status(struct intel_iommu *iommu) { u32 gsts; gsts = readl(iommu->reg + DMAR_GSTS_REG); if (gsts & DMA_GSTS_TES) iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; }

Contributors: Joerg Roedel 37 tokens (100.00%), 1 commit (100.00%). Total: 37 tokens, 1 commit.

/* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) { return container_of(dom, struct dmar_domain, domain); }

Contributors: Joerg Roedel 24 tokens (100.00%), 1 commit (100.00%). Total: 24 tokens, 1 commit.


static int __init intel_iommu_setup(char *str) { if (!str) return -EINVAL; while (*str) { if (!strncmp(str, "on", 2)) { dmar_disabled = 0; pr_info("IOMMU enabled\n"); } else if (!strncmp(str, "off", 3)) { dmar_disabled = 1; pr_info("IOMMU disabled\n"); } else if (!strncmp(str, "igfx_off", 8)) { dmar_map_gfx = 0; pr_info("Disable GFX device mapping\n"); } else if (!strncmp(str, "forcedac", 8)) { pr_info("Forcing DAC for PCI devices\n"); dmar_forcedac = 1; } else if (!strncmp(str, "strict", 6)) { pr_info("Disable batched IOTLB flush\n"); intel_iommu_strict = 1; } else if (!strncmp(str, "sp_off", 6)) { pr_info("Disable supported super page\n"); intel_iommu_superpage = 0; } else if (!strncmp(str, "ecs_off", 7)) { printk(KERN_INFO "Intel-IOMMU: disable extended context table support\n"); intel_iommu_ecs = 0; } else if (!strncmp(str, "pasid28", 7)) { printk(KERN_INFO "Intel-IOMMU: enable pre-production PASID support\n"); intel_iommu_pasid28 = 1; iommu_identity_mapping |= IDENTMAP_GFX; } else if (!strncmp(str, "tboot_noforce", 13)) { printk(KERN_INFO "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); intel_iommu_tboot_noforce = 1; } str += strcspn(str, ","); while (*str == ',') str++; } return 0; }

Contributors: Anil S Keshavamurthy 114 tokens (42.07%), 2 commits (22.22%); David Woodhouse 54 tokens (19.93%), 2 commits (22.22%); Shaohua Li 25 tokens (9.23%), 1 commit (11.11%); Kyle McMartin 22 tokens (8.12%), 1 commit (11.11%); Mark Gross 22 tokens (8.12%), 1 commit (11.11%); Youquan Song 22 tokens (8.12%), 1 commit (11.11%); Joerg Roedel 12 tokens (4.43%), 1 commit (11.11%). Total: 271 tokens, 9 commits.

__setup("intel_iommu=", intel_iommu_setup); static struct kmem_cache *iommu_domain_cache; static struct kmem_cache *iommu_devinfo_cache;
static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did) { struct dmar_domain **domains; int idx = did >> 8; domains = iommu->domains[idx]; if (!domains) return NULL; return domains[did & 0xff]; }

Contributors: Joerg Roedel 54 tokens (100.00%), 2 commits (100.00%). Total: 54 tokens, 2 commits.


static void set_iommu_domain(struct intel_iommu *iommu, u16 did, struct dmar_domain *domain) { struct dmar_domain **domains; int idx = did >> 8; if (!iommu->domains[idx]) { size_t size = 256 * sizeof(struct dmar_domain *); iommu->domains[idx] = kzalloc(size, GFP_ATOMIC); } domains = iommu->domains[idx]; if (WARN_ON(!domains)) return; else domains[did & 0xff] = domain; }

Contributors: Joerg Roedel 98 tokens (100.00%), 2 commits (100.00%). Total: 98 tokens, 2 commits.


static inline void *alloc_pgtable_page(int node) { struct page *page; void *vaddr = NULL; page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0); if (page) vaddr = page_address(page); return vaddr; }

Contributors: Anil S Keshavamurthy 28 tokens (57.14%), 2 commits (66.67%); Suresh B. Siddha 21 tokens (42.86%), 1 commit (33.33%). Total: 49 tokens, 3 commits.


static inline void free_pgtable_page(void *vaddr) { free_page((unsigned long)vaddr); }

Contributors: Anil S Keshavamurthy 20 tokens (100.00%), 1 commit (100.00%). Total: 20 tokens, 1 commit.


static inline void *alloc_domain_mem(void) { return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC); }

Contributors: Anil S Keshavamurthy 15 tokens (83.33%), 1 commit (50.00%); Motohiro Kosaki 3 tokens (16.67%), 1 commit (50.00%). Total: 18 tokens, 2 commits.


static void free_domain_mem(void *vaddr) { kmem_cache_free(iommu_domain_cache, vaddr); }

Contributors: Anil S Keshavamurthy 17 tokens (100.00%), 1 commit (100.00%). Total: 17 tokens, 1 commit.


static inline void * alloc_devinfo_mem(void) { return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC); }

Contributors: Anil S Keshavamurthy 15 tokens (83.33%), 1 commit (50.00%); Motohiro Kosaki 3 tokens (16.67%), 1 commit (50.00%). Total: 18 tokens, 2 commits.


static inline void free_devinfo_mem(void *vaddr) { kmem_cache_free(iommu_devinfo_cache, vaddr); }

Contributors: Anil S Keshavamurthy 18 tokens (100.00%), 1 commit (100.00%). Total: 18 tokens, 1 commit.


static inline int domain_type_is_vm(struct dmar_domain *domain) { return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; }

Contributors: Jiang Liu 19 tokens (100.00%), 1 commit (100.00%). Total: 19 tokens, 1 commit.


static inline int domain_type_is_si(struct dmar_domain *domain) { return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY; }

Contributors: Joerg Roedel 19 tokens (100.00%), 1 commit (100.00%). Total: 19 tokens, 1 commit.


static inline int domain_type_is_vm_or_si(struct dmar_domain *domain) { return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE | DOMAIN_FLAG_STATIC_IDENTITY); }

Contributors: Jiang Liu 23 tokens (100.00%), 1 commit (100.00%). Total: 23 tokens, 1 commit.


static inline int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; return !(addr_width < BITS_PER_LONG && pfn >> addr_width); }

Contributors: Jiang Liu 40 tokens (100.00%), 1 commit (100.00%). Total: 40 tokens, 1 commit.


static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) { unsigned long sagaw; int agaw = -1; sagaw = cap_sagaw(iommu->cap); for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { if (test_bit(agaw, &sagaw)) break; } return agaw; }

Contributors: Weidong Han 59 tokens (90.77%), 1 commit (50.00%); Fenghua Yu 6 tokens (9.23%), 1 commit (50.00%). Total: 65 tokens, 2 commits.

/* * Calculate max SAGAW for each iommu. */
int iommu_calculate_max_sagaw(struct intel_iommu *iommu) { return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH); }

Contributors: Fenghua Yu 18 tokens (100.00%), 1 commit (100.00%). Total: 18 tokens, 1 commit.

/* * calculate agaw for each iommu. * "SAGAW" may be different across iommus, use a default agaw, and * get a supported less agaw for iommus that don't support the default agaw. */
int iommu_calculate_agaw(struct intel_iommu *iommu) { return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); }

Contributors: Fenghua Yu 18 tokens (100.00%), 1 commit (100.00%). Total: 18 tokens, 1 commit.
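
Illustration (not part of the driver): a worked stand-alone example of the AGAW selection implemented by __iommu_calculate_agaw(); SAGAW is a bitmap of supported adjusted guest address widths and the driver picks the largest one not exceeding the requested width. The SAGAW value 0x4 (only 4-level, 48-bit tables) is an assumed hardware report, and calculate_agaw() is an illustrative stand-in for the kernel helper:

#include <stdio.h>

#define LEVEL_STRIDE	9

static int width_to_agaw(int width)
{
	return (width - 30 + LEVEL_STRIDE - 1) / LEVEL_STRIDE;	/* DIV_ROUND_UP */
}

static int calculate_agaw(unsigned long sagaw, int max_gaw)
{
	int agaw;

	for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--)
		if (sagaw & (1UL << agaw))
			break;
	return agaw;
}

int main(void)
{
	/* a 48-bit request against hardware that only supports agaw 2 picks agaw 2 */
	printf("agaw = %d\n", calculate_agaw(0x4, 48));
	return 0;
}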

/* This function only returns a single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) { int iommu_id; /* si_domain and vm domain should not get here. */ BUG_ON(domain_type_is_vm_or_si(domain)); for_each_domain_iommu(iommu_id, domain) break; if (iommu_id < 0 || iommu_id >= g_num_of_iommus) return NULL; return g_iommus[iommu_id]; }

Contributors: Weidong Han 40 tokens (78.43%), 2 commits (40.00%); Fenghua Yu 5 tokens (9.80%), 1 commit (20.00%); Joerg Roedel 4 tokens (7.84%), 1 commit (20.00%); Jiang Liu 2 tokens (3.92%), 1 commit (20.00%). Total: 51 tokens, 5 commits.


static void domain_update_iommu_coherency(struct dmar_domain *domain) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; bool found = false; int i; domain->iommu_coherency = 1; for_each_domain_iommu(i, domain) { found = true; if (!ecap_coherent(g_iommus[i]->ecap)) { domain->iommu_coherency = 0; break; } } if (found) return; /* No hardware attached; use lowest common denominator */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) { if (!ecap_coherent(iommu->ecap)) { domain->iommu_coherency = 0; break; } } rcu_read_unlock(); }

Contributors: David Woodhouse 54 tokens (50.00%), 1 commit (20.00%); Weidong Han 45 tokens (41.67%), 1 commit (20.00%); Quentin Lambert 6 tokens (5.56%), 1 commit (20.00%); Joerg Roedel 2 tokens (1.85%), 1 commit (20.00%); Akinobu Mita 1 token (0.93%), 1 commit (20.00%). Total: 108 tokens, 5 commits.


static int domain_update_iommu_snooping(struct intel_iommu *skip) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; int ret = 1; rcu_read_lock(); for_each_active_iommu(iommu, drhd) { if (iommu != skip) { if (!ecap_sc_support(iommu->ecap)) { ret = 0; break; } } } rcu_read_unlock(); return ret; }

Contributors: Jiang Liu 37 tokens (54.41%), 1 commit (33.33%); Sheng Yang 30 tokens (44.12%), 1 commit (33.33%); Akinobu Mita 1 token (1.47%), 1 commit (33.33%). Total: 68 tokens, 3 commits.


static int domain_update_iommu_superpage(struct intel_iommu *skip) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; int mask = 0xf; if (!intel_iommu_superpage) { return 0; } /* set iommu_superpage to the smallest common denominator */ rcu_read_lock(); for_each_active_iommu(iommu, drhd) { if (iommu != skip) { mask &= cap_super_page_val(iommu->cap); if (!mask) break; } } rcu_read_unlock(); return fls(mask); }

Contributors: Youquan Song 44 tokens (55.00%), 1 commit (25.00%); Jiang Liu 20 tokens (25.00%), 2 commits (50.00%); Allen M Kay 16 tokens (20.00%), 1 commit (25.00%). Total: 80 tokens, 4 commits.

/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain) { domain_update_iommu_coherency(domain); domain->iommu_snooping = domain_update_iommu_snooping(NULL); domain->iommu_superpage = domain_update_iommu_superpage(NULL); }

Contributors: Sheng Yang 20 tokens (58.82%), 1 commit (33.33%); Jiang Liu 10 tokens (29.41%), 1 commit (33.33%); Youquan Song 4 tokens (11.76%), 1 commit (33.33%). Total: 34 tokens, 3 commits.


static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc) { struct root_entry *root = &iommu->root_entry[bus]; struct context_entry *context; u64 *entry; entry = &root->lo; if (ecs_enabled(iommu)) { if (devfn >= 0x80) { devfn -= 0x80; entry = &root->hi; } devfn *= 2; } if (*entry & 1) context = phys_to_virt(*entry & VTD_PAGE_MASK); else { unsigned long phy_addr; if (!alloc) return NULL; context = alloc_pgtable_page(iommu->node); if (!context) return NULL; __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); phy_addr = virt_to_phys((void *)context); *entry = phy_addr | 1; __iommu_flush_cache(iommu, entry, sizeof(*entry)); } return &context[devfn]; }

Contributors: David Woodhouse 153 tokens (83.15%), 4 commits (50.00%); Weidong Han 18 tokens (9.78%), 1 commit (12.50%); Joerg Roedel 7 tokens (3.80%), 1 commit (12.50%); Jiang Liu 6 tokens (3.26%), 2 commits (25.00%). Total: 184 tokens, 8 commits.


static int iommu_dummy(struct device *dev) { return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; }

Contributors: David Woodhouse 20 tokens (100.00%), 1 commit (100.00%). Total: 20 tokens, 1 commit.


static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; struct intel_iommu *iommu; struct device *tmp; struct pci_dev *ptmp, *pdev = NULL; u16 segment = 0; int i; if (iommu_dummy(dev)) return NULL; if (dev_is_pci(dev)) { struct pci_dev *pf_pdev; pdev = to_pci_dev(dev); #ifdef CONFIG_X86 /* VMD child devices currently cannot be handled individually */ if (is_vmd(pdev->bus)) return NULL; #endif /* VFs aren't listed in scope tables; we need to look up * the PF instead to find the IOMMU. */ pf_pdev = pci_physfn(pdev); dev = &pf_pdev->dev; segment = pci_domain_nr(pdev->bus); } else if (has_acpi_companion(dev)) dev = &ACPI_COMPANION(dev)->dev; rcu_read_lock(); for_each_active_iommu(iommu, drhd) { if (pdev && segment != drhd->segment) continue; for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, tmp) { if (tmp == dev) { /* For a VF use its original BDF# not that of the PF * which we used for the IOMMU lookup. Strictly speaking * we could do this for all PCI devices; we only need to * get the BDF# from the scope table for ACPI matches. */ if (pdev && pdev->is_virtfn) goto got_pdev; *bus = drhd->devices[i].bus; *devfn = drhd->devices[i].devfn; goto out; } if (!pdev || !dev_is_pci(tmp)) continue; ptmp = to_pci_dev(tmp); if (ptmp->subordinate && ptmp->subordinate->number <= pdev->bus->number && ptmp->subordinate->busn_res.end >= pdev->bus->number) goto got_pdev; } if (pdev && drhd->include_all) { got_pdev: *bus = pdev->bus->number; *devfn = pdev->devfn; goto out; } } iommu = NULL; out: rcu_read_unlock(); return iommu; }

Contributors: David Woodhouse 193 tokens (59.38%), 6 commits (37.50%); Weidong Han 40 tokens (12.31%), 1 commit (6.25%); Ashok Raj 30 tokens (9.23%), 1 commit (6.25%); Jiang Liu 28 tokens (8.62%), 2 commits (12.50%); Jon Derrick 18 tokens (5.54%), 1 commit (6.25%); Anil S Keshavamurthy 8 tokens (2.46%), 1 commit (6.25%); Yinghai Lu 3 tokens (0.92%), 1 commit (6.25%); Koos Vriezen 2 tokens (0.62%), 1 commit (6.25%); Yijing Wang 2 tokens (0.62%), 1 commit (6.25%); Linus Torvalds 1 token (0.31%), 1 commit (6.25%). Total: 325 tokens, 16 commits.


static void domain_flush_cache(struct dmar_domain *domain, void *addr, int size) { if (!domain->iommu_coherency) clflush_cache_range(addr, size); }

Contributors: David Woodhouse 27 tokens (84.38%), 1 commit (50.00%); Anil S Keshavamurthy 5 tokens (15.62%), 1 commit (50.00%). Total: 32 tokens, 2 commits.


static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn) { struct context_entry *context; int ret = 0; unsigned long flags; spin_lock_irqsave(&iommu->lock, flags); context = iommu_context_addr(iommu, bus, devfn, 0); if (context) ret = context_present(context); spin_unlock_irqrestore(&iommu->lock, flags); return ret; }

Contributors: Anil S Keshavamurthy 68 tokens (87.18%), 1 commit (50.00%); David Woodhouse 10 tokens (12.82%), 1 commit (50.00%). Total: 78 tokens, 2 commits.


static void free_context_table(struct intel_iommu *iommu) { int i; unsigned long flags; struct context_entry *context; spin_lock_irqsave(&iommu->lock, flags); if (!iommu->root_entry) { goto out; } for (i = 0; i < ROOT_ENTRY_NR; i++) { context = iommu_context_addr(iommu, i, 0, 0); if (context) free_pgtable_page(context); if (!ecs_enabled(iommu)) continue; context = iommu_context_addr(iommu, i, 0x80, 0); if (context) free_pgtable_page(context); } free_pgtable_page(iommu->root_entry); iommu->root_entry = NULL; out: spin_unlock_irqrestore(&iommu->lock, flags); }

Contributors: Anil S Keshavamurthy 103 tokens (74.64%), 1 commit (33.33%); David Woodhouse 35 tokens (25.36%), 2 commits (66.67%). Total: 138 tokens, 3 commits.


static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, unsigned long pfn, int *target_level) { struct dma_pte *parent, *pte = NULL; int level = agaw_to_level(domain->agaw); int offset; BUG_ON(!domain->pgd); if (!domain_pfn_supported(domain, pfn)) /* Address beyond IOMMU's addressing capabilities. */ return NULL; parent = domain->pgd; while (1) { void *tmp_page; offset = pfn_level_offset(pfn, level); pte = &parent[offset]; if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) break; if (level == *target_level) break; if (!dma_pte_present(pte)) { uint64_t pteval; tmp_page = alloc_pgtable_page(domain->nid); if (!tmp_page) return NULL; domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; if (cmpxchg64(&pte->val, 0ULL, pteval)) /* Someone else set it while we were thinking; use theirs. */ free_pgtable_page(tmp_page); else domain_flush_cache(domain, pte, sizeof(*pte)); } if (level == 1) break; parent = phys_to_virt(dma_pte_addr(pte)); level--; } if (!*target_level) *target_level = level; return pte; }

Contributors: Anil S Keshavamurthy 143 tokens (56.75%), 1 commit (7.69%); David Woodhouse 61 tokens (24.21%), 5 commits (38.46%); Youquan Song 12 tokens (4.76%), 1 commit (7.69%); Allen M Kay 11 tokens (4.37%), 1 commit (7.69%); Julian Stecklina 7 tokens (2.78%), 1 commit (7.69%); Jiang Liu 6 tokens (2.38%), 1 commit (7.69%); Suresh B. Siddha 5 tokens (1.98%), 1 commit (7.69%); Weidong Han 4 tokens (1.59%), 1 commit (7.69%); Benjamin LaHaise 3 tokens (1.19%), 1 commit (7.69%). Total: 252 tokens, 13 commits.

/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, unsigned long pfn, int level, int *large_page) { struct dma_pte *parent, *pte = NULL; int total = agaw_to_level(domain->agaw); int offset; parent = domain->pgd; while (level <= total) { offset = pfn_level_offset(pfn, total); pte = &parent[offset]; if (level == total) return pte; if (!dma_pte_present(pte)) { *large_page = total; break; } if (dma_pte_superpage(pte)) { *large_page = total; return pte; } parent = phys_to_virt(dma_pte_addr(pte)); total--; } return NULL; }

Contributors: Anil S Keshavamurthy 102 tokens (75.00%), 1 commit (20.00%); Youquan Song 25 tokens (18.38%), 1 commit (20.00%); David Woodhouse 6 tokens (4.41%), 2 commits (40.00%); Yijing Wang 3 tokens (2.21%), 1 commit (20.00%). Total: 136 tokens, 5 commits.

/* clear last level pte, a tlb flush should be followed */
static void dma_pte_clear_range(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { unsigned int large_page = 1; struct dma_pte *first_pte, *pte; BUG_ON(!domain_pfn_supported(domain, start_pfn)); BUG_ON(!domain_pfn_supported(domain, last_pfn)); BUG_ON(start_pfn > last_pfn); /* we don't need lock here; nobody else touches the iova range */ do { large_page = 1; first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); if (!pte) { start_pfn = align_to_level(start_pfn + 1, large_page + 1); continue; } do { dma_clear_pte(pte); start_pfn += lvl_to_nr_pages(large_page); pte++; } while (start_pfn <= last_pfn && !first_pte_in_page(pte)); domain_flush_cache(domain, first_pte, (void *)pte - (void *)first_pte); } while (start_pfn && start_pfn <= last_pfn); }

Contributors: David Woodhouse 114 tokens (68.67%), 7 commits (70.00%); Youquan Song 21 tokens (12.65%), 1 commit (10.00%); Anil S Keshavamurthy 19 tokens (11.45%), 1 commit (10.00%); Jiang Liu 12 tokens (7.23%), 1 commit (10.00%). Total: 166 tokens, 10 commits.


static void dma_pte_free_level(struct dmar_domain *domain, int level, int retain_level, struct dma_pte *pte, unsigned long pfn, unsigned long start_pfn, unsigned long last_pfn) { pfn = max(start_pfn, pfn); pte = &pte[pfn_level_offset(pfn, level)]; do { unsigned long level_pfn; struct dma_pte *level_pte; if (!dma_pte_present(pte) || dma_pte_superpage(pte)) goto next; level_pfn = pfn & level_mask(level); level_pte = phys_to_virt(dma_pte_addr(pte)); if (level > 2) { dma_pte_free_level(domain, level - 1, retain_level, level_pte, level_pfn, start_pfn, last_pfn); } /* * Free the page table if we're below the level we want to * retain and the range covers the entire table. */ if (level < retain_level && !(start_pfn > level_pfn || last_pfn < level_pfn + level_size(level) - 1)) { dma_clear_pte(pte); domain_flush_cache(domain, pte, sizeof(*pte)); free_pgtable_page(level_pte); } next: pfn += level_size(level); } while (!first_pte_in_page(++pte) && pfn <= last_pfn); }

Contributors: David Woodhouse 141 tokens (69.46%), 3 commits (50.00%); Alex Williamson 33 tokens (16.26%), 1 commit (16.67%); Anil S Keshavamurthy 17 tokens (8.37%), 1 commit (16.67%); Dave Dillow 12 tokens (5.91%), 1 commit (16.67%). Total: 203 tokens, 6 commits.

/* * clear last level (leaf) ptes and free page table pages below the * level we wish to keep intact. */
static void dma_pte_free_pagetable(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn, int retain_level) { BUG_ON(!domain_pfn_supported(domain, start_pfn)); BUG_ON(!domain_pfn_supported(domain, last_pfn)); BUG_ON(start_pfn > last_pfn); dma_pte_clear_range(domain, start_pfn, last_pfn); /* We don't need lock here; nobody else touches the iova range */ dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, domain->pgd, 0, start_pfn, last_pfn); /* free pgd */ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { free_pgtable_page(domain->pgd); domain->pgd = NULL; } }

Contributors: David Woodhouse 90 tokens (77.59%), 1 commit (25.00%); Jiang Liu 21 tokens (18.10%), 2 commits (50.00%); Dave Dillow 5 tokens (4.31%), 1 commit (25.00%). Total: 116 tokens, 4 commits.

/* When a page at a given level is being unlinked from its parent, we don't need to *modify* it at all. All we need to do is make a list of all the pages which can be freed just as soon as we've flushed the IOTLB and we know the hardware page-walk will no longer touch them. The 'pte' argument is the *parent* PTE, pointing to the page that is to be freed. */
static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, int level, struct dma_pte *pte, struct page *freelist) { struct page *pg; pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT); pg->freelist = freelist; freelist = pg; if (level == 1) return freelist; pte = page_address(pg); do { if (dma_pte_present(pte) && !dma_pte_superpage(pte)) freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); pte++; } while (!first_pte_in_page(pte)); return freelist; }

Contributors: David Woodhouse 101 tokens (87.83%), 1 commit (50.00%); Jiang Liu 14 tokens (12.17%), 1 commit (50.00%). Total: 115 tokens, 2 commits.


static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, struct dma_pte *pte, unsigned long pfn, unsigned long start_pfn, unsigned long last_pfn, struct page *freelist) { struct dma_pte *first_pte = NULL, *last_pte = NULL; pfn = max(start_pfn, pfn); pte = &pte[pfn_level_offset(pfn, level)]; do { unsigned long level_pfn; if (!dma_pte_present(pte)) goto next; level_pfn = pfn & level_mask(level); /* If range covers entire pagetable, free it */ if (start_pfn <= level_pfn && last_pfn >= level_pfn + level_size(level) - 1) { /* These subordinate page tables are going away entirely. Don't bother to clear them; we're just going to *free* them. */ if (level > 1 && !dma_pte_superpage(pte)) freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); dma_clear_pte(pte); if (!first_pte) first_pte = pte; last_pte = pte; } else if (level > 1) { /* Recurse down into a level that isn't *entirely* obsolete */ freelist = dma_pte_clear_level(domain, level - 1, phys_to_virt(dma_pte_addr(pte)), level_pfn, start_pfn, last_pfn, freelist); } next: pfn += level_size(level); } while (!first_pte_in_page(++pte) && pfn <= last_pfn); if (first_pte) domain_flush_cache(domain, first_pte, (void *)++last_pte - (void *)first_pte); return freelist; }

Contributors: David Woodhouse 191 tokens (75.79%), 6 commits (60.00%); Alex Williamson 31 tokens (12.30%), 2 commits (20.00%); Anil S Keshavamurthy 29 tokens (11.51%), 1 commit (10.00%); Youquan Song 1 token (0.40%), 1 commit (10.00%). Total: 252 tokens, 10 commits.

/* We can't just free the pages because the IOMMU may still be walking the page tables, and may have cached the intermediate levels. The pages can only be freed after the IOTLB flush has been done. */
static struct page *domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { struct page *freelist = NULL; BUG_ON(!domain_pfn_supported(domain, start_pfn)); BUG_ON(!domain_pfn_supported(domain, last_pfn)); BUG_ON(start_pfn > last_pfn); /* we don't need lock here; nobody else touches the iova range */ freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), domain->pgd, 0, start_pfn, last_pfn, NULL); /* free pgd */ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { struct page *pgd_page = virt_to_page(domain->pgd); pgd_page->freelist = freelist; freelist = pgd_page; domain->pgd = NULL; } return freelist; }

Contributors: David Woodhouse 51 tokens (38.35%), 5 commits (55.56%); Alex Williamson 47 tokens (35.34%), 1 commit (11.11%); Anil S Keshavamurthy 22 tokens (16.54%), 1 commit (11.11%); Jiang Liu 12 tokens (9.02%), 1 commit (11.11%); Joerg Roedel 1 token (0.75%), 1 commit (11.11%). Total: 133 tokens, 9 commits.


static void dma_free_pagelist(struct page *freelist) { struct page *pg; while ((pg = freelist)) { freelist = pg->freelist; free_pgtable_page(page_address(pg)); } }

Contributors: David Woodhouse 38 tokens (95.00%), 1 commit (33.33%); Anil S Keshavamurthy 1 token (2.50%), 1 commit (33.33%); Joerg Roedel 1 token (2.50%), 1 commit (33.33%). Total: 40 tokens, 3 commits.


static void iova_entry_free(unsigned long data) { struct page *freelist = (struct page *)data; dma_free_pagelist(freelist); }

Contributors: Joerg Roedel 27 tokens (100.00%), 1 commit (100.00%). Total: 27 tokens, 1 commit.

/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu) { struct root_entry *root; unsigned long flags; root = (struct root_entry *)alloc_pgtable_page(iommu->node); if (!root) { pr_err("Allocating root entry for %s failed\n", iommu->name); return -ENOMEM; } __iommu_flush_cache(iommu, root, ROOT_SIZE); spin_lock_irqsave(&iommu->lock, flags); iommu->root_entry = root; spin_unlock_irqrestore(&iommu->lock, flags); return 0; }

Contributors: Anil S Keshavamurthy 75 tokens (81.52%), 1 commit (20.00%); Jiang Liu 10 tokens (10.87%), 1 commit (20.00%); Suresh B. Siddha 5 tokens (5.43%), 1 commit (20.00%); Joerg Roedel 1 token (1.09%), 1 commit (20.00%); Fenghua Yu 1 token (1.09%), 1 commit (20.00%). Total: 92 tokens, 5 commits.


static void iommu_set_root_entry(struct intel_iommu *iommu) { u64 addr; u32 sts; unsigned long flag; addr = virt_to_phys(iommu->root_entry); if (ecs_enabled(iommu)) addr |= DMA_RTADDR_RTT; raw_spin_lock_irqsave(&iommu->register_lock, flag); dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_RTPS), sts); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); }

Contributors: Anil S Keshavamurthy 86 tokens (81.90%), 1 commit (20.00%); David Woodhouse 17 tokens (16.19%), 3 commits (60.00%); Thomas Gleixner 2 tokens (1.90%), 1 commit (20.00%). Total: 105 tokens, 5 commits.


static void iommu_flush_write_buffer(struct intel_iommu *iommu) { u32 val; unsigned long flag; if (!rwbf_quirk && !cap_rwbf(iommu->cap)) return; raw_spin_lock_irqsave(&iommu->register_lock, flag); writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (!(val & DMA_GSTS_WBFS)), val); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); }

Contributors: Anil S Keshavamurthy 78 tokens (88.64%), 1 commit (25.00%); David Woodhouse 8 tokens (9.09%), 2 commits (50.00%); Thomas Gleixner 2 tokens (2.27%), 1 commit (25.00%). Total: 88 tokens, 4 commits.

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_context(struct intel_iommu *iommu, u16 did, u16 source_id, u8 function_mask, u64 type) { u64 val = 0; unsigned long flag; switch (type) { case DMA_CCMD_GLOBAL_INVL: val = DMA_CCMD_GLOBAL_INVL; break; case DMA_CCMD_DOMAIN_INVL: val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did); break; case DMA_CCMD_DEVICE_INVL: val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did) | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask); break; default: BUG(); } val |= DMA_CCMD_ICC; raw_spin_lock_irqsave(&iommu->register_lock, flag); dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq, (!(val & DMA_CCMD_ICC)), val); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); }

Contributors: Anil S Keshavamurthy 139 tokens (97.89%), 1 commit (33.33%); Thomas Gleixner 2 tokens (1.41%), 1 commit (33.33%); David Woodhouse 1 token (0.70%), 1 commit (33.33%). Total: 142 tokens, 3 commits.

/* return value determines whether we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type) { int tlb_offset = ecap_iotlb_offset(iommu->ecap); u64 val = 0, val_iva = 0; unsigned long flag; switch (type) { case DMA_TLB_GLOBAL_FLUSH: /* global flush doesn't need set IVA_REG */ val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT; break; case DMA_TLB_DSI_FLUSH: val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); break; case DMA_TLB_PSI_FLUSH: val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); /* IH bit is passed in as part of address */ val_iva = size_order | addr; break; default: BUG(); } /* Note: set drain read/write */ #if 0 /* * This is probably to be super secure.. Looks like we can * ignore it without any impact. */ if (cap_read_drain(iommu->cap)) val |= DMA_TLB_READ_DRAIN; #endif if (cap_write_drain(iommu->cap)) val |= DMA_TLB_WRITE_DRAIN; raw_spin_lock_irqsave(&iommu->register_lock, flag); /* Note: Only uses first TLB reg currently */ if (val_iva) dmar_writeq(iommu->reg + tlb_offset, val_iva); dmar_writeq(iommu->reg + tlb_offset + 8, val); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, tlb_offset + 8, dmar_readq, (!(val & DMA_TLB_IVT)), val); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); /* check IOTLB invalidation granularity */ if (DMA_TLB_IAIG(val) == 0) pr_err("Flush IOTLB failed\n"); if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) pr_debug("TLB flush request %Lx, actual %Lx\n", (unsigned long long)DMA_TLB_IIRG(type), (unsigned long long)DMA_TLB_IAIG(val)); }

Contributors: Anil S Keshavamurthy 233 tokens (93.20%), 1 commit (16.67%); Fenghua Yu 10 tokens (4.00%), 1 commit (16.67%); Joerg Roedel 3 tokens (1.20%), 1 commit (16.67%); David Woodhouse 2 tokens (0.80%), 2 commits (33.33%); Thomas Gleixner 2 tokens (0.80%), 1 commit (16.67%). Total: 250 tokens, 6 commits.


static struct device_domain_info * iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, u8 bus, u8 devfn) { struct device_domain_info *info; assert_spin_locked(&device_domain_lock); if (!iommu->qi) return NULL; list_for_each_entry(info, &domain->devices, link) if (info->iommu == iommu && info->bus == bus && info->devfn == devfn) { if (info->ats_supported && info->dev) return info; break; } return NULL; }

Contributors: Yu Zhao 53 tokens (57.61%), 1 commit (14.29%); Anil S Keshavamurthy 15 tokens (16.30%), 1 commit (14.29%); David Woodhouse 12 tokens (13.04%), 3 commits (42.86%); Jiang Liu 6 tokens (6.52%), 1 commit (14.29%); Joerg Roedel 6 tokens (6.52%), 1 commit (14.29%). Total: 92 tokens, 7 commits.


static void domain_update_iotlb(struct dmar_domain *domain) { struct device_domain_info *info; bool has_iotlb_device = false; assert_spin_locked(&device_domain_lock); list_for_each_entry(info, &domain->devices, link) { struct pci_dev *pdev; if (!info->dev || !dev_is_pci(info->dev)) continue; pdev = to_pci_dev(info->dev); if (pdev->ats_enabled) { has_iotlb_device = true; break; } } domain->has_iotlb_device = has_iotlb_device; }

Contributors: Omer Peleg 86 tokens (100.00%), 1 commit (100.00%). Total: 86 tokens, 1 commit.


static void iommu_enable_dev_iotlb(struct device_domain_info *info) { struct pci_dev *pdev; assert_spin_locked(&device_domain_lock); if (!info || !dev_is_pci(info->dev)) return; pdev = to_pci_dev(info->dev); #ifdef CONFIG_INTEL_IOMMU_SVM /* The PCIe spec, in its wisdom, declares that the behaviour of the device if you enable PASID support after ATS support is undefined. So always enable PASID support on devices which have it, even if we can't yet know if we're ever going to use it. */ if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1)) info->pasid_enabled = 1; if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) info->pri_enabled = 1; #endif if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) { info->ats_enabled = 1; domain_update_iotlb(info->domain); info->ats_qdep = pci_ats_queue_depth(pdev); } }

Contributors: David Woodhouse 77 tokens (55.00%), 2 commits (40.00%); Björn Helgaas 26 tokens (18.57%), 1 commit (20.00%); Yu Zhao 24 tokens (17.14%), 1 commit (20.00%); Omer Peleg 13 tokens (9.29%), 1 commit (20.00%). Total: 140 tokens, 5 commits.


static void iommu_disable_dev_iotlb(struct device_domain_info *info) { struct pci_dev *pdev; assert_spin_locked(&device_domain_lock); if (!dev_is_pci(info->dev)) return; pdev = to_pci_dev(info->dev); if (info->ats_enabled) { pci_disable_ats(pdev); info->ats_enabled = 0; domain_update_iotlb(info->domain); } #ifdef CONFIG_INTEL_IOMMU_SVM if (info->pri_enabled) { pci_disable_pri(pdev); info->pri_enabled = 0; } if (info->pasid_enabled) { pci_disable_pasid(pdev); info->pasid_enabled = 0; } #endif }

Contributors: David Woodhouse 67 tokens (60.36%), 2 commits (33.33%); Yu Zhao 23 tokens (20.72%), 1 commit (16.67%); Omer Peleg 13 tokens (11.71%), 1 commit (16.67%); Björn Helgaas 7 tokens (6.31%), 1 commit (16.67%); Jeremy McNicoll 1 token (0.90%), 1 commit (16.67%). Total: 111 tokens, 6 commits.


static void iommu_flush_dev_iotlb(struct dmar_domain *domain, u64 addr, unsigned mask) { u16 sid, qdep; unsigned long flags; struct device_domain_info *info; if (!domain->has_iotlb_device) return; spin_lock_irqsave(&device_domain_lock, flags); list_for_each_entry(info, &domain->devices, link) { if (!info->ats_enabled) continue; sid = info->bus << 8 | info->devfn; qdep = info->ats_qdep; qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); } spin_unlock_irqrestore(&device_domain_lock, flags); }

Contributors: Yu Zhao 94 tokens (88.68%), 1 commit (25.00%); Omer Peleg 8 tokens (7.55%), 1 commit (25.00%); David Woodhouse 2 tokens (1.89%), 1 commit (25.00%); Björn Helgaas 2 tokens (1.89%), 1 commit (25.00%). Total: 106 tokens, 4 commits.


static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages, int ih, int map) { unsigned int mask = ilog2(__roundup_pow_of_two(pages)); uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; u16 did = domain->iommu_did[iommu->seq_id]; BUG_ON(pages == 0); if (ih) ih = 1 << 6; /* * Fallback to domain selective flush if no PSI support or the size is * too big. * PSI requires page size to be 2 ^ x, and the base address is naturally * aligned to the size */ if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap)) iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); else iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, DMA_TLB_PSI_FLUSH); /* * In caching mode, changes of pages from non-present to present require * flush. However, device IOTLB doesn't need to be flushed in this case. */ if (!cap_caching_mode(iommu->cap) || !map) iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), addr, mask); }

Contributors: Anil S Keshavamurthy 67 tokens (40.36%), 2 commits (16.67%); Yu Zhao 32 tokens (19.28%), 3 commits (25.00%); David Woodhouse 26 tokens (15.66%), 3 commits (25.00%); Joerg Roedel 20 tokens (12.05%), 2 commits (16.67%); Youquan Song 15 tokens (9.04%), 1 commit (8.33%); Nadav Amit 6 tokens (3.61%), 1 commit (8.33%). Total: 166 tokens, 12 commits.
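
Illustration (not part of the driver): a stand-alone sketch of the page-selective-invalidation mask computed above; the flushed region must span 2^mask pages, so the page count is rounded up to a power of two and the mask is its log2. psi_mask() is an illustrative stand-in for ilog2(__roundup_pow_of_two(pages)):

#include <stdio.h>

static unsigned int psi_mask(unsigned long pages)
{
	unsigned int mask = 0;

	while ((1UL << mask) < pages)
		mask++;
	return mask;
}

int main(void)
{
	/* flushing 3 pages is widened to a 4-page (mask 2) invalidation */
	printf("pages=3 -> mask=%u (%lu pages)\n", psi_mask(3), 1UL << psi_mask(3));
	return 0;
}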


static void iommu_flush_iova(struct iova_domain *iovad) { struct dmar_domain *domain; int idx; domain = container_of(iovad, struct dmar_domain, iovad); for_each_domain_iommu(idx, domain) { struct intel_iommu *iommu = g_iommus[idx]; u16 did = domain->iommu_did[iommu->seq_id]; iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); if (!cap_caching_mode(iommu->cap)) iommu_flush_dev_iotlb(get_iommu_domain(iommu, did), 0, MAX_AGAW_PFN_WIDTH); } }

Contributors: Joerg Roedel 87 tokens (85.29%), 1 commit (50.00%); Mark Gross 15 tokens (14.71%), 1 commit (50.00%). Total: 102 tokens, 2 commits.


static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) { u32 pmen; unsigned long flags; raw_spin_lock_irqsave(&iommu->register_lock, flags); pmen = readl(iommu->reg + DMAR_PMEN_REG); pmen &= ~DMA_PMEN_EPM; writel(pmen, iommu->reg + DMAR_PMEN_REG); /* wait for the protected region status bit to clear */ IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, readl, !(pmen & DMA_PMEN_PRS), pmen); raw_spin_unlock_irqrestore(&iommu->register_lock, flags); }

Contributors: Mark Gross 82 tokens (97.62%), 1 commit (50.00%); Thomas Gleixner 2 tokens (2.38%), 1 commit (50.00%). Total: 84 tokens, 2 commits.


static void iommu_enable_translation(struct intel_iommu *iommu) { u32 sts; unsigned long flags; raw_spin_lock_irqsave(&iommu->register_lock, flags); iommu->gcmd |= DMA_GCMD_TE; writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_TES), sts); raw_spin_unlock_irqrestore(&iommu->register_lock, flags); }

Contributors: Anil S Keshavamurthy 66 tokens (88.00%), 1 commit (25.00%); David Woodhouse 6 tokens (8.00%), 1 commit (25.00%); Thomas Gleixner 2 tokens (2.67%), 1 commit (25.00%); Jiang Liu 1 token (1.33%), 1 commit (25.00%). Total: 75 tokens, 4 commits.


static void iommu_disable_translation(struct intel_iommu *iommu) { u32 sts; unsigned long flag; raw_spin_lock_irqsave(&iommu->register_lock, flag); iommu->gcmd &= ~DMA_GCMD_TE; writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (!(sts & DMA_GSTS_TES)), sts); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); }

Contributors: Anil S Keshavamurthy 76 tokens (96.20%), 1 commit (33.33%); Thomas Gleixner 2 tokens (2.53%), 1 commit (33.33%); Jiang Liu 1 token (1.27%), 1 commit (33.33%). Total: 79 tokens, 3 commits.


static int iommu_init_domains(struct intel_iommu *iommu) { u32 ndomains, nlongs; size_t size; ndomains = cap_ndoms(iommu->cap); pr_debug("%s: Number of Domains supported <%d>\n", iommu->name, ndomains); nlongs = BITS_TO_LONGS(ndomains); spin_lock_init(&iommu->lock); iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); if (!iommu->domain_ids) { pr_err("%s: Allocating domain id array failed\n", iommu->name); return -ENOMEM; } size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **); iommu->domains = kzalloc(size, GFP_KERNEL); if (iommu->domains) { size = 256 * sizeof(struct dmar_domain *); iommu->domains[0] = kzalloc(size, GFP_KERNEL); } if (!iommu->domains || !iommu->domains[0]) { pr_err("%s: Allocating domain array failed\n", iommu->name); kfree(iommu->domain_ids); kfree(iommu->domains); iommu->domain_ids = NULL; iommu->domains = NULL; return -ENOMEM; } /* * If Caching mode is set, then invalid translations are tagged * with domain-id 0, hence we need to pre-allocate it. We also * use domain-id 0 as a marker for non-allocated domain-id, so * make sure it is not used for a real domain. */ set_bit(0, iommu->domain_ids); return 0; }

Contributors: Anil S Keshavamurthy 115 tokens (50.66%), 1 commit (12.50%); Joerg Roedel 77 tokens (33.92%), 3 commits (37.50%); Jiang Liu 19 tokens (8.37%), 1 commit (12.50%); Donald Dutile 8 tokens (3.52%), 1 commit (12.50%); Wei Yang 5 tokens (2.20%), 1 commit (12.50%); Yinghai Lu 3 tokens (1.32%), 1 commit (12.50%). Total: 227 tokens, 8 commits.


static void disable_dmar_iommu(struct intel_iommu *iommu) { struct device_domain_info *info, *tmp; unsigned long flags; if (!iommu->domains || !iommu->domain_ids) return; again: spin_lock_irqsave(&device_domain_lock, flags); list_for_each_entry_safe(info, tmp, &device_domain_list, global) { struct dmar_domain *domain; if (info->iommu != iommu) continue; if (!info->dev || !info->domain) continue; domain = info->domain; __dmar_remove_one_dev_info(info); if (!domain_type_is_vm_or_si(domain)) { /* * The domain_exit() function can't be called under * device_domain_lock, as it takes this lock itself. * So release the lock here and re-run the loop * afterwards. */ spin_unlock_irqrestore(&device_domain_lock, flags); domain_exit(domain); goto again; } } spin_unlock_irqrestore(&device_domain_lock, flags); if (iommu->gcmd & DMA_GCMD_TE) iommu_disable_translation(iommu); }

Contributors: Joerg Roedel 74 tokens (51.39%), 3 commits (23.08%); Anil S Keshavamurthy 42 tokens (29.17%), 1 commit (7.69%); Jiang Liu 15 tokens (10.42%), 5 commits (38.46%); Donald Dutile 9 tokens (6.25%), 1 commit (7.69%); Weidong Han 3 tokens (2.08%), 2 commits (15.38%); Akinobu Mita 1 token (0.69%), 1 commit (7.69%). Total: 144 tokens, 13 commits.


static void free_dmar_iommu(struct intel_iommu *iommu) { if ((iommu->domains) && (iommu->domain_ids)) { int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8; int i; for (i = 0; i < elems; i++) kfree(iommu->domains[i]); kfree(iommu->domains); kfree(iommu->domain_ids); iommu->domains = NULL; iommu->domain_ids = NULL; } g_iommus[iommu->seq_id] = NULL; /* free context mapping */ free_context_table(iommu); #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu)) { if (ecap_prs(iommu->ecap)) intel_svm_finish_prq(iommu); intel_svm_free_pasid_tables(iommu); } #endif }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel3423.61%112.50%
Jiang Liu3423.61%225.00%
David Woodhouse3322.92%225.00%
Anil S Keshavamurthy2920.14%112.50%
Weidong Han96.25%112.50%
Wei Yang53.47%112.50%
Total144100.00%8100.00%


static struct dmar_domain *alloc_domain(int flags) { struct dmar_domain *domain; domain = alloc_domain_mem(); if (!domain) return NULL; memset(domain, 0, sizeof(*domain)); domain->nid = -1; domain->flags = flags; domain->has_iotlb_device = false; INIT_LIST_HEAD(&domain->devices); return domain; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy2534.72%116.67%
Jiang Liu1926.39%233.33%
Fenghua Yu1825.00%116.67%
Omer Peleg68.33%116.67%
Mike Travis45.56%116.67%
Total72100.00%6100.00%

/* Must be called with device_domain_lock and iommu->lock held */
static int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) { unsigned long ndomains; int num; assert_spin_locked(&device_domain_lock); assert_spin_locked(&iommu->lock); domain->iommu_refcnt[iommu->seq_id] += 1; domain->iommu_count += 1; if (domain->iommu_refcnt[iommu->seq_id] == 1) { ndomains = cap_ndoms(iommu->cap); num = find_first_zero_bit(iommu->domain_ids, ndomains); if (num >= ndomains) { pr_err("%s: No free domain ids\n", iommu->name); domain->iommu_refcnt[iommu->seq_id] -= 1; domain->iommu_count -= 1; return -ENOSPC; } set_bit(num, iommu->domain_ids); set_iommu_domain(iommu, num, domain); domain->iommu_did[iommu->seq_id] = num; domain->nid = iommu->node; domain_update_iommu_cap(domain); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu6437.21%216.67%
Joerg Roedel5129.65%758.33%
Fenghua Yu3218.60%18.33%
Anil S Keshavamurthy2313.37%18.33%
Weidong Han21.16%18.33%
Total172100.00%12100.00%


static int domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) { int num, count = INT_MAX; assert_spin_locked(&device_domain_lock); assert_spin_locked(&iommu->lock); domain->iommu_refcnt[iommu->seq_id] -= 1; count = --domain->iommu_count; if (domain->iommu_refcnt[iommu->seq_id] == 0) { num = domain->iommu_did[iommu->seq_id]; clear_bit(num, iommu->domain_ids); set_iommu_domain(iommu, num, NULL); domain_update_iommu_cap(domain); domain->iommu_did[iommu->seq_id] = 0; } return count; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel6353.39%466.67%
Jiang Liu5143.22%116.67%
Fenghua Yu43.39%116.67%
Total118100.00%6100.00%

static struct iova_domain reserved_iova_list;
static struct lock_class_key reserved_rbtree_key;
static int dmar_init_reserved_ranges(void) { struct pci_dev *pdev = NULL; struct iova *iova; int i; init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN, DMA_32BIT_PFN); lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, &reserved_rbtree_key); /* IOAPIC ranges shouldn't be accessed by DMA */ iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), IOVA_PFN(IOAPIC_RANGE_END)); if (!iova) { pr_err("Reserve IOAPIC range failed\n"); return -ENODEV; } /* Reserve all PCI MMIO to avoid peer-to-peer access */ for_each_pci_dev(pdev) { struct resource *r; for (i = 0; i < PCI_NUM_RESOURCES; i++) { r = &pdev->resource[i]; if (!r->flags || !(r->flags & IORESOURCE_MEM)) continue; iova = reserve_iova(&reserved_iova_list, IOVA_PFN(r->start), IOVA_PFN(r->end)); if (!iova) { pr_err("Reserve iova failed\n"); return -ENODEV; } } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy13677.27%112.50%
Joseph Cihula158.52%112.50%
Mark Gross116.25%112.50%
David Woodhouse63.41%112.50%
Robin Murphy42.27%225.00%
Joerg Roedel21.14%112.50%
David S. Miller21.14%112.50%
Total176100.00%8100.00%
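
As a concrete illustration of the first reservation above, using the IOAPIC range passed to reserve_iova():

/*
 * Example: IOVA_PFN(0xfee00000) = 0xfee00 and IOVA_PFN(0xfeefffff) = 0xfeeff,
 * so the call pins PFNs 0xfee00-0xfeeff and the IOVA allocator can never
 * hand out a DMA address inside the IOAPIC/MSI window.
 */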


static void domain_reserve_special_ranges(struct dmar_domain *domain) { copy_reserved_iova(&reserved_iova_list, &domain->iovad); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy22100.00%1100.00%
Total22100.00%1100.00%


static inline int guestwidth_to_adjustwidth(int gaw) { int agaw; int r = (gaw - 12) % 9; if (r == 0) agaw = gaw; else agaw = gaw + 9 - r; if (agaw > 64) agaw = 64; return agaw; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy56100.00%1100.00%
Total56100.00%1100.00%
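
A quick self-contained check of the rounding arithmetic above; this is a hypothetical user-space test, not part of the driver, but it mirrors the expression exactly:

#include <assert.h>

/* Mirror of guestwidth_to_adjustwidth() for a stand-alone sanity check. */
static int adjust_width_example(int gaw)
{
	int r = (gaw - 12) % 9;
	int agaw = (r == 0) ? gaw : gaw + 9 - r;

	return (agaw > 64) ? 64 : agaw;
}

int main(void)
{
	assert(adjust_width_example(48) == 48);	/* already 12 + 9*4 */
	assert(adjust_width_example(50) == 57);	/* rounded up to 12 + 9*5 */
	assert(adjust_width_example(39) == 39);	/* 12 + 9*3 */
	return 0;
}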


static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, int guest_width) { int adjust_width, agaw; unsigned long sagaw; int err; init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, DMA_32BIT_PFN); err = init_iova_flush_queue(&domain->iovad, iommu_flush_iova, iova_entry_free); if (err) return err; domain_reserve_special_ranges(domain); /* calculate AGAW */ if (guest_width > cap_mgaw(iommu->cap)) guest_width = cap_mgaw(iommu->cap); domain->gaw = guest_width; adjust_width = guestwidth_to_adjustwidth(guest_width); agaw = width_to_agaw(adjust_width); sagaw = cap_sagaw(iommu->cap); if (!test_bit(agaw, &sagaw)) { /* hardware doesn't support it, choose a bigger one */ pr_debug("Hardware doesn't support agaw %d\n", agaw); agaw = find_next_bit(&sagaw, 5, agaw); if (agaw >= 5) return -ENODEV; } domain->agaw = agaw; if (ecap_coherent(iommu->ecap)) domain->iommu_coherency = 1; else domain->iommu_coherency = 0; if (ecap_sc_support(iommu->ecap)) domain->iommu_snooping = 1; else domain->iommu_snooping = 0; if (intel_iommu_superpage) domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); else domain->iommu_superpage = 0; domain->nid = iommu->node; /* always allocate the top pgd */ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); if (!domain->pgd) return -ENOMEM; __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy17058.82%17.14%
Joerg Roedel3010.38%321.43%
Weidong Han248.30%214.29%
Sheng Yang227.61%17.14%
Youquan Song144.84%17.14%
Suresh B. Siddha113.81%17.14%
David Woodhouse113.81%17.14%
Robin Murphy41.38%214.29%
David S. Miller20.69%17.14%
Fenghua Yu10.35%17.14%
Total289100.00%14100.00%
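
One way to read the AGAW fixup above, assuming the usual VT-d SAGAW encoding in which bit 1 means 39-bit/three-level and bit 2 means 48-bit/four-level tables:

/*
 * Example: a 39-bit guest width gives agaw 1.  If cap_sagaw() advertises
 * only bit 2, test_bit(1, &sagaw) fails and find_next_bit() returns 2, so
 * the domain simply uses the deeper 48-bit/four-level format that the
 * hardware does support.
 */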


static void domain_exit(struct dmar_domain *domain) { struct page *freelist = NULL; /* Domain 0 is reserved, so dont process it */ if (!domain) return; /* Remove associated devices and clear attached or cached domains */ rcu_read_lock(); domain_remove_dev_info(domain); rcu_read_unlock(); /* destroy iovas */ put_iova_domain(&domain->iovad); freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); dma_free_pagelist(freelist); free_domain_mem(domain); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy4258.33%114.29%
David Woodhouse1825.00%228.57%
Joerg Roedel79.72%114.29%
Alex Williamson34.17%228.57%
Fenghua Yu22.78%114.29%
Total72100.00%7100.00%


static int domain_context_mapping_one(struct dmar_domain *domain, struct intel_iommu *iommu, u8 bus, u8 devfn) { u16 did = domain->iommu_did[iommu->seq_id]; int translation = CONTEXT_TT_MULTI_LEVEL; struct device_domain_info *info = NULL; struct context_entry *context; unsigned long flags; struct dma_pte *pgd; int ret, agaw; WARN_ON(did == 0); if (hw_pass_through && domain_type_is_si(domain)) translation = CONTEXT_TT_PASS_THROUGH; pr_debug("Set context mapping for %02x:%02x.%d\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); BUG_ON(!domain->pgd); spin_lock_irqsave(&device_domain_lock, flags); spin_lock(&iommu->lock); ret = -ENOMEM; context = iommu_context_addr(iommu, bus, devfn, 1); if (!context) goto out_unlock; ret = 0; if (context_present(context)) goto out_unlock; /* * For kdump cases, old valid entries may be cached due to the * in-flight DMA and copied pgtable, but there is no unmapping * behaviour for them, thus we need an explicit cache flush for * the newly-mapped device. For kdump, at this point, the device * is supposed to finish reset at its driver probe stage, so no * in-flight DMA will exist, and we don't need to worry anymore * hereafter. */ if (context_copied(context)) { u16 did_old = context_domain_id(context); if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) { iommu->flush.flush_context(iommu, did_old, (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); iommu->flush.flush_iotlb(iommu, did_old, 0, 0, DMA_TLB_DSI_FLUSH); } } pgd = domain->pgd; context_clear_entry(context); context_set_domain_id(context, did); /* * Skip top levels of page tables for iommu which has less agaw * than default. Unnecessary for PT mode. */ if (translation != CONTEXT_TT_PASS_THROUGH) { for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { ret = -ENOMEM; pgd = phys_to_virt(dma_pte_addr(pgd)); if (!dma_pte_present(pgd)) goto out_unlock; } info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); if (info && info->ats_supported) translation = CONTEXT_TT_DEV_IOTLB; else translation = CONTEXT_TT_MULTI_LEVEL; context_set_address_root(context, virt_to_phys(pgd)); context_set_address_width(context, iommu->agaw); } else { /* * In pass through mode, AW must be programmed to * indicate the largest AGAW value supported by * hardware. And ASR is ignored by hardware. */ context_set_address_width(context, iommu->msagaw); } context_set_translation_type(context, translation); context_set_fault_enable(context); context_set_present(context); domain_flush_cache(domain, context, sizeof(*context)); /* * It's a non-present to present mapping. If hardware doesn't cache * non-present entry we only need to flush the write-buffer. If the * _does_ cache non-present entries, then it does so in the special * domain #0, which we have to flush: */ if (cap_caching_mode(iommu->cap)) { iommu->flush.flush_context(iommu, 0, (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); } else { iommu_flush_write_buffer(iommu); } iommu_enable_dev_iotlb(info); ret = 0; out_unlock: spin_unlock(&iommu->lock); spin_unlock_irqrestore(&device_domain_lock, flags); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy14929.50%14.76%
Joerg Roedel9218.22%523.81%
Weidong Han6512.87%314.29%
Xunlei Pang6112.08%14.76%
David Woodhouse5110.10%523.81%
Yu Zhao244.75%14.76%
Fenghua Yu203.96%14.76%
KarimAllah Ahmed193.76%14.76%
Youquan Song152.97%14.76%
Chris Wright81.58%14.76%
Wei Yang10.20%14.76%
Total505100.00%21100.00%
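
For reference, the 16-bit source-id passed to the context-cache flush above is just the bus number concatenated with devfn; a worked example (interpretation of DMA_CCMD_MASK_NOBIT per the VT-d invalidation format):

/*
 * Example: bus 0x3a, device 0x00, function 2 gives devfn = 0x02, so the
 * source-id is (0x3a << 8) | 0x02 = 0x3a02.  With DMA_CCMD_MASK_NOBIT no
 * source-id bits are masked, so only that one function's entry is
 * invalidated.
 */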

struct domain_context_mapping_data { struct dmar_domain *domain; struct intel_iommu *iommu; };
static int domain_context_mapping_cb(struct pci_dev *pdev, u16 alias, void *opaque) { struct domain_context_mapping_data *data = opaque; return domain_context_mapping_one(data->domain, data->iommu, PCI_BUS_NUM(alias), alias & 0xff); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson46100.00%1100.00%
Total46100.00%1100.00%


static int domain_context_mapping(struct dmar_domain *domain, struct device *dev) { struct intel_iommu *iommu; u8 bus, devfn; struct domain_context_mapping_data data; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; if (!dev_is_pci(dev)) return domain_context_mapping_one(domain, iommu, bus, devfn); data.domain = domain; data.iommu = iommu; return pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_mapping_cb, &data); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy3737.37%114.29%
David Woodhouse3636.36%457.14%
Alex Williamson2525.25%114.29%
Fenghua Yu11.01%114.29%
Total99100.00%7100.00%


static int domain_context_mapped_cb(struct pci_dev *pdev, u16 alias, void *opaque) { struct intel_iommu *iommu = opaque; return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson3380.49%120.00%
Anil S Keshavamurthy512.20%120.00%
David Woodhouse24.88%240.00%
Fenghua Yu12.44%120.00%
Total41100.00%5100.00%


static int domain_context_mapped(struct device *dev) { struct intel_iommu *iommu; u8 bus, devfn; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; if (!dev_is_pci(dev)) return device_context_mapped(iommu, bus, devfn); return !pci_for_each_dma_alias(to_pci_dev(dev), domain_context_mapped_cb, iommu); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy3141.33%114.29%
Weidong Han2229.33%228.57%
David Woodhouse1520.00%342.86%
Alex Williamson79.33%114.29%
Total75100.00%7100.00%

/* Returns a number of VTD pages, but aligned to MM page size */
static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size) { host_addr &= ~PAGE_MASK; return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT; }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu30100.00%1100.00%
Total30100.00%1100.00%
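
A worked example of the page-count calculation above, assuming 4KiB MM and VT-d pages:

/*
 * Example: a buffer starting 0x800 bytes into a page with size 0x1801
 * spans PAGE_ALIGN(0x800 + 0x1801) = 0x3000 bytes of page-aligned space,
 * i.e. 0x3000 >> VTD_PAGE_SHIFT = 3 VT-d pages, even though the raw
 * length is barely over one page.
 */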

/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn, unsigned long phy_pfn, unsigned long pages) { int support, level = 1; unsigned long pfnmerge; support = domain->iommu_superpage; /* To use a large page, the virtual *and* physical addresses must be aligned to 2MiB/1GiB/etc. Lower bits set in either of them will mean we have to use smaller pages. So just merge them and check both at once. */ pfnmerge = iov_pfn | phy_pfn; while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) { pages >>= VTD_STRIDE_SHIFT; if (!pages) break; pfnmerge >>= VTD_STRIDE_SHIFT; level++; support--; } return level; }

Contributors

PersonTokensPropCommitsCommitProp
Youquan Song85100.00%1100.00%
Total85100.00%1100.00%
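
A worked example of the loop above, assuming the domain reports superpage support for both 2MiB and 1GiB (iommu_superpage == 2):

/*
 * Example: iov_pfn = 0x200 and phy_pfn = 0x400 are both 2MiB-aligned, so
 * pfnmerge = 0x600 has its low nine bits clear.  With pages = 1024 one
 * iteration runs (pages -> 2, level -> 2); the shifted pfnmerge (0x3) is
 * no longer aligned, so the loop stops and level 2 (a 2MiB superpage) is
 * used for this chunk.
 */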


static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, struct scatterlist *sg, unsigned long phys_pfn, unsigned long nr_pages, int prot) { struct dma_pte *first_pte = NULL, *pte = NULL; phys_addr_t uninitialized_var(pteval); unsigned long sg_res = 0; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; if (!sg) { sg_res = nr_pages; pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; } while (nr_pages > 0) { uint64_t tmp; if (!sg_res) { sg_res = aligned_nrpages(sg->offset, sg->length); sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; sg->dma_length = sg->length; pteval = page_to_phys(sg_page(sg)) | prot; phys_pfn = pteval >> VTD_PAGE_SHIFT; } if (!pte) { largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); if (!pte) return -ENOMEM; /* It is large page*/ if (largepage_lvl > 1) { unsigned long nr_superpages, end_pfn; pteval |= DMA_PTE_LARGE_PAGE; lvl_pages = lvl_to_nr_pages(largepage_lvl); nr_superpages = sg_res / lvl_pages; end_pfn = iov_pfn + nr_superpages * lvl_pages - 1; /* * Ensure that old small page tables are * removed to make room for superpage(s). * We're adding new large pages, so make sure * we don't remove their parent tables. */ dma_pte_free_pagetable(domain, iov_pfn, end_pfn, largepage_lvl + 1); } else { pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; } } /* We don't need lock here, nobody else * touches the iova range */ tmp = cmpxchg64_local(&pte->val, 0ULL, pteval); if (tmp) { static int dumps = 5; pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n", iov_pfn, tmp, (unsigned long long)pteval); if (dumps) { dumps--; debug_dma_dump_mappings(NULL); } WARN_ON(1); } lvl_pages = lvl_to_nr_pages(largepage_lvl); BUG_ON(nr_pages < lvl_pages); BUG_ON(sg_res < lvl_pages); nr_pages -= lvl_pages; iov_pfn += lvl_pages; phys_pfn += lvl_pages; pteval += lvl_pages * VTD_PAGE_SIZE; sg_res -= lvl_pages; /* If the next PTE would be the first in a new page, then we need to flush the cache on the entries we've just written. And then we'll need to recalculate 'pte', so clear it and let it get set again in the if (!pte) block above. If we're done (!nr_pages) we need to flush the cache too. Also if we've been setting superpages, we may need to recalculate 'pte' and switch back to smaller pages for the end of the mapping, if the trailing size is not enough to use another superpage (i.e. sg_res < lvl_pages). */ pte++; if (!nr_pages || first_pte_in_page(pte) || (largepage_lvl > 1 && sg_res < lvl_pages)) { domain_flush_cache(domain, first_pte, (void *)pte - (void *)first_pte); pte = NULL; } if (!sg_res && nr_pages) sg = sg_next(sg); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse32867.35%847.06%
Youquan Song11122.79%15.88%
Christian Zander234.72%15.88%
Jiang Liu112.26%317.65%
Dave Dillow51.03%15.88%
Dan J Williams40.82%15.88%
Fenghua Yu40.82%15.88%
Joerg Roedel10.21%15.88%
Total487100.00%17100.00%
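
A numeric example of the superpage path above, to make the page-table cleanup bound concrete:

/*
 * Example: mapping sg_res = 1536 4KiB pages at a 2MiB-aligned iov_pfn with
 * largepage_lvl == 2 gives lvl_pages = 512, nr_superpages = 1536/512 = 3
 * and end_pfn = iov_pfn + 3*512 - 1, so any old 4KiB page tables covering
 * those three 2MiB regions are freed before the large PTEs are written.
 */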


static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, struct scatterlist *sg, unsigned long nr_pages, int prot) { return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse2352.27%360.00%
Anil S Keshavamurthy1943.18%120.00%
Fenghua Yu24.55%120.00%
Total44100.00%5100.00%


static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, unsigned long phys_pfn, unsigned long nr_pages, int prot) { return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse3786.05%360.00%
Anil S Keshavamurthy511.63%120.00%
Weidong Han12.33%120.00%
Total43100.00%5100.00%


static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) { unsigned long flags; struct context_entry *context; u16 did_old; if (!iommu) return; spin_lock_irqsave(&iommu->lock, flags); context = iommu_context_addr(iommu, bus, devfn, 0); if (!context) { spin_unlock_irqrestore(&iommu->lock, flags); return; } did_old = context_domain_id(context); context_clear_entry(context); __iommu_flush_cache(iommu, context, sizeof(*context)); spin_unlock_irqrestore(&iommu->lock, flags); iommu->flush.flush_context(iommu, did_old, (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); iommu->flush.flush_iotlb(iommu, did_old, 0, 0, DMA_TLB_DSI_FLUSH); }

Contributors

PersonTokensPropCommitsCommitProp
Filippo Sironi9762.18%116.67%
Anil S Keshavamurthy3119.87%116.67%
Youquan Song1811.54%116.67%
Weidong Han95.77%233.33%
Joerg Roedel10.64%116.67%
Total156100.00%6100.00%


static inline void unlink_domain_info(struct device_domain_info *info) { assert_spin_locked(&device_domain_lock); list_del(&info->link); list_del(&info->global); if (info->dev) info->dev->archdata.iommu = NULL; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse50100.00%1100.00%
Total50100.00%1100.00%


static void domain_remove_dev_info(struct dmar_domain *domain) { struct device_domain_info *info, *tmp; unsigned long flags; spin_lock_irqsave(&device_domain_lock, flags); list_for_each_entry_safe(info, tmp, &domain->devices, link) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy4481.48%133.33%
Yijing Wang916.67%133.33%
Joerg Roedel11.85%133.33%
Total54100.00%3100.00%

/*
 * find_domain
 * Note: we use struct device->archdata.iommu to store the domain info
 */
static struct dmar_domain *find_domain(struct device *dev) { struct device_domain_info *info; /* No lock here, assumes no domain exit in normal case */ info = dev->archdata.iommu; if (likely(info)) return info->domain; return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy3583.33%240.00%
Peter Xu37.14%120.00%
David Woodhouse37.14%120.00%
Allen M Kay12.38%120.00%
Total42100.00%5100.00%


static inline struct device_domain_info * dmar_search_domain_by_dev_info(int segment, int bus, int devfn) { struct device_domain_info *info; list_for_each_entry(info, &device_domain_list, global) if (info->iommu->segment == segment && info->bus == bus && info->devfn == devfn) return info; return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy3966.10%120.00%
Jiang Liu1016.95%120.00%
David Woodhouse1016.95%360.00%
Total59100.00%5100.00%


static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, int bus, int devfn, struct device *dev, struct dmar_domain *domain) { struct dmar_domain *found = NULL; struct device_domain_info *info; unsigned long flags; int ret; info = alloc_devinfo_mem(); if (!info) return NULL; info->bus = bus; info->devfn = devfn; info->ats_supported = info->pasid_supported = info->pri_supported = 0; info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0; info->ats_qdep = 0; info->dev = dev; info->domain = domain; info->iommu = iommu; if (dev && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(info->dev); if (ecap_dev_iotlb_support(iommu->ecap) && pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) && dmar_find_matched_atsr_unit(pdev)) info->ats_supported = 1; if (ecs_enabled(iommu)) { if (pasid_enabled(iommu)) { int features = pci_pasid_features(pdev); if (features >= 0) info->pasid_supported = features | 1; } if (info->ats_supported && ecap_prs(iommu->ecap) && pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI)) info->pri_supported = 1; } } spin_lock_irqsave(&device_domain_lock, flags); if (dev) found = find_domain(dev); if (!found) { struct device_domain_info *info2; info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); if (info2) { found = info2->domain; info2->dev = dev; } } if (found) { spin_unlock_irqrestore(&device_domain_lock, flags); free_devinfo_mem(info); /* Caller must free the original domain */ return found; } spin_lock(&iommu->lock); ret = domain_attach_iommu(domain, iommu); spin_unlock(&iommu->lock); if (ret) { spin_unlock_irqrestore(&device_domain_lock, flags); free_devinfo_mem(info); return NULL; } list_add(&info->link, &domain->devices); list_add(&info->global, &device_domain_list); if (dev) dev->archdata.iommu = info; spin_unlock_irqrestore(&device_domain_lock, flags); if (dev && domain_context_mapping(domain, dev)) { pr_err("Domain context map for %s failed\n", dev_name(dev)); dmar_remove_one_dev_info(domain, dev); return NULL; } return domain; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse18340.40%531.25%
Joerg Roedel9220.31%637.50%
Anil S Keshavamurthy8318.32%16.25%
Jiang Liu7215.89%16.25%
Björn Helgaas102.21%16.25%
Jiri Slaby81.77%16.25%
Sudip Mukherjee51.10%16.25%
Total453100.00%16100.00%


static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque) { *(u16 *)opaque = alias; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson30100.00%1100.00%
Total30100.00%1100.00%


static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw) { struct device_domain_info *info = NULL; struct dmar_domain *domain = NULL; struct intel_iommu *iommu; u16 req_id, dma_alias; unsigned long flags; u8 bus, devfn; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return NULL; req_id = ((u16)bus << 8) | devfn; if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias); spin_lock_irqsave(&device_domain_lock, flags); info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus), PCI_BUS_NUM(dma_alias), dma_alias & 0xff); if (info) { iommu = info->iommu; domain = info->domain; } spin_unlock_irqrestore(&device_domain_lock, flags); /* DMA alias already has a domain, use it */ if (info) goto out; } /* Allocate and initialize new domain for the device */ domain = alloc_domain(0); if (!domain) return NULL; if (domain_init(domain, iommu, gaw)) { domain_exit(domain); return NULL; } out: return domain; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu6831.34%323.08%
Alex Williamson5424.88%17.69%
David Woodhouse4319.82%215.38%
Joerg Roedel3516.13%430.77%
Anil S Keshavamurthy156.91%17.69%
Yijing Wang10.46%17.69%
Dan Carpenter10.46%17.69%
Total217100.00%13100.00%


static struct dmar_domain *set_domain_for_dev(struct device *dev, struct dmar_domain *domain) { struct intel_iommu *iommu; struct dmar_domain *tmp; u16 req_id, dma_alias; u8 bus, devfn; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return NULL; req_id = ((u16)bus << 8) | devfn; if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias); /* register PCI DMA alias device */ if (req_id != dma_alias) { tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), dma_alias & 0xff, NULL, domain); if (!tmp || tmp != domain) return tmp; } } tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); if (!tmp || tmp != domain) return tmp; return domain; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel12170.35%225.00%
Alex Williamson2514.53%112.50%
Jiang Liu116.40%112.50%
David Woodhouse95.23%225.00%
Anil S Keshavamurthy31.74%112.50%
Dan Carpenter31.74%112.50%
Total172100.00%8100.00%


static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) { struct dmar_domain *domain, *tmp; domain = find_domain(dev); if (domain) goto out; domain = find_or_alloc_domain(dev, gaw); if (!domain) goto out; tmp = set_domain_for_dev(dev, domain); if (!tmp || domain != tmp) { domain_exit(domain); domain = tmp; } out: return domain; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel5865.17%114.29%
Alex Williamson1112.36%114.29%
Jiang Liu910.11%228.57%
Anil S Keshavamurthy66.74%114.29%
David Woodhouse55.62%228.57%
Total89100.00%7100.00%


static int iommu_domain_identity_map(struct dmar_domain *domain, unsigned long long start, unsigned long long end) { unsigned long first_vpfn = start >> VTD_PAGE_SHIFT; unsigned long last_vpfn = end >> VTD_PAGE_SHIFT; if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), dma_to_mm_pfn(last_vpfn))) { pr_err("Reserving iova failed\n"); return -ENOMEM; } pr_debug("Mapping reserved region %llx-%llx\n", start, end); /* * RMRR range might have overlap with physical memory range, * clear it first */ dma_pte_clear_range(domain, first_vpfn, last_vpfn); return domain_pfn_mapping(domain, first_vpfn, first_vpfn, last_vpfn - first_vpfn + 1, DMA_PTE_READ|DMA_PTE_WRITE); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy6358.33%114.29%
David Woodhouse3431.48%342.86%
Fenghua Yu87.41%114.29%
Joerg Roedel32.78%228.57%
Total108100.00%7100.00%


static int domain_prepare_identity_map(struct device *dev, struct dmar_domain *domain, unsigned long long start, unsigned long long end) { /* For _hardware_ passthrough, don't bother. But for software passthrough, we do it anyway -- it may indicate a memory range which is reserved in E820, so which didn't get set up to start with in si_domain */ if (domain == si_domain && hw_pass_through) { pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", dev_name(dev), start, end); return 0; } pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n", dev_name(dev), start, end); if (end < start) { WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); return -EIO; } if (end >> agaw_to_width(domain->agaw)) { WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", agaw_to_width(domain->agaw), dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); return -EIO; } return iommu_domain_identity_map(domain, start, end); }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse14490.00%571.43%
Joerg Roedel1610.00%228.57%
Total160100.00%7100.00%


static int iommu_prepare_identity_map(struct device *dev, unsigned long long start, unsigned long long end) { struct dmar_domain *domain; int ret; domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); if (!domain) return -ENOMEM; ret = domain_prepare_identity_map(dev, domain, start, end); if (ret) domain_exit(domain); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel5779.17%150.00%
Anil S Keshavamurthy1520.83%150.00%
Total72100.00%2100.00%


static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, struct device *dev) { if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) return 0; return iommu_prepare_identity_map(dev, rmrr->base_address, rmrr->end_address); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy4090.91%266.67%
David Woodhouse49.09%133.33%
Total44100.00%3100.00%

#ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
static inline void iommu_prepare_isa(void) { struct pci_dev *pdev; int ret; pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); if (!pdev) return; pr_info("Prepare 0-16MiB unity mapping for LPC\n"); ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1); if (ret) pr_err("Failed to create 0-16MiB identity map - floppy might not work\n"); pci_dev_put(pdev); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy5980.82%120.00%
David Woodhouse56.85%240.00%
Yijing Wang56.85%120.00%
Joerg Roedel45.48%120.00%
Total73100.00%5100.00%

#else
static inline void iommu_prepare_isa(void) { return; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy10100.00%1100.00%
Total10100.00%1100.00%

#endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */

static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw) { int nid, ret = 0; si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY); if (!si_domain) return -EFAULT; if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); return -EFAULT; } pr_debug("Identity mapping domain allocated\n"); if (hw) return 0; for_each_online_node(nid) { unsigned long start_pfn, end_pfn; int i; for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { ret = iommu_domain_identity_map(si_domain, PFN_PHYS(start_pfn), PFN_PHYS(end_pfn)); if (ret) return ret; } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Tejun Heo3327.27%17.14%
Fenghua Yu3125.62%214.29%
David Woodhouse3125.62%214.29%
Mark Gross75.79%17.14%
Jiang Liu75.79%321.43%
Anil S Keshavamurthy64.96%17.14%
Weidong Han32.48%17.14%
Mark McLoughlin10.83%17.14%
Matt Kraai10.83%17.14%
Joerg Roedel10.83%17.14%
Total121100.00%14100.00%


static int identity_mapping(struct device *dev) { struct device_domain_info *info; if (likely(!iommu_identity_mapping)) return 0; info = dev->archdata.iommu; if (info && info != DUMMY_DEVICE_DOMAIN_INFO) return (info->domain == si_domain); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu3360.00%133.33%
Mike Travis1934.55%133.33%
David Woodhouse35.45%133.33%
Total55100.00%3100.00%


static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) { struct dmar_domain *ndomain; struct intel_iommu *iommu; u8 bus, devfn; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); if (ndomain != domain) return -EBUSY; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse5466.67%466.67%
Fenghua Yu2632.10%116.67%
Joerg Roedel11.23%116.67%
Total81100.00%6100.00%


static bool device_has_rmrr(struct device *dev) { struct dmar_rmrr_unit *rmrr; struct device *tmp; int i; rcu_read_lock(); for_each_rmrr_units(rmrr) { /* * Return TRUE if this RMRR contains the device that * is passed in. */ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, tmp) if (tmp == dev) { rcu_read_unlock(); return true; } } rcu_read_unlock(); return false; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Mingarelli3656.25%120.00%
Jiang Liu2640.62%240.00%
David Woodhouse23.12%240.00%
Total64100.00%5100.00%

/*
 * There are a couple cases where we need to restrict the functionality of
 * devices associated with RMRRs.  The first is when evaluating a device for
 * identity mapping because problems exist when devices are moved in and out
 * of domains and their respective RMRR information is lost.  This means that
 * a device with associated RMRRs will never be in a "passthrough" domain.
 * The second is use of the device through the IOMMU API.  This interface
 * expects to have full control of the IOVA space for the device.  We cannot
 * satisfy both the requirement that RMRR access is maintained and have an
 * unencumbered IOVA space.  We also have no ability to quiesce the device's
 * use of the RMRR space or even inform the IOMMU API user of the restriction.
 * We therefore prevent devices associated with an RMRR from participating in
 * the IOMMU API, which eliminates them from device assignment.
 *
 * In both cases we assume that PCI USB devices with RMRRs have them largely
 * for historical reasons and that the RMRR space is not actively used post
 * boot.  This exclusion may change if vendors begin to abuse it.
 *
 * The same exception is made for graphics devices, with the requirement that
 * any use of the RMRR regions will be torn down before assigning the device
 * to a guest.
 */
static bool device_is_rmrr_locked(struct device *dev) { if (!device_has_rmrr(dev)) return false; if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev)) return false; } return true; }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson5186.44%150.00%
David Woodhouse813.56%150.00%
Total59100.00%2100.00%


static int iommu_should_identity_map(struct device *dev, int startup) { if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); if (device_is_rmrr_locked(dev)) return 0; if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) return 1; if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) return 1; if (!(iommu_identity_mapping & IDENTMAP_ALL)) return 0; /* * We want to start off with all devices in the 1:1 domain, and * take them out later if we find they can't access all of memory. * * However, we can't do this for PCI devices behind bridges, * because all PCI devices behind the same bridge will end up * with the same source-id on their transactions. * * Practically speaking, we can't change things around for these * devices at run-time, because we can't be sure there'll be no * DMA transactions in flight for any of their siblings. * * So PCI devices (unless they're on the root bus) as well as * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of * the 1:1 domain, just in _case_ one of their siblings turns out * not to be able to map all of memory. */ if (!pci_is_pcie(pdev)) { if (!pci_is_root_bus(pdev->bus)) return 0; if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) return 0; } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) return 0; } else { if (device_has_rmrr(dev)) return 0; } /* * At boot time, we don't yet know if devices will be 64-bit capable. * Assume that they will — if they turn out not to be, then we can * take them out of the 1:1 domain later. */ if (!startup) { /* * If the device's dma_mask is less than the system's memory * size then this is not a candidate for identity mapping. */ u64 dma_mask = *dev->dma_mask; if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask) dma_mask = dev->coherent_dma_mask; return dma_mask >= dma_get_required_mask(dev); } return 1; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse15378.06%550.00%
Chris Wright2713.78%110.00%
Thomas Mingarelli94.59%110.00%
Kenji Kaneshige31.53%110.00%
Yijing Wang31.53%110.00%
Alex Williamson10.51%110.00%
Total196100.00%10100.00%


static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw) { int ret; if (!iommu_should_identity_map(dev, 1)) return 0; ret = domain_add_dev_info(si_domain, dev); if (!ret) pr_info("%s identity mapping for device %s\n", hw ? "Hardware" : "Software", dev_name(dev)); else if (ret == -ENODEV) /* device not associated with an iommu */ ret = 0; return ret; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse7496.10%150.00%
Joerg Roedel33.90%150.00%
Total77100.00%2100.00%


static int __init iommu_prepare_static_identity_mapping(int hw) { struct pci_dev *pdev = NULL; struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; struct device *dev; int i; int ret = 0; for_each_pci_dev(pdev) { ret = dev_prepare_static_identity_mapping(&pdev->dev, hw); if (ret) return ret; } for_each_active_iommu(iommu, drhd) for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) { struct acpi_device_physical_node *pn; struct acpi_device *adev; if (dev->bus != &acpi_bus_type) continue; adev= to_acpi_device(dev); mutex_lock(&adev->physical_node_lock); list_for_each_entry(pn, &adev->physical_node_list, node) { ret = dev_prepare_static_identity_mapping(pn->dev, hw); if (ret) break; } mutex_unlock(&adev->physical_node_lock); if (ret) return ret; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse12174.69%770.00%
Fenghua Yu3622.22%110.00%
Mike Travis42.47%110.00%
Matt Kraai10.62%110.00%
Total162100.00%10100.00%


static void intel_iommu_init_qi(struct intel_iommu *iommu) { /* * Start from the sane iommu hardware state. * If the queued invalidation is already initialized by us * (for example, while enabling interrupt-remapping) then * we got the things already rolling from a sane state. */ if (!iommu->qi) { /* * Clear any previous faults. */ dmar_fault(-1, iommu); /* * Disable queued invalidation if supported and already enabled * before OS handover. */ dmar_disable_qi(iommu); } if (dmar_enable_qi(iommu)) { /* * Queued Invalidate not enabled, use Register Based Invalidate */ iommu->flush.flush_context = __iommu_flush_context; iommu->flush.flush_iotlb = __iommu_flush_iotlb; pr_info("%s: Using Register based invalidation\n", iommu->name); } else { iommu->flush.flush_context = qi_flush_context; iommu->flush.flush_iotlb = qi_flush_iotlb; pr_info("%s: Using Queued invalidation\n", iommu->name); } }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu9797.98%150.00%
Joerg Roedel22.02%150.00%
Total99100.00%2100.00%


static int copy_context_table(struct intel_iommu *iommu, struct root_entry *old_re, struct context_entry **tbl, int bus, bool ext) { int tbl_idx, pos = 0, idx, devfn, ret = 0, did; struct context_entry *new_ce = NULL, ce; struct context_entry *old_ce = NULL; struct root_entry re; phys_addr_t old_ce_phys; tbl_idx = ext ? bus * 2 : bus; memcpy(&re, old_re, sizeof(re)); for (devfn = 0; devfn < 256; devfn++) { /* First calculate the correct index */ idx = (ext ? devfn * 2 : devfn) % 256; if (idx == 0) { /* First save what we may have and clean up */ if (new_ce) { tbl[tbl_idx] = new_ce; __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); pos = 1; } if (old_ce) iounmap(old_ce); ret = 0; if (devfn < 0x80) old_ce_phys = root_entry_lctp(&re); else old_ce_phys = root_entry_uctp(&re); if (!old_ce_phys) { if (ext && devfn == 0) { /* No LCTP, try UCTP */ devfn = 0x7f; continue; } else { goto out; } } ret = -ENOMEM; old_ce = memremap(old_ce_phys, PAGE_SIZE, MEMREMAP_WB); if (!old_ce) goto out; new_ce = alloc_pgtable_page(iommu->node); if (!new_ce) goto out_unmap; ret = 0; } /* Now copy the context entry */ memcpy(&ce, old_ce + idx, sizeof(ce)); if (!__context_present(&ce)) continue; did = context_domain_id(&ce); if (did >= 0 && did < cap_ndoms(iommu->cap)) set_bit(did, iommu->domain_ids); /* * We need a marker for copied context entries. This * marker needs to work for the old format as well as * for extended context entries. * * Bit 67 of the context entry is used. In the old * format this bit is available to software, in the * extended format it is the PGE bit, but PGE is ignored * by HW if PASIDs are disabled (and thus still * available). * * So disable PASIDs first and then mark the entry * copied. This means that we don't copy PASID * translations from the old kernel, but this is fine as * faults there are not fatal. */ context_clear_pasid_enable(&ce); context_set_copied(&ce); new_ce[idx] = ce; } tbl[tbl_idx + pos] = new_ce; __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE); out_unmap: memunmap(old_ce); out: return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel32386.36%444.44%
Fenghua Yu318.29%111.11%
Dan J Williams102.67%111.11%
Jiang Liu51.34%111.11%
Mike Travis41.07%111.11%
Joseph Cihula10.27%111.11%
Total374100.00%9100.00%


static int copy_translation_tables(struct intel_iommu *iommu) { struct context_entry **ctxt_tbls; struct root_entry *old_rt; phys_addr_t old_rt_phys; int ctxt_table_entries; unsigned long flags; u64 rtaddr_reg; int bus, ret; bool new_ext, ext; rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); ext = !!(rtaddr_reg & DMA_RTADDR_RTT); new_ext = !!ecap_ecs(iommu->ecap); /* * The RTT bit can only be changed when translation is disabled, * but disabling translation means to open a window for data * corruption. So bail out and don't copy anything if we would * have to change the bit. */ if (new_ext != ext) return -EINVAL; old_rt_phys = rtaddr_reg & VTD_PAGE_MASK; if (!old_rt_phys) return -EINVAL; old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB); if (!old_rt) return -ENOMEM; /* This is too big for the stack - allocate it from slab */ ctxt_table_entries = ext ? 512 : 256; ret = -ENOMEM; ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL); if (!ctxt_tbls) goto out_unmap; for (bus = 0; bus < 256; bus++) { ret = copy_context_table(iommu, &old_rt[bus], ctxt_tbls, bus, ext); if (ret) { pr_err("%s: Failed to copy context table for bus %d\n", iommu->name, bus); continue; } } spin_lock_irqsave(&iommu->lock, flags); /* Context tables are copied, now write them to the root_entry table */ for (bus = 0; bus < 256; bus++) { int idx = ext ? bus * 2 : bus; u64 val; if (ctxt_tbls[idx]) { val = virt_to_phys(ctxt_tbls[idx]) | 1; iommu->root_entry[bus].lo = val; } if (!ext || !ctxt_tbls[idx + 1]) continue; val = virt_to_phys(ctxt_tbls[idx + 1]) | 1; iommu->root_entry[bus].hi = val; } spin_unlock_irqrestore(&iommu->lock, flags); kfree(ctxt_tbls); __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); ret = 0; out_unmap: memunmap(old_rt); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel35497.52%375.00%
Dan J Williams92.48%125.00%
Total363100.00%4100.00%


static int __init init_dmars(void) { struct dmar_drhd_unit *drhd; struct dmar_rmrr_unit *rmrr; bool copied_tables = false; struct device *dev; struct intel_iommu *iommu; int i, ret; /* * for each drhd * allocate root * initialize and program root entry to not present * endfor */ for_each_drhd_unit(drhd) { /* * lock not needed as this is only incremented in the single * threaded kernel __init code path all other access are read * only */ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) { g_num_of_iommus++; continue; } pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED); } /* Preallocate enough resources for IOMMU hot-addition */ if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) g_num_of_iommus = DMAR_UNITS_SUPPORTED; g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), GFP_KERNEL); if (!g_iommus) { pr_err("Allocating global iommu array failed\n"); ret = -ENOMEM; goto error; } for_each_active_iommu(iommu, drhd) { g_iommus[iommu->seq_id] = iommu; intel_iommu_init_qi(iommu); ret = iommu_init_domains(iommu); if (ret) goto free_iommu; init_translation_status(iommu); if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { iommu_disable_translation(iommu); clear_translation_pre_enabled(iommu); pr_warn("Translation was enabled for %s but we are not in kdump mode\n", iommu->name); } /* * TBD: * we could share the same root & context tables * among all IOMMU's. Need to Split it later. */ ret = iommu_alloc_root_entry(iommu); if (ret) goto free_iommu; if (translation_pre_enabled(iommu)) { pr_info("Translation already enabled - trying to copy translation structures\n"); ret = copy_translation_tables(iommu); if (ret) { /* * We found the IOMMU with translation * enabled - but failed to copy over the * old root-entry table. Try to proceed * by disabling translation now and * allocating a clean root-entry table. * This might cause DMAR faults, but * probably the dump will still succeed. */ pr_err("Failed to copy translation tables from previous kernel for %s\n", iommu->name); iommu_disable_translation(iommu); clear_translation_pre_enabled(iommu); } else { pr_info("Copied translation tables from previous kernel for %s\n", iommu->name); copied_tables = true; } } if (!ecap_pass_through(iommu->ecap)) hw_pass_through = 0; #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu)) intel_svm_alloc_pasid_tables(iommu); #endif } /* * Now that qi is enabled on all iommus, set the root entry and flush * caches. This is required on some Intel X58 chipsets, otherwise the * flush_context function will loop forever and the boot hangs. */ for_each_active_iommu(iommu, drhd) { iommu_flush_write_buffer(iommu); iommu_set_root_entry(iommu); iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); } if (iommu_pass_through) iommu_identity_mapping |= IDENTMAP_ALL; #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA iommu_identity_mapping |= IDENTMAP_GFX; #endif check_tylersburg_isoch(); if (iommu_identity_mapping) { ret = si_domain_init(hw_pass_through); if (ret) goto free_iommu; } /* * If we copied translations from a previous kernel in the kdump * case, we can not assign the devices to domains now, as that * would eliminate the old mappings. So skip this part and defer * the assignment to device driver initialization time. */ if (copied_tables) goto domains_done; /* * If pass through is not set or not enabled, setup context entries for * identity mappings for rmrr, gfx, and isa and may fall back to static * identity mapping if iommu_identity_mapping is set. 
*/ if (iommu_identity_mapping) { ret = iommu_prepare_static_identity_mapping(hw_pass_through); if (ret) { pr_crit("Failed to setup IOMMU pass-through\n"); goto free_iommu; } } /* * For each rmrr * for each dev attached to rmrr * do * locate drhd for dev, alloc domain for dev * allocate free domain * allocate page table entries for rmrr * if context not allocated for bus * allocate and init context * set present in root table for this bus * init context with domain, translation etc * endfor * endfor */ pr_info("Setting RMRR:\n"); for_each_rmrr_units(rmrr) { /* some BIOS lists non-exist devices in DMAR table. */ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, dev) { ret = iommu_prepare_rmrr_dev(rmrr, dev); if (ret) pr_err("Mapping reserved region failed\n"); } } iommu_prepare_isa(); domains_done: /* * for each drhd * enable fault log * global invalidate context cache * global invalidate iotlb * enable translation */ for_each_iommu(iommu, drhd) { if (drhd->ignored) { /* * we always have to disable PMRs or DMA may fail on * this device */ if (force_on) iommu_disable_protect_mem_regions(iommu); continue; } iommu_flush_write_buffer(iommu); #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) { ret = intel_svm_enable_prq(iommu); if (ret) goto free_iommu; } #endif ret = dmar_set_interrupt(iommu); if (ret) goto free_iommu; if (!translation_pre_enabled(iommu)) iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); } return 0; free_iommu: for_each_active_iommu(iommu, drhd) { disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } kfree(g_iommus); error: return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel34557.02%1030.30%
Anil S Keshavamurthy9014.88%39.09%
David Woodhouse7211.90%618.18%
Jiang Liu376.12%515.15%
Fenghua Yu223.64%26.06%
Suresh B. Siddha142.31%26.06%
Joseph Cihula121.98%13.03%
Weidong Han60.99%13.03%
Ashok Raj30.50%13.03%
Youquan Song30.50%13.03%
Lucas De Marchi10.17%13.03%
Total605100.00%33100.00%

/* This takes a number of _MM_ pages, not VTD pages */
static unsigned long intel_alloc_iova(struct device *dev, struct dmar_domain *domain, unsigned long nrpages, uint64_t dma_mask) { unsigned long iova_pfn = 0; /* Restrict dma_mask to the width that the iommu can handle */ dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); /* Ensure we reserve the whole size-aligned region */ nrpages = __roundup_pow_of_two(nrpages); if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { /* * First try to allocate an io virtual address in * DMA_BIT_MASK(32) and if that fails then try allocating * from higher range */ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(DMA_BIT_MASK(32))); if (iova_pfn) return iova_pfn; } iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask)); if (unlikely(!iova_pfn)) { pr_err("Allocating %ld-page iova for %s failed", nrpages, dev_name(dev)); return 0; } return iova_pfn; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy6042.25%220.00%
David Woodhouse5135.92%220.00%
Omer Peleg1510.56%220.00%
Robin Murphy85.63%110.00%
Yang Hongyang53.52%110.00%
FUJITA Tomonori21.41%110.00%
Joerg Roedel10.70%110.00%
Total142100.00%10100.00%
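
A short example of the allocation policy above:

/*
 * Example: a 5-page request from a device with a 64-bit DMA mask is first
 * rounded up to 8 pages (so the size-aligned IOVA region is reserved in
 * full), then an IOVA below 4GiB is attempted; only if that space is
 * exhausted does the allocation fall back to the full 64-bit range.
 */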


static struct dmar_domain *get_valid_domain_for_dev(struct device *dev) { struct dmar_domain *domain, *tmp; struct dmar_rmrr_unit *rmrr; struct device *i_dev; int i, ret; domain = find_domain(dev); if (domain) goto out; domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); if (!domain) goto out; /* We have a new domain - setup possible RMRRs for the device */ rcu_read_lock(); for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, i_dev) { if (i_dev != dev) continue; ret = domain_prepare_identity_map(dev, domain, rmrr->base_address, rmrr->end_address); if (ret) dev_err(dev, "Mapping reserved region failed\n"); } } rcu_read_unlock(); tmp = set_domain_for_dev(dev, domain); if (!tmp || domain != tmp) { domain_exit(domain); domain = tmp; } out: if (!domain) pr_err("Allocating domain for %s failed\n", dev_name(dev)); return domain; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel14381.25%225.00%
Anil S Keshavamurthy147.95%225.00%
Fenghua Yu147.95%112.50%
David Woodhouse42.27%225.00%
Peter Xu10.57%112.50%
Total176100.00%8100.00%

/* Check if the dev needs to go through the non-identity map and unmap process. */
static int iommu_no_mapping(struct device *dev) { int found; if (iommu_dummy(dev)) return 1; if (!iommu_identity_mapping) return 0; found = identity_mapping(dev); if (found) { if (iommu_should_identity_map(dev, 0)) return 1; else { /* * 32 bit DMA is removed from si_domain and fall back * to non-identity mapping. */ dmar_remove_one_dev_info(si_domain, dev); pr_info("32bit %s uses non-identity mapping\n", dev_name(dev)); return 0; } } else { /* * In case of a detached 64 bit DMA device from vm, the device * is put into si_domain for identity mapping. */ if (iommu_should_identity_map(dev, 0)) { int ret; ret = domain_add_dev_info(si_domain, dev); if (!ret) { pr_info("64bit %s uses identity mapping\n", dev_name(dev)); return 1; } } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu5340.46%16.25%
Anil S Keshavamurthy4131.30%212.50%
David Woodhouse3425.95%1168.75%
Joerg Roedel32.29%212.50%
Total131100.00%16100.00%


static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir, u64 dma_mask) { struct dmar_domain *domain; phys_addr_t start_paddr; unsigned long iova_pfn; int prot = 0; int ret; struct intel_iommu *iommu; unsigned long paddr_pfn = paddr >> PAGE_SHIFT; BUG_ON(dir == DMA_NONE); if (iommu_no_mapping(dev)) return paddr; domain = get_valid_domain_for_dev(dev); if (!domain) return 0; iommu = domain_get_iommu(domain); size = aligned_nrpages(paddr, size); iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); if (!iova_pfn) goto error; /* * Check if DMAR supports zero-length reads on write only * mappings.. */ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ !cap_zlr(iommu->cap)) prot |= DMA_PTE_READ; if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; /* * paddr - (paddr + size) might be partial page, we should map the whole * page. Note: if two part of one page are separately mapped, we * might have two guest_addr mapping to the same host paddr, but this * is not a big problem */ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), mm_to_dma_pfn(paddr_pfn), size, prot); if (ret) goto error; /* it's a non-present to present mapping. Only flush if caching mode */ if (cap_caching_mode(iommu->cap)) iommu_flush_iotlb_psi(iommu, domain, mm_to_dma_pfn(iova_pfn), size, 0, 1); else iommu_flush_write_buffer(iommu); start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; start_paddr += paddr & ~PAGE_MASK; return start_paddr; error: if (iova_pfn) free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); pr_err("Device %s request: %zx@%llx dir %d --- failed\n", dev_name(dev), size, (unsigned long long)paddr, dir); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy11940.61%210.00%
Joerg Roedel7625.94%15.00%
Mark Gross237.85%210.00%
Omer Peleg175.80%315.00%
Fenghua Yu165.46%315.00%
Weidong Han144.78%15.00%
David Woodhouse134.44%630.00%
Ingo Molnar93.07%15.00%
FUJITA Tomonori62.05%15.00%
Total293100.00%20100.00%
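
A worked example of how the returned DMA address above is composed:

/*
 * Example: for paddr = 0x12345678 the whole containing page is mapped; if
 * the allocator returns iova_pfn = 0x80000, the caller gets back
 * (0x80000 << PAGE_SHIFT) + (0x12345678 & ~PAGE_MASK) = 0x80000678, so the
 * original sub-page offset is preserved in the bus address.
 */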


static dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { return __intel_map_single(dev, page_to_phys(page) + offset, size, dir, *dev->dma_mask); }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel4890.57%133.33%
Mark Gross47.55%133.33%
Omer Peleg11.89%133.33%
Total53100.00%3100.00%


static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) { struct dmar_domain *domain; unsigned long start_pfn, last_pfn; unsigned long nrpages; unsigned long iova_pfn; struct intel_iommu *iommu; struct page *freelist; if (iommu_no_mapping(dev)) return; domain = find_domain(dev); BUG_ON(!domain); iommu = domain_get_iommu(domain); iova_pfn = IOVA_PFN(dev_addr); nrpages = aligned_nrpages(dev_addr, size); start_pfn = mm_to_dma_pfn(iova_pfn); last_pfn = start_pfn + nrpages - 1; pr_debug("Device %s unmapping: pfn %lx-%lx\n", dev_name(dev), start_pfn, last_pfn); freelist = domain_unmap(domain, start_pfn, last_pfn); if (intel_iommu_strict) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, nrpages, !freelist, 0); /* free iova */ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); dma_free_pagelist(freelist); } else { queue_iova(&domain->iovad, iova_pfn, nrpages, (unsigned long)freelist); /* * queue up the release of the unmap to save the 1/6th of the * cpu used up by the iotlb flush operation... */ } }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy8141.54%211.11%
David Woodhouse4422.56%633.33%
Omer Peleg3015.38%316.67%
Mark Gross136.67%15.56%
Weidong Han126.15%15.56%
Joerg Roedel84.10%15.56%
Fenghua Yu31.54%15.56%
Nadav Amit21.03%15.56%
FUJITA Tomonori10.51%15.56%
Jiang Liu10.51%15.56%
Total195100.00%18100.00%


static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { intel_unmap(dev, dev_addr, size); }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu3088.24%133.33%
Krzysztof Kozlowski25.88%133.33%
Omer Peleg25.88%133.33%
Total34100.00%3100.00%


static void *intel_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) { struct page *page = NULL; int order; size = PAGE_ALIGN(size); order = get_order(size); if (!iommu_no_mapping(dev)) flags &= ~(GFP_DMA | GFP_DMA32); else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) flags |= GFP_DMA; else flags |= GFP_DMA32; } if (gfpflags_allow_blocking(flags)) { unsigned int count = size >> PAGE_SHIFT; page = dma_alloc_from_contiguous(dev, count, order, flags); if (page && iommu_no_mapping(dev) && page_to_phys(page) + size > dev->coherent_dma_mask) { dma_release_from_contiguous(dev, page, count); page = NULL; } } if (!page) page = alloc_pages(flags, order); if (!page) return NULL; memset(page_address(page), 0, size); *dma_handle = __intel_map_single(dev, page_to_phys(page), size, DMA_BIDIRECTIONAL, dev->coherent_dma_mask); if (*dma_handle) return page_address(page); if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) __free_pages(page, order); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Akinobu Mita9938.67%18.33%
Anil S Keshavamurthy9537.11%18.33%
Alex Williamson3814.84%18.33%
David Woodhouse72.73%18.33%
FUJITA Tomonori51.95%216.67%
Mel Gorman31.17%18.33%
Lucas Stach20.78%18.33%
Ingo Molnar20.78%18.33%
Andrzej Pietrasiewicz20.78%18.33%
Krzysztof Kozlowski20.78%18.33%
Fenghua Yu10.39%18.33%
Total256100.00%12100.00%


static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { int order; struct page *page = virt_to_page(vaddr); size = PAGE_ALIGN(size); order = get_order(size); intel_unmap(dev, dma_handle, size); if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) __free_pages(page, order); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy4554.88%111.11%
Akinobu Mita2631.71%111.11%
Andrzej Pietrasiewicz22.44%111.11%
Krzysztof Kozlowski22.44%111.11%
Omer Peleg22.44%111.11%
David Woodhouse22.44%111.11%
Jiang Liu11.22%111.11%
Fenghua Yu11.22%111.11%
FUJITA Tomonori11.22%111.11%
Total82100.00%9100.00%


static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, unsigned long attrs) { dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; unsigned long nrpages = 0; struct scatterlist *sg; int i; for_each_sg(sglist, sg, nelems, i) { nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg)); } intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT); }

Contributors

PersonTokensPropCommitsCommitProp
Omer Peleg5662.92%110.00%
Anil S Keshavamurthy1820.22%110.00%
David Woodhouse66.74%330.00%
FUJITA Tomonori66.74%330.00%
Krzysztof Kozlowski22.25%110.00%
Jiang Liu11.12%110.00%
Total89100.00%10100.00%


static int intel_nontranslate_map_sg(struct device *hddev, struct scatterlist *sglist, int nelems, int dir) { int i; struct scatterlist *sg; for_each_sg(sglist, sg, nelems, i) { BUG_ON(!sg_page(sg)); sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; sg->dma_length = sg->length; } return nelems; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy5064.10%125.00%
FUJITA Tomonori2025.64%250.00%
Dan J Williams810.26%125.00%
Total78100.00%4100.00%


static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, unsigned long attrs) { int i; struct dmar_domain *domain; size_t size = 0; int prot = 0; unsigned long iova_pfn; int ret; struct scatterlist *sg; unsigned long start_vpfn; struct intel_iommu *iommu; BUG_ON(dir == DMA_NONE); if (iommu_no_mapping(dev)) return intel_nontranslate_map_sg(dev, sglist, nelems, dir); domain = get_valid_domain_for_dev(dev); if (!domain) return 0; iommu = domain_get_iommu(domain); for_each_sg(sglist, sg, nelems, i) size += aligned_nrpages(sg->offset, sg->length); iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), *dev->dma_mask); if (!iova_pfn) { sglist->dma_length = 0; return 0; } /* * Check if DMAR supports zero-length reads on write only * mappings.. */ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ !cap_zlr(iommu->cap)) prot |= DMA_PTE_READ; if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; start_vpfn = mm_to_dma_pfn(iova_pfn); ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); if (unlikely(ret)) { dma_pte_free_pagetable(domain, start_vpfn, start_vpfn + size - 1, agaw_to_level(domain->agaw) + 1); free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); return 0; } /* it's a non-present to present mapping. Only flush if caching mode */ if (cap_caching_mode(iommu->cap)) iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1); else iommu_flush_write_buffer(iommu); return nelems; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy20464.76%28.70%
David Woodhouse4714.92%939.13%
FUJITA Tomonori216.67%417.39%
Omer Peleg134.13%28.70%
Weidong Han123.81%14.35%
Dave Dillow92.86%14.35%
Nadav Amit41.27%28.70%
Fenghua Yu30.95%14.35%
Krzysztof Kozlowski20.63%14.35%
Total315100.00%23100.00%
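
A minimal scatter-gather usage sketch, assuming two kmalloc'd buffers and a hypothetical helper name. dma_map_sg() dispatches to intel_map_sg() above, which allocates a single IOVA range covering all entries; dma_unmap_sg() ends up in intel_unmap_sg().

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical two-buffer scatter-gather transfer. */
static int example_map_sg(struct device *dev, void *a, void *b, size_t len)
{
	struct scatterlist sgl[2];
	int nents;

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], a, len);
	sg_set_buf(&sgl[1], b, len);

	nents = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);	/* -> intel_map_sg() */
	if (!nents)
		return -ENOMEM;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);		/* -> intel_unmap_sg() */
	return 0;
}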


static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) { return !dma_addr; }

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori18100.00%1100.00%
Total18100.00%1100.00%

const struct dma_map_ops intel_dma_ops = { .alloc = intel_alloc_coherent, .free = intel_free_coherent, .map_sg = intel_map_sg, .unmap_sg = intel_unmap_sg, .map_page = intel_map_page, .unmap_page = intel_unmap_page, .mapping_error = intel_mapping_error, #ifdef CONFIG_X86 .dma_supported = x86_dma_supported, #endif };
static inline int iommu_domain_cache_init(void) { int ret = 0; iommu_domain_cache = kmem_cache_create("iommu_domain", sizeof(struct dmar_domain), 0, SLAB_HWCACHE_ALIGN, NULL); if (!iommu_domain_cache) { pr_err("Couldn't create iommu_domain cache\n"); ret = -ENOMEM; } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy5298.11%150.00%
Joerg Roedel11.89%150.00%
Total53100.00%2100.00%


static inline int iommu_devinfo_cache_init(void) { int ret = 0; iommu_devinfo_cache = kmem_cache_create("iommu_devinfo", sizeof(struct device_domain_info), 0, SLAB_HWCACHE_ALIGN, NULL); if (!iommu_devinfo_cache) { pr_err("Couldn't create devinfo cache\n"); ret = -ENOMEM; } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy5298.11%150.00%
Joerg Roedel11.89%150.00%
Total53100.00%2100.00%


static int __init iommu_init_mempool(void) { int ret; ret = iova_cache_get(); if (ret) return ret; ret = iommu_domain_cache_init(); if (ret) goto domain_error; ret = iommu_devinfo_cache_init(); if (!ret) return ret; kmem_cache_destroy(iommu_domain_cache); domain_error: iova_cache_put(); return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy6095.24%133.33%
Sakari Ailus23.17%133.33%
Robin Murphy11.59%133.33%
Total63100.00%3100.00%
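
The two slab caches created above back the small allocation helpers used throughout this file (for example free_devinfo_mem(), seen further down). A minimal sketch of that allocation pattern, with hypothetical wrapper names:

#include <linux/slab.h>

/* Sketch only: objects come straight out of the caches created above. */
static struct dmar_domain *example_alloc_domain_obj(void)
{
	return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
}

static void example_free_domain_obj(struct dmar_domain *domain)
{
	kmem_cache_free(iommu_domain_cache, domain);
}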


static void __init iommu_exit_mempool(void) { kmem_cache_destroy(iommu_devinfo_cache); kmem_cache_destroy(iommu_domain_cache); iova_cache_put(); }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy2090.91%133.33%
Robin Murphy14.55%133.33%
Sakari Ailus14.55%133.33%
Total22100.00%3100.00%


static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) { struct dmar_drhd_unit *drhd; u32 vtbar; int rc; /* We know that this device on this chipset has its own IOMMU. * If we find it under a different IOMMU, then the BIOS is lying * to us. Hope that the IOMMU for this device is actually * disabled, and it needs no translation... */ rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); if (rc) { /* "can't" happen */ dev_info(&pdev->dev, "failed to run vt-d quirk\n"); return; } vtbar &= 0xffff0000; /* we know that this iommu should be at offset 0xa000 from vtbar */ drhd = dmar_find_matched_drhd_unit(pdev); if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000, TAINT_FIRMWARE_WORKAROUND, "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n")) pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; }

Contributors

PersonTokensPropCommitsCommitProp
Dan J Williams104100.00%1100.00%
Total104100.00%1100.00%

DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
static void __init init_no_remapping_devices(void) { struct dmar_drhd_unit *drhd; struct device *dev; int i; for_each_drhd_unit(drhd) { if (!drhd->include_all) { for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) break; /* ignore DMAR unit if no devices exist */ if (i == drhd->devices_cnt) drhd->ignored = 1; } } for_each_active_drhd_unit(drhd) { if (drhd->include_all) continue; for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev))) break; if (i < drhd->devices_cnt) continue; /* This IOMMU has *only* gfx devices. Either bypass it or set the gfx_mapped flag, as appropriate */ if (dmar_map_gfx) { intel_iommu_gfx_mapped = 1; } else { drhd->ignored = 1; for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; } } }

Contributors

PersonTokensPropCommitsCommitProp
Anil S Keshavamurthy8757.62%233.33%
Jiang Liu4026.49%233.33%
David Woodhouse2415.89%233.33%
Total151100.00%6100.00%

#ifdef CONFIG_SUSPEND
static int init_iommu_hw(void) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu = NULL; for_each_active_iommu(iommu, drhd) if (iommu->qi) dmar_reenable_qi(iommu); for_each_iommu(iommu, drhd) { if (drhd->ignored) { /* * we always have to disable PMRs or DMA may fail on * this device */ if (force_on) iommu_disable_protect_mem_regions(iommu); continue; } iommu_flush_write_buffer(iommu); iommu_set_root_entry(iommu); iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu7561.98%116.67%
Anil S Keshavamurthy2218.18%116.67%
Joseph Cihula2016.53%116.67%
David Woodhouse21.65%116.67%
Suresh B. Siddha10.83%116.67%
Jiang Liu10.83%116.67%
Total121100.00%6100.00%


static void iommu_flush_all(void) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; for_each_active_iommu(iommu, drhd) { iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); } }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu5490.00%133.33%
Anil S Keshavamurthy35.00%133.33%
Mark Gross35.00%133.33%
Total60100.00%3100.00%


static int iommu_suspend(void) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu = NULL; unsigned long flag; for_each_active_iommu(iommu, drhd) { iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS, GFP_ATOMIC); if (!iommu->iommu_state) goto nomem; } iommu_flush_all(); for_each_active_iommu(iommu, drhd) { iommu_disable_translation(iommu); raw_spin_lock_irqsave(&iommu->register_lock, flag); iommu->iommu_state[SR_DMAR_FECTL_REG] = readl(iommu->reg + DMAR_FECTL_REG); iommu->iommu_state[SR_DMAR_FEDATA_REG] = readl(iommu->reg + DMAR_FEDATA_REG); iommu->iommu_state[SR_DMAR_FEADDR_REG] = readl(iommu->reg + DMAR_FEADDR_REG); iommu->iommu_state[SR_DMAR_FEUADDR_REG] = readl(iommu->reg + DMAR_FEUADDR_REG); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } return 0; nomem: for_each_active_iommu(iommu, drhd) kfree(iommu->iommu_state); return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu17597.22%125.00%
Thomas Gleixner21.11%125.00%
Anil S Keshavamurthy21.11%125.00%
Rafael J. Wysocki10.56%125.00%
Total180100.00%4100.00%


static void iommu_resume(void) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu = NULL; unsigned long flag; if (init_iommu_hw()) { if (force_on) panic("tboot: IOMMU setup failed, DMAR can not resume!\n"); else WARN(1, "IOMMU setup failed, DMAR can not resume!\n"); return; } for_each_active_iommu(iommu, drhd) { raw_spin_lock_irqsave(&iommu->register_lock, flag); writel(iommu->iommu_state[SR_DMAR_FECTL_REG], iommu->reg + DMAR_FECTL_REG); writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], iommu->reg + DMAR_FEDATA_REG); writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], iommu->reg + DMAR_FEADDR_REG); writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], iommu->reg + DMAR_FEUADDR_REG); raw_spin_unlock_irqrestore(&iommu->register_lock, flag); } for_each_active_iommu(iommu, drhd) kfree(iommu->iommu_state); }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu13990.26%125.00%
Joseph Cihula106.49%125.00%
Rafael J. Wysocki31.95%125.00%
Thomas Gleixner21.30%125.00%
Total154100.00%4100.00%

static struct syscore_ops iommu_syscore_ops = { .resume = iommu_resume, .suspend = iommu_suspend, };
static void __init init_iommu_pm_ops(void) { register_syscore_ops(&iommu_syscore_ops); }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu1173.33%150.00%
Rafael J. Wysocki426.67%150.00%
Total15100.00%2100.00%

#else
static inline void init_iommu_pm_ops(void) {}

Contributors

PersonTokensPropCommitsCommitProp
Rafael J. Wysocki450.00%266.67%
Fenghua Yu450.00%133.33%
Total8100.00%3100.00%

#endif /* CONFIG_SUSPEND */
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_reserved_memory *rmrr; int prot = DMA_PTE_READ|DMA_PTE_WRITE; struct dmar_rmrr_unit *rmrru; size_t length; rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); if (!rmrru) goto out; rmrru->hdr = header; rmrr = (struct acpi_dmar_reserved_memory *)header; rmrru->base_address = rmrr->base_address; rmrru->end_address = rmrr->end_address; length = rmrr->end_address - rmrr->base_address + 1; rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot, IOMMU_RESV_DIRECT); if (!rmrru->resv) goto free_rmrru; rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), ((void *)rmrr) + rmrr->header.length, &rmrru->devices_cnt); if (rmrru->devices_cnt && rmrru->devices == NULL) goto free_all; list_add(&rmrru->list, &dmar_rmrr_units); return 0; free_all: kfree(rmrru->resv); free_rmrru: kfree(rmrru); out: return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
Suresh B. Siddha10750.23%125.00%
Eric Auger7736.15%125.00%
Jiang Liu2913.62%250.00%
Total213100.00%4100.00%


static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) { struct dmar_atsr_unit *atsru; struct acpi_dmar_atsr *tmp; list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { tmp = (struct acpi_dmar_atsr *)atsru->hdr; if (atsr->segment != tmp->segment) continue; if (atsr->header.length != tmp->header.length) continue; if (memcmp(atsr, tmp, atsr->header.length) == 0) return atsru; } return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu93100.00%1100.00%
Total93100.00%1100.00%


int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg) { struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled) return 0; atsr = container_of(hdr, struct acpi_dmar_atsr, header); atsru = dmar_find_atsr(atsr); if (atsru) return 0; atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); if (!atsru) return -ENOMEM; /* * If memory is allocated from slab by ACPI _DSM method, we need to * copy the memory content because the memory buffer will be freed * on return. */ atsru->hdr = (void *)(atsru + 1); memcpy(atsru->hdr, hdr, hdr->length); atsru->include_all = atsr->flags & 0x1; if (!atsru->include_all) { atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), (void *)atsr + atsr->header.length, &atsru->devices_cnt); if (atsru->devices_cnt && atsru->devices == NULL) { kfree(atsru); return -ENOMEM; } } list_add_rcu(&atsru->list, &dmar_atsr_units); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Suresh B. Siddha11053.40%116.67%
Jiang Liu9445.63%466.67%
Thomas Gleixner20.97%116.67%
Total206100.00%6100.00%


static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) { dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); kfree(atsru); }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu2896.55%150.00%
Suresh B. Siddha13.45%150.00%
Total29100.00%2100.00%


int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg) { struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; atsr = container_of(hdr, struct acpi_dmar_atsr, header); atsru = dmar_find_atsr(atsr); if (atsru) { list_del_rcu(&atsru->list); synchronize_rcu(); intel_iommu_free_atsr(atsru); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu6798.53%266.67%
Suresh B. Siddha11.47%133.33%
Total68100.00%3100.00%


int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg) { int i; struct device *dev; struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; atsr = container_of(hdr, struct acpi_dmar_atsr, header); atsru = dmar_find_atsr(atsr); if (!atsru) return 0; if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, i, dev) return -EBUSY; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu9197.85%150.00%
Linus Torvalds22.15%150.00%
Total93100.00%2100.00%


static int intel_iommu_add(struct dmar_drhd_unit *dmaru) { int sp, ret = 0; struct intel_iommu *iommu = dmaru->iommu; if (g_iommus[iommu->seq_id]) return 0; if (hw_pass_through && !ecap_pass_through(iommu->ecap)) { pr_warn("%s: Doesn't support hardware pass through.\n", iommu->name); return -ENXIO; } if (!ecap_sc_support(iommu->ecap) && domain_update_iommu_snooping(iommu)) { pr_warn("%s: Doesn't support snooping.\n", iommu->name); return -ENXIO; } sp = domain_update_iommu_superpage(iommu) - 1; if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) { pr_warn("%s: Doesn't support large page.\n", iommu->name); return -ENXIO; } /* * Disable translation if already enabled prior to OS handover. */ if (iommu->gcmd & DMA_GCMD_TE) iommu_disable_translation(iommu); g_iommus[iommu->seq_id] = iommu; ret = iommu_init_domains(iommu); if (ret == 0) ret = iommu_alloc_root_entry(iommu); if (ret) goto out; #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu)) intel_svm_alloc_pasid_tables(iommu); #endif if (dmaru->ignored) { /* * we always have to disable PMRs or DMA may fail on this device */ if (force_on) iommu_disable_protect_mem_regions(iommu); return 0; } intel_iommu_init_qi(iommu); iommu_flush_write_buffer(iommu); #ifdef CONFIG_INTEL_IOMMU_SVM if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) { ret = intel_svm_enable_prq(iommu); if (ret) goto disable_iommu; } #endif ret = dmar_set_interrupt(iommu); if (ret) goto disable_iommu; iommu_set_root_entry(iommu); iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); iommu_enable_translation(iommu); iommu_disable_protect_mem_regions(iommu); return 0; disable_iommu: disable_dmar_iommu(iommu); out: free_dmar_iommu(iommu); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu30384.64%125.00%
David Woodhouse5214.53%250.00%
Joerg Roedel30.84%125.00%
Total358100.00%4100.00%


int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert) { int ret = 0; struct intel_iommu *iommu = dmaru->iommu; if (!intel_iommu_enabled) return 0; if (iommu == NULL) return -EINVAL; if (insert) { ret = intel_iommu_add(dmaru); } else { disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu74100.00%2100.00%
Total74100.00%2100.00%


static void intel_iommu_free_dmars(void) { struct dmar_rmrr_unit *rmrru, *rmrr_n; struct dmar_atsr_unit *atsru, *atsr_n; list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { list_del(&rmrru->list); dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); kfree(rmrru->resv); kfree(rmrru); } list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) { list_del(&atsru->list); intel_iommu_free_atsr(atsru); } }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu6973.40%133.33%
Suresh B. Siddha1819.15%133.33%
Eric Auger77.45%133.33%
Total94100.00%3100.00%


int dmar_find_matched_atsr_unit(struct pci_dev *dev) { int i, ret = 1; struct pci_bus *bus; struct pci_dev *bridge = NULL; struct device *tmp; struct acpi_dmar_atsr *atsr; struct dmar_atsr_unit *atsru; dev = pci_physfn(dev); for (bus = dev->bus; bus; bus = bus->parent) { bridge = bus->self; /* If it's an integrated device, allow ATS */ if (!bridge) return 1; /* Connected via non-PCIe: no ATS */ if (!pci_is_pcie(bridge) || pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) return 0; /* If we found the root port, look it up in the ATSR */ if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) break; } rcu_read_lock(); list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); if (atsr->segment != pci_domain_nr(dev->bus)) continue; for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) if (tmp == &bridge->dev) goto out; if (atsru->include_all) goto out; } ret = 0; out: rcu_read_unlock(); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Suresh B. Siddha9346.97%114.29%
Jiang Liu8442.42%342.86%
David Woodhouse157.58%228.57%
Yijing Wang63.03%114.29%
Total198100.00%7100.00%


int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { int ret = 0; struct dmar_rmrr_unit *rmrru; struct dmar_atsr_unit *atsru; struct acpi_dmar_atsr *atsr; struct acpi_dmar_reserved_memory *rmrr; if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING) return 0; list_for_each_entry(rmrru, &dmar_rmrr_units, list) { rmrr = container_of(rmrru->hdr, struct acpi_dmar_reserved_memory, header); if (info->event == BUS_NOTIFY_ADD_DEVICE) { ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1), ((void *)rmrr) + rmrr->header.length, rmrr->segment, rmrru->devices, rmrru->devices_cnt); if(ret < 0) return ret; } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { dmar_remove_dev_scope(info, rmrr->segment, rmrru->devices, rmrru->devices_cnt); } } list_for_each_entry(atsru, &dmar_atsr_units, list) { if (atsru->include_all) continue; atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); if (info->event == BUS_NOTIFY_ADD_DEVICE) { ret = dmar_insert_dev_scope(info, (void *)(atsr + 1), (void *)atsr + atsr->header.length, atsr->segment, atsru->devices, atsru->devices_cnt); if (ret > 0) break; else if(ret < 0) return ret; } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { if (dmar_remove_dev_scope(info, atsr->segment, atsru->devices, atsru->devices_cnt)) break; } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu29098.64%250.00%
Joerg Roedel20.68%125.00%
Thomas Gleixner20.68%125.00%
Total294100.00%4100.00%

/* * Here we only respond to the action of a device being unbound from its driver. * * A newly added device is not attached to its DMAR domain here yet; that happens * when the device is first mapped to an iova. */
static int device_notifier(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct dmar_domain *domain; if (iommu_dummy(dev)) return 0; if (action != BUS_NOTIFY_REMOVED_DEVICE) return 0; domain = find_domain(dev); if (!domain) return 0; dmar_remove_one_dev_info(domain, dev); if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices)) domain_exit(domain); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Fenghua Yu5355.21%19.09%
Alex Williamson1818.75%19.09%
Jiang Liu1212.50%327.27%
David Woodhouse1111.46%436.36%
Joerg Roedel22.08%218.18%
Total96100.00%11100.00%

static struct notifier_block device_nb = { .notifier_call = device_notifier, };
static int intel_iommu_memory_notifier(struct notifier_block *nb, unsigned long val, void *v) { struct memory_notify *mhp = v; unsigned long long start, end; unsigned long start_vpfn, last_vpfn; switch (val) { case MEM_GOING_ONLINE: start = mhp->start_pfn << PAGE_SHIFT; end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1; if (iommu_domain_identity_map(si_domain, start, end)) { pr_warn("Failed to build identity map for [%llx-%llx]\n", start, end); return NOTIFY_BAD; } break; case MEM_OFFLINE: case MEM_CANCEL_ONLINE: start_vpfn = mm_to_dma_pfn(mhp->start_pfn); last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1); while (start_vpfn <= last_vpfn) { struct iova *iova; struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; struct page *freelist; iova = find_iova(&si_domain->iovad, start_vpfn); if (iova == NULL) { pr_debug("Failed get IOVA for PFN %lx\n", start_vpfn); break; } iova = split_and_remove_iova(&si_domain->iovad, iova, start_vpfn, last_vpfn); if (iova == NULL) { pr_warn("Failed to split IOVA PFN [%lx-%lx]\n", start_vpfn, last_vpfn); return NOTIFY_BAD; } freelist = domain_unmap(si_domain, iova->pfn_lo, iova->pfn_hi); rcu_read_lock(); for_each_active_iommu(iommu, drhd) iommu_flush_iotlb_psi(iommu, si_domain, iova->pfn_lo, iova_size(iova), !freelist, 0); rcu_read_unlock(); dma_free_pagelist(freelist); start_vpfn = iova->pfn_hi + 1; free_iova_mem(iova); } break; } return NOTIFY_OK; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu26490.41%250.00%
David Woodhouse258.56%125.00%
Joerg Roedel31.03%125.00%
Total292100.00%4100.00%

static struct notifier_block intel_iommu_memory_nb = { .notifier_call = intel_iommu_memory_notifier, .priority = 0 };
static void free_all_cpu_cached_iovas(unsigned int cpu) { int i; for (i = 0; i < g_num_of_iommus; i++) { struct intel_iommu *iommu = g_iommus[i]; struct dmar_domain *domain; int did; if (!iommu) continue; for (did = 0; did < cap_ndoms(iommu->cap); did++) { domain = get_iommu_domain(iommu, (u16)did); if (!domain) continue; free_cpu_cached_iovas(cpu, &domain->iovad); } } }

Contributors

PersonTokensPropCommitsCommitProp
Omer Peleg9090.00%133.33%
Jan Niehusmann66.00%133.33%
Aaron Campbell44.00%133.33%
Total100100.00%3100.00%


static int intel_iommu_cpu_dead(unsigned int cpu) { free_all_cpu_cached_iovas(cpu); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Omer Peleg1583.33%266.67%
Anna-Maria Gleixner316.67%133.33%
Total18100.00%3100.00%


static void intel_disable_iommus(void) { struct intel_iommu *iommu = NULL; struct dmar_drhd_unit *drhd; for_each_iommu(iommu, drhd) iommu_disable_translation(iommu); }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel31100.00%1100.00%
Total31100.00%1100.00%


static inline struct intel_iommu *dev_to_intel_iommu(struct device *dev) { struct iommu_device *iommu_dev = dev_to_iommu_device(dev); return container_of(iommu_dev, struct intel_iommu, iommu); }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel35100.00%2100.00%
Total35100.00%2100.00%


static ssize_t intel_iommu_show_version(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_iommu *iommu = dev_to_intel_iommu(dev); u32 ver = readl(iommu->reg + DMAR_VER_REG); return sprintf(buf, "%d:%d\n", DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver)); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson4676.67%116.67%
Jiang Liu1016.67%233.33%
Takao Indoh23.33%116.67%
Joerg Roedel11.67%116.67%
Fenghua Yu11.67%116.67%
Total60100.00%6100.00%

static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
static ssize_t intel_iommu_show_address(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->reg_phys); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson3480.95%120.00%
Takao Indoh37.14%120.00%
Joseph Cihula37.14%120.00%
Joerg Roedel12.38%120.00%
Jiang Liu12.38%120.00%
Total42100.00%5100.00%

static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
static ssize_t intel_iommu_show_cap(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->cap); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson4197.62%150.00%
Joerg Roedel12.38%150.00%
Total42100.00%2100.00%

static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
static ssize_t intel_iommu_show_ecap(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%llx\n", iommu->ecap); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson4197.62%150.00%
Joerg Roedel12.38%150.00%
Total42100.00%2100.00%

static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
static ssize_t intel_iommu_show_ndoms(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap)); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson4497.78%150.00%
Joerg Roedel12.22%150.00%
Total45100.00%2100.00%

static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
static ssize_t intel_iommu_show_ndoms_used(struct device *dev, struct device_attribute *attr, char *buf) { struct intel_iommu *iommu = dev_to_intel_iommu(dev); return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap))); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson5198.08%150.00%
Joerg Roedel11.92%150.00%
Total52100.00%2100.00%

static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL); static struct attribute *intel_iommu_attrs[] = { &dev_attr_version.attr, &dev_attr_address.attr, &dev_attr_cap.attr, &dev_attr_ecap.attr, &dev_attr_domains_supported.attr, &dev_attr_domains_used.attr, NULL, }; static struct attribute_group intel_iommu_group = { .name = "intel-iommu", .attrs = intel_iommu_attrs, }; const struct attribute_group *intel_iommu_groups[] = { &intel_iommu_group, NULL, };
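
The attribute group above is registered for each DMAR unit by iommu_device_sysfs_add() in intel_iommu_init() below, so the values become readable from userspace. A hedged userspace sketch follows; the "dmar0" unit name and the /sys/class/iommu/.../intel-iommu/ path layout are assumptions about how the group is exposed.

#include <stdio.h>

/* Read one of the attributes exported above, e.g. the capability register. */
int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/iommu/dmar0/intel-iommu/cap", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("DMAR cap: %s", buf);
	fclose(f);
	return 0;
}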
int __init intel_iommu_init(void) { int ret = -ENODEV; struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; /* VT-d is required for a TXT/tboot launch, so enforce that */ force_on = tboot_force_iommu(); if (iommu_init_mempool()) { if (force_on) panic("tboot: Failed to initialize iommu memory\n"); return -ENOMEM; } down_write(&dmar_global_lock); if (dmar_table_init()) { if (force_on) panic("tboot: Failed to initialize DMAR table\n"); goto out_free_dmar; } if (dmar_dev_scope_init() < 0) { if (force_on) panic("tboot: Failed to initialize DMAR device scope\n"); goto out_free_dmar; } if (no_iommu || dmar_disabled) { /* * We exit the function here to ensure IOMMU's remapping and * mempool aren't setup, which means that the IOMMU's PMRs * won't be disabled via the call to init_dmars(). So disable * it explicitly here. The PMRs were setup by tboot prior to * calling SENTER, but the kernel is expected to reset/tear * down the PMRs. */ if (intel_iommu_tboot_noforce) { for_each_iommu(iommu, drhd) iommu_disable_protect_mem_regions(iommu); } /* * Make sure the IOMMUs are switched off, even when we * boot into a kexec kernel and the previous kernel left * them enabled */ intel_disable_iommus(); goto out_free_dmar; } if (list_empty(&dmar_rmrr_units)) pr_info("No RMRR found\n"); if (list_empty(&dmar_atsr_units)) pr_info("No ATSR found\n"); if (dmar_init_reserved_ranges()) { if (force_on) panic("tboot: Failed to reserve iommu ranges\n"); goto out_free_reserved_range; } init_no_remapping_devices(); ret = init_dmars(); if (ret) { if (force_on) panic("tboot: Failed to initialize DMARs\n"); pr_err("Initialization failed\n"); goto out_free_reserved_range; } up_write(&dmar_global_lock); pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); #ifdef CONFIG_SWIOTLB swiotlb = 0; #endif dma_ops = &intel_dma_ops; init_iommu_pm_ops(); for_each_active_iommu(iommu, drhd) { iommu_device_sysfs_add(&iommu->iommu, NULL, intel_iommu_groups, "%s", iommu->name); iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); iommu_device_register(&iommu->iommu); } bus_set_iommu(&pci_bus_type, &intel_iommu_ops); bus_register_notifier(&pci_bus_type, &device_nb); if (si_domain && !hw_pass_through) register_memory_notifier(&intel_iommu_memory_nb); cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL, intel_iommu_cpu_dead); intel_iommu_enabled = 1; return 0; out_free_reserved_range: put_iova_domain(&reserved_iova_list); out_free_dmar: intel_iommu_free_dmars(); up_write(&dmar_global_lock); iommu_exit_mempool(); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Jiang Liu9226.14%415.38%
Fenghua Yu6317.90%27.69%
Joseph Cihula5114.49%311.54%
Joerg Roedel5014.20%519.23%
Suresh B. Siddha246.82%27.69%
Shaohua Li185.11%13.85%
Alex Williamson164.55%13.85%
Anna-Maria Gleixner82.27%13.85%
Anil S Keshavamurthy82.27%13.85%
FUJITA Tomonori71.99%13.85%
Takao Indoh51.42%13.85%
Eugeni Dodonov41.14%13.85%
Omer Peleg30.85%13.85%
Kees Cook20.57%13.85%
Rafael J. Wysocki10.28%13.85%
Total352100.00%26100.00%


static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) { struct intel_iommu *iommu = opaque; domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson4095.24%150.00%
Joerg Roedel24.76%150.00%
Total42100.00%2100.00%

/* * NB - intel-iommu lacks any sort of reference counting for the users of * dependent devices. If multiple endpoints have intersecting dependent * devices, unbinding the driver from any one of them will possibly leave * the others unable to operate. */
static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) { if (!iommu || !dev || !dev_is_pci(dev)) return; pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); }

Contributors

PersonTokensPropCommitsCommitProp
Weidong Han2659.09%125.00%
David Woodhouse1329.55%125.00%
Alex Williamson36.82%125.00%
Joerg Roedel24.55%125.00%
Total44100.00%4100.00%


static void __dmar_remove_one_dev_info(struct device_domain_info *info) { struct intel_iommu *iommu; unsigned long flags; assert_spin_locked(&device_domain_lock); if (WARN_ON(!info)) return; iommu = info->iommu; if (info->dev) { iommu_disable_dev_iotlb(info); domain_context_clear(iommu, info->dev); } unlink_domain_info(info); spin_lock_irqsave(&iommu->lock, flags); domain_detach_iommu(info->domain, iommu); spin_unlock_irqrestore(&iommu->lock, flags); free_devinfo_mem(info); }

Contributors

PersonTokensPropCommitsCommitProp
Weidong Han6058.82%215.38%
Joerg Roedel2827.45%538.46%
Yu Zhao54.90%17.69%
Roland Dreier43.92%17.69%
David Woodhouse32.94%323.08%
Mike Habeck21.96%17.69%
Total102100.00%13100.00%


static void dmar_remove_one_dev_info(struct dmar_domain *domain, struct device *dev) { struct device_domain_info *info; unsigned long flags; spin_lock_irqsave(&device_domain_lock, flags); info = dev->archdata.iommu; __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel3361.11%233.33%
Weidong Han1120.37%116.67%
Roland Dreier814.81%116.67%
David Woodhouse11.85%116.67%
Alex Williamson11.85%116.67%
Total54100.00%6100.00%


static int md_domain_init(struct dmar_domain *domain, int guest_width) { int adjust_width; init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, DMA_32BIT_PFN); domain_reserve_special_ranges(domain); /* calculate AGAW */ domain->gaw = guest_width; adjust_width = guestwidth_to_adjustwidth(guest_width); domain->agaw = width_to_agaw(adjust_width); domain->iommu_coherency = 0; domain->iommu_snooping = 0; domain->iommu_superpage = 0; domain->max_addr = 0; /* always allocate the top pgd */ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); if (!domain->pgd) return -ENOMEM; domain_flush_cache(domain, domain->pgd, PAGE_SIZE); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Weidong Han10382.40%225.00%
Sheng Yang64.80%112.50%
Youquan Song64.80%112.50%
Suresh B. Siddha54.00%112.50%
Robin Murphy43.20%225.00%
Fenghua Yu10.80%112.50%
Total125100.00%8100.00%


static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) { struct dmar_domain *dmar_domain; struct iommu_domain *domain; if (type != IOMMU_DOMAIN_UNMANAGED) return NULL; dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE); if (!dmar_domain) { pr_err("Can't allocate dmar_domain\n"); return NULL; } if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { pr_err("Domain initialization failed\n"); domain_exit(dmar_domain); return NULL; } domain_update_iommu_cap(dmar_domain); domain = &dmar_domain->domain; domain->geometry.aperture_start = 0; domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); domain->geometry.force_aperture = true; return domain; }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel6856.67%436.36%
Weidong Han3125.83%218.18%
Allen M Kay1512.50%218.18%
Jiang Liu54.17%218.18%
Fenghua Yu10.83%19.09%
Total120100.00%11100.00%


static void intel_iommu_domain_free(struct iommu_domain *domain) { domain_exit(to_dmar_domain(domain)); }

Contributors

PersonTokensPropCommitsCommitProp
Joerg Roedel736.84%240.00%
Weidong Han736.84%120.00%
Allen M Kay421.05%120.00%
Jiang Liu15.26%120.00%
Total19100.00%5100.00%


static int intel_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct intel_iommu *iommu; int addr_width; u8 bus, devfn; if (device_is_rmrr_locked(dev)) { dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n"); return -EPERM; } /* normally dev is not mapped */ if (unlikely(domain_context_mapped(dev))) { struct dmar_domain *old_domain; old_domain = find_domain(dev); if (old_domain) { rcu_read_lock(); dmar_remove_one_dev_info(old_domain, dev); rcu_read_unlock(); if (!domain_type_is_vm_or_si(old_domain) && list_empty(&old_domain->devices)) domain_exit(old_domain); } } iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) addr_width = cap_mgaw(iommu->cap); if (dmar_domain->max_addr > (1LL << addr_width)) { pr_err("%s: iommu width (%d) is not " "sufficient for the mapped address (%llx)\n", __func__, addr_width, dmar_domain->max_addr); return -EFAULT; } dmar_domain->gaw = addr_width; /* * Knock out extra levels of page tables if necessary */ while (iommu->agaw < dmar_domain->agaw) { struct dma_pte *pte; pte = dmar_domain->pgd; if (dma_pte_present(pte)) { dmar_domain->pgd = (struct dma_pte *) phys_to_virt(dma_pte_addr(pte)); free_pgtable_page(pte); } dmar_domain->agaw--; } return domain_add_dev_info(dmar_domain, dev); }

Contributors

PersonTokensPropCommitsCommitProp
Weidong Han8530.14%29.52%
Tom Lyon8229.08%14.76%
Joerg Roedel4515.96%628.57%
Allen M Kay279.57%14.76%
Alex Williamson207.09%14.76%
David Woodhouse155.32%838.10%
Jan Kiszka51.77%14.76%
Sheng Yang31.06%14.76%
Total282100.00%21100.00%


static void intel_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { dmar_remove_one_dev_info(to_dmar_domain(domain), dev); }

Contributors

PersonTokensPropCommitsCommitProp
Allen M Kay1453.85%116.67%
Joerg Roedel934.62%350.00%
Weidong Han27.69%116.67%
David Woodhouse13.85%116.67%
Total26100.00%6100.00%


static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, size_t size, int iommu_prot) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); u64 max_addr; int prot = 0; int ret; if (iommu_prot & IOMMU_READ) prot |= DMA_PTE_READ; if (iommu_prot & IOMMU_WRITE) prot |= DMA_PTE_WRITE; if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) prot |= DMA_PTE_SNP; max_addr = iova + size; if (dmar_domain->max_addr < max_addr) { u64 end; /* check if minimum agaw is sufficient for mapped address */ end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; if (end < max_addr) { pr_err("%s: iommu width (%d) is not " "sufficient for the mapped address (%llx)\n", __func__, dmar_domain->gaw, max_addr); return -EFAULT; } dmar_domain->max_addr = max_addr; } /* Round up size to next multiple of PAGE_SIZE, if it and the low bits of hpa would take us onto the next page */ size = aligned_nrpages(hpa, size); ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, hpa >> VTD_PAGE_SHIFT, size, prot); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Weidong Han5732.39%216.67%
Joerg Roedel4626.14%433.33%
Allen M Kay3017.05%18.33%
Sheng Yang169.09%18.33%
David Woodhouse158.52%216.67%
Tom Lyon105.68%18.33%
Ohad Ben-Cohen21.14%18.33%
Total176100.00%12100.00%


static size_t intel_iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct page *freelist = NULL; struct intel_iommu *iommu; unsigned long start_pfn, last_pfn; unsigned int npages; int iommu_id, level = 0; /* Cope with horrid API which requires us to unmap more than the size argument if it happens to be a large-page mapping. */ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)); if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) size = VTD_PAGE_SIZE << level_to_offset_bits(level); start_pfn = iova >> VTD_PAGE_SHIFT; last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; freelist = domain_unmap(dmar_domain, start_pfn, last_pfn); npages = last_pfn - start_pfn + 1; for_each_domain_iommu(iommu_id, dmar_domain) { iommu = g_iommus[iommu_id]; iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain, start_pfn, npages, !freelist, 0); } dma_free_pagelist(freelist); if (dmar_domain->max_addr == iova + size) dmar_domain->max_addr = iova; return size; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse13068.78%425.00%
Joerg Roedel2714.29%743.75%
Weidong Han157.94%212.50%
Allen M Kay147.41%212.50%
Ohad Ben-Cohen31.59%16.25%
Total189100.00%16100.00%


static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct dma_pte *pte; int level = 0; u64 phys = 0; pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); if (pte) phys = dma_pte_addr(pte); return phys; }

Contributors

PersonTokensPropCommitsCommitProp
Allen M Kay3552.24%112.50%
Joerg Roedel1420.90%225.00%
David Woodhouse1014.93%225.00%
Weidong Han68.96%112.50%
Youquan Song11.49%112.50%
Varun Sethi11.49%112.50%
Total67100.00%8100.00%
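
The domain_alloc/attach/map/unmap/iova_to_phys callbacks above are not called directly; consumers such as VFIO reach them through the generic IOMMU API via the intel_iommu_ops table registered further down. A minimal sketch under that assumption, with a placeholder device and addresses and a hypothetical helper name:

#include <linux/iommu.h>
#include <linux/pci.h>

/* Hypothetical consumer driving the callbacks above via the generic IOMMU API. */
static int example_use_domain(struct device *dev, phys_addr_t pa)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&pci_bus_type);	/* -> intel_iommu_domain_alloc() */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, dev);		/* -> intel_iommu_attach_device() */
	if (ret)
		goto free;

	/* Map 4KiB of 'pa' at IOVA 0 -> intel_iommu_map() */
	ret = iommu_map(dom, 0, pa, 0x1000, IOMMU_READ | IOMMU_WRITE);
	if (!ret)
		WARN_ON(iommu_iova_to_phys(dom, 0) != pa);	/* -> intel_iommu_iova_to_phys() */

	iommu_unmap(dom, 0, 0x1000);			/* -> intel_iommu_unmap() */
	iommu_detach_device(dom, dev);			/* -> intel_iommu_detach_device() */
free:
	iommu_domain_free(dom);				/* -> intel_iommu_domain_free() */
	return ret;
}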


static bool intel_iommu_capable(enum iommu_cap cap) { if (cap == IOMMU_CAP_CACHE_COHERENCY) return domain_update_iommu_snooping(NULL) == 1; if (cap == IOMMU_CAP_INTR_REMAP) return irq_remapping_enabled == 1; return false; }

Contributors

PersonTokensPropCommitsCommitProp
Sheng Yang1642.11%125.00%
Joerg Roedel1334.21%125.00%
Tom Lyon821.05%125.00%
Suresh B. Siddha12.63%125.00%
Total38100.00%4100.00%


static int intel_iommu_add_device(struct device *dev) { struct intel_iommu *iommu; struct iommu_group *group; u8 bus, devfn; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; iommu_device_link(&iommu->iommu, dev); group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) return PTR_ERR(group); iommu_group_put(group); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson7688.37%880.00%
David Woodhouse89.30%110.00%
Joerg Roedel22.33%110.00%
Total86100.00%10100.00%


static void intel_iommu_remove_device(struct device *dev) { struct intel_iommu *iommu; u8 bus, devfn; iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return; iommu_group_remove_device(dev); iommu_device_unlink(&iommu->iommu, dev); }

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson5498.18%266.67%
Joerg Roedel11.82%133.33%
Total55100.00%3100.00%


static void intel_iommu_get_resv_regions(struct device *device, struct list_head *head) { struct iommu_resv_region *reg; struct dmar_rmrr_unit *rmrr; struct device *i_dev; int i; rcu_read_lock(); for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, i_dev) { if (i_dev != device) continue; list_add_tail(&rmrr->resv->list, head); } } rcu_read_unlock(); reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, 0, IOMMU_RESV_MSI); if (!reg) return; list_add_tail(&reg->list, head); }

Contributors

PersonTokensPropCommitsCommitProp
Eric Auger10797.27%133.33%
Alex Williamson21.82%133.33%
Robin Murphy10.91%133.33%
Total110100.00%3100.00%


static void intel_iommu_put_resv_regions(struct device *dev, struct list_head *head) { struct iommu_resv_region *entry, *next; list_for_each_entry_safe(entry, next, head, list) { if (entry->type == IOMMU_RESV_RESERVED) kfree(entry); } }

Contributors

PersonTokensPropCommitsCommitProp
Eric Auger4591.84%133.33%
Alex Williamson48.16%266.67%
Total49100.00%3100.00%
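
A short sketch of how a caller enumerates the regions produced by intel_iommu_get_resv_regions() above (the per-device RMRRs plus the IOAPIC/MSI window), using the generic iommu_get_resv_regions()/iommu_put_resv_regions() helpers; the function name is hypothetical.

#include <linux/iommu.h>
#include <linux/device.h>

/* Hypothetical walk of a device's reserved regions. */
static void example_dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv);

	iommu_get_resv_regions(dev, &resv);
	list_for_each_entry(region, &resv, list)
		dev_info(dev, "reserved: 0x%llx + %zu (type %d)\n",
			 (unsigned long long)region->start,
			 region->length, region->type);
	iommu_put_resv_regions(dev, &resv);	/* -> intel_iommu_put_resv_regions() */
}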

#ifdef CONFIG_INTEL_IOMMU_SVM #define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu) { /* * Convert ecap_pss to extend context entry pts encoding, also * respect the soft pasid_max value set by the iommu. * - number of PASID bits = ecap_pss + 1 * - number of PASID table entries = 2^(pts + 5) * Therefore, pts = ecap_pss - 4 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15 */ if (ecap_pss(iommu->ecap) < 5) return 0; /* pasid_max is encoded as actual number of entries not the bits */ return find_first_bit((unsigned long *)&iommu->pasid_max, MAX_NR_PASID_BITS) - 5; }

Contributors

PersonTokensPropCommitsCommitProp
Jacob jun Pan47100.00%1100.00%
Total47100.00%1100.00%


int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) { struct device_domain_info *info; struct context_entry *context; struct dmar_domain *domain; unsigned long flags; u64 ctx_lo; int ret; domain = get_valid_domain_for_dev(sdev->dev); if (!domain) return -EINVAL; spin_lock_irqsave(&device_domain_lock, flags); spin_lock(&iommu->lock); ret = -EINVAL; info = sdev->dev->archdata.iommu; if (!info || !info->pasid_supported) goto out; context = iommu_context_addr(iommu, info->bus, info->devfn, 0); if (WARN_ON(!context)) goto out; ctx_lo = context[0].lo; sdev->did = domain->iommu_did[iommu->seq_id]; sdev->sid = PCI_DEVID(info->bus, info->devfn); if (!(ctx_lo & CONTEXT_PASIDE)) { if (iommu->pasid_state_table) context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table); context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | intel_iommu_get_pts(iommu); wmb(); /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both * extended to permit requests-with-PASID if the PASIDE bit * is set. which makes sense. For CONTEXT_TT_PASS_THROUGH, * however, the PASIDE bit is ignored and requests-with-PASID * are unconditionally blocked. Which makes less sense. * So convert from CONTEXT_TT_PASS_THROUGH to one of the new * "guest mode" translation types depending on whether ATS * is available or not. Annoyingly, we can't use the new * modes *unless* PASIDE is set. */ if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) { ctx_lo &= ~CONTEXT_TT_MASK; if (info->ats_supported) ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2; else ctx_lo |= CONTEXT_TT_PT_PASID << 2; } ctx_lo |= CONTEXT_PASIDE; if (iommu->pasid_state_table) ctx_lo |= CONTEXT_DINVE; if (info->pri_supported) ctx_lo |= CONTEXT_PRS; context[0].lo = ctx_lo; wmb(); iommu->flush.flush_context(iommu, sdev->did, sdev->sid, DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL); } /* Enable PASID support in the device, if it wasn't already */ if (!info->pasid_enabled) iommu_enable_dev_iotlb(info); if (info->ats_enabled) { sdev->dev_iotlb = 1; sdev->qdep = info->ats_qdep; if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS) sdev->qdep = 0; } ret = 0; out: spin_unlock(&iommu->lock); spin_unlock_irqrestore(&device_domain_lock, flags); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse39198.24%360.00%
Ashok Raj61.51%120.00%
Jacob jun Pan10.25%120.00%
Total398100.00%5100.00%


struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) { struct intel_iommu *iommu; u8 bus, devfn; if (iommu_dummy(dev)) { dev_warn(dev, "No IOMMU translation for device; cannot enable SVM\n"); return NULL; } iommu = device_to_iommu(dev, &bus, &devfn); if ((!iommu)) { dev_err(dev, "No IOMMU for device; cannot enable SVM\n"); return NULL; } if (!iommu->pasid_table) { dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n"); return NULL; } return iommu; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse9397.89%150.00%
Sudeep Dutt22.11%150.00%
Total95100.00%2100.00%

#endif /* CONFIG_INTEL_IOMMU_SVM */ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, .map = intel_iommu_map, .unmap = intel_iommu_unmap, .map_sg = default_iommu_map_sg, .iova_to_phys = intel_iommu_iova_to_phys, .add_device = intel_iommu_add_device, .remove_device = intel_iommu_remove_device, .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = intel_iommu_put_resv_regions, .device_group = pci_device_group, .pgsize_bitmap = INTEL_IOMMU_PGSIZES, };
static void quirk_iommu_g4x_gfx(struct pci_dev *dev) { /* G4x/GM45 integrated gfx dmar support is totally busted. */ pr_info("Disabling IOMMU for graphics on this chipset\n"); dmar_map_gfx = 0; }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse1466.67%133.33%
Daniel Vetter523.81%133.33%
Joerg Roedel29.52%133.33%
Total21100.00%3100.00%

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
static void quirk_iommu_rwbf(struct pci_dev *dev) { /* * Mobile 4 Series Chipset neglects to set RWBF capability, * but needs it. Same seems to hold for the desktop versions. */ pr_info("Forcing write-buffer flush capability\n"); rwbf_quirk = 1; }

Contributors

PersonTokensPropCommitsCommitProp
Daniel Vetter1152.38%250.00%
David Woodhouse838.10%125.00%
Joerg Roedel29.52%125.00%
Total21100.00%4100.00%

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf); #define GGC 0x52 #define GGC_MEMORY_SIZE_MASK (0xf << 8) #define GGC_MEMORY_SIZE_NONE (0x0 << 8) #define GGC_MEMORY_SIZE_1M (0x1 << 8) #define GGC_MEMORY_SIZE_2M (0x3 << 8) #define GGC_MEMORY_VT_ENABLED (0x8 << 8) #define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) #define GGC_MEMORY_SIZE_3M_VT (0xa << 8) #define GGC_MEMORY_SIZE_4M_VT (0xb << 8)
static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) { unsigned short ggc; if (pci_read_config_word(dev, GGC, &ggc)) return; if (!(ggc & GGC_MEMORY_VT_ENABLED)) { pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); dmar_map_gfx = 0; } else if (dmar_map_gfx) { /* we have to ensure the gfx device is idle before we flush */ pr_info("Disabling batched IOTLB flush on Ironlake\n"); intel_iommu_strict = 1; } }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse5990.77%250.00%
Joerg Roedel46.15%125.00%
Adam Jackson23.08%125.00%
Total65100.00%4100.00%

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); /* On Tylersburg chipsets, some BIOSes have been known to enable the ISOCH DMAR unit for the Azalia sound device, but not give it any TLB entries, which causes it to deadlock. Check for that. We do this in a function called from init_dmars(), instead of in a PCI quirk, because we don't want to print the obnoxious "BIOS broken" message if VT-d is actually disabled. */
static void __init check_tylersburg_isoch(void) { struct pci_dev *pdev; uint32_t vtisochctrl; /* If there's no Azalia in the system anyway, forget it. */ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL); if (!pdev) return; pci_dev_put(pdev); /* System Management Registers. Might be hidden, in which case we can't do the sanity check. But that's OK, because the known-broken BIOSes _don't_ actually hide it, so far. */ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL); if (!pdev) return; if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) { pci_dev_put(pdev); return; } pci_dev_put(pdev); /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ if (vtisochctrl & 1) return; /* Drop all bits other than the number of TLB entries */ vtisochctrl &= 0x1c; /* If we have the recommended number of TLB entries (16), fine. */ if (vtisochctrl == 0x10) return; /* Zero TLB entries? You get to ride the short bus to school. */ if (!vtisochctrl) { WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n" "BIOS vendor: %s; Ver: %s; Product Version: %s\n", dmi_get_system_info(DMI_BIOS_VENDOR), dmi_get_system_info(DMI_BIOS_VERSION), dmi_get_system_info(DMI_PRODUCT_VERSION)); iommu_identity_mapping |= IDENTMAP_AZALIA; return; } pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n", vtisochctrl); }

Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse14598.64%150.00%
Joerg Roedel21.36%150.00%
Total147100.00%2100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
David Woodhouse470120.84%10525.49%
Anil S Keshavamurthy423318.76%71.70%
Joerg Roedel335114.85%6816.50%
Jiang Liu269111.93%399.47%
Fenghua Yu11054.90%71.70%
Alex Williamson10884.82%184.37%
Weidong Han8153.61%133.16%
Youquan Song4311.91%20.49%
Suresh B. Siddha4261.89%81.94%
Mark McLoughlin3951.75%92.18%
Omer Peleg3761.67%61.46%
Yu Zhao2611.16%30.73%
Eric Auger2521.12%10.24%
Mark Gross2020.90%40.97%
Allen M Kay1890.84%40.97%
Andrew Morton1740.77%10.24%
Dan J Williams1470.65%30.73%
Daniel Vetter1330.59%20.49%
Akinobu Mita1310.58%20.49%
Sheng Yang1210.54%51.21%
Joseph Cihula1210.54%30.73%
Tom Lyon1000.44%30.73%
FUJITA Tomonori980.43%92.18%
Filippo Sironi970.43%10.24%
Xunlei Pang610.27%10.24%
Jacob jun Pan520.23%10.24%
Björn Helgaas510.23%10.24%
Shaohua Li460.20%10.24%
Thomas Mingarelli450.20%10.24%
Ashok Raj390.17%30.73%
Adam Jackson380.17%10.24%
Tejun Heo360.16%20.49%
Kyle McMartin350.16%10.24%
Chris Wright350.16%20.49%
Dave Dillow320.14%10.24%
Mike Travis310.14%30.73%
Yijing Wang290.13%51.21%
Robin Murphy280.12%51.21%
Christian Zander230.10%10.24%
Thomas Gleixner220.10%20.49%
KarimAllah Ahmed190.08%10.24%
Jon Derrick180.08%10.24%
Donald Dutile170.08%10.24%
Rafael J. Wysocki160.07%20.49%
Eugeni Dodonov140.06%10.24%
Takao Indoh130.06%10.24%
Ohad Ben-Cohen130.06%20.49%
Nadav Amit120.05%20.49%
Roland Dreier120.05%10.24%
Christoph Hellwig120.05%10.24%
Ingo Molnar110.05%10.24%
Anna-Maria Gleixner110.05%10.24%
Wei Yang110.05%20.49%
Krzysztof Kozlowski100.04%10.24%
Jiri Slaby80.04%10.24%
Li, Zhen-Hua70.03%10.24%
Yang Hongyang70.03%20.49%
Julian Stecklina70.03%10.24%
Andrzej Pietrasiewicz60.03%10.24%
Quentin Lambert60.03%10.24%
Yinghai Lu60.03%20.49%
Jan Niehusmann60.03%10.24%
Motohiro Kosaki60.03%10.24%
Sudip Mukherjee50.02%10.24%
Jan Kiszka50.02%10.24%
Olav Haugan50.02%10.24%
Peter Xu40.02%10.24%
Aaron Campbell40.02%10.24%
David S. Miller40.02%10.24%
Dan Carpenter40.02%20.49%
Mel Gorman30.01%10.24%
Benjamin LaHaise30.01%10.24%
Linus Torvalds30.01%20.49%
Sakari Ailus30.01%10.24%
Kenji Kaneshige30.01%10.24%
Stephen Rothwell30.01%10.24%
Paul Gortmaker30.01%10.24%
Thierry Reding20.01%10.24%
Shane Wang20.01%10.24%
Lucas Stach20.01%10.24%
Kees Cook20.01%10.24%
Mike Habeck20.01%10.24%
Koos Vriezen20.01%10.24%
Sudeep Dutt20.01%10.24%
Matt Kraai20.01%10.24%
Jeremy McNicoll10.00%10.24%
Varun Sethi10.00%10.24%
Stefan Assmann10.00%10.24%
Arvind Yadav10.00%10.24%
Lucas De Marchi10.00%10.24%
Total22562100.00%412100.00%
Created with cregit.