arch/x86/kernel/pci-dma.c (Linux release 4.14)
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
static int forbid_dac __read_mostly;
const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif
int iommu_merge __read_mostly = 0;
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access to whole physical memory. This is
 * useful if a user wants to use an IOMMU only for KVM device assignment to
 * guests and not for driver dma translation.
 */
int iommu_pass_through __read_mostly;
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES 65536
void __init pci_iommu_alloc(void)
{
        struct iommu_table_entry *p;

        sort_iommu_table(__iommu_table, __iommu_table_end);
        check_iommu_entries(__iommu_table, __iommu_table_end);

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && p->detect && p->detect() > 0) {
                        p->flags |= IOMMU_DETECTED;
                        if (p->early_init)
                                p->early_init();
                        if (p->flags & IOMMU_FINISH_IF_DETECTED)
                                break;
                }
        }
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Konrad Rzeszutek Wilk | 71 | 83.53% | 2 | 40.00% |
Glauber de Oliveira Costa | 8 | 9.41% | 1 | 20.00% |
FUJITA Tomonori | 6 | 7.06% | 2 | 40.00% |
Total | 85 | 100.00% | 5 | 100.00% |
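For context, the entries that pci_iommu_alloc() walks are placed into the __iommu_table section by the IOMMU_INIT_* macros from <asm/iommu_table.h>. The following is a minimal sketch of such a registration; the my_iommu_* function names are hypothetical and only illustrate the pattern followed by real entries such as the swiotlb one in arch/x86/kernel/pci-swiotlb.c.

#include <asm/iommu_table.h>

/* Hypothetical detection hook: return > 0 when the hardware is present. */
static int __init my_iommu_detect(void)
{
        return 0;       /* not detected in this sketch */
}

/* Hypothetical early init, called by pci_iommu_alloc() when detected. */
static void __init my_iommu_early_init(void)
{
}

/* Hypothetical late init, called by pci_iommu_init() after the PCI subsystem. */
static void __init my_iommu_late_init(void)
{
}

/*
 * No dependency on another entry (NULL); the FINISH variant sets the
 * IOMMU_FINISH_IF_DETECTED flag so the detection loop stops at this entry
 * once it reports a hit.
 */
IOMMU_INIT_FINISH(my_iommu_detect, NULL, my_iommu_early_init, my_iommu_late_init);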
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
                                 unsigned long attrs)
{
        unsigned long dma_mask;
        struct page *page;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag &= ~__GFP_ZERO;
again:
        page = NULL;
        /* CMA can be used only in the context which permits sleeping */
        if (gfpflags_allow_blocking(flag)) {
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 flag);
                if (page) {
                        addr = phys_to_dma(dev, page_to_phys(page));
                        if (addr + size > dma_mask) {
                                dma_release_from_contiguous(dev, page, count);
                                page = NULL;
                        }
                }
        }
        /* fallback */
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = phys_to_dma(dev, page_to_phys(page));
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }
        memset(page_address(page), 0, size);
        *dma_addr = addr;
        return page_address(page);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
FUJITA Tomonori | 141 | 55.08% | 2 | 15.38% |
Akinobu Mita | 43 | 16.80% | 2 | 15.38% |
Marek Szyprowski | 40 | 15.62% | 3 | 23.08% |
Tom Lendacky | 19 | 7.42% | 1 | 7.69% |
Yang Hongyang | 4 | 1.56% | 1 | 7.69% |
Mel Gorman | 3 | 1.17% | 1 | 7.69% |
Andrzej Pietrasiewicz | 2 | 0.78% | 1 | 7.69% |
Lucas Stach | 2 | 0.78% | 1 | 7.69% |
Krzysztof Kozlowski | 2 | 0.78% | 1 | 7.69% |
Total | 256 | 100.00% | 13 | 100.00% |
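For reference, this allocator sits behind the generic DMA API: on a 4.14 x86 system without a hardware IOMMU, the nommu dma_map_ops route coherent allocations here. A driver-side usage sketch (the function and buffer size are illustrative only):

#include <linux/dma-mapping.h>

/* Illustrative only: allocate a one-page coherent buffer for "dev". */
static int example_alloc_ring(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring;

        /* Ends up in dma_generic_alloc_coherent() via the dma_map_ops. */
        ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        /* ... program ring_dma into the device, access "ring" from the CPU ... */

        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
}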
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = virt_to_page(vaddr);

        if (!dma_release_from_contiguous(dev, page, count))
                free_pages((unsigned long)vaddr, get_order(size));
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Marek Szyprowski | 69 | 97.18% | 1 | 50.00% |
Krzysztof Kozlowski | 2 | 2.82% | 1 | 50.00% |
Total | 71 | 100.00% | 2 | 100.00% |
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
        if (!*dev)
                *dev = &x86_dma_fallback_dev;

        *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
        *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

        if (!is_device_dma_capable(*dev))
                return false;
        return true;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Denys Vlasenko | 33 | 50.00% | 1 | 25.00% |
Jun'ichi Nomura | 12 | 18.18% | 1 | 25.00% |
Ville Syrjälä | 12 | 18.18% | 1 | 25.00% |
Christoph Hellwig | 9 | 13.64% | 1 | 25.00% |
Total | 66 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(arch_dma_alloc_attrs);
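arch_dma_alloc_attrs() is the x86 hook that the generic allocation path consults before calling the dma_map_ops ->alloc method: it substitutes the fallback device for a NULL dev, sanitizes the GFP flags, and can veto the allocation for DMA-incapable devices. The sketch below is a simplified, condensed view of the 4.14-era dma_alloc_attrs() caller in include/linux/dma-mapping.h, shown only to illustrate the control flow; it omits error checking and the per-device coherent-pool path.

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag,
                                    unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        /* (per-device coherent pools are tried first; details omitted) */

        /* Let the architecture fix up dev/flags or veto the allocation. */
        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        return cpu_addr;
}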
/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Glauber de Oliveira Costa | 316 | 94.61% | 2 | 50.00% |
Fenghua Yu | 16 | 4.79% | 1 | 25.00% |
David Woodhouse | 2 | 0.60% | 1 | 25.00% |
Total | 334 | 100.00% | 4 | 100.00% |
early_param("iommu", iommu_setup);
int x86_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.
           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.
           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Glauber de Oliveira Costa | 69 | 82.14% | 1 | 20.00% |
Yang Hongyang | 8 | 9.52% | 2 | 40.00% |
Greg Kroah-Hartman | 6 | 7.14% | 1 | 20.00% |
Christoph Hellwig | 1 | 1.19% | 1 | 20.00% |
Total | 84 | 100.00% | 5 | 100.00% |
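x86_dma_supported() is what ultimately rejects a greater-than-32-bit mask once DAC has been forbidden (forbid_dac > 0, e.g. via "iommu=nodac" or the VIA quirk below), which is why drivers conventionally fall back to a 32-bit mask. A sketch of that driver-side pattern (the function name is illustrative):

#include <linux/dma-mapping.h>

/* Illustrative only: prefer 64-bit DMA, fall back to 32-bit if refused. */
static int example_set_dma_mask(struct device *dev)
{
        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                return 0;       /* 64-bit (DAC) addressing accepted */

        /* Rejected, e.g. because forbid_dac is set; try single-address-cycle DMA. */
        return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}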
static int __init pci_iommu_init(void)
{
        struct iommu_table_entry *p;

        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
                        p->late_init();
        }

        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Konrad Rzeszutek Wilk | 35 | 46.05% | 1 | 16.67% |
Joerg Roedel | 16 | 21.05% | 2 | 33.33% |
FUJITA Tomonori | 13 | 17.11% | 2 | 33.33% |
Glauber de Oliveira Costa | 12 | 15.79% | 1 | 16.67% |
Total | 76 | 100.00% | 6 | 100.00% |
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
static void via_no_dac(struct pci_dev *dev)
{
        if (forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Fenghua Yu | 26 | 78.79% | 1 | 50.00% |
Björn Helgaas | 7 | 21.21% | 1 | 50.00% |
Total | 33 | 100.00% | 2 | 100.00% |
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
                              PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif
Overall Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Glauber de Oliveira Costa | 521 | 43.06% | 9 | 14.06% |
FUJITA Tomonori | 165 | 13.64% | 8 | 12.50% |
Konrad Rzeszutek Wilk | 118 | 9.75% | 2 | 3.12% |
Marek Szyprowski | 109 | 9.01% | 3 | 4.69% |
Fenghua Yu | 64 | 5.29% | 3 | 4.69% |
Akinobu Mita | 43 | 3.55% | 2 | 3.12% |
Denys Vlasenko | 37 | 3.06% | 2 | 3.12% |
Joerg Roedel | 36 | 2.98% | 6 | 9.38% |
Tom Lendacky | 19 | 1.57% | 1 | 1.56% |
Yang Hongyang | 12 | 0.99% | 3 | 4.69% |
Jun'ichi Nomura | 12 | 0.99% | 1 | 1.56% |
Ville Syrjälä | 12 | 0.99% | 1 | 1.56% |
Christoph Hellwig | 11 | 0.91% | 2 | 3.12% |
Björn Helgaas | 7 | 0.58% | 1 | 1.56% |
Greg Kroah-Hartman | 7 | 0.58% | 2 | 3.12% |
Yinghai Lu | 5 | 0.41% | 1 | 1.56% |
Krzysztof Kozlowski | 4 | 0.33% | 1 | 1.56% |
Tejun Heo | 3 | 0.25% | 1 | 1.56% |
Paul Gortmaker | 3 | 0.25% | 1 | 1.56% |
Mel Gorman | 3 | 0.25% | 1 | 1.56% |
David Woodhouse | 3 | 0.25% | 2 | 3.12% |
Catalin Marinas | 3 | 0.25% | 1 | 1.56% |
Jan Beulich | 2 | 0.17% | 1 | 1.56% |
Lucas Stach | 2 | 0.17% | 1 | 1.56% |
Andrzej Pietrasiewicz | 2 | 0.17% | 1 | 1.56% |
Dmitri Vorobiev | 1 | 0.08% | 1 | 1.56% |
Ingo Molnar | 1 | 0.08% | 1 | 1.56% |
Paul Bolle | 1 | 0.08% | 1 | 1.56% |
Kay Sievers | 1 | 0.08% | 1 | 1.56% |
Maarten Lankhorst | 1 | 0.08% | 1 | 1.56% |
Justin P. Mattock | 1 | 0.08% | 1 | 1.56% |
Bart Van Assche | 1 | 0.08% | 1 | 1.56% |
Total | 1210 | 100.00% | 64 | 100.00% |