Release 4.10 arch/x86/kernel/pci-dma.c
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
static int forbid_dac __read_mostly;
struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);
static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif
int iommu_merge __read_mostly = 0;
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This is
 * useful if a user wants to use an IOMMU only for KVM device assignment to
 * guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
        .init_name = "fallback device",
        .coherent_dma_mask = ISA_DMA_BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES 65536
void __init pci_iommu_alloc(void)
{
        struct iommu_table_entry *p;

        sort_iommu_table(__iommu_table, __iommu_table_end);
        check_iommu_entries(__iommu_table, __iommu_table_end);

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && p->detect && p->detect() > 0) {
                        p->flags |= IOMMU_DETECTED;
                        if (p->early_init)
                                p->early_init();
                        if (p->flags & IOMMU_FINISH_IF_DETECTED)
                                break;
                }
        }
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| konrad rzeszutek wilk | 71 | 83.53% | 2 | 40.00% |
| glauber de oliveira costa | 8 | 9.41% | 1 | 20.00% |
| fujita tomonori | 6 | 7.06% | 2 | 40.00% |
| Total | 85 | 100.00% | 5 | 100.00% |
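The table that pci_iommu_alloc() walks is assembled at link time: each x86 IOMMU driver drops an entry into the __iommu_table section via the IOMMU_INIT* macros from <asm/iommu_table.h>. A minimal sketch of how a driver would register itself; the my_iommu_* names below are hypothetical placeholders, not functions from this file:

#include <asm/iommu_table.h>

static int __init my_iommu_detect(void)
{
        /* Return > 0 when the hardware is present; pci_iommu_alloc()
           then sets IOMMU_DETECTED and runs the early_init hook. */
        return 0;
}

static void __init my_iommu_early_init(void)
{
        /* Runs from pci_iommu_alloc(), before the memory allocators
           are fully available. */
}

static void __init my_iommu_late_init(void)
{
        /* Runs from pci_iommu_init(), after the PCI subsystem is up. */
}

/* The _FINISH variant sets IOMMU_FINISH_IF_DETECTED, so the scan in
   pci_iommu_alloc() stops once this entry detects its hardware. */
IOMMU_INIT_FINISH(my_iommu_detect, NULL, my_iommu_early_init,
                  my_iommu_late_init);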
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag,
                                 unsigned long attrs)
{
        unsigned long dma_mask;
        struct page *page;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag &= ~__GFP_ZERO;
again:
        page = NULL;
        /* CMA can be used only in the context which permits sleeping */
        if (gfpflags_allow_blocking(flag)) {
                page = dma_alloc_from_contiguous(dev, count, get_order(size));
                if (page && page_to_phys(page) + size > dma_mask) {
                        dma_release_from_contiguous(dev, page, count);
                        page = NULL;
                }
        }
        /* fallback */
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (addr + size > dma_mask) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }
        memset(page_address(page), 0, size);
        *dma_addr = addr;
        return page_address(page);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| fujita tomonori | 141 | 59.75% | 2 | 18.18% |
| akinobu mita | 44 | 18.64% | 2 | 18.18% |
| marek szyprowski | 40 | 16.95% | 3 | 27.27% |
| yang hongyang | 4 | 1.69% | 1 | 9.09% |
| mel gorman | 3 | 1.27% | 1 | 9.09% |
| krzysztof kozlowski | 2 | 0.85% | 1 | 9.09% |
| andrzej pietrasiewicz | 2 | 0.85% | 1 | 9.09% |
| Total | 236 | 100.00% | 11 | 100.00% |
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = virt_to_page(vaddr);

        if (!dma_release_from_contiguous(dev, page, count))
                free_pages((unsigned long)vaddr, get_order(size));
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| marek szyprowski | 69 | 97.18% | 1 | 50.00% |
| krzysztof kozlowski | 2 | 2.82% | 1 | 50.00% |
| Total | 71 | 100.00% | 2 | 100.00% |
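Drivers never call dma_generic_alloc_coherent() or dma_generic_free_coherent() directly; they reach them through the generic DMA API, which dispatches via the device's dma_map_ops (nommu_dma_ops by default here, per the dma_ops definition above). A minimal driver-side sketch, assuming a hypothetical device and a page-sized buffer:

#include <linux/dma-mapping.h>

static void *buf;
static dma_addr_t buf_dma;

static int my_driver_alloc(struct device *dev)  /* hypothetical */
{
        /* Ends up in dma_generic_alloc_coherent() when no hardware
           IOMMU claims the device; may retry with GFP_DMA if the
           first attempt lands above the device's DMA mask. */
        buf = dma_alloc_coherent(dev, PAGE_SIZE, &buf_dma, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        return 0;
}

static void my_driver_free(struct device *dev)
{
        /* Hands a CMA-backed buffer back to the contiguous allocator,
           otherwise frees the pages (see dma_generic_free_coherent()). */
        dma_free_coherent(dev, PAGE_SIZE, buf, buf_dma);
}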
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
        if (!*dev)
                *dev = &x86_dma_fallback_dev;

        *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
        *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

        if (!is_device_dma_capable(*dev))
                return false;
        return true;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| denys vlasenko | 33 | 50.00% | 1 | 25.00% |
| ville syrjala | 12 | 18.18% | 1 | 25.00% |
| jun'ichi nomura | 12 | 18.18% | 1 | 25.00% |
| christoph hellwig | 9 | 13.64% | 1 | 25.00% |
| Total | 66 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(arch_dma_alloc_attrs);
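arch_dma_alloc_attrs() is the hook the common dma_alloc_attrs() path in <linux/dma-mapping.h> calls before dispatching to ops->alloc: it substitutes x86_dma_fallback_dev for a NULL device and sanitizes the gfp flags. A simplified, hedged sketch of that caller (coherent-pool handling and DMA-debug bookkeeping omitted; the _sketch name is ours):

static inline void *dma_alloc_attrs_sketch(struct device *dev, size_t size,
                                           dma_addr_t *dma_handle, gfp_t flag,
                                           unsigned long attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        /* Lets the arch substitute the fallback device and rewrite
           the gfp mask; returns false for DMA-incapable devices. */
        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        return ops->alloc(dev, size, dma_handle, flag, attrs);
}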
/*
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
* parameter documentation.
*/
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif
                if (!strncmp(p, "pt", 2))
                        iommu_pass_through = 1;

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| glauber de oliveira costa | 316 | 94.61% | 2 | 50.00% |
| fenghua yu | 16 | 4.79% | 1 | 25.00% |
| david woodhouse | 2 | 0.60% | 1 | 25.00% |
| Total | 334 | 100.00% | 4 | 100.00% |
early_param("iommu", iommu_setup);
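Per the parser above (and Documentation/x86/x86_64/boot-options.txt), iommu= takes comma-separated tokens, each matched independently with strncmp(). A few illustrative boot lines (the combinations are examples, not recommendations):

iommu=off                   disable any IOMMU use (no_iommu = 1)
iommu=soft                  force the swiotlb bounce-buffer path (CONFIG_SWIOTLB)
iommu=pt                    pass-through: devices get untranslated DMA access
iommu=force,nomerge,nodac   tokens combine; each match fires in turn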
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_BIT_MASK(24))
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| glauber de oliveira costa | 84 | 76.36% | 1 | 16.67% |
| fujita tomonori | 12 | 10.91% | 2 | 33.33% |
| yang hongyang | 8 | 7.27% | 2 | 33.33% |
| greg kroah-hartman | 6 | 5.45% | 1 | 16.67% |
| Total | 110 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(dma_supported);
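dma_supported() is normally reached through dma_set_mask() or dma_set_coherent_mask() when a driver declares its addressing capability at probe time. A hedged sketch of how a 64-bit-capable device would exercise the policy above (the probe fragment and its name are hypothetical):

#include <linux/dma-mapping.h>

static int my_probe_fragment(struct device *dev)  /* hypothetical */
{
        /* dma_set_mask() fails when dma_supported() returns 0, e.g.
           when forbid_dac rejects >32-bit (DAC) addressing, or when
           iommu_sac_force pushes 40+ bit masks back to SAC. */
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
                return 0;

        /* Fall back to 32-bit single-address-cycle DMA. */
        return dma_set_mask(dev, DMA_BIT_MASK(32));
}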
static int __init pci_iommu_init(void)
{
        struct iommu_table_entry *p;
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
        x86_init.iommu.iommu_init();

        for (p = __iommu_table; p < __iommu_table_end; p++) {
                if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
                        p->late_init();
        }

        return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| konrad rzeszutek wilk | 35 | 46.05% | 1 | 16.67% |
| joerg roedel | 16 | 21.05% | 2 | 33.33% |
| fujita tomonori | 13 | 17.11% | 2 | 33.33% |
| glauber de oliveira costa | 12 | 15.79% | 1 | 16.67% |
| Total | 76 | 100.00% | 6 | 100.00% |
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
static void via_no_dac(struct pci_dev *dev)
{
        if (forbid_dac == 0) {
                dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
                forbid_dac = 1;
        }
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| fenghua yu | 26 | 78.79% | 1 | 50.00% |
| bjorn helgaas | 7 | 21.21% | 1 | 50.00% |
| Total | 33 | 100.00% | 2 | 100.00% |
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
                              PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif
Overall Contributors
| Person | Tokens | Token % | Commits | Commit % |
| glauber de oliveira costa | 541 | 44.38% | 9 | 15.00% |
| fujita tomonori | 177 | 14.52% | 9 | 15.00% |
| konrad rzeszutek wilk | 118 | 9.68% | 2 | 3.33% |
| marek szyprowski | 109 | 8.94% | 3 | 5.00% |
| fenghua yu | 64 | 5.25% | 3 | 5.00% |
| akinobu mita | 44 | 3.61% | 2 | 3.33% |
| denys vlasenko | 37 | 3.04% | 2 | 3.33% |
| joerg roedel | 36 | 2.95% | 6 | 10.00% |
| ville syrjala | 12 | 0.98% | 1 | 1.67% |
| yang hongyang | 12 | 0.98% | 3 | 5.00% |
| jun'ichi nomura | 12 | 0.98% | 1 | 1.67% |
| christoph hellwig | 10 | 0.82% | 1 | 1.67% |
| bjorn helgaas | 7 | 0.57% | 1 | 1.67% |
| greg kroah-hartman | 6 | 0.49% | 1 | 1.67% |
| yinghai lu | 5 | 0.41% | 1 | 1.67% |
| krzysztof kozlowski | 4 | 0.33% | 1 | 1.67% |
| mel gorman | 3 | 0.25% | 1 | 1.67% |
| david woodhouse | 3 | 0.25% | 2 | 3.33% |
| paul gortmaker | 3 | 0.25% | 1 | 1.67% |
| tejun heo | 3 | 0.25% | 1 | 1.67% |
| catalin marinas | 3 | 0.25% | 1 | 1.67% |
| andrzej pietrasiewicz | 2 | 0.16% | 1 | 1.67% |
| jan beulich | 2 | 0.16% | 1 | 1.67% |
| justin mattock | 1 | 0.08% | 1 | 1.67% |
| paul bolle | 1 | 0.08% | 1 | 1.67% |
| maarten lankhorst | 1 | 0.08% | 1 | 1.67% |
| dmitri vorobiev | 1 | 0.08% | 1 | 1.67% |
| ingo molnar | 1 | 0.08% | 1 | 1.67% |
| kay sievers | 1 | 0.08% | 1 | 1.67% |
| Total | 1219 | 100.00% | 60 | 100.00% |