Release 4.14 arch/x86/kernel/amd_gart_64.c
/*
* Dynamic DMA mapping support for AMD Hammer.
*
* Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
* This allows to use PCI devices that only support 32bit addresses on systems
* with more than 4GB.
*
* See Documentation/DMA-API-HOWTO.txt for the interface specification.
*
* Copyright 2002 Andi Kleen, SuSE Labs.
* Subject to the GNU General Public License v2 only.
*/
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/set_memory.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
/* GART remapping area (physical) */
static unsigned long iommu_bus_base;
/* size of remapping area bytes */
static unsigned long iommu_size;
/* .. and in pages */
static unsigned long iommu_pages;
/* Remapping table */
static u32 *iommu_gatt_base;
/* Poison bus address handed out when a mapping fails */
static dma_addr_t bad_dma_addr;
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
static int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;
/* GART PTE used for pages that are not mapped (set up in gart_iommu_init) */
static u32 gart_unmapped_entry;
/* GART PTE flag bits */
#define GPTE_VALID 1
#define GPTE_COHERENT 2
/* Pack a physical address plus flags into a 32-bit GART PTE, and back */
#define GPTE_ENCODE(x) \
(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
/* Pages reserved at the start of the GART as an out-of-space fallback area */
#define EMERGENCY_PAGES 32
/* = 128KB */
#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif
/* GART can only remap to physical addresses < 1TB */
#define GART_MAX_PHYS_ADDR (1ULL << 40)
/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
/* Next bitmap position to scan from; protected by iommu_bitmap_lock */
static unsigned long next_bit;
/* global flush state. set for each gart wrap */
static bool need_flush;
/*
 * Allocate @size contiguous pages in the GART remapping area for @dev,
 * honouring the device's DMA segment boundary and @align_mask.
 * Returns the starting page index within the GART, or -1 when full.
 */
static unsigned long alloc_iommu(struct device *dev, int size,
unsigned long align_mask)
{
unsigned long offset, flags;
unsigned long boundary_size;
unsigned long base_index;
/* Express the device's segment boundary constraints in GART pages. */
base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
PAGE_SIZE) >> PAGE_SHIFT;
boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
PAGE_SIZE) >> PAGE_SHIFT;
spin_lock_irqsave(&iommu_bitmap_lock, flags);
/* Next-fit: first try from where the previous allocation ended. */
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
size, base_index, boundary_size, align_mask);
if (offset == -1) {
/* Wrapped around: previously used entries may be reused, so a
 * GART TLB flush becomes necessary before the next mapping. */
need_flush = true;
offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
size, base_index, boundary_size,
align_mask);
}
if (offset != -1) {
next_bit = offset+size;
if (next_bit >= iommu_pages) {
next_bit = 0;
need_flush = true;
}
}
/* iommu_fullflush forces a flush after every mapping (see above). */
if (iommu_fullflush)
need_flush = true;
spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
return offset;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 103 | 58.19% | 4 | 40.00% |
FUJITA Tomonori | 67 | 37.85% | 2 | 20.00% |
Joerg Roedel | 3 | 1.69% | 1 | 10.00% |
Prarit Bhargava | 2 | 1.13% | 1 | 10.00% |
Ingo Molnar | 1 | 0.56% | 1 | 10.00% |
Mike Waychison | 1 | 0.56% | 1 | 10.00% |
Total | 177 | 100.00% | 10 | 100.00% |
/*
 * Return a range of GART pages to the allocation bitmap.  When the
 * freed range lies at or beyond the next-fit scan position, advance
 * the scan position past it so the freshly freed slots are preferred.
 */
static void free_iommu(unsigned long offset, int size)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&iommu_bitmap_lock, lock_flags);
	bitmap_clear(iommu_gart_bitmap, offset, size);
	if (next_bit <= offset)
		next_bit = offset + size;
	spin_unlock_irqrestore(&iommu_bitmap_lock, lock_flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 37 | 68.52% | 1 | 25.00% |
Joerg Roedel | 12 | 22.22% | 1 | 25.00% |
Andrew Morton | 4 | 7.41% | 1 | 25.00% |
Akinobu Mita | 1 | 1.85% | 1 | 25.00% |
Total | 54 | 100.00% | 4 | 100.00% |
/*
 * Flush the GART TLBs if (and only if) a flush is pending.  The global
 * need_flush flag is checked and cleared under iommu_bitmap_lock to
 * avoid races with concurrent flushers.
 */
static void flush_gart(void)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&iommu_bitmap_lock, irq_flags);
	if (!need_flush) {
		spin_unlock_irqrestore(&iommu_bitmap_lock, irq_flags);
		return;
	}
	amd_flush_garts();
	need_flush = false;
	spin_unlock_irqrestore(&iommu_bitmap_lock, irq_flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 38 | 92.68% | 4 | 57.14% |
Andrew Morton | 1 | 2.44% | 1 | 14.29% |
Joerg Roedel | 1 | 2.44% | 1 | 14.29% |
Hans Rosenfeld | 1 | 2.44% | 1 | 14.29% |
Total | 41 | 100.00% | 7 | 100.00% |
#ifdef CONFIG_IOMMU_LEAK
/* Debugging aid for drivers that don't free their IOMMU tables */
static int leak_trace;
static int iommu_leak_pages = 20;
/*
 * Dump the current stack and all DMA debug mappings, at most once per
 * boot, to help locate drivers that leak IOMMU entries.
 */
static void dump_leak(void)
{
	static int already_dumped;

	if (already_dumped)
		return;
	already_dumped = 1;

	show_stack(NULL, NULL);
	debug_dma_dump_mappings(NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Muli Ben-Yehuda | 16 | 48.48% | 1 | 20.00% |
Andi Kleen | 14 | 42.42% | 2 | 40.00% |
FUJITA Tomonori | 2 | 6.06% | 1 | 20.00% |
Joerg Roedel | 1 | 3.03% | 1 | 20.00% |
Total | 33 | 100.00% | 5 | 100.00% |
#endif
/*
 * Report that the IOMMU remapping area is exhausted for a mapping of
 * @size bytes in direction @dir, and panic when data corruption is
 * unavoidable (the mapping would overrun the emergency area).
 */
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */
	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);
	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		/*
		 * Fix: panic() takes a plain format string, not a printk
		 * one, so the former KERN_ERR prefix bytes would have been
		 * embedded verbatim in the panic message.
		 */
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 70 | 92.11% | 2 | 40.00% |
Greg Kroah-Hartman | 4 | 5.26% | 1 | 20.00% |
Muli Ben-Yehuda | 1 | 1.32% | 1 | 20.00% |
Ingo Molnar | 1 | 1.32% | 1 | 20.00% |
Total | 76 | 100.00% | 5 | 100.00% |
/* True when @addr/@size must go through the GART: either remapping is
 * globally forced, or the device cannot address the range directly. */
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	if (force_iommu)
		return 1;
	return !dma_capable(dev, addr, size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 23 | 71.88% | 2 | 50.00% |
FUJITA Tomonori | 9 | 28.12% | 2 | 50.00% |
Total | 32 | 100.00% | 4 | 100.00% |
/* True only when the device is genuinely unable to address @addr/@size
 * directly (ignores the force_iommu override). */
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	return dma_capable(dev, addr, size) ? 0 : 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 23 | 76.67% | 3 | 60.00% |
FUJITA Tomonori | 7 | 23.33% | 2 | 40.00% |
Total | 30 | 100.00% | 5 | 100.00% |
/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 * Returns the bus address, the original @phys_mem when the device can
 * address it directly and the GART is full, or bad_dma_addr on failure.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
size_t size, int dir, unsigned long align_mask)
{
unsigned long npages = iommu_num_pages(phys_mem, size, PAGE_SIZE);
unsigned long iommu_page;
int i;
/* The GART hardware cannot remap physical addresses at or above 1TB. */
if (unlikely(phys_mem + size > GART_MAX_PHYS_ADDR))
return bad_dma_addr;
iommu_page = alloc_iommu(dev, npages, align_mask);
if (iommu_page == -1) {
/* GART full, but the device can reach the memory anyway:
 * hand back the physical address unmapped. */
if (!nonforced_iommu(dev, phys_mem, size))
return phys_mem;
if (panic_on_overflow)
panic("dma_map_area overflow %lu bytes\n", size);
iommu_full(dev, size, dir);
return bad_dma_addr;
}
/* Fill one GART PTE per page of the region. */
for (i = 0; i < npages; i++) {
iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
phys_mem += PAGE_SIZE;
}
/* The loop advanced phys_mem by whole pages only, so its low bits
 * still hold the original intra-page offset. */
return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 127 | 78.88% | 3 | 33.33% |
Joerg Roedel | 24 | 14.91% | 2 | 22.22% |
FUJITA Tomonori | 9 | 5.59% | 3 | 33.33% |
Muli Ben-Yehuda | 1 | 0.62% | 1 | 11.11% |
Total | 161 | 100.00% | 9 | 100.00% |
/*
 * dma_map_ops .map_page callback: map one page-based region into the
 * IOMMU.  Returns the physical address unchanged when the device can
 * reach it directly and remapping is not forced.
 */
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long bus;

	if (dev == NULL)
		dev = &x86_dma_fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = dma_map_area(dev, paddr, size, dir, 0);
	flush_gart();

	return bus;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 30 | 32.97% | 3 | 30.00% |
FUJITA Tomonori | 29 | 31.87% | 2 | 20.00% |
Muli Ben-Yehuda | 25 | 27.47% | 1 | 10.00% |
Ingo Molnar | 3 | 3.30% | 1 | 10.00% |
Krzysztof Kozlowski | 2 | 2.20% | 1 | 10.00% |
Joerg Roedel | 1 | 1.10% | 1 | 10.00% |
Yinghai Lu | 1 | 1.10% | 1 | 10.00% |
Total | 91 | 100.00% | 10 | 100.00% |
/*
 * dma_map_ops .unmap_page callback: tear down a GART mapping.
 * Addresses outside the remapping window (including the reserved
 * emergency pages at its start) were never remapped and are ignored.
 */
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	unsigned long gart_page;
	int page_count;
	int idx;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	gart_page = (dma_addr - iommu_bus_base) >> PAGE_SHIFT;
	page_count = iommu_num_pages(dma_addr, size, PAGE_SIZE);

	/* Point the PTEs back at the scratch page, then free the range. */
	for (idx = 0; idx < page_count; idx++)
		iommu_gatt_base[gart_page + idx] = gart_unmapped_entry;

	free_iommu(gart_page, page_count);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jon Mason | 92 | 88.46% | 1 | 20.00% |
FUJITA Tomonori | 6 | 5.77% | 1 | 20.00% |
Joerg Roedel | 3 | 2.88% | 1 | 20.00% |
Krzysztof Kozlowski | 2 | 1.92% | 1 | 20.00% |
Yinghai Lu | 1 | 0.96% | 1 | 20.00% |
Total | 104 | 100.00% | 5 | 100.00% |
/*
 * dma_map_ops .unmap_sg callback: unmap every entry of a previously
 * mapped scatterlist.  A zero dma_length or length marks the end of
 * the mapped portion (entries merged away by gart_map_sg).
 */
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *entry;
	int idx;

	for_each_sg(sg, entry, nents, idx) {
		if (entry->dma_length == 0 || entry->length == 0)
			break;
		gart_unmap_page(dev, entry->dma_address, entry->dma_length,
				dir, 0);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Muli Ben-Yehuda | 39 | 50.65% | 1 | 12.50% |
Jens Axboe | 13 | 16.88% | 1 | 12.50% |
Andi Kleen | 10 | 12.99% | 1 | 12.50% |
FUJITA Tomonori | 6 | 7.79% | 2 | 25.00% |
Jon Mason | 5 | 6.49% | 1 | 12.50% |
Krzysztof Kozlowski | 3 | 3.90% | 1 | 12.50% |
Yinghai Lu | 1 | 1.30% | 1 | 12.50% |
Total | 77 | 100.00% | 8 | 100.00% |
/* Fallback for dma_map_sg in case of overflow: map each entry
 * individually with no merging.  Returns the number of mapped entries,
 * or 0 on failure (with any partial mappings undone). */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
int nents, int dir)
{
struct scatterlist *s;
int i;
#ifdef CONFIG_IOMMU_DEBUG
pr_debug("dma_map_sg overflow\n");
#endif
for_each_sg(sg, s, nents, i) {
unsigned long addr = sg_phys(s);
/* Only remap entries the device cannot address directly. */
if (nonforced_iommu(dev, addr, s->length)) {
addr = dma_map_area(dev, addr, s->length, dir, 0);
if (addr == bad_dma_addr) {
/* Out of IOMMU space: undo the entries mapped so
 * far and signal failure via nents == 0. */
if (i > 0)
gart_unmap_sg(dev, sg, i, dir, 0);
nents = 0;
sg[0].dma_length = 0;
break;
}
}
s->dma_address = addr;
s->dma_length = s->length;
}
flush_gart();
return nents;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 131 | 85.06% | 4 | 33.33% |
Jens Axboe | 16 | 10.39% | 2 | 16.67% |
FUJITA Tomonori | 4 | 2.60% | 3 | 25.00% |
Krzysztof Kozlowski | 1 | 0.65% | 1 | 8.33% |
Muli Ben-Yehuda | 1 | 0.65% | 1 | 8.33% |
Ingo Molnar | 1 | 0.65% | 1 | 8.33% |
Total | 154 | 100.00% | 12 | 100.00% |
/* Map multiple scatterlist entries continuous into the first.
 * @pages is the total GART page count of the run; on success the merged
 * mapping is stored in @sout.  Returns 0 on success, -1 when the GART
 * has no room for @pages contiguous entries. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		/*
		 * Fix: the inner per-entry page count used to be named
		 * 'pages', shadowing the function parameter that the
		 * final BUG_ON below checks.  Renamed to 'npages'.
		 */
		unsigned long npages, addr;

		/* Only the first entry of a merged run may carry an
		 * intra-page offset (see the merge test in gart_map_sg). */
		BUG_ON(s != start && s->offset);

		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		/* Fill one GART PTE per page of this entry. */
		addr = s->dma_address;
		npages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
		while (npages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	/* The caller's page count must match what we actually consumed. */
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 140 | 68.97% | 4 | 40.00% |
Andrew Morton | 32 | 15.76% | 2 | 20.00% |
Jens Axboe | 19 | 9.36% | 1 | 10.00% |
FUJITA Tomonori | 9 | 4.43% | 2 | 20.00% |
Joerg Roedel | 3 | 1.48% | 1 | 10.00% |
Total | 203 | 100.00% | 10 | 100.00% |
/*
 * Map a run of scatterlist entries, remapping through the GART only
 * when @need is set; otherwise the single entry is passed through with
 * its physical address.
 */
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (need)
		return __dma_map_cont(dev, start, nelems, sout, pages);

	/* Pass-through runs are never merged, so exactly one entry. */
	BUG_ON(nelems != 1);
	sout->dma_address = start->dma_address;
	sout->dma_length = start->length;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 60 | 75.95% | 3 | 42.86% |
FUJITA Tomonori | 11 | 13.92% | 2 | 28.57% |
Jon Mason | 4 | 5.06% | 1 | 14.29% |
Jens Axboe | 4 | 5.06% | 1 | 14.29% |
Total | 79 | 100.00% | 7 | 100.00% |
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 * Returns the number of (possibly merged) mapped entries, or 0 on
 * failure after falling back to per-entry mapping.
 */
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s, *ps, *start_sg, *sgmap;
int need = 0, nextneed, i, out, start;
unsigned long pages = 0;
unsigned int seg_size;
unsigned int max_seg_size;
if (nents == 0)
return 0;
if (!dev)
dev = &x86_dma_fallback_dev;
out = 0;		/* merged output entries emitted so far */
start = 0;		/* index of the first entry of the current run */
start_sg = sg;
sgmap = sg;		/* output position for the next merged entry */
seg_size = 0;		/* byte length of the current run */
max_seg_size = dma_get_max_seg_size(dev);
ps = NULL; /* shut up gcc */
for_each_sg(sg, s, nents, i) {
dma_addr_t addr = sg_phys(s);
s->dma_address = addr;
BUG_ON(s->length == 0);
nextneed = need_iommu(dev, addr, s->length);
/* Handle the previous not yet processed entries */
if (i > start) {
/*
 * Can only merge when the last chunk ends on a
 * page boundary and the new one doesn't have an
 * offset.
 */
if (!iommu_merge || !nextneed || !need || s->offset ||
(s->length + seg_size > max_seg_size) ||
(ps->offset + ps->length) % PAGE_SIZE) {
/* Flush the accumulated run as one merged mapping. */
if (dma_map_cont(dev, start_sg, i - start,
sgmap, pages, need) < 0)
goto error;
out++;
seg_size = 0;
sgmap = sg_next(sgmap);
pages = 0;
start = i;
start_sg = s;
}
}
seg_size += s->length;
need = nextneed;
pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE);
ps = s;
}
/* Map the final pending run. */
if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
goto error;
out++;
flush_gart();
/* Terminate the output list when entries were merged away. */
if (out < nents) {
sgmap = sg_next(sgmap);
sgmap->dma_length = 0;
}
return out;
error:
flush_gart();
gart_unmap_sg(dev, sg, out, dir, 0);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
out = dma_map_sg_nonforce(dev, sg, nents, dir);
if (out > 0)
return out;
}
if (panic_on_overflow)
panic("dma_map_sg: overflow on %lu pages\n", pages);
iommu_full(dev, pages << PAGE_SHIFT, dir);
/* Poison every entry so a driver using the result faults visibly. */
for_each_sg(sg, s, nents, i)
s->dma_address = bad_dma_addr;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 199 | 45.33% | 7 | 31.82% |
Jens Axboe | 81 | 18.45% | 2 | 9.09% |
FUJITA Tomonori | 50 | 11.39% | 5 | 22.73% |
Muli Ben-Yehuda | 45 | 10.25% | 1 | 4.55% |
Ingo Molnar | 22 | 5.01% | 2 | 9.09% |
Andrew Morton | 19 | 4.33% | 1 | 4.55% |
Kevin VanMaren | 16 | 3.64% | 1 | 4.55% |
Joerg Roedel | 4 | 0.91% | 2 | 9.09% |
Krzysztof Kozlowski | 3 | 0.68% | 1 | 4.55% |
Total | 439 | 100.00% | 22 | 100.00% |
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
gfp_t flag, unsigned long attrs)
{
dma_addr_t paddr;
unsigned long align_mask;
struct page *page;
if (force_iommu && !(flag & GFP_DMA)) {
/* Allocate ordinary pages and remap them through the GART,
 * naturally aligned to the allocation order. */
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
page = alloc_pages(flag | __GFP_ZERO, get_order(size));
if (!page)
return NULL;
align_mask = (1UL << get_order(size)) - 1;
paddr = dma_map_area(dev, page_to_phys(page), size,
DMA_BIDIRECTIONAL, align_mask);
flush_gart();
if (paddr != bad_dma_addr) {
*dma_addr = paddr;
return page_address(page);
}
/* Remapping failed: give the pages back. */
__free_pages(page, get_order(size));
} else
/* No forced remapping: use the generic coherent allocator. */
return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
attrs);
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 90 | 54.55% | 4 | 57.14% |
Joerg Roedel | 69 | 41.82% | 1 | 14.29% |
Andrzej Pietrasiewicz | 4 | 2.42% | 1 | 14.29% |
Krzysztof Kozlowski | 2 | 1.21% | 1 | 14.29% |
Total | 165 | 100.00% | 7 | 100.00% |
/* free a coherent mapping: tear down the GART remapping first, then
 * release the backing pages via the generic allocator. */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr, unsigned long attrs)
{
gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Joerg Roedel | 36 | 70.59% | 1 | 20.00% |
Akinobu Mita | 8 | 15.69% | 1 | 20.00% |
Krzysztof Kozlowski | 3 | 5.88% | 1 | 20.00% |
FUJITA Tomonori | 2 | 3.92% | 1 | 20.00% |
Andrzej Pietrasiewicz | 2 | 3.92% | 1 | 20.00% |
Total | 51 | 100.00% | 5 | 100.00% |
/* dma_map_ops .mapping_error callback: the poison address handed out
 * on mapping failure marks an error. */
static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == bad_dma_addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
static int no_agp;
/*
 * Decide how much of the aperture at @aper (of @aper_size bytes) the
 * IOMMU gets.  Defaults to the whole aperture, or half when AGP also
 * uses it, then shrinks so the area ends on a PMD-sized boundary.
 */
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long area_end;

	if (iommu_size == 0)
		iommu_size = no_agp ? aper_size : aper_size / 2;

	area_end = aper + iommu_size;
	iommu_size -= round_up(area_end, PMD_PAGE_SIZE) - area_end;

	if (iommu_size < 64*1024*1024)
		pr_warning(
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);

	return iommu_size;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 76 | 93.83% | 2 | 50.00% |
Ingo Molnar | 5 | 6.17% | 2 | 50.00% |
Total | 81 | 100.00% | 4 | 100.00% |
/*
 * Read the GART aperture base and size from a northbridge's PCI config
 * space.  Returns the base physical address (0 when invalid) and stores
 * the size in bytes through @size.
 */
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
unsigned aper_size = 0, aper_base_32, aper_order;
u64 aper_base;
pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
/* Bits 3:1 of the control register encode the size order. */
aper_order = (aper_order >> 1) & 7;
/* The base register holds bits 39:25 of the physical address. */
aper_base = aper_base_32 & 0x7fff;
aper_base <<= 25;
/* Sizes go from 32MB upward in powers of two. */
aper_size = (32 * 1024 * 1024) << aper_order;
/* NOTE(review): apertures reaching at/above 4GB are rejected here —
 * presumably the remapping window must stay below 4GB; confirm
 * against the aperture setup in aperture_64.c. */
if (aper_base + aper_size > 0x100000000UL || !aper_size)
aper_base = 0;
*size = aper_size;
return aper_base;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 97 | 94.17% | 1 | 25.00% |
Pavel Machek | 2 | 1.94% | 1 | 25.00% |
Ingo Molnar | 2 | 1.94% | 1 | 25.00% |
Andrew Hastings | 2 | 1.94% | 1 | 25.00% |
Total | 103 | 100.00% | 4 | 100.00% |
/*
 * Point every northbridge at the GATT and enable GART translation,
 * then flush the GART TLBs so no stale entries survive.
 */
static void enable_gart_translations(void)
{
	int node;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (node = 0; node < amd_nb_num(); node++) {
		struct pci_dev *nb_dev = node_to_amd_nb(node)->misc;

		enable_gart_translation(nb_dev, __pa(agp_gatt_table));
	}

	/* Flush the GART-TLB to remove stale entries */
	amd_flush_garts();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 42 | 67.74% | 1 | 20.00% |
Hans Rosenfeld | 12 | 19.35% | 2 | 40.00% |
Andreas Herrmann | 5 | 8.06% | 1 | 20.00% |
Joerg Roedel | 3 | 4.84% | 1 | 20.00% |
Total | 62 | 100.00% | 5 | 100.00% |
/*
* If fix_up_north_bridges is set, the north bridges have to be fixed up on
* resume in the same way as they are handled in gart_iommu_hole_init().
*/
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;
/*
 * Record the aperture parameters chosen at boot so gart_resume() can
 * restore them on the northbridges after suspend.
 */
void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
	fix_up_north_bridges = true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
/*
 * Restore the pre-suspend aperture size and base on every northbridge.
 * Only acts when set_up_gart_resume() recorded settings at boot.
 */
static void gart_fixup_northbridges(void)
{
	int node;

	if (!fix_up_north_bridges || !amd_nb_has_feature(AMD_NB_GART))
		return;

	pr_info("PCI-DMA: Restoring GART aperture settings\n");

	for (node = 0; node < amd_nb_num(); node++) {
		struct pci_dev *nb_dev = node_to_amd_nb(node)->misc;

		/*
		 * Don't enable translations just yet. That is the next
		 * step. Restore the pre-suspend aperture settings.
		 */
		gart_set_size_and_enable(nb_dev, aperture_order);
		pci_write_config_dword(nb_dev, AMD64_GARTAPERTUREBASE,
				       aperture_alloc >> 25);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rafael J. Wysocki | 48 | 61.54% | 2 | 28.57% |
Hans Rosenfeld | 11 | 14.10% | 1 | 14.29% |
Ingo Molnar | 9 | 11.54% | 1 | 14.29% |
Andreas Herrmann | 5 | 6.41% | 1 | 14.29% |
Pavel Machek | 4 | 5.13% | 1 | 14.29% |
Borislav Petkov | 1 | 1.28% | 1 | 14.29% |
Total | 78 | 100.00% | 7 | 100.00% |
/* syscore resume callback: restore aperture settings on the
 * northbridges and re-enable GART translation. */
static void gart_resume(void)
{
pr_info("PCI-DMA: Resuming GART IOMMU\n");
gart_fixup_northbridges();
enable_gart_translations();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 12 | 63.16% | 1 | 25.00% |
Rafael J. Wysocki | 6 | 31.58% | 2 | 50.00% |
Pavel Machek | 1 | 5.26% | 1 | 25.00% |
Total | 19 | 100.00% | 4 | 100.00% |
/* Re-enable GART translation on resume from suspend/hibernate. */
static struct syscore_ops gart_syscore_ops = {
.resume = gart_resume,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 * Reads the aperture from every northbridge (all must agree) and
 * allocates an uncached GATT.  Returns 0 on success, -1 when no usable
 * aperture exists.
 */
static __init int init_amd_gatt(struct agp_kern_info *info)
{
unsigned aper_size, gatt_size, new_aper_size;
unsigned aper_base, new_aper_base;
struct pci_dev *dev;
void *gatt;
int i;
pr_info("PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0;
dev = NULL;
for (i = 0; i < amd_nb_num(); i++) {
dev = node_to_amd_nb(i)->misc;
new_aper_base = read_aperture(dev, &new_aper_size);
if (!new_aper_base)
goto nommu;
if (!aper_base) {
aper_size = new_aper_size;
aper_base = new_aper_base;
}
/* Every northbridge must expose the same aperture. */
if (aper_size != new_aper_size || aper_base != new_aper_base)
goto nommu;
}
if (!aper_base)
goto nommu;
info->aper_base = aper_base;
info->aper_size = aper_size >> 20;
/* One 32-bit GART PTE per aperture page. */
gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(gatt_size));
if (!gatt)
panic("Cannot allocate GATT table");
/* The GART hardware reads the GATT directly, so map it uncached. */
if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
panic("Could not set GART PTEs to uncacheable pages");
agp_gatt_table = gatt;
register_syscore_ops(&gart_syscore_ops);
flush_gart();
pr_info("PCI-DMA: aperture base @ %x size %u KB\n",
aper_base, aper_size>>10);
return 0;
nommu:
/* Should not happen anymore */
pr_warning("PCI-DMA: More than 4GB of RAM and no IOMMU\n"
"falling back to iommu=soft.\n");
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 196 | 78.71% | 5 | 31.25% |
Joachim Deguara | 19 | 7.63% | 1 | 6.25% |
Ingo Molnar | 15 | 6.02% | 2 | 12.50% |
Hans Rosenfeld | 8 | 3.21% | 2 | 12.50% |
Pavel Machek | 5 | 2.01% | 2 | 12.50% |
Rafael J. Wysocki | 2 | 0.80% | 1 | 6.25% |
Joerg Roedel | 2 | 0.80% | 1 | 6.25% |
Andrew Morton | 1 | 0.40% | 1 | 6.25% |
Arjan van de Ven | 1 | 0.40% | 1 | 6.25% |
Total | 249 | 100.00% | 16 | 100.00% |
/* DMA operations installed as dma_ops when the GART IOMMU is in use. */
static const struct dma_map_ops gart_dma_ops = {
.map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg,
.map_page = gart_map_page,
.unmap_page = gart_unmap_page,
.alloc = gart_alloc_coherent,
.free = gart_free_coherent,
.mapping_error = gart_mapping_error,
.dma_supported = x86_dma_supported,
};
/*
 * Disable GART translation on every northbridge at kernel shutdown,
 * unless the AGP driver owns the GART.
 */
static void gart_iommu_shutdown(void)
{
	struct pci_dev *nb_dev;
	int node;

	/* don't shutdown it if there is AGP installed */
	if (!no_agp)
		return;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	for (node = 0; node < amd_nb_num(); node++) {
		u32 gart_ctl;

		nb_dev = node_to_amd_nb(node)->misc;
		pci_read_config_dword(nb_dev, AMD64_GARTAPERTURECTL, &gart_ctl);
		gart_ctl &= ~GARTEN;
		pci_write_config_dword(nb_dev, AMD64_GARTAPERTURECTL, gart_ctl);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 64 | 76.19% | 2 | 33.33% |
Hans Rosenfeld | 11 | 13.10% | 1 | 16.67% |
Andreas Herrmann | 5 | 5.95% | 1 | 16.67% |
Pavel Machek | 3 | 3.57% | 1 | 16.67% |
FUJITA Tomonori | 1 | 1.19% | 1 | 16.67% |
Total | 84 | 100.00% | 6 | 100.00% |
/*
 * Initialize the GART IOMMU: set up the remapping window, allocation
 * bitmap and scratch page, enable translation on all northbridges and
 * install gart_dma_ops.  Returns 0 in all cases; when the GART is
 * unusable the kernel simply falls back to another DMA implementation.
 */
int __init gart_iommu_init(void)
{
struct agp_kern_info info;
unsigned long iommu_start;
unsigned long aper_base, aper_size;
unsigned long start_pfn, end_pfn;
unsigned long scratch;
long i;
if (!amd_nb_has_feature(AMD_NB_GART))
return 0;
#ifndef CONFIG_AGP_AMD64
no_agp = 1;
#else
/* Makefile puts PCI initialization via subsys_initcall first. */
/* Add other AMD AGP bridge drivers here */
no_agp = no_agp ||
(agp_amd64_init() < 0) ||
(agp_copy_info(agp_bridge, &info) < 0);
#endif
/* Bail out when the GART is disabled, unnecessary or unusable. */
if (no_iommu ||
(!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
!gart_iommu_aperture ||
(no_agp && init_amd_gatt(&info) < 0)) {
if (max_pfn > MAX_DMA32_PFN) {
pr_warning("More than 4GB of memory but GART IOMMU not available.\n");
pr_warning("falling back to iommu=soft.\n");
}
return 0;
}
/* need to map that range */
aper_size = info.aper_size << 20;
aper_base = info.aper_base;
end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
start_pfn = PFN_DOWN(aper_base);
if (!pfn_range_is_mapped(start_pfn, end_pfn))
init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
pr_info("PCI-DMA: using GART IOMMU.\n");
iommu_size = check_iommu_size(info.aper_base, aper_size);
iommu_pages = iommu_size >> PAGE_SHIFT;
/* One bitmap bit per remapping page. */
iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
get_order(iommu_pages/8));
if (!iommu_gart_bitmap)
panic("Cannot allocate iommu bitmap\n");
#ifdef CONFIG_IOMMU_LEAK
if (leak_trace) {
int ret;
ret = dma_debug_resize_entries(iommu_pages);
if (ret)
pr_debug("PCI-DMA: Cannot trace all the entries\n");
}
#endif
/*
 * Out of IOMMU space handling.
 * Reserve some invalid pages at the beginning of the GART.
 */
bitmap_set(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
pr_info("PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
iommu_size >> 20);
agp_memory_reserved = iommu_size;
/* The IOMMU uses the top iommu_size bytes of the aperture. */
iommu_start = aper_size - iommu_size;
iommu_bus_base = info.aper_base + iommu_start;
bad_dma_addr = iommu_bus_base;
iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);
/*
 * Unmap the IOMMU part of the GART. The alias of the page is
 * always mapped with cache enabled and there is no full cache
 * coherency across the GART remapping. The unmapping avoids
 * automatic prefetches from the CPU allocating cache lines in
 * there. All CPU accesses are done via the direct mapping to
 * the backing memory. The GART address is only used by PCI
 * devices.
 */
set_memory_np((unsigned long)__va(iommu_bus_base),
iommu_size >> PAGE_SHIFT);
/*
 * Tricky. The GART table remaps the physical memory range,
 * so the CPU wont notice potential aliases and if the memory
 * is remapped to UC later on, we might surprise the PCI devices
 * with a stray writeout of a cacheline. So play it sure and
 * do an explicit, full-scale wbinvd() _after_ having marked all
 * the pages as Not-Present:
 */
wbinvd();
/*
 * Now all caches are flushed and we can safely enable
 * GART hardware. Doing it early leaves the possibility
 * of stale cache entries that can lead to GART PTE
 * errors.
 */
enable_gart_translations();
/*
 * Try to workaround a bug (thanks to BenH):
 * Set unmapped entries to a scratch page instead of 0.
 * Any prefetches that hit unmapped entries won't get a bus abort
 * then. (P2P bridge may be prefetching on DMA reads).
 */
scratch = get_zeroed_page(GFP_KERNEL);
if (!scratch)
panic("Cannot allocate iommu scratch page");
gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
iommu_gatt_base[i] = gart_unmapped_entry;
flush_gart();
dma_ops = &gart_dma_ops;
x86_platform.iommu_shutdown = gart_iommu_shutdown;
swiotlb = 0;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 264 | 63.31% | 12 | 30.00% |
Yinghai Lu | 65 | 15.59% | 3 | 7.50% |
FUJITA Tomonori | 28 | 6.71% | 5 | 12.50% |
Ingo Molnar | 18 | 4.32% | 3 | 7.50% |
Jon Mason | 11 | 2.64% | 2 | 5.00% |
Joerg Roedel | 6 | 1.44% | 3 | 7.50% |
Hans Rosenfeld | 6 | 1.44% | 2 | 5.00% |
Muli Ben-Yehuda | 6 | 1.44% | 1 | 2.50% |
Langsdorf, Mark | 4 | 0.96% | 1 | 2.50% |
Dave Jones | 3 | 0.72% | 2 | 5.00% |
Pavel Machek | 2 | 0.48% | 2 | 5.00% |
Andrew Morton | 2 | 0.48% | 2 | 5.00% |
Andreas Herrmann | 1 | 0.24% | 1 | 2.50% |
Akinobu Mita | 1 | 0.24% | 1 | 2.50% |
Total | 417 | 100.00% | 40 | 100.00% |
/*
 * Parse GART-specific kernel command line options.  Recognized:
 * leak[=N] (CONFIG_IOMMU_LEAK only), a bare number (IOMMU size),
 * fullflush, nofullflush, noagp, noaperture, force, allowed and
 * memaper[=order].
 */
void __init gart_parse_options(char *p)
{
int arg;
#ifdef CONFIG_IOMMU_LEAK
if (!strncmp(p, "leak", 4)) {
leak_trace = 1;
p += 4;
if (*p == '=')
++p;
if (isdigit(*p) && get_option(&p, &arg))
iommu_leak_pages = arg;
}
#endif
if (isdigit(*p) && get_option(&p, &arg))
iommu_size = arg;
if (!strncmp(p, "fullflush", 9))
iommu_fullflush = 1;
if (!strncmp(p, "nofullflush", 11))
iommu_fullflush = 0;
if (!strncmp(p, "noagp", 5))
no_agp = 1;
if (!strncmp(p, "noaperture", 10))
fix_aperture = 0;
/* duplicated from pci-dma.c */
if (!strncmp(p, "force", 5))
gart_iommu_aperture_allowed = 1;
if (!strncmp(p, "allowed", 7))
gart_iommu_aperture_allowed = 1;
if (!strncmp(p, "memaper", 7)) {
fallback_aper_force = 1;
p += 7;
if (*p == '=') {
++p;
if (get_option(&p, &arg))
fallback_aper_order = arg;
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 163 | 68.49% | 6 | 54.55% |
Muli Ben-Yehuda | 59 | 24.79% | 1 | 9.09% |
Linus Torvalds | 12 | 5.04% | 1 | 9.09% |
Joerg Roedel | 2 | 0.84% | 1 | 9.09% |
Sam Ravnborg | 1 | 0.42% | 1 | 9.09% |
Joe Perches | 1 | 0.42% | 1 | 9.09% |
Total | 238 | 100.00% | 11 | 100.00% |
/* Register gart_iommu_hole_init in the IOMMU detection table
 * (see asm/iommu_table.h for the ordering semantics). */
IOMMU_INIT_POST(gart_iommu_hole_init);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 1950 | 53.19% | 21 | 18.10% |
FUJITA Tomonori | 380 | 10.37% | 19 | 16.38% |
Muli Ben-Yehuda | 237 | 6.46% | 1 | 0.86% |
Joerg Roedel | 185 | 5.05% | 13 | 11.21% |
Rafael J. Wysocki | 137 | 3.74% | 2 | 1.72% |
Jens Axboe | 136 | 3.71% | 2 | 1.72% |
Yinghai Lu | 134 | 3.66% | 7 | 6.03% |
Jon Mason | 113 | 3.08% | 4 | 3.45% |
Ingo Molnar | 98 | 2.67% | 5 | 4.31% |
Andrew Morton | 64 | 1.75% | 4 | 3.45% |
Hans Rosenfeld | 49 | 1.34% | 2 | 1.72% |
Pavel Machek | 29 | 0.79% | 4 | 3.45% |
Joachim Deguara | 19 | 0.52% | 1 | 0.86% |
Andreas Herrmann | 17 | 0.46% | 2 | 1.72% |
Kevin VanMaren | 16 | 0.44% | 1 | 0.86% |
Krzysztof Kozlowski | 16 | 0.44% | 1 | 0.86% |
Linus Torvalds | 15 | 0.41% | 1 | 0.86% |
Akinobu Mita | 11 | 0.30% | 2 | 1.72% |
Christoph Hellwig | 8 | 0.22% | 2 | 1.72% |
Konrad Rzeszutek Wilk | 8 | 0.22% | 1 | 0.86% |
Andrzej Pietrasiewicz | 8 | 0.22% | 1 | 0.86% |
Langsdorf, Mark | 4 | 0.11% | 1 | 0.86% |
Greg Kroah-Hartman | 4 | 0.11% | 1 | 0.86% |
Thomas Gleixner | 4 | 0.11% | 1 | 0.86% |
Tejun Heo | 3 | 0.08% | 1 | 0.86% |
Alexey Dobriyan | 3 | 0.08% | 1 | 0.86% |
Dave Jones | 3 | 0.08% | 2 | 1.72% |
Andrew Hastings | 2 | 0.05% | 1 | 0.86% |
Prarit Bhargava | 2 | 0.05% | 1 | 0.86% |
Laura Abbott | 1 | 0.03% | 1 | 0.86% |
Mike Waychison | 1 | 0.03% | 1 | 0.86% |
Arun Sharma | 1 | 0.03% | 1 | 0.86% |
Jaswinder Singh Rajput | 1 | 0.03% | 1 | 0.86% |
Joe Perches | 1 | 0.03% | 1 | 0.86% |
Bart Van Assche | 1 | 0.03% | 1 | 0.86% |
Paul Bolle | 1 | 0.03% | 1 | 0.86% |
Borislav Petkov | 1 | 0.03% | 1 | 0.86% |
Sam Ravnborg | 1 | 0.03% | 1 | 0.86% |
Arjan van de Ven | 1 | 0.03% | 1 | 0.86% |
Adrian Bunk | 1 | 0.03% | 1 | 0.86% |
Total | 3666 | 100.00% | 116 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.