Release 4.9 lib/swiotlb.c
/*
* Dynamic DMA mapping support.
*
* This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
* Copyright (C) 2000, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing.
* 04/07/.. ak Better overflow handling. Assorted fixes.
* 05/09/10 linville Add support for syncing ranges, support syncing for
* DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
* 08/12/11 beckyb Add highmem support
*/
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>
#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
* Minimum IO TLB size to bother booting with. Systems with mainly
* 64bit capable cards will only lightly use the swiotlb. If we can't
* allocate a contiguous 1MB, we're probably in trouble anyway.
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
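As a quick worked example of the size constants above (a minimal sketch: IO_TLB_SHIFT and PAGE_SHIFT are not defined in this file, so the 2 KiB slab size and 4 KiB page size below are assumptions mirroring include/linux/swiotlb.h and a common architecture setup):

```c
#include <stdio.h>

/* Assumed values, not defined in lib/swiotlb.c itself. */
#define IO_TLB_SHIFT	11			/* 2 KiB per IO TLB slab */
#define PAGE_SHIFT	12			/* 4 KiB pages */

#define SLABS_PER_PAGE	(1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)

int main(void)
{
	printf("slabs per page : %d\n", SLABS_PER_PAGE);	/* 2 */
	printf("minimum slabs  : %d\n", IO_TLB_MIN_SLABS);	/* 512, i.e. 1 MiB */
	return 0;
}
```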
int swiotlb_force;
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static phys_addr_t io_tlb_start, io_tlb_end;
/*
* The number of IO TLB blocks (in groups of 64) between io_tlb_start and
* io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
*/
static unsigned long io_tlb_nslabs;
/*
* When the IOMMU overflows we return a fallback buffer. This sets the size.
*/
static unsigned long io_tlb_overflow = 32*1024;
static phys_addr_t io_tlb_overflow_buffer;
/*
* This is a free list describing the number of free entries available from
* each index
*/
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
static int late_alloc;
static int __init
setup_io_tlb_npages(char *str)
{
if (isdigit(*str)) {
io_tlb_nslabs = simple_strtoul(str, &str, 0);
/* avoid tail segment of size < IO_TLB_SEGSIZE */
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
if (*str == ',')
++str;
if (!strcmp(str, "force"))
swiotlb_force = 1;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andi kleen | 36 | 51.43% | 1 | 25.00% |
| linus torvalds | 23 | 32.86% | 1 | 25.00% |
| david mosberger | 10 | 14.29% | 1 | 25.00% |
| yinghai lu | 1 | 1.43% | 1 | 25.00% |
| Total | 70 | 100.00% | 4 | 100.00% |
early_param("swiotlb", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
unsigned long swiotlb_nr_tbl(void)
{
return io_tlb_nslabs;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| fujita tomonori | 10 | 90.91% | 1 | 50.00% |
| konrad rzeszutek wilk | 1 | 9.09% | 1 | 50.00% |
| Total | 11 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
unsigned long size;
size = io_tlb_nslabs << IO_TLB_SHIFT;
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| yinghai lu | 27 | 100.00% | 1 | 100.00% |
| Total | 27 | 100.00% | 1 | 100.00% |
/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
{
return phys_to_dma(hwdev, virt_to_phys(address));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| ian campbell | 19 | 70.37% | 1 | 33.33% |
| jeremy fitzhardinge | 7 | 25.93% | 1 | 33.33% |
| fujita tomonori | 1 | 3.70% | 1 | 33.33% |
| Total | 27 | 100.00% | 3 | 100.00% |
static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
unsigned char *vstart, *vend;
if (no_iotlb_memory) {
pr_warn("software IO TLB: No low mem\n");
return;
}
vstart = phys_to_virt(io_tlb_start);
vend = phys_to_virt(io_tlb_end);
printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
(unsigned long long)io_tlb_start,
(unsigned long long)io_tlb_end,
bytes >> 20, vstart, vend - 1);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| ian campbell | 35 | 44.30% | 1 | 16.67% |
| alexander duyck | 15 | 18.99% | 2 | 33.33% |
| yinghai lu | 12 | 15.19% | 1 | 16.67% |
| bjorn helgaas | 9 | 11.39% | 1 | 16.67% |
| fujita tomonori | 8 | 10.13% | 1 | 16.67% |
| Total | 79 | 100.00% | 6 | 100.00% |
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
void *v_overflow_buffer;
unsigned long i, bytes;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
io_tlb_start = __pa(tlb);
io_tlb_end = io_tlb_start + bytes;
/*
* Get the overflow emergency buffer
*/
v_overflow_buffer = memblock_virt_alloc_low_nopanic(
PAGE_ALIGN(io_tlb_overflow),
PAGE_SIZE);
if (!v_overflow_buffer)
return -ENOMEM;
io_tlb_overflow_buffer = __pa(v_overflow_buffer);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
PAGE_SIZE);
io_tlb_orig_addr = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
PAGE_SIZE);
for (i = 0; i < io_tlb_nslabs; i++) {
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
if (verbose)
swiotlb_print_info();
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 50 | 30.49% | 1 | 6.67% |
| jan beulich | 35 | 21.34% | 2 | 13.33% |
| alexander duyck | 29 | 17.68% | 2 | 13.33% |
| fujita tomonori | 21 | 12.80% | 2 | 13.33% |
| yinghai lu | 12 | 7.32% | 3 | 20.00% |
| david mosberger | 9 | 5.49% | 2 | 13.33% |
| santosh shilimkar | 5 | 3.05% | 1 | 6.67% |
| andi kleen | 2 | 1.22% | 1 | 6.67% |
| ian campbell | 1 | 0.61% | 1 | 6.67% |
| Total | 164 | 100.00% | 15 | 100.00% |
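The free-list initialization above stores, for each slot, the number of free slots from that index up to the end of its IO_TLB_SEGSIZE-aligned segment. A minimal user-space illustration of the resulting pattern, assuming IO_TLB_SEGSIZE is 128 as in include/linux/swiotlb.h:

```c
#include <stdio.h>

#define IO_TLB_SEGSIZE	128	/* assumed, from include/linux/swiotlb.h */
#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

int main(void)
{
	unsigned long i;

	/* Same formula as the init loop in swiotlb_init_with_tbl(). */
	for (i = 125; i < 131; i++)
		printf("io_tlb_list[%lu] = %lu\n",
		       i, IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE));
	/* Prints 3, 2, 1 at the end of the first segment, then 128, 127, 126
	 * at the start of the next: allocations never straddle a segment. */
	return 0;
}
```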
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
void __init
swiotlb_init(int verbose)
{
size_t default_size = IO_TLB_DEFAULT_SIZE;
unsigned char *vstart;
unsigned long bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* Get IO TLB memory from the low pages */
vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
if (io_tlb_start)
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
pr_warn("Cannot allocate SWIOTLB buffer");
no_iotlb_memory = true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| fujita tomonori | 56 | 52.83% | 2 | 22.22% |
| yinghai lu | 35 | 33.02% | 4 | 44.44% |
| alexander duyck | 8 | 7.55% | 1 | 11.11% |
| david mosberger | 4 | 3.77% | 1 | 11.11% |
| santosh shilimkar | 3 | 2.83% | 1 | 11.11% |
| Total | 106 | 100.00% | 9 | 100.00% |
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int
swiotlb_late_init_with_default_size(size_t default_size)
{
unsigned long bytes, req_nslabs = io_tlb_nslabs;
unsigned char *vstart = NULL;
unsigned int order;
int rc = 0;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
/*
* Get IO TLB memory from the low pages
*/
order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
order);
if (vstart)
break;
order--;
}
if (!vstart) {
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
}
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
if (rc)
free_pages((unsigned long)vstart, order);
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex williamson | 114 | 60.96% | 1 | 16.67% |
| konrad rzeszutek wilk | 40 | 21.39% | 1 | 16.67% |
| alexander duyck | 12 | 6.42% | 1 | 16.67% |
| jan beulich | 11 | 5.88% | 1 | 16.67% |
| fujita tomonori | 9 | 4.81% | 1 | 16.67% |
| jeremy fitzhardinge | 1 | 0.53% | 1 | 16.67% |
| Total | 187 | 100.00% | 6 | 100.00% |
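To make the retry loop in swiotlb_late_init_with_default_size() concrete: it starts from the default 64 MiB pool and drops the allocation order each time __get_free_pages() fails, until the pool would shrink below IO_TLB_MIN_SLABS. The sketch below assumes 4 KiB pages and 2 KiB slabs (neither constant is defined in this file) and uses a simplified stand-in for get_order():

```c
#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define IO_TLB_SHIFT	11	/* assumed 2 KiB slabs */
#define SLABS_PER_PAGE	(1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)

/* Simplified stand-in for the kernel's get_order(): smallest order such
 * that (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long nslabs = (64UL << 20) >> IO_TLB_SHIFT;	/* 32768 slabs */
	int order = get_order(nslabs << IO_TLB_SHIFT);		/* 14 for 64 MiB */

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		printf("order %2d -> %5d slabs (%3lu MiB)\n", order,
		       SLABS_PER_PAGE << order,
		       ((unsigned long)SLABS_PER_PAGE << order << IO_TLB_SHIFT) >> 20);
		order--;	/* the kernel only drops the order if the allocation fails */
	}
	return 0;
}
```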
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
unsigned long i, bytes;
unsigned char *v_overflow_buffer;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
io_tlb_start = virt_to_phys(tlb);
io_tlb_end = io_tlb_start + bytes;
memset(tlb, 0, bytes);
/*
* Get the overflow emergency buffer
*/
v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
get_order(io_tlb_overflow));
if (!v_overflow_buffer)
goto cleanup2;
io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs * sizeof(int)));
if (!io_tlb_list)
goto cleanup3;
io_tlb_orig_addr = (phys_addr_t *)
__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs *
sizeof(phys_addr_t)));
if (!io_tlb_orig_addr)
goto cleanup4;
for (i = 0; i < io_tlb_nslabs; i++) {
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
swiotlb_print_info();
late_alloc = 1;
return 0;
cleanup4:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
io_tlb_list = NULL;
cleanup3:
free_pages((unsigned long)v_overflow_buffer,
get_order(io_tlb_overflow));
io_tlb_overflow_buffer = 0;
cleanup2:
io_tlb_end = 0;
io_tlb_start = 0;
io_tlb_nslabs = 0;
return -ENOMEM;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alex williamson | 129 | 49.43% | 1 | 9.09% |
| alexander duyck | 52 | 19.92% | 3 | 27.27% |
| jan beulich | 42 | 16.09% | 2 | 18.18% |
| konrad rzeszutek wilk | 30 | 11.49% | 1 | 9.09% |
| fujita tomonori | 5 | 1.92% | 2 | 18.18% |
| becky bruce | 2 | 0.77% | 1 | 9.09% |
| ian campbell | 1 | 0.38% | 1 | 9.09% |
| Total | 261 | 100.00% | 11 | 100.00% |
void __init swiotlb_free(void)
{
if (!io_tlb_orig_addr)
return;
if (late_alloc) {
free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
get_order(io_tlb_overflow));
free_pages((unsigned long)io_tlb_orig_addr,
get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
free_pages((unsigned long)phys_to_virt(io_tlb_start),
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
memblock_free_late(io_tlb_overflow_buffer,
PAGE_ALIGN(io_tlb_overflow));
memblock_free_late(__pa(io_tlb_orig_addr),
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
memblock_free_late(__pa(io_tlb_list),
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
memblock_free_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
io_tlb_nslabs = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| fujita tomonori | 132 | 83.02% | 1 | 16.67% |
| yinghai lu | 12 | 7.55% | 1 | 16.67% |
| alexander duyck | 7 | 4.40% | 2 | 33.33% |
| konrad rzeszutek wilk | 4 | 2.52% | 1 | 16.67% |
| santosh shilimkar | 4 | 2.52% | 1 | 16.67% |
| Total | 159 | 100.00% | 6 | 100.00% |
int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| fujita tomonori | 17 | 100.00% | 2 | 100.00% |
| Total | 17 | 100.00% | 2 | 100.00% |
/*
* Bounce: copy the swiotlb buffer back to the original dma location
*/
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = phys_to_virt(tlb_addr);
if (PageHighMem(pfn_to_page(pfn))) {
/* The buffer does not have a mapping. Map it in and copy */
unsigned int offset = orig_addr & ~PAGE_MASK;
char *buffer;
unsigned int sz = 0;
unsigned long flags;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn));
if (dir == DMA_TO_DEVICE)
memcpy(vaddr, buffer + offset, sz);
else
memcpy(buffer + offset, vaddr, sz);
kunmap_atomic(buffer);
local_irq_restore(flags);
size -= sz;
pfn++;
vaddr += sz;
offset = 0;
}
} else if (dir == DMA_TO_DEVICE) {
memcpy(vaddr, phys_to_virt(orig_addr), size);
} else {
memcpy(phys_to_virt(orig_addr), vaddr, size);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jeremy fitzhardinge | 103 | 52.02% | 2 | 40.00% |
| becky bruce | 68 | 34.34% | 2 | 40.00% |
| alexander duyck | 27 | 13.64% | 1 | 20.00% |
| Total | 198 | 100.00% | 5 | 100.00% |
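When the original buffer lives in highmem, swiotlb_bounce() has to copy through temporary kmap_atomic() mappings one page at a time. The chunking arithmetic alone can be modelled in user space; the sketch below assumes a 4 KiB page size and replaces the kmap and memcpy with a printout:

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumed */
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Model of the highmem loop in swiotlb_bounce(): split a copy of 'size'
 * bytes starting at physical address 'orig_addr' into per-page chunks. */
static void bounce_chunks(unsigned long orig_addr, size_t size)
{
	unsigned long pfn = orig_addr / PAGE_SIZE;
	unsigned int offset = orig_addr & ~PAGE_MASK;

	while (size) {
		size_t sz = PAGE_SIZE - offset;

		if (sz > size)
			sz = size;	/* min_t(size_t, PAGE_SIZE - offset, size) */
		printf("copy %4zu bytes from pfn %lu offset %u\n", sz, pfn, offset);
		size -= sz;
		pfn++;
		offset = 0;
	}
}

int main(void)
{
	/* e.g. a 5000-byte buffer starting 3000 bytes into a page */
	bounce_chunks(3 * PAGE_SIZE + 3000, 5000);
	/* -> 1096 bytes from pfn 3, then 3904 bytes from pfn 4 */
	return 0;
}
```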
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t orig_addr, size_t size,
enum dma_data_direction dir)
{
unsigned long flags;
phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
/*
* Carefully handle integer overflow which can occur when mask == ~0UL.
*/
max_slots = mask + 1
? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
* For mappings greater than a page, we limit the stride (and
* hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
if (size > PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
BUG_ON(!nslots);
/*
* Find suitable number of IO TLB entries size that will fit this
* request and allocate a buffer from that IO TLB pool.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
index = 0;
wrap = index;
do {
while (iommu_is_span_boundary(index, nslots, offset_slots,
max_slots)) {
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
if (index == wrap)
goto not_found;
}
/*
* If we find a slot that indicates we have 'nslots' number of
* contiguous buffers, we allocate the buffers from that slot
* and mark the entries as '0' indicating unavailable.
*/
if (io_tlb_list[index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index + nslots); i++)
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in the next
* round.
*/
io_tlb_index = ((index + nslots) < io_tlb_nslabs
? (index + nslots) : 0);
goto found;
}
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
} while (index != wrap);
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
if (printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
* Save away the mapping from the original address to the DMA address.
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
return tlb_addr;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 246 | 53.71% | 1 | 4.35% |
| fujita tomonori | 83 | 18.12% | 3 | 13.04% |
| jan beulich | 30 | 6.55% | 2 | 8.70% |
| david mosberger | 21 | 4.59% | 2 | 8.70% |
| keir fraser | 17 | 3.71% | 1 | 4.35% |
| konrad rzeszutek wilk | 10 | 2.18% | 2 | 8.70% |
| alexander duyck | 10 | 2.18% | 1 | 4.35% |
| yinghai lu | 9 | 1.97% | 1 | 4.35% |
| andi kleen | 6 | 1.31% | 1 | 4.35% |
| stefano stabellini | 6 | 1.31% | 1 | 4.35% |
| becky bruce | 6 | 1.31% | 2 | 8.70% |
| jesse barnes | 4 | 0.87% | 1 | 4.35% |
| eric sesterhenn | 3 | 0.66% | 1 | 4.35% |
| jeremy fitzhardinge | 3 | 0.66% | 1 | 4.35% |
| andrew morton | 2 | 0.44% | 1 | 4.35% |
| ian campbell | 1 | 0.22% | 1 | 4.35% |
| tony luck | 1 | 0.22% | 1 | 4.35% |
| Total | 458 | 100.00% | 23 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
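The free-list bookkeeping in swiotlb_tbl_map_single() is compact but subtle: each io_tlb_list entry records how many contiguous free slots start at that index, never counting past an IO_TLB_SEGSIZE boundary, so the test io_tlb_list[index] >= nslots is a constant-time fit check. Below is a self-contained user-space model of just the allocation step, assuming IO_TLB_SEGSIZE is 128; it is an illustration, not kernel code:

```c
#include <stdio.h>

#define IO_TLB_SEGSIZE	128	/* assumed, from include/linux/swiotlb.h */
#define NSLABS		256	/* tiny pool, just for the demo */
#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

static unsigned int list[NSLABS];

/* Mirror of the "found a fit" branch: zero the allocated entries, then
 * renumber the free run immediately below so its counts stay correct. */
static void alloc_slots(int index, int nslots)
{
	int i, count = 0;

	for (i = index; i < index + nslots; i++)
		list[i] = 0;
	for (i = index - 1;
	     OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1 && list[i];
	     i--)
		list[i] = ++count;
}

int main(void)
{
	int i;

	/* Same initialization as swiotlb_init_with_tbl(). */
	for (i = 0; i < NSLABS; i++)
		list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);

	alloc_slots(64, 4);	/* take 4 slots starting at index 64 */

	for (i = 60; i < 70; i++)
		printf("list[%2d] = %3u\n", i, list[i]);
	/* 60..63 now read 4,3,2,1 (free run below the allocation),
	 * 64..67 read 0 (in use), 68 and 69 still read 60 and 59. */
	return 0;
}
```

The backward walk stops at a segment boundary or at the first in-use slot, keeping every count local to its own segment.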
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
enum dma_data_direction dir)
{
dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| fujita tomonori | 40 | 88.89% | 1 | 20.00% |
| konrad rzeszutek wilk | 2 | 4.44% | 1 | 20.00% |
| alexander duyck | 2 | 4.44% | 2 | 40.00% |
| alexandre courbot | 1 | 2.22% | 1 | 20.00% |
| Total | 45 | 100.00% | 5 | 100.00% |
/*
* tlb_addr is the physical address of the bounce buffer to unmap.
*/
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
if (orig_addr != INVALID_PHYS_ADDR &&
((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
{
count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
io_tlb_list[index + nslots] : 0);
/*
* Step 1: return the slots to the free list, merging the
* slots with succeeding slots
*/
for (i = index + nslots - 1; i >= index; i--) {
io_tlb_list[i] = ++count;
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
/*
* Step 2: merge the returned slots with the preceding slots,
* if available (non zero)
*/
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 150 | 70.42% | 1 | 7.69% |
| david mosberger | 23 | 10.80% | 2 | 15.38% |
| jan beulich | 11 | 5.16% | 1 | 7.69% |
| becky bruce | 9 | 4.23% | 2 | 15.38% |
| alexander duyck | 8 | 3.76% | 2 | 15.38% |
| jesse barnes | 6 | 2.82% | 1 | 7.69% |
| konrad rzeszutek wilk | 3 | 1.41% | 2 | 15.38% |
| jeremy fitzhardinge | 2 | 0.94% | 1 | 7.69% |
| andre goddard rosa | 1 | 0.47% | 1 | 7.69% |
| Total | 213 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
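swiotlb_tbl_unmap_single() is the mirror image of the allocation above: it first reads how many free slots directly follow the freed range (zero if the range ends on a segment boundary or the next slot is in use), then renumbers backwards through the freed range and the free run below it. Continuing the user-space model, again with IO_TLB_SEGSIZE assumed to be 128:

```c
#include <stdio.h>

#define IO_TLB_SEGSIZE	128	/* assumed, from include/linux/swiotlb.h */
#define NSLABS		256
#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int list[NSLABS];

static void free_slots(int index, int nslots)
{
	int i;
	/* free slots that start right after the freed range, if any */
	int count = (index + nslots) < (int)ALIGN(index + 1, IO_TLB_SEGSIZE)
		? list[index + nslots] : 0;

	/* Step 1: hand the freed slots back, merging with the run above. */
	for (i = index + nslots - 1; i >= index; i--)
		list[i] = ++count;
	/* Step 2: extend the counts of the free run below, if it exists. */
	for (i = index - 1;
	     OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1 && list[i];
	     i--)
		list[i] = ++count;
}

int main(void)
{
	int i;

	for (i = 0; i < NSLABS; i++)
		list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);

	/* pretend slots 64..67 are allocated (as in the previous example) ... */
	for (i = 64; i < 68; i++)
		list[i] = 0;
	for (i = 63; i >= 60; i--)
		list[i] = 64 - i;

	/* ... then free them again: the segment heals back to 128,127,...,1 */
	free_slots(64, 4);
	for (i = 60; i < 70; i++)
		printf("list[%2d] = %3u\n", i, list[i]);
	return 0;
}
```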
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
{
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
if (orig_addr == INVALID_PHYS_ADDR)
return;
orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr,
size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr,
size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
default:
BUG();
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| john w. linville | 41 | 27.33% | 1 | 6.67% |
| becky bruce | 33 | 22.00% | 2 | 13.33% |
| linus torvalds | 31 | 20.67% | 1 | 6.67% |
| alexander duyck | 11 | 7.33% | 1 | 6.67% |
| jan beulich | 8 | 5.33% | 2 | 13.33% |
| eric sesterhenn | 8 | 5.33% | 1 | 6.67% |
| jeremy fitzhardinge | 6 | 4.00% | 2 | 13.33% |
| david mosberger | 6 | 4.00% | 1 | 6.67% |
| konrad rzeszutek wilk | 5 | 3.33% | 3 | 20.00% |
| tony luck | 1 | 0.67% | 1 | 6.67% |
| Total | 150 | 100.00% | 15 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
if (ret) {
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
if (dev_addr + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
free_pages((unsigned long) ret, order);
ret = NULL;
}
}
if (!ret) {
/*
* We are either out of memory or the device can't DMA to
* GFP_DMA memory; fall back on map_single(), which
* will grab memory from the lowest available address range.
*/
phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
if (paddr == SWIOTLB_MAP_ERROR)
goto err_warn;
ret = phys_to_virt(paddr);
dev_addr = phys_to_dma(hwdev, paddr);
/* Confirm address can be DMA'd by device */
if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
swiotlb_tbl_unmap_single(hwdev, paddr,
size, DMA_TO_DEVICE);
goto err_warn;
}
}
*dma_handle = dev_addr;
memset(ret, 0, size);
return ret;
err_warn:
pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
dev_name(hwdev), size);
dump_stack();
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 54 | 22.04% | 1 | 4.35% |
| david mosberger | 42 | 17.14% | 2 | 8.70% |
| fujita tomonori | 35 | 14.29% | 5 | 21.74% |
| alexander duyck | 34 | 13.88% | 2 | 8.70% |
| joerg roedel | 26 | 10.61% | 1 | 4.35% |
| jan beulich | 14 | 5.71% | 3 | 13.04% |
| suresh siddha | 11 | 4.49% | 1 | 4.35% |
| jesse barnes | 11 | 4.49% | 1 | 4.35% |
| randy dunlap | 5 | 2.04% | 1 | 4.35% |
| jeremy fitzhardinge | 4 | 1.63% | 1 | 4.35% |
| yang hongyang | 4 | 1.63% | 1 | 4.35% |
| konrad rzeszutek wilk | 2 | 0.82% | 1 | 4.35% |
| al viro | 1 | 0.41% | 1 | 4.35% |
| ian campbell | 1 | 0.41% | 1 | 4.35% |
| becky bruce | 1 | 0.41% | 1 | 4.35% |
| Total | 245 | 100.00% | 23 | 100.00% |
EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
WARN_ON(irqs_disabled());
if (!is_swiotlb_buffer(paddr))
free_pages((unsigned long)vaddr, get_order(size));
else
/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| linus torvalds | 31 | 43.66% | 1 | 11.11% |
| fujita tomonori | 15 | 21.13% | 3 | 33.33% |
| suresh siddha | 14 | 19.72% | 1 | 11.11% |
| david brownell | 6 | 8.45% | 1 | 11.11% |
| david mosberger | 2 | 2.82% | 1 | 11.11% |
| konrad rzeszutek wilk | 2 | 2.82% | 1 | 11.11% |
| alexander duyck | 1 | 1.41% | 1 | 11.11% |
| Total | 71 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(swiotlb_free_coherent);
static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
int do_panic)
{
/*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately the drivers cannot handle this operation properly.
* unless they check for dma_mapping_error (most don't)
* When the mapping is small enough return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
"device %s\n", size, dev ? dev_name(dev) : "?");
if (size <= io_tlb_overflow || !do_panic)
return;
if (dir == DMA_BIDIRECTIONAL)
panic("DMA: Random memory could be DMA accessed\n");
if (dir == DMA_FROM_DEVICE)
panic("DMA: Random memory could be DMA written\n");
if (dir == DMA_TO_DEVICE)
panic("DMA: Random memory could be DMA read\n");
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| andi kleen | 58 | 69.88% | 1 | 14.29% |
| casey dahlin | 16 | 19.28% | 1 | 14.29% |
| kay sievers | 3 | 3.61% | 1 | 14.29% |
| tony luck | 2 | 2.41% | 1 | 14.29% |
| konrad rzeszutek wilk | 2 | 2.41% | 1 | 14.29% |
| jan beulich | 1 | 1.20% | 1 | 14.29% |
| jesse barnes | 1 | 1.20% | 1 | 14.29% |
| Total | 83 | 100.00% | 7 | 100.00% |
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
* either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
*/
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE);
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
return dev_addr;
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
/* Oh well, have to allocate and map a bounce buffer. */
map = map_sing