Release 4.12 lib/swiotlb.c
/*
* Dynamic DMA mapping support.
*
* This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
* Copyright (C) 2000, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing.
* 04/07/.. ak Better overflow handling. Assorted fixes.
* 05/09/10 linville Add support for syncing ranges, support syncing for
* DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
* 08/12/11 beckyb Add highmem support
*/
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>
#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
* Minimum IO TLB size to bother booting with. Systems with mainly
* 64bit capable cards will only lightly use the swiotlb. If we can't
* allocate a contiguous 1MB, we're probably in trouble anyway.
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
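For concreteness, the arithmetic behind these constants, assuming IO_TLB_SHIFT == 11 (2 KB slabs) and PAGE_SHIFT == 12 (4 KB pages), the usual x86 values:
/*
 * SLABS_PER_PAGE   = 1 << (12 - 11)  = 2 slabs per page
 * IO_TLB_MIN_SLABS = (1 << 20) >> 11 = 512 slabs, i.e. 1 MB
 */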
enum swiotlb_force swiotlb_force;
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static phys_addr_t io_tlb_start, io_tlb_end;
/*
* The number of IO TLB blocks (in groups of IO_TLB_SEGSIZE) between io_tlb_start and
* io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
*/
static unsigned long io_tlb_nslabs;
/*
* When the IOMMU overflows we return a fallback buffer. This sets the size.
*/
static unsigned long io_tlb_overflow = 32*1024;
static phys_addr_t io_tlb_overflow_buffer;
/*
* This is a free list describing the number of free entries available from
* each index
*/
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
* Max segment that we can provide which (if pages are contiguous) will
* not be bounced (unless SWIOTLB_FORCE is set).
*/
unsigned int max_segment;
/*
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
static int late_alloc;
static int __init
setup_io_tlb_npages(char *str)
{
if (isdigit(*str)) {
io_tlb_nslabs = simple_strtoul(str, &str, 0);
/* avoid tail segment of size < IO_TLB_SEGSIZE */
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
if (*str == ',')
++str;
if (!strcmp(str, "force")) {
swiotlb_force = SWIOTLB_FORCE;
} else if (!strcmp(str, "noforce")) {
swiotlb_force = SWIOTLB_NO_FORCE;
io_tlb_nslabs = 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 35 | 37.63% | 1 | 16.67% |
Geert Uytterhoeven | 24 | 25.81% | 2 | 33.33% |
Linus Torvalds | 23 | 24.73% | 1 | 16.67% |
David Mosberger-Tang | 10 | 10.75% | 1 | 16.67% |
Yinghai Lu | 1 | 1.08% | 1 | 16.67% |
Total | 93 | 100.00% | 6 | 100.00% |
early_param("swiotlb", setup_io_tlb_npages);
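As a hedged illustration of what this parser accepts (the slab count is an example value; sizes assume 2 KB slabs):
/*
 * swiotlb=65536          reserve 65536 slabs (128 MB), rounded up to
 *                        a multiple of IO_TLB_SEGSIZE
 * swiotlb=65536,force    same size, and bounce every DMA mapping
 * swiotlb=,noforce       never force bouncing; the table is shrunk
 *                        to a single slab
 */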
/* make io_tlb_overflow tunable too? */
unsigned long swiotlb_nr_tbl(void)
{
return io_tlb_nslabs;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 10 | 90.91% | 1 | 50.00% |
Konrad Rzeszutek Wilk | 1 | 9.09% | 1 | 50.00% |
Total | 11 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
unsigned int swiotlb_max_segment(void)
{
return max_segment;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_max_segment);
void swiotlb_set_max_segment(unsigned int val)
{
if (swiotlb_force == SWIOTLB_FORCE)
max_segment = 1;
else
max_segment = rounddown(val, PAGE_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
unsigned long size;
size = io_tlb_nslabs << IO_TLB_SHIFT;
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
{
return phys_to_dma(hwdev, virt_to_phys(address));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 19 | 70.37% | 1 | 33.33% |
Jeremy Fitzhardinge | 7 | 25.93% | 1 | 33.33% |
FUJITA Tomonori | 1 | 3.70% | 1 | 33.33% |
Total | 27 | 100.00% | 3 | 100.00% |
static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
unsigned char *vstart, *vend;
if (no_iotlb_memory) {
pr_warn("software IO TLB: No low mem\n");
return;
}
vstart = phys_to_virt(io_tlb_start);
vend = phys_to_virt(io_tlb_end);
printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
(unsigned long long)io_tlb_start,
(unsigned long long)io_tlb_end,
bytes >> 20, vstart, vend - 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 35 | 44.30% | 1 | 16.67% |
Alexander Duyck | 15 | 18.99% | 2 | 33.33% |
Yinghai Lu | 12 | 15.19% | 1 | 16.67% |
Björn Helgaas | 9 | 11.39% | 1 | 16.67% |
FUJITA Tomonori | 8 | 10.13% | 1 | 16.67% |
Total | 79 | 100.00% | 6 | 100.00% |
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
void *v_overflow_buffer;
unsigned long i, bytes;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
io_tlb_start = __pa(tlb);
io_tlb_end = io_tlb_start + bytes;
/*
* Get the overflow emergency buffer
*/
v_overflow_buffer = memblock_virt_alloc_low_nopanic(
PAGE_ALIGN(io_tlb_overflow),
PAGE_SIZE);
if (!v_overflow_buffer)
return -ENOMEM;
io_tlb_overflow_buffer = __pa(v_overflow_buffer);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
PAGE_SIZE);
io_tlb_orig_addr = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
PAGE_SIZE);
for (i = 0; i < io_tlb_nslabs; i++) {
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
if (verbose)
swiotlb_print_info();
swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 50 | 29.24% | 1 | 6.25% |
Jan Beulich | 35 | 20.47% | 2 | 12.50% |
Alexander Duyck | 29 | 16.96% | 2 | 12.50% |
FUJITA Tomonori | 21 | 12.28% | 2 | 12.50% |
Yinghai Lu | 12 | 7.02% | 3 | 18.75% |
David Mosberger-Tang | 9 | 5.26% | 2 | 12.50% |
Konrad Rzeszutek Wilk | 7 | 4.09% | 1 | 6.25% |
Santosh Shilimkar | 5 | 2.92% | 1 | 6.25% |
Andi Kleen | 2 | 1.17% | 1 | 6.25% |
Ian Campbell | 1 | 0.58% | 1 | 6.25% |
Total | 171 | 100.00% | 16 | 100.00% |
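The initialization loop in swiotlb_init_with_tbl() leaves each segment of the free list as a countdown. A sketch of the resulting pattern, assuming IO_TLB_SEGSIZE == 128 (its value in this tree):
/*
 * io_tlb_list[0]   = 128   (128 contiguous free slabs start here)
 * io_tlb_list[1]   = 127
 *   ...
 * io_tlb_list[127] = 1     (last slab of the first segment)
 * io_tlb_list[128] = 128   (the next segment starts over)
 */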
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
void __init
swiotlb_init(int verbose)
{
size_t default_size = IO_TLB_DEFAULT_SIZE;
unsigned char *vstart;
unsigned long bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* Get IO TLB memory from the low pages */
vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
if (io_tlb_start)
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
pr_warn("Cannot allocate SWIOTLB buffer");
no_iotlb_memory = true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 56 | 52.83% | 2 | 22.22% |
Yinghai Lu | 35 | 33.02% | 4 | 44.44% |
Alexander Duyck | 8 | 7.55% | 1 | 11.11% |
David Mosberger-Tang | 4 | 3.77% | 1 | 11.11% |
Santosh Shilimkar | 3 | 2.83% | 1 | 11.11% |
Total | 106 | 100.00% | 9 | 100.00% |
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int
swiotlb_late_init_with_default_size(size_t default_size)
{
unsigned long bytes, req_nslabs = io_tlb_nslabs;
unsigned char *vstart = NULL;
unsigned int order;
int rc = 0;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
/*
* Get IO TLB memory from the low pages
*/
order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
order);
if (vstart)
break;
order--;
}
if (!vstart) {
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
}
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
if (rc)
free_pages((unsigned long)vstart, order);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Williamson | 114 | 60.96% | 1 | 16.67% |
Konrad Rzeszutek Wilk | 40 | 21.39% | 1 | 16.67% |
Alexander Duyck | 12 | 6.42% | 1 | 16.67% |
Jan Beulich | 11 | 5.88% | 1 | 16.67% |
FUJITA Tomonori | 9 | 4.81% | 1 | 16.67% |
Jeremy Fitzhardinge | 1 | 0.53% | 1 | 16.67% |
Total | 187 | 100.00% | 6 | 100.00% |
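To make the back-off loop concrete, a worked example assuming the 64 MB default with 4 KB pages and 2 KB slabs:
/*
 * 64 MB -> get_order(64 MB) = 14, io_tlb_nslabs = 2 << 14 = 32768.
 * If the order-14 allocation fails, order 13 (32 MB) is tried, then
 * order 12, and so on; the loop gives up before the table would fall
 * to IO_TLB_MIN_SLABS (1 MB), so the smallest attempt is 2 MB.
 */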
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
unsigned long i, bytes;
unsigned char *v_overflow_buffer;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
io_tlb_start = virt_to_phys(tlb);
io_tlb_end = io_tlb_start + bytes;
memset(tlb, 0, bytes);
/*
* Get the overflow emergency buffer
*/
v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
get_order(io_tlb_overflow));
if (!v_overflow_buffer)
goto cleanup2;
io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs * sizeof(int)));
if (!io_tlb_list)
goto cleanup3;
io_tlb_orig_addr = (phys_addr_t *)
__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs *
sizeof(phys_addr_t)));
if (!io_tlb_orig_addr)
goto cleanup4;
for (i = 0; i < io_tlb_nslabs; i++) {
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
swiotlb_print_info();
late_alloc = 1;
swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
return 0;
cleanup4:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
io_tlb_list = NULL;
cleanup3:
free_pages((unsigned long)v_overflow_buffer,
get_order(io_tlb_overflow));
io_tlb_overflow_buffer = 0;
cleanup2:
io_tlb_end = 0;
io_tlb_start = 0;
io_tlb_nslabs = 0;
max_segment = 0;
return -ENOMEM;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Williamson | 129 | 47.43% | 1 | 8.33% |
Alexander Duyck | 52 | 19.12% | 3 | 25.00% |
Jan Beulich | 42 | 15.44% | 2 | 16.67% |
Konrad Rzeszutek Wilk | 41 | 15.07% | 2 | 16.67% |
FUJITA Tomonori | 5 | 1.84% | 2 | 16.67% |
Becky Bruce | 2 | 0.74% | 1 | 8.33% |
Ian Campbell | 1 | 0.37% | 1 | 8.33% |
Total | 272 | 100.00% | 12 | 100.00% |
void __init swiotlb_free(void)
{
if (!io_tlb_orig_addr)
return;
if (late_alloc) {
free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
get_order(io_tlb_overflow));
free_pages((unsigned long)io_tlb_orig_addr,
get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
free_pages((unsigned long)phys_to_virt(io_tlb_start),
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
memblock_free_late(io_tlb_overflow_buffer,
PAGE_ALIGN(io_tlb_overflow));
memblock_free_late(__pa(io_tlb_orig_addr),
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
memblock_free_late(__pa(io_tlb_list),
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
memblock_free_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
io_tlb_nslabs = 0;
max_segment = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 132 | 80.98% | 1 | 14.29% |
Yinghai Lu | 12 | 7.36% | 1 | 14.29% |
Konrad Rzeszutek Wilk | 8 | 4.91% | 2 | 28.57% |
Alexander Duyck | 7 | 4.29% | 2 | 28.57% |
Santosh Shilimkar | 4 | 2.45% | 1 | 14.29% |
Total | 163 | 100.00% | 7 | 100.00% |
int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 17 | 100.00% | 2 | 100.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
* Bounce: copy data between the original dma location and the swiotlb buffer
*/
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = phys_to_virt(tlb_addr);
if (PageHighMem(pfn_to_page(pfn))) {
/* The buffer does not have a mapping. Map it in and copy */
unsigned int offset = orig_addr & ~PAGE_MASK;
char *buffer;
unsigned int sz = 0;
unsigned long flags;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn));
if (dir == DMA_TO_DEVICE)
memcpy(vaddr, buffer + offset, sz);
else
memcpy(buffer + offset, vaddr, sz);
kunmap_atomic(buffer);
local_irq_restore(flags);
size -= sz;
pfn++;
vaddr += sz;
offset = 0;
}
} else if (dir == DMA_TO_DEVICE) {
memcpy(vaddr, phys_to_virt(orig_addr), size);
} else {
memcpy(phys_to_virt(orig_addr), vaddr, size);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 102 | 51.52% | 2 | 40.00% |
Becky Bruce | 69 | 34.85% | 2 | 40.00% |
Alexander Duyck | 27 | 13.64% | 1 | 20.00% |
Total | 198 | 100.00% | 5 | 100.00% |
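A worked example of the highmem path above, assuming 4 KB pages:
/*
 * orig_addr offset in page = 0xc00 (3 KB), size = 2 KB:
 *   pass 1: sz = PAGE_SIZE - 0xc00 = 1 KB, copied under kmap_atomic()
 *   pass 2: pfn++, offset = 0, sz = the remaining 1 KB from the
 *           start of the next page
 */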
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t orig_addr, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
unsigned long flags;
phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
/*
* Carefully handle integer overflow which can occur when mask == ~0UL.
*/
max_slots = mask + 1
? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
* For mappings greater than or equal to a page, we limit the stride
* (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
BUG_ON(!nslots);
/*
* Find suitable number of IO TLB entries size that will fit this
* request and allocate a buffer from that IO TLB pool.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
index = 0;
wrap = index;
do {
while (iommu_is_span_boundary(index, nslots, offset_slots,
max_slots)) {
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
if (index == wrap)
goto not_found;
}
/*
* If we find a slot that indicates we have 'nslots' number of
* contiguous buffers, we allocate the buffers from that slot
* and mark the entries as '0' indicating unavailable.
*/
if (io_tlb_list[index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index + nslots); i++)
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in the next
* round.
*/
io_tlb_index = ((index + nslots) < io_tlb_nslabs
? (index + nslots) : 0);
goto found;
}
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
} while (index != wrap);
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
if (printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
* Save away the mapping from the original address to the DMA address.
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
return tlb_addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 245 | 52.02% | 1 | 4.00% |
FUJITA Tomonori | 83 | 17.62% | 3 | 12.00% |
Jan Beulich | 30 | 6.37% | 2 | 8.00% |
Alexander Duyck | 23 | 4.88% | 2 | 8.00% |
David Mosberger-Tang | 21 | 4.46% | 2 | 8.00% |
Keir Fraser | 17 | 3.61% | 1 | 4.00% |
Konrad Rzeszutek Wilk | 10 | 2.12% | 2 | 8.00% |
Yinghai Lu | 9 | 1.91% | 1 | 4.00% |
Stefano Stabellini | 6 | 1.27% | 1 | 4.00% |
Andi Kleen | 6 | 1.27% | 1 | 4.00% |
Becky Bruce | 6 | 1.27% | 2 | 8.00% |
Jesse Barnes | 3 | 0.64% | 1 | 4.00% |
Eric Sesterhenn / Snakebyte | 3 | 0.64% | 1 | 4.00% |
Jeremy Fitzhardinge | 3 | 0.64% | 1 | 4.00% |
Andrew Morton | 2 | 0.42% | 1 | 4.00% |
Nikita Yushchenko | 2 | 0.42% | 1 | 4.00% |
Ian Campbell | 1 | 0.21% | 1 | 4.00% |
Tony Luck | 1 | 0.21% | 1 | 4.00% |
Total | 471 | 100.00% | 25 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
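To illustrate the bookkeeping in swiotlb_tbl_map_single(), a sketch assuming a request of nslots == 4 lands at index 8 of a fresh segment (IO_TLB_SEGSIZE == 128 assumed):
/*
 * Before: io_tlb_list[i] = 128 - i for this segment, so
 *   [0..7] = 128..121, [8..11] = 120..117, [12] = 116, ...
 * After allocating 4 slots at index 8:
 *   [8..11] = 0                     slots now in use
 *   [7] = 1, [6] = 2, ..., [0] = 8  the free run before the hole is
 *                                   re-counted up to the allocation
 * Entries from [12] onward are untouched, and io_tlb_index becomes 12.
 */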
/*
* Allocates a bounce buffer and returns its physical address.
*/
static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t start_dma_addr;
if (swiotlb_force == SWIOTLB_NO_FORCE) {
dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
&phys);
return SWIOTLB_MAP_ERROR;
}
start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
dir, attrs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 38 | 51.35% | 1 | 14.29% |
Geert Uytterhoeven | 25 | 33.78% | 1 | 14.29% |
Alexander Duyck | 8 | 10.81% | 3 | 42.86% |
Konrad Rzeszutek Wilk | 2 | 2.70% | 1 | 14.29% |
Alexandre Courbot | 1 | 1.35% | 1 | 14.29% |
Total | 74 | 100.00% | 7 | 100.00% |
/*
* tlb_addr is the physical address of the bounce buffer to unmap.
*/
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
if (orig_addr != INVALID_PHYS_ADDR &&
!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
{
count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
io_tlb_list[index + nslots] : 0);
/*
* Step 1: return the slots to the free list, merging the
* slots with succeeding slots
*/
for (i = index + nslots - 1; i >= index; i--) {
io_tlb_list[i] = ++count;
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
/*
* Step 2: merge the returned slots with the preceding slots,
* if available (non zero)
*/
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 150 | 66.96% | 1 | 7.14% |
David Mosberger-Tang | 23 | 10.27% | 2 | 14.29% |
Alexander Duyck | 19 | 8.48% | 3 | 21.43% |
Jan Beulich | 11 | 4.91% | 1 | 7.14% |
Becky Bruce | 9 | 4.02% | 2 | 14.29% |
Jesse Barnes | 6 | 2.68% | 1 | 7.14% |
Konrad Rzeszutek Wilk | 3 | 1.34% | 2 | 14.29% |
Jeremy Fitzhardinge | 2 | 0.89% | 1 | 7.14% |
André Goddard Rosa | 1 | 0.45% | 1 | 7.14% |
Total | 224 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
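Continuing the sketch above, freeing those 4 slots merges the hole with both neighbours:
/*
 * count starts at io_tlb_list[12] = 116 (the free run after the hole).
 * Step 1: [11] = 117, [10] = 118, [9] = 119, [8] = 120
 * Step 2: [7] = 121, ..., [0] = 128
 * restoring the original fully-free countdown for the segment.
 */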
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
{
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
if (orig_addr == INVALID_PHYS_ADDR)
return;
orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr,
size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr,
size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
default:
BUG();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John W. Linville | 41 | 27.33% | 1 | 6.67% |
Becky Bruce | 32 | 21.33% | 2 | 13.33% |
Linus Torvalds | 31 | 20.67% | 1 | 6.67% |
Alexander Duyck | 11 | 7.33% | 1 | 6.67% |
Eric Sesterhenn / Snakebyte | 8 | 5.33% | 1 | 6.67% |
Jan Beulich | 8 | 5.33% | 2 | 13.33% |
Jeremy Fitzhardinge | 7 | 4.67% | 2 | 13.33% |
David Mosberger-Tang | 6 | 4.00% | 1 | 6.67% |
Konrad Rzeszutek Wilk | 5 | 3.33% | 3 | 20.00% |
Tony Luck | 1 | 0.67% | 1 | 6.67% |
Total | 150 | 100.00% | 15 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
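Summarizing the direction/target matrix enforced above (a restatement of the switch, not new behaviour):
/*
 * SYNC_FOR_CPU    + DMA_FROM_DEVICE/DMA_BIDIRECTIONAL: copy the bounce
 *                   buffer back to orig_addr
 * SYNC_FOR_CPU    + DMA_TO_DEVICE:   nothing to copy (BUG otherwise)
 * SYNC_FOR_DEVICE + DMA_TO_DEVICE/DMA_BIDIRECTIONAL: copy orig_addr
 *                   out to the bounce buffer
 * SYNC_FOR_DEVICE + DMA_FROM_DEVICE: nothing to copy (BUG otherwise)
 */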
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
u64 dma_mask = DMA_BIT_MASK(32);
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;
ret = (void *)__get_free_pages(flags, order);
if (ret) {
dev_addr = swiotlb_virt_to_bus(hwdev, ret);
if (dev_addr + size - 1 > dma_mask) {
/*
* The allocated memory isn't reachable by the device.
*/
free_pages((unsigned long) ret, order);
ret = NULL;
}
}
if (!ret) {
/*
* We are either out of memory or the device can't DMA to
* GFP_DMA memory; fall back on map_single(), which
* will grab memory from the lowest available address range.
*/
phys_addr_t paddr = map_single(hwdev, 0, size,
DMA_FROM_DEVICE, 0);
if (paddr == SWIOTLB_MAP_ERROR)
goto err_warn;
ret = phys_to_virt(paddr);
dev_addr = phys_to_dma(hwdev, paddr);
/* Confirm address can be DMA'd by device */
if (dev_addr + size - 1 > dma_mask) {
printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
(unsigned long long)dma_mask,
(unsigned long long)dev_addr);
/*
* DMA_TO_DEVICE to avoid memcpy in unmap_single.
* The DMA_ATTR_SKIP_CPU_SYNC is optional.
*/
swiotlb_tbl_unmap_single(hwdev, paddr,
size, DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
goto err_warn;
}
}
*dma_handle = dev_addr;
memset(ret, 0, size);
return ret;
err_warn:
pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
dev_name(hwdev), size);
dump_stack();
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 55 | 22.09% | 1 | 4.17% |
David Mosberger-Tang | 42 | 16.87% | 2 | 8.33% |
Alexander Duyck | 39 | 15.66% | 3 | 12.50% |
FUJITA Tomonori | 34 | 13.65% | 5 | 20.83% |
Joerg Roedel | 26 | 10.44% | 1 | 4.17% |
Suresh B. Siddha | 11 | 4.42% | 1 | 4.17% |
Jesse Barnes | 11 | 4.42% | 1 | 4.17% |
Jan Beulich | 11 | 4.42% | 2 | 8.33% |
Randy Dunlap | 5 | 2.01% | 1 | 4.17% |
Jeremy Fitzhardinge | 4 | 1.61% | 1 | 4.17% |
Yang Hongyang | 4 | 1.61% | 1 | 4.17% |
Konrad Rzeszutek Wilk | 2 | 0.80% | 1 | 4.17% |
Tony Luck | 2 | 0.80% | 1 | 4.17% |
Al Viro | 1 | 0.40% | 1 | 4.17% |
Ian Campbell | 1 | 0.40% | 1 | 4.17% |
Becky Bruce | 1 | 0.40% | 1 | 4.17% |
Total | 249 | 100.00% | 24 | 100.00% |
EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
WARN_ON(irqs_disabled());
if (!is_swiotlb_buffer(paddr))
free_pages((unsigned long)vaddr, get_order(size));
else
/*
* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
* DMA_ATTR_SKIP_CPU_SYNC is optional.
*/
swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 31 | 42.47% | 1 | 10.00% |
FUJITA Tomonori | 15 | 20.55% | 3 | 30.00% |
Suresh B. Siddha | 14 | 19.18% | 1 | 10.00% |
David Brownell | 6 | 8.22% | 1 | 10.00% |
Alexander Duyck | 4 | 5.48% | 2 | 20.00% |
David Mosberger-Tang | 2 | 2.74% | 1 | 10.00% |
Konrad Rzeszutek Wilk | 1 | 1.37% | 1 | 10.00% |
Total | 73 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(swiotlb_free_coherent);
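A minimal usage sketch for the coherent pair, assuming a caller-supplied dev and size; the buffer returned is already zeroed by the code above:

	void *vaddr;
	dma_addr_t handle;

	vaddr = swiotlb_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	/* ... CPU uses vaddr, the device uses handle ... */
	swiotlb_free_coherent(dev, size, vaddr, handle);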
static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
int do_panic)
{
if (swiotlb_force == SWIOTLB_NO_FORCE)
return;
/*
* Ran out of IOMMU space for this operation. This is very bad.
* Unfortunately the drivers cannot handle this operation properly
* unless they check for dma_mapping_error (most don't).
* When the mapping is small enough, return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
size);
if (size <= io_tlb_overflow || !do_panic)
return;
if (dir == DMA_BIDIRECTIONAL)
panic("DMA: Random memory could be DMA accessed\n");
if (dir == DMA_FROM_DEVICE)
panic("DMA: Random memory could be DMA written\n");
if (dir == DMA_TO_DEVICE)
panic("DMA: Random memory could be DMA read\n");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 50 | 61.73% | 1 | 16.67% |
Casey Dahlin | 16 | 19.75% | 1 | 16.67% |
Geert Uytterhoeven | 11 | 13.58% | 2 | 33.33% |
Konrad Rzeszutek Wilk | 2 | 2.47% | 1 | 16.67% |
Tony Luck | 2 | 2.47% | 1 | 16.67% |
Total | 81 | 100.00% | 6 | 100.00% |
/*
* Map a single buffer of the indicated size for DMA in streaming mode. The
* physical address to use is returned.
*
* Once the device is given the dma address, the device owns this memory until
* either swiotlb_unmap_page or a swiotlb_sync_single_* call is performed.
*/
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE);
/*
* If the address happens to be in the device's DMA window,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
return dev_addr;
trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
/* Oh well, have to allocate and map a bounce buffer. */
map = map_single(dev, phys, size, dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
swiotlb_full(dev, size, dir, 1);
return phys_to_dma(dev, io_tlb_overflow_buffer);
}
dev_addr = phys_to_dma(dev, map);
/* Ensure that the address returned is DMA'ble */
if (dma_capable(dev, dev_addr, size))
return dev_addr;
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
return phys_to_dma(dev, io_tlb_overflow_buffer);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 52 | 28.73% | 6 | 27.27% |
Linus Torvalds | 38 | 20.99% | 1 | 4.55% |
Andi Kleen | 29 | 16.02% | 1 | 4.55% |
Alexander Duyck | 28 | 15.47% | 4 | 18.18% |
Zoltan Kiss | 11 | 6.08% | 1 | 4.55% |
David Mosberger-Tang | 9 | 4.97% | 1 | 4.55% |
Eric Sesterhenn / Snakebyte | 3 | 1.66% | 1 | 4.55% |
Krzysztof Kozlowski | 2 | 1.10% | 1 | 4.55% |
Jeremy Fitzhardinge | 2 | 1.10% | 1 | 4.55% |
Arthur Kepner | 2 | 1.10% | 1 | 4.55% |
Geert Uytterhoeven | 2 | 1.10% | 1 | 4.55% |
Jan Beulich | 1 | 0.55% | 1 | 4.55% |
Becky Bruce | 1 | 0.55% | 1 | 4.55% |
Tony Luck | 1 | 0.55% | 1 | 4.55% |
Total | 181 | 100.00% | 22 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
* match what was provided in a previous swiotlb_map_page call. All
* other usages are undefined.
*
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
return;
}
if (dir != DMA_FROM_DEVICE)
return;
/*
* phys_to_virt doesn't work with highmem pages, but we could
* call dma_mark_clean() with a highmem page here. However, we
* are fine since dma_mark_clean() is null on POWERPC. We can
* make dma_mark_clean() take a physical address if necessary.
*/
dma_mark_clean(phys_to_virt(paddr), size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 39 | 46.99% | 1 | 8.33% |
FUJITA Tomonori | 13 | 15.66% | 3 | 25.00% |
David Mosberger-Tang | 9 | 10.84% | 1 | 8.33% |
Becky Bruce | 9 | 10.84% | 2 | 16.67% |
Alexander Duyck | 6 | 7.23% | 1 | 8.33% |
Konrad Rzeszutek Wilk | 3 | 3.61% | 2 | 16.67% |
Eric Sesterhenn / Snakebyte | 3 | 3.61% | 1 | 8.33% |
Jan Beulich | 1 | 1.20% | 1 | 8.33% |
Total | 83 | 100.00% | 12 | 100.00% |
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
unmap_single(hwdev, dev_addr, size, dir, attrs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Becky Bruce | 33 | 89.19% | 1 | 33.33% |
Alexander Duyck | 2 | 5.41% | 1 | 33.33% |
Krzysztof Kozlowski | 2 | 5.41% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
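A minimal streaming-mode sketch, assuming caller-supplied dev, page, offset and len; real drivers normally reach these entry points through the generic DMA API (dma_map_page() and friends) rather than calling them directly:

	dma_addr_t handle;

	handle = swiotlb_map_page(dev, page, offset, len, DMA_TO_DEVICE, 0);
	if (swiotlb_dma_mapping_error(dev, handle))
		return -ENOMEM;	/* mapping fell back to the overflow buffer */
	/* ... hand handle to the device and wait for the transfer ... */
	swiotlb_unmap_page(dev, handle, len, DMA_TO_DEVICE, 0);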
/*
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
*
* If you perform a swiotlb_map_page() but wish to interrogate the buffer
* using the cpu, yet do not wish to tear down the dma mapping, you must
* call this function before doing so. At the next point you give the dma
* address back to the card, you must first perform a
* swiotlb_sync_single_for_device, and then the device again owns the buffer.
*/
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
if (is_swiotlb_buffer(paddr)) {
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
return;
}
if (dir != DMA_FROM_DEVICE)
return;
dma_mark_clean(phys_to_virt(paddr), size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 47 | 57.32% | 1 | 7.14% |
FUJITA Tomonori | 12 | 14.63% | 3 | 21.43% |
Becky Bruce | 7 | 8.54% | 2 | 14.29% |
John W. Linville | 6 | 7.32% | 2 | 14.29% |
Konrad Rzeszutek Wilk | 5 | 6.10% | 3 | 21.43% |
Eric Sesterhenn / Snakebyte | 3 | 3.66% | 1 | 7.14% |
Linus Torvalds | 1 | 1.22% | 1 | 7.14% |
Jan Beulich | 1 | 1.22% | 1 | 7.14% |
Total | 82 | 100.00% | 14 | 100.00% |
void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir)
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 20 | 60.61% | 1 | 16.67% |
John W. Linville | 6 | 18.18% | 2 | 33.33% |
David Mosberger-Tang | 4 | 12.12% | 1 | 16.67% |
FUJITA Tomonori | 2 | 6.06% | 1 | 16.67% |
Andrew Morton | 1 | 3.03% | 1 | 16.67% |
Total | 33 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir)
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John W. Linville | 24 | 72.73% | 2 | 40.00% |
Linus Torvalds | 6 | 18.18% | 1 | 20.00% |
FUJITA Tomonori | 2 | 6.06% | 1 | 20.00% |
David Mosberger-Tang | 1 | 3.03% | 1 | 20.00% |
Total | 33 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
* This is the scatter-gather version of the above swiotlb_map_page
* interface. Here the scatter gather list elements are each tagged with the
* appropriate dma address and length. They are obtained via
* sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for swiotlb_map_page are the
* same here.
*/
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i) {
phys_addr_t paddr = sg_phys(sg);
dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
if (swiotlb_force == SWIOTLB_FORCE ||
!dma_capable(hwdev, dev_addr, sg->length)) {
phys_addr_t map = map_single(hwdev, sg_phys(sg),
sg->length, dir, attrs);
if (map == SWIOTLB_MAP_ERROR) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
swiotlb_full(hwdev, sg->length, dir, 0);
attrs |= DMA_ATTR_SKIP_CPU_SYNC;
swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
sg_dma_len(sgl) = 0;
return 0;
}
sg->dma_address = phys_to_dma(hwdev, map);
} else
sg->dma_address = dev_addr;
sg_dma_len(sg) = sg->length;
}
return nelems;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 45 | 23.56% | 2 | 6.90% |
Andi Kleen | 40 | 20.94% | 2 | 6.90% |
Jens Axboe | 17 | 8.90% | 1 | 3.45% |
Jan Beulich | 12 | 6.28% | 2 | 6.90% |
FUJITA Tomonori | 11 | 5.76% | 6 | 20.69% |
Becky Bruce | 11 | 5.76% | 1 | 3.45% |
Alexander Duyck | 10 | 5.24% | 3 | 10.34% |
David Mosberger-Tang | 8 | 4.19% | 1 | 3.45% |
Arthur Kepner | 6 | 3.14% | 1 | 3.45% |
Eunbong Song 송은봉 | 6 | 3.14% | 1 | 3.45% |
Jeremy Fitzhardinge | 5 | 2.62% | 2 | 6.90% |
Björn Helgaas | 5 | 2.62% | 1 | 3.45% |
Ian Campbell | 4 | 2.09% | 1 | 3.45% |
Eric Sesterhenn / Snakebyte | 3 | 1.57% | 1 | 3.45% |
Tony Luck | 3 | 1.57% | 1 | 3.45% |
Krzysztof Kozlowski | 2 | 1.05% | 1 | 3.45% |
Geert Uytterhoeven | 2 | 1.05% | 1 | 3.45% |
Jesse Barnes | 1 | 0.52% | 1 | 3.45% |
Total | 191 | 100.00% | 29 | 100.00% |
EXPORT_SYMBOL(swiotlb_map_sg_attrs);
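A hedged scatter-gather sketch; setup_desc() is a hypothetical per-device helper, everything else is from this file:

	struct scatterlist *sg;
	int i, count;

	count = swiotlb_map_sg_attrs(hwdev, sgl, nelems, DMA_TO_DEVICE, 0);
	if (!count)
		return -ENOMEM;	/* mapping failed; the list was already unwound */
	for_each_sg(sgl, sg, count, i)
		setup_desc(sg_dma_address(sg), sg_dma_len(sg));	/* hypothetical */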
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
* concerning calls here are the same as for swiotlb_unmap_page() above.
*/
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *sg;
int i;
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
attrs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 30 | 43.48% | 1 | 10.00% |
Jens Axboe | 17 | 24.64% | 1 | 10.00% |
David Mosberger-Tang | 5 | 7.25% | 1 | 10.00% |
Eric Sesterhenn / Snakebyte | 3 | 4.35% | 1 | 10.00% |
Arthur Kepner | 3 | 4.35% | 1 | 10.00% |
Eunbong Song 송은봉 | 3 | 4.35% | 1 | 10.00% |
FUJITA Tomonori | 2 | 2.90% | 1 | 10.00% |
Krzysztof Kozlowski | 2 | 2.90% | 1 | 10.00% |
Björn Helgaas | 2 | 2.90% | 1 | 10.00% |
Alexander Duyck | 2 | 2.90% | 1 | 10.00% |
Total | 69 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
* after a transfer.
*
* The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
* and usage.
*/
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
enum dma_sync_target target)
{
struct scatterlist *sg;
int i;
for_each_sg(sgl, sg, nelems, i)
swiotlb_sync_single(hwdev, sg->dma_address,
sg_dma_len(sg), dir, target);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 29 | 46.03% | 1 | 10.00% |
Jens Axboe | 17 | 26.98% | 1 | 10.00% |
John W. Linville | 6 | 9.52% | 2 | 20.00% |
Konrad Rzeszutek Wilk | 4 | 6.35% | 2 | 20.00% |
Eunbong Song 송은봉 | 3 | 4.76% | 1 | 10.00% |
Jeremy Fitzhardinge | 2 | 3.17% | 1 | 10.00% |
Becky Bruce | 1 | 1.59% | 1 | 10.00% |
Linus Torvalds | 1 | 1.59% | 1 | 10.00% |
Total | 63 | 100.00% | 10 | 100.00% |
void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 18 | 51.43% | 2 | 28.57% |
John W. Linville | 12 | 34.29% | 2 | 28.57% |
FUJITA Tomonori | 2 | 5.71% | 1 | 14.29% |
David Mosberger-Tang | 2 | 5.71% | 1 | 14.29% |
Andrew Morton | 1 | 2.86% | 1 | 14.29% |
Total | 35 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John W. Linville | 23 | 65.71% | 2 | 40.00% |
Linus Torvalds | 9 | 25.71% | 1 | 20.00% |
FUJITA Tomonori | 2 | 5.71% | 1 | 20.00% |
David Mosberger-Tang | 1 | 2.86% | 1 | 20.00% |
Total | 35 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andrew Morton | 10 | 40.00% | 1 | 20.00% |
Andi Kleen | 7 | 28.00% | 1 | 20.00% |
FUJITA Tomonori | 5 | 20.00% | 1 | 20.00% |
Jeremy Fitzhardinge | 2 | 8.00% | 1 | 20.00% |
Alexander Duyck | 1 | 4.00% | 1 | 20.00% |
Total | 25 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 16 | 64.00% | 3 | 42.86% |
Tony Luck | 5 | 20.00% | 1 | 14.29% |
Jeremy Fitzhardinge | 2 | 8.00% | 1 | 14.29% |
Jan Beulich | 1 | 4.00% | 1 | 14.29% |
Alexander Duyck | 1 | 4.00% | 1 | 14.29% |
Total | 25 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(swiotlb_dma_supported);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 851 | 22.00% | 3 | 2.61% |
FUJITA Tomonori | 588 | 15.20% | 24 | 20.87% |
Alexander Duyck | 309 | 7.99% | 9 | 7.83% |
Alex Williamson | 253 | 6.54% | 1 | 0.87% |
Konrad Rzeszutek Wilk | 205 | 5.30% | 7 | 6.09% |
Becky Bruce | 192 | 4.96% | 7 | 6.09% |
David Mosberger-Tang | 189 | 4.89% | 6 | 5.22% |
Andi Kleen | 185 | 4.78% | 2 | 1.74% |
Jan Beulich | 168 | 4.34% | 6 | 5.22% |
Jeremy Fitzhardinge | 145 | 3.75% | 4 | 3.48% |
Yinghai Lu | 119 | 3.08% | 4 | 3.48% |
John W. Linville | 118 | 3.05% | 2 | 1.74% |
Andrew Morton | 90 | 2.33% | 3 | 2.61% |
Geert Uytterhoeven | 66 | 1.71% | 3 | 2.61% |
Ian Campbell | 62 | 1.60% | 4 | 3.48% |
Jens Axboe | 51 | 1.32% | 1 | 0.87% |
Joerg Roedel | 26 | 0.67% | 1 | 0.87% |
Eric Sesterhenn / Snakebyte | 26 | 0.67% | 1 | 0.87% |
Suresh B. Siddha | 25 | 0.65% | 1 | 0.87% |
Jesse Barnes | 25 | 0.65% | 1 | 0.87% |
Arthur Kepner | 21 | 0.54% | 1 | 0.87% |
Tony Luck | 18 | 0.47% | 2 | 1.74% |
Keir Fraser | 17 | 0.44% | 1 | 0.87% |
Björn Helgaas | 16 | 0.41% | 2 | 1.74% |
Casey Dahlin | 16 | 0.41% | 1 | 0.87% |
Zoltan Kiss | 14 | 0.36% | 1 | 0.87% |
Eunbong Song 송은봉 | 12 | 0.31% | 1 | 0.87% |
Santosh Shilimkar | 12 | 0.31% | 1 | 0.87% |
Krzysztof Kozlowski | 8 | 0.21% | 1 | 0.87% |
Stefano Stabellini | 6 | 0.16% | 1 | 0.87% |
David Brownell | 6 | 0.16% | 1 | 0.87% |
Randy Dunlap | 5 | 0.13% | 1 | 0.87% |
Thomas Gleixner | 4 | 0.10% | 1 | 0.87% |
Yang Hongyang | 4 | 0.10% | 1 | 0.87% |
Tejun Heo | 3 | 0.08% | 1 | 0.87% |
Thierry Reding | 3 | 0.08% | 1 | 0.87% |
Christoph Hellwig | 3 | 0.08% | 1 | 0.87% |
Nikita Yushchenko | 2 | 0.05% | 1 | 0.87% |
Al Viro | 1 | 0.03% | 1 | 0.87% |
Uwe Kleine-König | 1 | 0.03% | 1 | 0.87% |
Alexandre Courbot | 1 | 0.03% | 1 | 0.87% |
Paul Gortmaker | 1 | 0.03% | 1 | 0.87% |
André Goddard Rosa | 1 | 0.03% | 1 | 0.87% |
Total | 3868 | 100.00% | 115 | 100.00% |