Release 4.16 lib/swiotlb.c
/*
* Dynamic DMA mapping support.
*
* This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
* Copyright (C) 2000, 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 03/05/07 davidm Switch from PCI-DMA to generic device DMA API.
* 00/12/13 davidm Rename to swiotlb.c and add mark_clean() to avoid
* unnecessary i-cache flushing.
* 04/07/.. ak Better overflow handling. Assorted fixes.
* 05/09/10 linville Add support for syncing ranges, support syncing for
* DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
* 08/12/11 beckyb Add highmem support
*/
#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>
#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>
#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
/*
* Minimum IO TLB size to bother booting with. Systems with mainly
* 64-bit-capable cards will only lightly use the swiotlb. If we can't
* allocate a contiguous 1MB, we're probably in trouble anyway.
*/
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
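For concreteness, the geometry these constants imply — an illustrative calculation assuming the usual IO_TLB_SHIFT of 11 and PAGE_SHIFT of 12, both assumptions about the build rather than values shown in this file:
/*
 * Illustrative arithmetic, not part of the kernel source:
 *   slab size        = 1 << IO_TLB_SHIFT       = 1 << 11 = 2 KiB
 *   SLABS_PER_PAGE   = 1 << (PAGE_SHIFT - 11)  = 2 slabs per 4 KiB page
 *   IO_TLB_MIN_SLABS = (1 << 20) >> 11         = 512 slabs (the 1 MiB floor)
 */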
enum swiotlb_force swiotlb_force;
/*
* Used to do a quick range check in swiotlb_tbl_unmap_single and
* swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
* API.
*/
static phys_addr_t io_tlb_start, io_tlb_end;
/*
* The number of IO TLB slabs (in groups of IO_TLB_SEGSIZE) between io_tlb_start and
* io_tlb_end. This is command line adjustable via setup_io_tlb_npages.
*/
static unsigned long io_tlb_nslabs;
/*
* When the IOMMU overflows we return a fallback buffer. This sets the size.
*/
static unsigned long io_tlb_overflow = 32*1024;
static phys_addr_t io_tlb_overflow_buffer;
/*
* This is a free list describing the number of free entries available from
* each index
*/
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
/*
* Max segment that we can provide which (if pages are contiguous) will
* not be bounced (unless SWIOTLB_FORCE is set).
*/
unsigned int max_segment;
/*
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
static int late_alloc;
static int __init
setup_io_tlb_npages(char *str)
{
if (isdigit(*str)) {
io_tlb_nslabs = simple_strtoul(str, &str, 0);
/* avoid tail segment of size < IO_TLB_SEGSIZE */
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
if (*str == ',')
++str;
if (!strcmp(str, "force")) {
swiotlb_force = SWIOTLB_FORCE;
} else if (!strcmp(str, "noforce")) {
swiotlb_force = SWIOTLB_NO_FORCE;
io_tlb_nslabs = 1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 35 | 37.63% | 1 | 16.67% |
Geert Uytterhoeven | 24 | 25.81% | 2 | 33.33% |
Linus Torvalds | 23 | 24.73% | 1 | 16.67% |
David Mosberger-Tang | 10 | 10.75% | 1 | 16.67% |
Yinghai Lu | 1 | 1.08% | 1 | 16.67% |
Total | 93 | 100.00% | 6 | 100.00% |
early_param("swiotlb", setup_io_tlb_npages);
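As a usage sketch for the parser above (hypothetical command lines, again assuming IO_TLB_SHIFT == 11):
/*
 * Illustrative boot parameters, not taken from this file:
 *   swiotlb=65536        reserve 65536 slabs = 65536 << 11 = 128 MiB
 *   swiotlb=force        bounce every DMA mapping through the pool
 *   swiotlb=65536,force  both of the above
 *   swiotlb=noforce      disable bouncing and shrink the pool to 1 slab
 */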
/* make io_tlb_overflow tunable too? */
unsigned long swiotlb_nr_tbl(void)
{
return io_tlb_nslabs;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 10 | 90.91% | 1 | 50.00% |
Konrad Rzeszutek Wilk | 1 | 9.09% | 1 | 50.00% |
Total | 11 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
unsigned int swiotlb_max_segment(void)
{
return max_segment;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(swiotlb_max_segment);
void swiotlb_set_max_segment(unsigned int val)
{
if (swiotlb_force == SWIOTLB_FORCE)
max_segment = 1;
else
max_segment = rounddown(val, PAGE_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
unsigned long size;
size = io_tlb_nslabs << IO_TLB_SHIFT;
return size ? size : (IO_TLB_DEFAULT_SIZE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yinghai Lu | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
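A quick worked example of the default sizing, under the same IO_TLB_SHIFT == 11 assumption:
/*
 * Illustrative: IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT = (64UL << 20) >> 11
 * = 32768 slabs, so once swiotlb_init() applies the default,
 * swiotlb_size_or_default() reports 32768 << 11 = 64 MiB.
 */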
void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Lendacky | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
/* For swiotlb, clear memory encryption mask from dma addresses */
static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
phys_addr_t address)
{
return __sme_clr(phys_to_dma(hwdev, address));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Lendacky | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
volatile void *address)
{
return phys_to_dma(hwdev, virt_to_phys(address));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 19 | 70.37% | 1 | 33.33% |
Jeremy Fitzhardinge | 7 | 25.93% | 1 | 33.33% |
FUJITA Tomonori | 1 | 3.70% | 1 | 33.33% |
Total | 27 | 100.00% | 3 | 100.00% |
static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
unsigned char *vstart, *vend;
if (no_iotlb_memory) {
pr_warn("software IO TLB: No low mem\n");
return;
}
vstart = phys_to_virt(io_tlb_start);
vend = phys_to_virt(io_tlb_end);
printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
(unsigned long long)io_tlb_start,
(unsigned long long)io_tlb_end,
bytes >> 20, vstart, vend - 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ian Campbell | 35 | 44.30% | 1 | 16.67% |
Alexander Duyck | 15 | 18.99% | 2 | 33.33% |
Yinghai Lu | 12 | 15.19% | 1 | 16.67% |
Björn Helgaas | 9 | 11.39% | 1 | 16.67% |
FUJITA Tomonori | 8 | 10.13% | 1 | 16.67% |
Total | 79 | 100.00% | 6 | 100.00% |
/*
* Early SWIOTLB allocation may be too early to allow an architecture to
* perform the desired operations. This function allows the architecture to
* call SWIOTLB when the operations are possible. It needs to be called
* before the SWIOTLB memory is used.
*/
void __init swiotlb_update_mem_attributes(void)
{
void *vaddr;
unsigned long bytes;
if (no_iotlb_memory || late_alloc)
return;
vaddr = phys_to_virt(io_tlb_start);
bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
swiotlb_set_mem_attributes(vaddr, bytes);
memset(vaddr, 0, bytes);
vaddr = phys_to_virt(io_tlb_overflow_buffer);
bytes = PAGE_ALIGN(io_tlb_overflow);
swiotlb_set_mem_attributes(vaddr, bytes);
memset(vaddr, 0, bytes);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Lendacky | 85 | 100.00% | 1 | 100.00% |
Total | 85 | 100.00% | 1 | 100.00% |
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
void *v_overflow_buffer;
unsigned long i, bytes;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
io_tlb_start = __pa(tlb);
io_tlb_end = io_tlb_start + bytes;
/*
* Get the overflow emergency buffer
*/
v_overflow_buffer = memblock_virt_alloc_low_nopanic(
PAGE_ALIGN(io_tlb_overflow),
PAGE_SIZE);
if (!v_overflow_buffer)
return -ENOMEM;
io_tlb_overflow_buffer = __pa(v_overflow_buffer);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
PAGE_SIZE);
io_tlb_orig_addr = memblock_virt_alloc(
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
PAGE_SIZE);
for (i = 0; i < io_tlb_nslabs; i++) {
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
if (verbose)
swiotlb_print_info();
swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 52 | 30.41% | 1 | 6.67% |
Jan Beulich | 35 | 20.47% | 2 | 13.33% |
Alexander Duyck | 29 | 16.96% | 2 | 13.33% |
FUJITA Tomonori | 21 | 12.28% | 2 | 13.33% |
Yinghai Lu | 12 | 7.02% | 3 | 20.00% |
David Mosberger-Tang | 9 | 5.26% | 2 | 13.33% |
Konrad Rzeszutek Wilk | 7 | 4.09% | 1 | 6.67% |
Santosh Shilimkar | 5 | 2.92% | 1 | 6.67% |
Ian Campbell | 1 | 0.58% | 1 | 6.67% |
Total | 171 | 100.00% | 15 | 100.00% |
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
void __init
swiotlb_init(int verbose)
{
size_t default_size = IO_TLB_DEFAULT_SIZE;
unsigned char *vstart;
unsigned long bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* Get IO TLB memory from the low pages */
vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
return;
if (io_tlb_start)
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
pr_warn("Cannot allocate SWIOTLB buffer");
no_iotlb_memory = true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 56 | 52.83% | 2 | 22.22% |
Yinghai Lu | 35 | 33.02% | 4 | 44.44% |
Alexander Duyck | 8 | 7.55% | 1 | 11.11% |
David Mosberger-Tang | 4 | 3.77% | 1 | 11.11% |
Santosh Shilimkar | 3 | 2.83% | 1 | 11.11% |
Total | 106 | 100.00% | 9 | 100.00% |
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int
swiotlb_late_init_with_default_size(size_t default_size)
{
unsigned long bytes, req_nslabs = io_tlb_nslabs;
unsigned char *vstart = NULL;
unsigned int order;
int rc = 0;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
/*
* Get IO TLB memory from the low pages
*/
order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
order);
if (vstart)
break;
order--;
}
if (!vstart) {
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
}
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
if (rc)
free_pages((unsigned long)vstart, order);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Williamson | 114 | 60.96% | 1 | 16.67% |
Konrad Rzeszutek Wilk | 40 | 21.39% | 1 | 16.67% |
Alexander Duyck | 12 | 6.42% | 1 | 16.67% |
Jan Beulich | 11 | 5.88% | 1 | 16.67% |
FUJITA Tomonori | 9 | 4.81% | 1 | 16.67% |
Jeremy Fitzhardinge | 1 | 0.53% | 1 | 16.67% |
Total | 187 | 100.00% | 6 | 100.00% |
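A sketch of the order-reduction fallback above, with illustrative numbers assuming 4 KiB pages:
/*
 * Illustrative: a 64 MiB request needs get_order(64 MiB) == 14.  If only
 * an order-13 (32 MiB) block can be allocated, the loop retries downward,
 * the "only able to allocate 32 MB" warning fires, and io_tlb_nslabs is
 * rewritten to SLABS_PER_PAGE << 13 == 16384 slabs (16384 * 2 KiB = 32 MiB).
 */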
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
unsigned long i, bytes;
unsigned char *v_overflow_buffer;
bytes = nslabs << IO_TLB_SHIFT;
io_tlb_nslabs = nslabs;
io_tlb_start = virt_to_phys(tlb);
io_tlb_end = io_tlb_start + bytes;
swiotlb_set_mem_attributes(tlb, bytes);
memset(tlb, 0, bytes);
/*
* Get the overflow emergency buffer
*/
v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
get_order(io_tlb_overflow));
if (!v_overflow_buffer)
goto cleanup2;
swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
memset(v_overflow_buffer, 0, io_tlb_overflow);
io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs * sizeof(int)));
if (!io_tlb_list)
goto cleanup3;
io_tlb_orig_addr = (phys_addr_t *)
__get_free_pages(GFP_KERNEL,
get_order(io_tlb_nslabs *
sizeof(phys_addr_t)));
if (!io_tlb_orig_addr)
goto cleanup4;
for (i = 0; i < io_tlb_nslabs; i++) {
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
io_tlb_index = 0;
swiotlb_print_info();
late_alloc = 1;
swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
return 0;
cleanup4:
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
io_tlb_list = NULL;
cleanup3:
free_pages((unsigned long)v_overflow_buffer,
get_order(io_tlb_overflow));
io_tlb_overflow_buffer = 0;
cleanup2:
io_tlb_end = 0;
io_tlb_start = 0;
io_tlb_nslabs = 0;
max_segment = 0;
return -ENOMEM;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Williamson | 129 | 43.73% | 1 | 7.69% |
Alexander Duyck | 52 | 17.63% | 3 | 23.08% |
Jan Beulich | 42 | 14.24% | 2 | 15.38% |
Konrad Rzeszutek Wilk | 41 | 13.90% | 2 | 15.38% |
Tom Lendacky | 23 | 7.80% | 1 | 7.69% |
FUJITA Tomonori | 5 | 1.69% | 2 | 15.38% |
Becky Bruce | 2 | 0.68% | 1 | 7.69% |
Ian Campbell | 1 | 0.34% | 1 | 7.69% |
Total | 295 | 100.00% | 13 | 100.00% |
void __init swiotlb_exit(void)
{
if (!io_tlb_orig_addr)
return;
if (late_alloc) {
free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
get_order(io_tlb_overflow));
free_pages((unsigned long)io_tlb_orig_addr,
get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
sizeof(int)));
free_pages((unsigned long)phys_to_virt(io_tlb_start),
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
memblock_free_late(io_tlb_overflow_buffer,
PAGE_ALIGN(io_tlb_overflow));
memblock_free_late(__pa(io_tlb_orig_addr),
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
memblock_free_late(__pa(io_tlb_list),
PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
memblock_free_late(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
}
io_tlb_nslabs = 0;
max_segment = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 131 | 80.37% | 1 | 12.50% |
Yinghai Lu | 12 | 7.36% | 1 | 12.50% |
Konrad Rzeszutek Wilk | 8 | 4.91% | 2 | 25.00% |
Alexander Duyck | 7 | 4.29% | 2 | 25.00% |
Santosh Shilimkar | 4 | 2.45% | 1 | 12.50% |
Christoph Hellwig | 1 | 0.61% | 1 | 12.50% |
Total | 163 | 100.00% | 8 | 100.00% |
int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 17 | 100.00% | 2 | 100.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
* Bounce: copy the swiotlb buffer back to the original dma location
*/
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir)
{
unsigned long pfn = PFN_DOWN(orig_addr);
unsigned char *vaddr = phys_to_virt(tlb_addr);
if (PageHighMem(pfn_to_page(pfn))) {
/* The buffer does not have a mapping. Map it in and copy */
unsigned int offset = orig_addr & ~PAGE_MASK;
char *buffer;
unsigned int sz = 0;
unsigned long flags;
while (size) {
sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn));
if (dir == DMA_TO_DEVICE)
memcpy(vaddr, buffer + offset, sz);
else
memcpy(buffer + offset, vaddr, sz);
kunmap_atomic(buffer);
local_irq_restore(flags);
size -= sz;
pfn++;
vaddr += sz;
offset = 0;
}
} else if (dir == DMA_TO_DEVICE) {
memcpy(vaddr, phys_to_virt(orig_addr), size);
} else {
memcpy(phys_to_virt(orig_addr), vaddr, size);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 102 | 51.52% | 2 | 40.00% |
Becky Bruce | 69 | 34.85% | 2 | 40.00% |
Alexander Duyck | 27 | 13.64% | 1 | 20.00% |
Total | 198 | 100.00% | 5 | 100.00% |
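An illustrative trace of the highmem path in swiotlb_bounce() for a copy that straddles page boundaries (assumes 4 KiB pages):
/*
 * Illustrative: bouncing 6 KiB whose orig_addr starts 3 KiB into a page
 * takes three passes through the kmap_atomic() loop:
 *   pass 1: sz = min(4K - 3K, 6K) = 1 KiB; then pfn++, offset = 0
 *   pass 2: sz = min(4K, 5K)      = 4 KiB
 *   pass 3: sz = min(4K, 1K)      = 1 KiB; size reaches 0
 */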
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_addr,
phys_addr_t orig_addr, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
unsigned long flags;
phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
if (mem_encrypt_active())
pr_warn_once("%s is active and system is using DMA bounce buffers\n",
sme_active() ? "SME" : "SEV");
mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
/*
* Carefully handle integer overflow which can occur when mask == ~0UL.
*/
max_slots = mask + 1
? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
* For mappings greater than or equal to a page, we limit the stride
* (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
BUG_ON(!nslots);
/*
* Find a suitable number of contiguous IO TLB entries to fit this
* request and allocate a buffer from that IO TLB pool.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
index = 0;
wrap = index;
do {
while (iommu_is_span_boundary(index, nslots, offset_slots,
max_slots)) {
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
if (index == wrap)
goto not_found;
}
/*
* If we find a slot that indicates we have 'nslots' number of
* contiguous buffers, we allocate the buffers from that slot
* and mark the entries as '0' indicating unavailable.
*/
if (io_tlb_list[index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index + nslots); i++)
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in the next
* round.
*/
io_tlb_index = ((index + nslots) < io_tlb_nslabs
? (index + nslots) : 0);
goto found;
}
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
} while (index != wrap);
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
* Save away the mapping from the original address to the DMA address.
* This is needed when we sync the memory. Then we sync the buffer if
* needed.
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
return tlb_addr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 245 | 49.49% | 1 | 3.57% |
FUJITA Tomonori | 83 | 16.77% | 3 | 10.71% |
Jan Beulich | 30 | 6.06% | 2 | 7.14% |
Alexander Duyck | 23 | 4.65% | 2 | 7.14% |
David Mosberger-Tang | 21 | 4.24% | 2 | 7.14% |
Keir Fraser | 17 | 3.43% | 1 | 3.57% |
Tom Lendacky | 17 | 3.43% | 2 | 7.14% |
Konrad Rzeszutek Wilk | 10 | 2.02% | 2 | 7.14% |
Yinghai Lu | 9 | 1.82% | 1 | 3.57% |
Christian König | 7 | 1.41% | 1 | 3.57% |
Becky Bruce | 6 | 1.21% | 2 | 7.14% |
Andi Kleen | 6 | 1.21% | 1 | 3.57% |
Stefano Stabellini | 6 | 1.21% | 1 | 3.57% |
Jeremy Fitzhardinge | 3 | 0.61% | 1 | 3.57% |
Eric Sesterhenn / Snakebyte | 3 | 0.61% | 1 | 3.57% |
Jesse Barnes | 3 | 0.61% | 1 | 3.57% |
Nikita Yushchenko | 2 | 0.40% | 1 | 3.57% |
Andrew Morton | 2 | 0.40% | 1 | 3.57% |
Ian Campbell | 1 | 0.20% | 1 | 3.57% |
Tony Luck | 1 | 0.20% | 1 | 3.57% |
Total | 495 | 100.00% | 28 | 100.00% |
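A worked example of the free-list encoding that swiotlb_tbl_map_single() manipulates (illustrative; assumes IO_TLB_SEGSIZE == 128):
/*
 * io_tlb_list[i] advertises how many free slots start at slot i, counting
 * only up to the end of i's 128-slot segment:
 *
 *   fresh segment:            [128][127][126][125][124] ...
 *   after allocating 2 slots
 *   at index 2:               [ 2 ][ 1 ][ 0 ][ 0 ][124] ...
 *
 * Slots 2-3 are zeroed (in use); the backward walk rewrites slots 1 and 0
 * so they only advertise the free run that now ends at the allocation.
 */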
/*
* Allocates a bounce buffer and returns its physical address.
*/
static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
dma_addr_t start_dma_addr;
if (swiotlb_force == SWIOTLB_NO_FORCE) {
dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
&phys);
return SWIOTLB_MAP_ERROR;
}
start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
dir, attrs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 38 | 51.35% | 1 | 14.29% |
Geert Uytterhoeven | 25 | 33.78% | 1 | 14.29% |
Alexander Duyck | 7 | 9.46% | 2 | 28.57% |
Konrad Rzeszutek Wilk | 2 | 2.70% | 1 | 14.29% |
Tom Lendacky | 1 | 1.35% | 1 | 14.29% |
Alexandre Courbot | 1 | 1.35% | 1 | 14.29% |
Total | 74 | 100.00% | 7 | 100.00% |
/*
* tlb_addr is the physical address of the bounce buffer to unmap.
*/
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
if (orig_addr != INVALID_PHYS_ADDR &&
!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
* entries to indicate the number of contiguous entries available.
* While returning the entries to the free list, we merge the entries
* with slots below and above the pool being returned.
*/
spin_lock_irqsave(&io_tlb_lock, flags);
{
count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
io_tlb_list[index + nslots] : 0);
/*
* Step 1: return the slots to the free list, merging the
* slots with succeeding slots
*/
for (i = index + nslots - 1; i >= index; i--) {
io_tlb_list[i] = ++count;
io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
}
/*
* Step 2: merge the returned slots with the preceding slots,
* if available (non-zero)
*/
for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
io_tlb_list[i] = ++count;
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 151 | 67.41% | 1 | 7.14% |
David Mosberger-Tang | 23 | 10.27% | 2 | 14.29% |
Alexander Duyck | 18 | 8.04% | 3 | 21.43% |
Jan Beulich | 11 | 4.91% | 1 | 7.14% |
Becky Bruce | 9 | 4.02% | 2 | 14.29% |
Jesse Barnes | 6 | 2.68% | 1 | 7.14% |
Konrad Rzeszutek Wilk | 3 | 1.34% | 2 | 14.29% |
Jeremy Fitzhardinge | 2 | 0.89% | 1 | 7.14% |
André Goddard Rosa | 1 | 0.45% | 1 | 7.14% |
Total | 224 | 100.00% | 14 | 100.00% |
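Continuing that example through swiotlb_tbl_unmap_single() (illustrative, same assumptions):
/*
 * Freeing the 2 slots at index 2 seeds count from the slot just past the
 * block (io_tlb_list[4] == 124), then walks backward through the freed
 * block (step 1) and the preceding free run (step 2):
 *
 *   before free:  [ 2 ][ 1 ][ 0 ][ 0 ][124] ...
 *   after free:   [128][127][126][125][124] ...
 */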
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
{
int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
phys_addr_t orig_addr = io_tlb_orig_addr[index];
if (orig_addr == INVALID_PHYS_ADDR)
return;
orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr,
size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr,
size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
default:
BUG();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
John W. Linville | 41 | 27.33% | 1 | 6.67% |
Linus Torvalds | 32 | 21.33% | 1 | 6.67% |
Becky Bruce | 32 | 21.33% | 2 | 13.33% |
Alexander Duyck | 10 | 6.67% | 1 | 6.67% |
Jan Beulich | 8 | 5.33% | 2 | 13.33% |
Eric Sesterhenn / Snakebyte | 8 | 5.33% | 1 | 6.67% |
Jeremy Fitzhardinge | 7 | 4.67% | 2 | 13.33% |
David Mosberger-Tang | 6 | 4.00% | 1 | 6.67% |
Konrad Rzeszutek Wilk | 5 | 3.33% | 3 | 20.00% |
Tony Luck | 1 | 0.67% | 1 | 6.67% |
Total | 150 | 100.00% | 15 | 100.00% |
static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
size_t size)
{
u64 mask = DMA_BIT_MASK(32);
if (dev && dev->coherent_dma_mask)
mask = dev->coherent_dma_mask;
return addr + size - 1 <= mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 19 | 38.78% | 1 | 12.50% |
FUJITA Tomonori | 15 | 30.61% | 3 | 37.50% |
Linus Torvalds | 7 | 14.29% | 1 | 12.50% |
Yang Hongyang | 4 | 8.16% | 1 | 12.50% |
David Mosberger-Tang | 2 | 4.08% | 1 | 12.50% |
Jan Beulich | 2 | 4.08% | 1 | 12.50% |
Total | 49 | 100.00% | 8 | 100.00% |
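A brief check of dma_coherent_ok() with concrete numbers (illustrative):
/*
 * Illustrative: with coherent_dma_mask == DMA_BIT_MASK(32) == 0xffffffff,
 * a 4 KiB buffer at bus address 0xfffff000 passes, since
 * 0xfffff000 + 0x1000 - 1 == 0xffffffff <= mask, while the same buffer
 * at 0xfffff001 would end one byte past the mask and fail.
 */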
static void *
swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
unsigned long attrs)
{
phys_addr_t phys_addr;
if (swiotlb_force == SWIOTLB_NO_FORCE)
goto out_warn;
phys_addr = swiotlb_tbl_map_single(dev,