cregit-Linux: how code gets into the kernel

Release 4.9: lib/swiotlb.c

Directory: lib
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm      Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm      Rename to swiotlb.c and add mark_clean() to avoid
 *                      unnecessary i-cache flushing.
 * 04/07/.. ak          Better overflow handling. Assorted fixes.
 * 05/09/10 linville    Add support for syncing ranges, support syncing for
 *                      DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb      Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>


#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>


#define OFFSET(val,align) ((unsigned long)        \
                           ( (val) & ( (align) - 1)))


#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */

#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
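For concreteness, here is a small user-space sketch (not part of this file) of what the two macros above evaluate to, assuming IO_TLB_SHIFT is 11 (2 KiB slabs) and PAGE_SHIFT is 12 (4 KiB pages); both values are assumptions for the example, the real ones come from <linux/swiotlb.h> and the architecture headers:

#include <stdio.h>

/* Assumed values; the kernel takes them from <linux/swiotlb.h> and the
 * architecture's page definitions. */
#define EXAMPLE_IO_TLB_SHIFT 11
#define EXAMPLE_PAGE_SHIFT   12

#define EXAMPLE_SLABS_PER_PAGE   (1 << (EXAMPLE_PAGE_SHIFT - EXAMPLE_IO_TLB_SHIFT))
#define EXAMPLE_IO_TLB_MIN_SLABS ((1 << 20) >> EXAMPLE_IO_TLB_SHIFT)

int main(void)
{
        /* Prints "2 slabs per page, minimum 512 slabs (1 MB)" */
        printf("%d slabs per page, minimum %d slabs (%d MB)\n",
               EXAMPLE_SLABS_PER_PAGE, EXAMPLE_IO_TLB_MIN_SLABS,
               (EXAMPLE_IO_TLB_MIN_SLABS << EXAMPLE_IO_TLB_SHIFT) >> 20);
        return 0;
}

So under these assumptions a page holds two slabs, and the minimum worth booting with is 512 slabs, i.e. the contiguous 1 MB mentioned in the comment above.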


int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */


static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */

static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */

static unsigned long io_tlb_overflow = 32*1024;


static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */

static unsigned int *io_tlb_list;

static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);


static int late_alloc;


static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                io_tlb_nslabs = simple_strtoul(str, &str, 0);
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force = 1;

        return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
andi kleen | 36 | 51.43% | 1 | 25.00%
linus torvalds | 23 | 32.86% | 1 | 25.00%
david mosberger | 10 | 14.29% | 1 | 25.00%
yinghai lu | 1 | 1.43% | 1 | 25.00%
Total | 70 | 100.00% | 4 | 100.00%
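As a usage illustration (the numbers here are made up for the example): booting with the kernel parameter

    swiotlb=65536,force

is handled by setup_io_tlb_npages() above, which is registered via early_param() just below. The leading number is parsed with simple_strtoul() and rounded up to a multiple of IO_TLB_SEGSIZE, and the trailing "force" sets swiotlb_force, so bounce buffering is used even for devices whose DMA mask could reach the memory directly (see the dma_capable() test in swiotlb_map_page() near the end of this file). Assuming 2 KiB slabs (IO_TLB_SHIFT == 11), 65536 slabs reserve 128 MB of bounce space instead of the 64 MB default.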

early_param("swiotlb", setup_io_tlb_npages); /* make io_tlb_overflow tunable too? */
unsigned long swiotlb_nr_tbl(void) { return io_tlb_nslabs; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
fujita tomonori | 10 | 90.91% | 1 | 50.00%
konrad rzeszutek wilk | 1 | 9.09% | 1 | 50.00%
Total | 11 | 100.00% | 2 | 100.00%

EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)

unsigned long swiotlb_size_or_default(void)
{
        unsigned long size;

        size = io_tlb_nslabs << IO_TLB_SHIFT;

        return size ? size : (IO_TLB_DEFAULT_SIZE);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
yinghai lu | 27 | 100.00% | 1 | 100.00%
Total | 27 | 100.00% | 1 | 100.00%

/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
{
        return phys_to_dma(hwdev, virt_to_phys(address));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
ian campbell | 19 | 70.37% | 1 | 33.33%
jeremy fitzhardinge | 7 | 25.93% | 1 | 33.33%
fujita tomonori | 1 | 3.70% | 1 | 33.33%
Total | 27 | 100.00% | 3 | 100.00%

static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
        unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        unsigned char *vstart, *vend;

        if (no_iotlb_memory) {
                pr_warn("software IO TLB: No low mem\n");
                return;
        }

        vstart = phys_to_virt(io_tlb_start);
        vend = phys_to_virt(io_tlb_end);

        printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
               (unsigned long long)io_tlb_start,
               (unsigned long long)io_tlb_end,
               bytes >> 20, vstart, vend - 1);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
ian campbell | 35 | 44.30% | 1 | 16.67%
alexander duyck | 15 | 18.99% | 2 | 33.33%
yinghai lu | 12 | 15.19% | 1 | 16.67%
bjorn helgaas | 9 | 11.39% | 1 | 16.67%
fujita tomonori | 8 | 10.13% | 1 | 16.67%
Total | 79 | 100.00% | 6 | 100.00%


int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
        void *v_overflow_buffer;
        unsigned long i, bytes;

        bytes = nslabs << IO_TLB_SHIFT;

        io_tlb_nslabs = nslabs;
        io_tlb_start = __pa(tlb);
        io_tlb_end = io_tlb_start + bytes;

        /*
         * Get the overflow emergency buffer
         */
        v_overflow_buffer = memblock_virt_alloc_low_nopanic(
                                                PAGE_ALIGN(io_tlb_overflow),
                                                PAGE_SIZE);
        if (!v_overflow_buffer)
                return -ENOMEM;

        io_tlb_overflow_buffer = __pa(v_overflow_buffer);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
                                PAGE_SIZE);
        io_tlb_orig_addr = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
                                PAGE_SIZE);
        for (i = 0; i < io_tlb_nslabs; i++) {
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;

        if (verbose)
                swiotlb_print_info();

        return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 50 | 30.49% | 1 | 6.67%
jan beulich | 35 | 21.34% | 2 | 13.33%
alexander duyck | 29 | 17.68% | 2 | 13.33%
fujita tomonori | 21 | 12.80% | 2 | 13.33%
yinghai lu | 12 | 7.32% | 3 | 20.00%
david mosberger | 9 | 5.49% | 2 | 13.33%
santosh shilimkar | 5 | 3.05% | 1 | 6.67%
andi kleen | 2 | 1.22% | 1 | 6.67%
ian campbell | 1 | 0.61% | 1 | 6.67%
Total | 164 | 100.00% | 15 | 100.00%
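To make the free-list initialization above concrete, here is a stand-alone user-space sketch (illustrative only, assuming IO_TLB_SEGSIZE is 128 as defined in <linux/swiotlb.h>) that fills an array with the same IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE) formula for two segments:

#include <stdio.h>

#define EXAMPLE_SEGSIZE 128   /* assumed value of IO_TLB_SEGSIZE */
#define EXAMPLE_OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

int main(void)
{
        unsigned int list[2 * EXAMPLE_SEGSIZE];
        unsigned long i;

        /* Same formula as the init loop above: entry i holds the number of
         * free slabs from i up to the end of its 128-slab segment. */
        for (i = 0; i < 2 * EXAMPLE_SEGSIZE; i++)
                list[i] = EXAMPLE_SEGSIZE - EXAMPLE_OFFSET(i, EXAMPLE_SEGSIZE);

        /* Prints 128 127 ... 2 1 and then the pattern repeats, so a single
         * allocation is never satisfied across a segment boundary. */
        for (i = 0; i < 2 * EXAMPLE_SEGSIZE; i++)
                printf("%u ", list[i]);
        printf("\n");
        return 0;
}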

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
        size_t default_size = IO_TLB_DEFAULT_SIZE;
        unsigned char *vstart;
        unsigned long bytes;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        /* Get IO TLB memory from the low pages */
        vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
        if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
                return;

        if (io_tlb_start)
                memblock_free_early(io_tlb_start,
                                    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        pr_warn("Cannot allocate SWIOTLB buffer");
        no_iotlb_memory = true;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
fujita tomonori | 56 | 52.83% | 2 | 22.22%
yinghai lu | 35 | 33.02% | 4 | 44.44%
alexander duyck | 8 | 7.55% | 1 | 11.11%
david mosberger | 4 | 3.77% | 1 | 11.11%
santosh shilimkar | 3 | 2.83% | 1 | 11.11%
Total | 106 | 100.00% | 9 | 100.00%

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
        unsigned long bytes, req_nslabs = io_tlb_nslabs;
        unsigned char *vstart = NULL;
        unsigned int order;
        int rc = 0;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        /*
         * Get IO TLB memory from the low pages
         */
        order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
        io_tlb_nslabs = SLABS_PER_PAGE << order;
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
        }

        if (!vstart) {
                io_tlb_nslabs = req_nslabs;
                return -ENOMEM;
        }
        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
        }
        rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
        if (rc)
                free_pages((unsigned long)vstart, order);

        return rc;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
alex williamson | 114 | 60.96% | 1 | 16.67%
konrad rzeszutek wilk | 40 | 21.39% | 1 | 16.67%
alexander duyck | 12 | 6.42% | 1 | 16.67%
jan beulich | 11 | 5.88% | 1 | 16.67%
fujita tomonori | 9 | 4.81% | 1 | 16.67%
jeremy fitzhardinge | 1 | 0.53% | 1 | 16.67%
Total | 187 | 100.00% | 6 | 100.00%


int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
        unsigned long i, bytes;
        unsigned char *v_overflow_buffer;

        bytes = nslabs << IO_TLB_SHIFT;

        io_tlb_nslabs = nslabs;
        io_tlb_start = virt_to_phys(tlb);
        io_tlb_end = io_tlb_start + bytes;

        memset(tlb, 0, bytes);

        /*
         * Get the overflow emergency buffer
         */
        v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
                                                     get_order(io_tlb_overflow));
        if (!v_overflow_buffer)
                goto cleanup2;

        io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                                      get_order(io_tlb_nslabs * sizeof(int)));
        if (!io_tlb_list)
                goto cleanup3;

        io_tlb_orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
                                           sizeof(phys_addr_t)));
        if (!io_tlb_orig_addr)
                goto cleanup4;

        for (i = 0; i < io_tlb_nslabs; i++) {
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;

        swiotlb_print_info();

        late_alloc = 1;

        return 0;

cleanup4:
        free_pages((unsigned long)io_tlb_list,
                   get_order(io_tlb_nslabs * sizeof(int)));
        io_tlb_list = NULL;
cleanup3:
        free_pages((unsigned long)v_overflow_buffer,
                   get_order(io_tlb_overflow));
        io_tlb_overflow_buffer = 0;
cleanup2:
        io_tlb_end = 0;
        io_tlb_start = 0;
        io_tlb_nslabs = 0;
        return -ENOMEM;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
alex williamson | 129 | 49.43% | 1 | 9.09%
alexander duyck | 52 | 19.92% | 3 | 27.27%
jan beulich | 42 | 16.09% | 2 | 18.18%
konrad rzeszutek wilk | 30 | 11.49% | 1 | 9.09%
fujita tomonori | 5 | 1.92% | 2 | 18.18%
becky bruce | 2 | 0.77% | 1 | 9.09%
ian campbell | 1 | 0.38% | 1 | 9.09%
Total | 261 | 100.00% | 11 | 100.00%


void __init swiotlb_free(void)
{
        if (!io_tlb_orig_addr)
                return;

        if (late_alloc) {
                free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
                           get_order(io_tlb_overflow));
                free_pages((unsigned long)io_tlb_orig_addr,
                           get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
                free_pages((unsigned long)io_tlb_list,
                           get_order(io_tlb_nslabs * sizeof(int)));
                free_pages((unsigned long)phys_to_virt(io_tlb_start),
                           get_order(io_tlb_nslabs << IO_TLB_SHIFT));
        } else {
                memblock_free_late(io_tlb_overflow_buffer,
                                   PAGE_ALIGN(io_tlb_overflow));
                memblock_free_late(__pa(io_tlb_orig_addr),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
                memblock_free_late(__pa(io_tlb_list),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
                memblock_free_late(io_tlb_start,
                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        }
        io_tlb_nslabs = 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
fujita tomonori | 132 | 83.02% | 1 | 16.67%
yinghai lu | 12 | 7.55% | 1 | 16.67%
alexander duyck | 7 | 4.40% | 2 | 33.33%
konrad rzeszutek wilk | 4 | 2.52% | 1 | 16.67%
santosh shilimkar | 4 | 2.52% | 1 | 16.67%
Total | 159 | 100.00% | 6 | 100.00%


int is_swiotlb_buffer(phys_addr_t paddr)
{
        return paddr >= io_tlb_start && paddr < io_tlb_end;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
fujita tomonori | 17 | 100.00% | 2 | 100.00%
Total | 17 | 100.00% | 2 | 100.00%

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
                           size_t size, enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = phys_to_virt(tlb_addr);

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = orig_addr & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn));
                        if (dir == DMA_TO_DEVICE)
                                memcpy(vaddr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, vaddr, sz);
                        kunmap_atomic(buffer);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        vaddr += sz;
                        offset = 0;
                }
        } else if (dir == DMA_TO_DEVICE) {
                memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
                memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
jeremy fitzhardinge | 103 | 52.02% | 2 | 40.00%
becky bruce | 68 | 34.34% | 2 | 40.00%
alexander duyck | 27 | 13.64% | 1 | 20.00%
Total | 198 | 100.00% | 5 | 100.00%


phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                   dma_addr_t tbl_dma_addr,
                                   phys_addr_t orig_addr, size_t size,
                                   enum dma_data_direction dir)
{
        unsigned long flags;
        phys_addr_t tlb_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;

        if (no_iotlb_memory)
                panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

        mask = dma_get_seg_boundary(hwdev);

        tbl_dma_addr &= mask;

        offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find suitable number of IO TLB entries size that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        index = ALIGN(io_tlb_index, stride);
        if (index >= io_tlb_nslabs)
                index = 0;
        wrap = index;

        do {
                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                        if (index == wrap)
                                goto not_found;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (io_tlb_list[index] >= nslots) {
                        int count = 0;

                        for (i = index; i < (int) (index + nslots); i++)
                                io_tlb_list[i] = 0;
                        for (i = index - 1;
                             (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
                             io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
                        tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
                        io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                        ? (index + nslots) : 0);

                        goto found;
                }
                index += stride;
                if (index >= io_tlb_nslabs)
                        index = 0;
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        if (printk_ratelimit())
                dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
        return SWIOTLB_MAP_ERROR;
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

        return tlb_addr;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 246 | 53.71% | 1 | 4.35%
fujita tomonori | 83 | 18.12% | 3 | 13.04%
jan beulich | 30 | 6.55% | 2 | 8.70%
david mosberger | 21 | 4.59% | 2 | 8.70%
keir fraser | 17 | 3.71% | 1 | 4.35%
konrad rzeszutek wilk | 10 | 2.18% | 2 | 8.70%
alexander duyck | 10 | 2.18% | 1 | 4.35%
yinghai lu | 9 | 1.97% | 1 | 4.35%
andi kleen | 6 | 1.31% | 1 | 4.35%
stefano stabellini | 6 | 1.31% | 1 | 4.35%
becky bruce | 6 | 1.31% | 2 | 8.70%
jesse barnes | 4 | 0.87% | 1 | 4.35%
eric sesterhenn | 3 | 0.66% | 1 | 4.35%
jeremy fitzhardinge | 3 | 0.66% | 1 | 4.35%
andrew morton | 2 | 0.44% | 1 | 4.35%
ian campbell | 1 | 0.22% | 1 | 4.35%
tony luck | 1 | 0.22% | 1 | 4.35%
Total | 458 | 100.00% | 23 | 100.00%
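The slot search above leans on the free-list counters: io_tlb_list[index] >= nslots means nslots contiguous slabs are free starting at index, and after they are claimed the entries just before the allocation are rewritten so that they only count up to the newly used region. Here is a rough user-space model of that marking step (illustrative only; EX_SEGSIZE mirrors IO_TLB_SEGSIZE and is assumed to be 128):

#include <stdio.h>

#define EX_SEGSIZE 128        /* assumed value of IO_TLB_SEGSIZE */
#define EX_NSLABS  256        /* two segments, enough for the example */
#define EX_OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

static unsigned int ex_list[EX_NSLABS];

/* Mirrors the marking in swiotlb_tbl_map_single(): zero the claimed slots,
 * then walk backwards and rewrite the preceding free counts so that they
 * stop at the start of the allocation (or at a segment boundary). */
static void ex_claim(int index, int nslots)
{
        int i, count = 0;

        for (i = index; i < index + nslots; i++)
                ex_list[i] = 0;
        for (i = index - 1;
             EX_OFFSET(i, EX_SEGSIZE) != EX_SEGSIZE - 1 && ex_list[i]; i--)
                ex_list[i] = ++count;
}

int main(void)
{
        int i, index = 8, nslots = 4;

        /* Same initialization as swiotlb_init_with_tbl() above. */
        for (i = 0; i < EX_NSLABS; i++)
                ex_list[i] = EX_SEGSIZE - EX_OFFSET(i, EX_SEGSIZE);

        /* The "fits here" test used by the search loop. */
        if (ex_list[index] >= nslots)
                ex_claim(index, nslots);

        /* Slots 8..11 now read 0 (in use); slots 0..7 count down 8..1,
         * i.e. the length of the free run left in front of the claim. */
        for (i = 0; i < 16; i++)
                printf("%u ", ex_list[i]);
        printf("\n");
        return 0;
}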

EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static phys_addr_t map_single(struct device *hwdev, phys_addr_t phys,
                              size_t size, enum dma_data_direction dir)
{
        dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);

        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
fujita tomonori | 40 | 88.89% | 1 | 20.00%
konrad rzeszutek wilk | 2 | 4.44% | 1 | 20.00%
alexander duyck | 2 | 4.44% | 2 | 40.00%
alexandre courbot | 1 | 2.22% | 1 | 20.00%
Total | 45 | 100.00% | 5 | 100.00%

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
                              size_t size, enum dma_data_direction dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if (orig_addr != INVALID_PHYS_ADDR &&
            ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);

                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with superseding slots
                 */
                for (i = index + nslots - 1; i >= index; i--) {
                        io_tlb_list[i] = ++count;
                        io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
                }
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
                for (i = index - 1;
                     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
                     io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 150 | 70.42% | 1 | 7.69%
david mosberger | 23 | 10.80% | 2 | 15.38%
jan beulich | 11 | 5.16% | 1 | 7.69%
becky bruce | 9 | 4.23% | 2 | 15.38%
alexander duyck | 8 | 3.76% | 2 | 15.38%
jesse barnes | 6 | 2.82% | 1 | 7.69%
konrad rzeszutek wilk | 3 | 1.41% | 2 | 15.38%
jeremy fitzhardinge | 2 | 0.94% | 1 | 7.69%
andre goddard rosa | 1 | 0.47% | 1 | 7.69%
Total | 213 | 100.00% | 13 | 100.00%

EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
                             size_t size, enum dma_data_direction dir,
                             enum dma_sync_target target)
{
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];

        if (orig_addr == INVALID_PHYS_ADDR)
                return;
        orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(orig_addr, tlb_addr,
                                       size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(orig_addr, tlb_addr,
                                       size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
        default:
                BUG();
        }
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
john w. linville | 41 | 27.33% | 1 | 6.67%
becky bruce | 33 | 22.00% | 2 | 13.33%
linus torvalds | 31 | 20.67% | 1 | 6.67%
alexander duyck | 11 | 7.33% | 1 | 6.67%
jan beulich | 8 | 5.33% | 2 | 13.33%
eric sesterhenn | 8 | 5.33% | 1 | 6.67%
jeremy fitzhardinge | 6 | 4.00% | 2 | 13.33%
david mosberger | 6 | 4.00% | 1 | 6.67%
konrad rzeszutek wilk | 5 | 3.33% | 3 | 20.00%
tony luck | 1 | 0.67% | 1 | 6.67%
Total | 150 | 100.00% | 15 | 100.00%

EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        dma_addr_t dev_addr;
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        ret = (void *)__get_free_pages(flags, order);
        if (ret) {
                dev_addr = swiotlb_virt_to_bus(hwdev, ret);
                if (dev_addr + size - 1 > dma_mask) {
                        /*
                         * The allocated memory isn't reachable by the device.
                         */
                        free_pages((unsigned long) ret, order);
                        ret = NULL;
                }
        }
        if (!ret) {
                /*
                 * We are either out of memory or the device can't DMA to
                 * GFP_DMA memory; fall back on map_single(), which
                 * will grab memory from the lowest available address range.
                 */
                phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
                if (paddr == SWIOTLB_MAP_ERROR)
                        goto err_warn;

                ret = phys_to_virt(paddr);
                dev_addr = phys_to_dma(hwdev, paddr);

                /* Confirm address can be DMA'd by device */
                if (dev_addr + size - 1 > dma_mask) {
                        printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                               (unsigned long long)dma_mask,
                               (unsigned long long)dev_addr);

                        /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                        swiotlb_tbl_unmap_single(hwdev, paddr,
                                                 size, DMA_TO_DEVICE);
                        goto err_warn;
                }
        }

        *dma_handle = dev_addr;
        memset(ret, 0, size);

        return ret;

err_warn:
        pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
                dev_name(hwdev), size);
        dump_stack();

        return NULL;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 54 | 22.04% | 1 | 4.35%
david mosberger | 42 | 17.14% | 2 | 8.70%
fujita tomonori | 35 | 14.29% | 5 | 21.74%
alexander duyck | 34 | 13.88% | 2 | 8.70%
joerg roedel | 26 | 10.61% | 1 | 4.35%
jan beulich | 14 | 5.71% | 3 | 13.04%
suresh siddha | 11 | 4.49% | 1 | 4.35%
jesse barnes | 11 | 4.49% | 1 | 4.35%
randy dunlap | 5 | 2.04% | 1 | 4.35%
jeremy fitzhardinge | 4 | 1.63% | 1 | 4.35%
yang hongyang | 4 | 1.63% | 1 | 4.35%
konrad rzeszutek wilk | 2 | 0.82% | 1 | 4.35%
al viro | 1 | 0.41% | 1 | 4.35%
ian campbell | 1 | 0.41% | 1 | 4.35%
becky bruce | 1 | 0.41% | 1 | 4.35%
Total | 245 | 100.00% | 23 | 100.00%
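A quick worked example of the reachability test in swiotlb_alloc_coherent() above (the addresses are invented for illustration): with the default 32-bit coherent mask, dma_mask is 0xffffffff. A buffer whose bus address is 0xfffff000 and whose size is 0x2000 gives dev_addr + size - 1 == 0x100000fff, which is above the mask, so the pages are freed and the function falls back to map_single() to get memory from the bounce pool. The same address with size 0x1000 ends exactly at 0xffffffff and is returned directly.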

EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dev_addr)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        WARN_ON(irqs_disabled());
        if (!is_swiotlb_buffer(paddr))
                free_pages((unsigned long)vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
                swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
linus torvalds | 31 | 43.66% | 1 | 11.11%
fujita tomonori | 15 | 21.13% | 3 | 33.33%
suresh siddha | 14 | 19.72% | 1 | 11.11%
david brownell | 6 | 8.45% | 1 | 11.11%
david mosberger | 2 | 2.82% | 1 | 11.11%
konrad rzeszutek wilk | 2 | 2.82% | 1 | 11.11%
alexander duyck | 1 | 1.41% | 1 | 11.11%
Total | 71 | 100.00% | 9 | 100.00%

EXPORT_SYMBOL(swiotlb_free_coherent);
static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
             int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * unless they check for dma_mapping_error (most don't)
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev_name(dev) : "?");

        if (size <= io_tlb_overflow || !do_panic)
                return;

        if (dir == DMA_BIDIRECTIONAL)
                panic("DMA: Random memory could be DMA accessed\n");
        if (dir == DMA_FROM_DEVICE)
                panic("DMA: Random memory could be DMA written\n");
        if (dir == DMA_TO_DEVICE)
                panic("DMA: Random memory could be DMA read\n");
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
andi kleen | 58 | 69.88% | 1 | 14.29%
casey dahlin | 16 | 19.28% | 1 | 14.29%
kay sievers | 3 | 3.61% | 1 | 14.29%
tony luck | 2 | 2.41% | 1 | 14.29%
konrad rzeszutek wilk | 2 | 2.41% | 1 | 14.29%
jan beulich | 1 | 1.20% | 1 | 14.29%
jesse barnes | 1 | 1.20% | 1 | 14.29%
Total | 83 | 100.00% | 7 | 100.00%

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
                            unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = phys_to_dma(dev, phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
                return dev_addr;

        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        /* Oh well, have to allocate and map a bounce buffer. */
        map = map_sing