
Release 4.17 lib/swiotlb.c

Directory: lib
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm      Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm      Rename to swiotlb.c and add mark_clean() to avoid
 *                      unnecessary i-cache flushing.
 * 04/07/.. ak          Better overflow handling. Assorted fixes.
 * 05/09/10 linville    Add support for syncing ranges, support syncing for
 *                      DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb      Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <linux/set_memory.h>

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>


#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>


#define OFFSET(val,align) ((unsigned long)        \
                           ( (val) & ( (align) - 1)))


#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */

#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
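
Editorial note, not from the original source: with the usual IO_TLB_SHIFT of 11 each swiotlb slab is 2 KB, so IO_TLB_MIN_SLABS works out to (1 << 20) >> 11 = 512 slabs, and SLABS_PER_PAGE above is 1 << (12 - 11) = 2 on systems with 4 KB pages.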


enum swiotlb_force swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */


static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */

static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */

static unsigned long io_tlb_overflow = 32*1024;


static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */

static unsigned int *io_tlb_list;

static unsigned int io_tlb_index;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */

unsigned int max_segment;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);


static int late_alloc;


static int __init
setup_io_tlb_npages(char *str)
{
    if (isdigit(*str)) {
        io_tlb_nslabs = simple_strtoul(str, &str, 0);
        /* avoid tail segment of size < IO_TLB_SEGSIZE */
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
    }
    if (*str == ',')
        ++str;
    if (!strcmp(str, "force")) {
        swiotlb_force = SWIOTLB_FORCE;
    } else if (!strcmp(str, "noforce")) {
        swiotlb_force = SWIOTLB_NO_FORCE;
        io_tlb_nslabs = 1;
    }

    return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen3537.63%116.67%
Geert Uytterhoeven2425.81%233.33%
Linus Torvalds2324.73%116.67%
David Mosberger-Tang1010.75%116.67%
Yinghai Lu11.08%116.67%
Total93100.00%6100.00%

early_param("swiotlb", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
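
A hedged usage note (editorial, not part of the source): the parser above implements the documented swiotlb= boot parameter, for example

    swiotlb=65536,force

which, with 2 KB slabs (IO_TLB_SHIFT of 11), reserves 65536 << 11 = 128 MB and forces every mapping through the bounce pool, while "noforce" disables bouncing and shrinks the pool to a single slab.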
unsigned long swiotlb_nr_tbl(void)
{
    return io_tlb_nslabs;
}

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori1090.91%150.00%
Konrad Rzeszutek Wilk19.09%150.00%
Total11100.00%2100.00%

EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
unsigned int swiotlb_max_segment(void)
{
    return max_segment;
}

Contributors

PersonTokensPropCommitsCommitProp
Konrad Rzeszutek Wilk11100.00%1100.00%
Total11100.00%1100.00%

EXPORT_SYMBOL_GPL(swiotlb_max_segment);
void swiotlb_set_max_segment(unsigned int val)
{
    if (swiotlb_force == SWIOTLB_FORCE)
        max_segment = 1;
    else
        max_segment = rounddown(val, PAGE_SIZE);
}

Contributors

PersonTokensPropCommitsCommitProp
Konrad Rzeszutek Wilk29100.00%1100.00%
Total29100.00%1100.00%

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
    unsigned long size;

    size = io_tlb_nslabs << IO_TLB_SHIFT;

    return size ? size : (IO_TLB_DEFAULT_SIZE);
}

Contributors

PersonTokensPropCommitsCommitProp
Yinghai Lu27100.00%1100.00%
Total27100.00%1100.00%

static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
    unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
    unsigned char *vstart, *vend;

    if (no_iotlb_memory) {
        pr_warn("software IO TLB: No low mem\n");
        return;
    }

    vstart = phys_to_virt(io_tlb_start);
    vend = phys_to_virt(io_tlb_end);

    printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
           (unsigned long long)io_tlb_start,
           (unsigned long long)io_tlb_end,
           bytes >> 20, vstart, vend - 1);
}

Contributors

PersonTokensPropCommitsCommitProp
Ian Campbell3544.30%116.67%
Alexander Duyck1518.99%233.33%
Yinghai Lu1215.19%116.67%
Björn Helgaas911.39%116.67%
FUJITA Tomonori810.13%116.67%
Total79100.00%6100.00%

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations. This function allows the architecture to
 * call SWIOTLB when the operations are possible. It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
    void *vaddr;
    unsigned long bytes;

    if (no_iotlb_memory || late_alloc)
        return;

    vaddr = phys_to_virt(io_tlb_start);
    bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
    set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
    memset(vaddr, 0, bytes);

    vaddr = phys_to_virt(io_tlb_overflow_buffer);
    bytes = PAGE_ALIGN(io_tlb_overflow);
    set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
    memset(vaddr, 0, bytes);
}

Contributors

PersonTokensPropCommitsCommitProp
Tom Lendacky8385.57%150.00%
Christoph Hellwig1414.43%150.00%
Total97100.00%2100.00%


int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
    void *v_overflow_buffer;
    unsigned long i, bytes;

    bytes = nslabs << IO_TLB_SHIFT;

    io_tlb_nslabs = nslabs;
    io_tlb_start = __pa(tlb);
    io_tlb_end = io_tlb_start + bytes;

    /*
     * Get the overflow emergency buffer
     */
    v_overflow_buffer = memblock_virt_alloc_low_nopanic(
                            PAGE_ALIGN(io_tlb_overflow),
                            PAGE_SIZE);
    if (!v_overflow_buffer)
        return -ENOMEM;

    io_tlb_overflow_buffer = __pa(v_overflow_buffer);

    /*
     * Allocate and initialize the free list array. This array is used
     * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
     * between io_tlb_start and io_tlb_end.
     */
    io_tlb_list = memblock_virt_alloc(
                PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
                PAGE_SIZE);
    io_tlb_orig_addr = memblock_virt_alloc(
                PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
                PAGE_SIZE);
    for (i = 0; i < io_tlb_nslabs; i++) {
        io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
    }
    io_tlb_index = 0;

    if (verbose)
        swiotlb_print_info();

    swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
    return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds5230.41%16.67%
Jan Beulich3520.47%213.33%
Alexander Duyck2916.96%213.33%
FUJITA Tomonori2112.28%213.33%
Yinghai Lu127.02%320.00%
David Mosberger-Tang95.26%213.33%
Konrad Rzeszutek Wilk74.09%16.67%
Santosh Shilimkar52.92%16.67%
Ian Campbell10.58%16.67%
Total171100.00%15100.00%

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init(int verbose)
{
    size_t default_size = IO_TLB_DEFAULT_SIZE;
    unsigned char *vstart;
    unsigned long bytes;

    if (!io_tlb_nslabs) {
        io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
    }

    bytes = io_tlb_nslabs << IO_TLB_SHIFT;

    /* Get IO TLB memory from the low pages */
    vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
    if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
        return;

    if (io_tlb_start)
        memblock_free_early(io_tlb_start,
                            PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
    pr_warn("Cannot allocate SWIOTLB buffer");
    no_iotlb_memory = true;
}

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori5652.83%222.22%
Yinghai Lu3533.02%444.44%
Alexander Duyck87.55%111.11%
David Mosberger-Tang43.77%111.11%
Santosh Shilimkar32.83%111.11%
Total106100.00%9100.00%

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
    unsigned long bytes, req_nslabs = io_tlb_nslabs;
    unsigned char *vstart = NULL;
    unsigned int order;
    int rc = 0;

    if (!io_tlb_nslabs) {
        io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
        io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
    }

    /*
     * Get IO TLB memory from the low pages
     */
    order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
    io_tlb_nslabs = SLABS_PER_PAGE << order;
    bytes = io_tlb_nslabs << IO_TLB_SHIFT;

    while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
        vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
        if (vstart)
            break;
        order--;
    }

    if (!vstart) {
        io_tlb_nslabs = req_nslabs;
        return -ENOMEM;
    }
    if (order != get_order(bytes)) {
        printk(KERN_WARNING "Warning: only able to allocate %ld MB "
               "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
        io_tlb_nslabs = SLABS_PER_PAGE << order;
    }
    rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
    if (rc)
        free_pages((unsigned long)vstart, order);

    return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson11460.96%116.67%
Konrad Rzeszutek Wilk4021.39%116.67%
Alexander Duyck126.42%116.67%
Jan Beulich115.88%116.67%
FUJITA Tomonori94.81%116.67%
Jeremy Fitzhardinge10.53%116.67%
Total187100.00%6100.00%
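
A minimal sketch of how code that missed the early reservation could fall back to the late allocator above; the function name example_late_swiotlb_setup and the 64 MB figure are illustrative assumptions, not taken from this file:

static int example_late_swiotlb_setup(void)
{
    /* Try to carve a 64 MB bounce pool out of GFP_DMA pages. */
    int rc = swiotlb_late_init_with_default_size(64UL << 20);

    if (rc)
        pr_warn("swiotlb: late init failed (%d), bounce buffering unavailable\n", rc);
    return rc;
}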


int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
    unsigned long i, bytes;
    unsigned char *v_overflow_buffer;

    bytes = nslabs << IO_TLB_SHIFT;

    io_tlb_nslabs = nslabs;
    io_tlb_start = virt_to_phys(tlb);
    io_tlb_end = io_tlb_start + bytes;

    set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
    memset(tlb, 0, bytes);

    /*
     * Get the overflow emergency buffer
     */
    v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
                                                 get_order(io_tlb_overflow));
    if (!v_overflow_buffer)
        goto cleanup2;

    set_memory_decrypted((unsigned long)v_overflow_buffer,
                         io_tlb_overflow >> PAGE_SHIFT);
    memset(v_overflow_buffer, 0, io_tlb_overflow);
    io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

    /*
     * Allocate and initialize the free list array. This array is used
     * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
     * between io_tlb_start and io_tlb_end.
     */
    io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                        get_order(io_tlb_nslabs * sizeof(int)));
    if (!io_tlb_list)
        goto cleanup3;

    io_tlb_orig_addr = (phys_addr_t *)
        __get_free_pages(GFP_KERNEL,
                         get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
    if (!io_tlb_orig_addr)
        goto cleanup4;

    for (i = 0; i < io_tlb_nslabs; i++) {
        io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
    }
    io_tlb_index = 0;

    swiotlb_print_info();

    late_alloc = 1;

    swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);

    return 0;

cleanup4:
    free_pages((unsigned long)io_tlb_list,
               get_order(io_tlb_nslabs * sizeof(int)));
    io_tlb_list = NULL;
cleanup3:
    free_pages((unsigned long)v_overflow_buffer,
               get_order(io_tlb_overflow));
    io_tlb_overflow_buffer = 0;
cleanup2:
    io_tlb_end = 0;
    io_tlb_start = 0;
    io_tlb_nslabs = 0;
    max_segment = 0;
    return -ENOMEM;
}

Contributors

PersonTokensPropCommitsCommitProp
Alex Williamson12942.02%17.14%
Alexander Duyck5216.94%321.43%
Jan Beulich4213.68%214.29%
Konrad Rzeszutek Wilk4113.36%214.29%
Tom Lendacky216.84%17.14%
Christoph Hellwig144.56%17.14%
FUJITA Tomonori51.63%214.29%
Becky Bruce20.65%17.14%
Ian Campbell10.33%17.14%
Total307100.00%14100.00%


void __init swiotlb_exit(void)
{
    if (!io_tlb_orig_addr)
        return;

    if (late_alloc) {
        free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
                   get_order(io_tlb_overflow));
        free_pages((unsigned long)io_tlb_orig_addr,
                   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
        free_pages((unsigned long)io_tlb_list,
                   get_order(io_tlb_nslabs * sizeof(int)));
        free_pages((unsigned long)phys_to_virt(io_tlb_start),
                   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
    } else {
        memblock_free_late(io_tlb_overflow_buffer,
                           PAGE_ALIGN(io_tlb_overflow));
        memblock_free_late(__pa(io_tlb_orig_addr),
                           PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
        memblock_free_late(__pa(io_tlb_list),
                           PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
        memblock_free_late(io_tlb_start,
                           PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
    }
    io_tlb_nslabs = 0;
    max_segment = 0;
}

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori13180.37%112.50%
Yinghai Lu127.36%112.50%
Konrad Rzeszutek Wilk84.91%225.00%
Alexander Duyck74.29%225.00%
Santosh Shilimkar42.45%112.50%
Christoph Hellwig10.61%112.50%
Total163100.00%8100.00%


int is_swiotlb_buffer(phys_addr_t paddr)
{
    return paddr >= io_tlb_start && paddr < io_tlb_end;
}

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori17100.00%2100.00%
Total17100.00%2100.00%

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
                           size_t size, enum dma_data_direction dir)
{
    unsigned long pfn = PFN_DOWN(orig_addr);
    unsigned char *vaddr = phys_to_virt(tlb_addr);

    if (PageHighMem(pfn_to_page(pfn))) {
        /* The buffer does not have a mapping. Map it in and copy */
        unsigned int offset = orig_addr & ~PAGE_MASK;
        char *buffer;
        unsigned int sz = 0;
        unsigned long flags;

        while (size) {
            sz = min_t(size_t, PAGE_SIZE - offset, size);

            local_irq_save(flags);
            buffer = kmap_atomic(pfn_to_page(pfn));
            if (dir == DMA_TO_DEVICE)
                memcpy(vaddr, buffer + offset, sz);
            else
                memcpy(buffer + offset, vaddr, sz);
            kunmap_atomic(buffer);
            local_irq_restore(flags);

            size -= sz;
            pfn++;
            vaddr += sz;
            offset = 0;
        }
    } else if (dir == DMA_TO_DEVICE) {
        memcpy(vaddr, phys_to_virt(orig_addr), size);
    } else {
        memcpy(phys_to_virt(orig_addr), vaddr, size);
    }
}

Contributors

PersonTokensPropCommitsCommitProp
Jeremy Fitzhardinge10251.52%240.00%
Becky Bruce6934.85%240.00%
Alexander Duyck2713.64%120.00%
Total198100.00%5100.00%


phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                   dma_addr_t tbl_dma_addr,
                                   phys_addr_t orig_addr, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
    unsigned long flags;
    phys_addr_t tlb_addr;
    unsigned int nslots, stride, index, wrap;
    int i;
    unsigned long mask;
    unsigned long offset_slots;
    unsigned long max_slots;

    if (no_iotlb_memory)
        panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

    if (mem_encrypt_active())
        pr_warn_once("%s is active and system is using DMA bounce buffers\n",
                     sme_active() ? "SME" : "SEV");

    mask = dma_get_seg_boundary(hwdev);

    tbl_dma_addr &= mask;

    offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

    /*
     * Carefully handle integer overflow which can occur when mask == ~0UL.
     */
    max_slots = mask + 1
                ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

    /*
     * For mappings greater than or equal to a page, we limit the stride
     * (and hence alignment) to a page size.
     */
    nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
    if (size >= PAGE_SIZE)
        stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
    else
        stride = 1;

    BUG_ON(!nslots);

    /*
     * Find suitable number of IO TLB entries size that will fit this
     * request and allocate a buffer from that IO TLB pool.
     */
    spin_lock_irqsave(&io_tlb_lock, flags);
    index = ALIGN(io_tlb_index, stride);
    if (index >= io_tlb_nslabs)
        index = 0;
    wrap = index;

    do {
        while (iommu_is_span_boundary(index, nslots, offset_slots,
                                      max_slots)) {
            index += stride;
            if (index >= io_tlb_nslabs)
                index = 0;
            if (index == wrap)
                goto not_found;
        }

        /*
         * If we find a slot that indicates we have 'nslots' number of
         * contiguous buffers, we allocate the buffers from that slot
         * and mark the entries as '0' indicating unavailable.
         */
        if (io_tlb_list[index] >= nslots) {
            int count = 0;

            for (i = index; i < (int) (index + nslots); i++)
                io_tlb_list[i] = 0;
            for (i = index - 1;
                 (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
                 io_tlb_list[i]; i--)
                io_tlb_list[i] = ++count;
            tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

            /*
             * Update the indices to avoid searching in the next
             * round.
             */
            io_tlb_index = ((index + nslots) < io_tlb_nslabs
                            ? (index + nslots) : 0);

            goto found;
        }
        index += stride;
        if (index >= io_tlb_nslabs)
            index = 0;
    } while (index != wrap);

not_found:
    spin_unlock_irqrestore(&io_tlb_lock, flags);
    if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
        dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
    return SWIOTLB_MAP_ERROR;
found:
    spin_unlock_irqrestore(&io_tlb_lock, flags);

    /*
     * Save away the mapping from the original address to the DMA address.
     * This is needed when we sync the memory. Then we sync the buffer if
     * needed.
     */
    for (i = 0; i < nslots; i++)
        io_tlb_orig_addr[index + i] = orig_addr + (i << IO_TLB_SHIFT);
    if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
        (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
        swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

    return tlb_addr;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds24549.49%13.57%
FUJITA Tomonori8316.77%310.71%
Jan Beulich306.06%27.14%
Alexander Duyck234.65%27.14%
David Mosberger-Tang214.24%27.14%
Keir Fraser173.43%13.57%
Tom Lendacky173.43%27.14%
Konrad Rzeszutek Wilk102.02%27.14%
Yinghai Lu91.82%13.57%
Christian König71.41%13.57%
Stefano Stabellini61.21%13.57%
Becky Bruce61.21%27.14%
Andi Kleen61.21%13.57%
Jesse Barnes30.61%13.57%
Eric Sesterhenn / Snakebyte30.61%13.57%
Jeremy Fitzhardinge30.61%13.57%
Nikita Yushchenko20.40%13.57%
Andrew Morton20.40%13.57%
Tony Luck10.20%13.57%
Ian Campbell10.20%13.57%
Total495100.00%28100.00%
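
A worked example of the slot arithmetic above (editorial note, assuming the usual IO_TLB_SHIFT of 11 and 4 KB pages): a 7000-byte request needs nslots = ALIGN(7000, 2048) >> 11 = 4 slots, and because the size is at least a page the search stride is 1 << (12 - 11) = 2, so the allocator only probes every other slot index while looking for 4 contiguous free slots.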

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
           enum dma_data_direction dir, unsigned long attrs)
{
    dma_addr_t start_dma_addr;

    if (swiotlb_force == SWIOTLB_NO_FORCE) {
        dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
                             &phys);
        return SWIOTLB_MAP_ERROR;
    }

    start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
    return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
                                  dir, attrs);
}

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori3851.35%114.29%
Geert Uytterhoeven2533.78%114.29%
Alexander Duyck79.46%228.57%
Konrad Rzeszutek Wilk22.70%114.29%
Alexandre Courbot11.35%114.29%
Christoph Hellwig11.35%114.29%
Total74100.00%7100.00%

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
                              size_t size, enum dma_data_direction dir,
                              unsigned long attrs)
{
    unsigned long flags;
    int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
    int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
    phys_addr_t orig_addr = io_tlb_orig_addr[index];

    /*
     * First, sync the memory before unmapping the entry
     */
    if (orig_addr != INVALID_PHYS_ADDR &&
        !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
        ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
        swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

    /*
     * Return the buffer to the free list by setting the corresponding
     * entries to indicate the number of contiguous entries available.
     * While returning the entries to the free list, we merge the entries
     * with slots below and above the pool being returned.
     */
    spin_lock_irqsave(&io_tlb_lock, flags);
    {
        count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                 io_tlb_list[index + nslots] : 0);
        /*
         * Step 1: return the slots to the free list, merging the
         * slots with succeeding slots
         */
        for (i = index + nslots - 1; i >= index; i--) {
            io_tlb_list[i] = ++count;
            io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        /*
         * Step 2: merge the returned slots with the preceding slots,
         * if available (non zero)
         */
        for (i = index - 1;
             (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
             io_tlb_list[i]; i--)
            io_tlb_list[i] = ++count;
    }
    spin_unlock_irqrestore(&io_tlb_lock, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds15167.41%17.14%
David Mosberger-Tang2310.27%214.29%
Alexander Duyck188.04%321.43%
Jan Beulich114.91%17.14%
Becky Bruce94.02%214.29%
Jesse Barnes62.68%17.14%
Konrad Rzeszutek Wilk31.34%214.29%
Jeremy Fitzhardinge20.89%17.14%
André Goddard Rosa10.45%17.14%
Total224100.00%14100.00%


void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
                             size_t size, enum dma_data_direction dir,
                             enum dma_sync_target target)
{
    int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
    phys_addr_t orig_addr = io_tlb_orig_addr[index];

    if (orig_addr == INVALID_PHYS_ADDR)
        return;
    orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

    switch (target) {
    case SYNC_FOR_CPU:
        if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
            swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
        else
            BUG_ON(dir != DMA_TO_DEVICE);
        break;
    case SYNC_FOR_DEVICE:
        if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
            swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
        else
            BUG_ON(dir != DMA_FROM_DEVICE);
        break;
    default:
        BUG();
    }
}

Contributors

PersonTokensPropCommitsCommitProp
John W. Linville4127.33%16.67%
Linus Torvalds3221.33%16.67%
Becky Bruce3221.33%213.33%
Alexander Duyck106.67%16.67%
Eric Sesterhenn / Snakebyte85.33%16.67%
Jan Beulich85.33%213.33%
Jeremy Fitzhardinge74.67%213.33%
David Mosberger-Tang64.00%16.67%
Konrad Rzeszutek Wilk53.33%320.00%
Tony Luck10.67%16.67%
Total150100.00%15100.00%

#ifdef CONFIG_DMA_DIRECT_OPS
static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
                                   size_t size)
{
    u64 mask = DMA_BIT_MASK(32);

    if (dev && dev->coherent_dma_mask)
        mask = dev->coherent_dma_mask;
    return addr + size - 1 <= mask;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig1938.78%112.50%
FUJITA Tomonori1530.61%337.50%
Linus Torvalds714.29%112.50%
Yang Hongyang48.16%112.50%
David Mosberger-Tang24.08%112.50%
Jan Beulich24.08%112.50%
Total49100.00%8100.00%


static void *
swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
                     unsigned long attrs)
{
    phys_addr_t phys_addr;

    if (swiotlb_force == SWIOTLB_NO_FORCE)
        goto out_warn;

    phys_addr = swiotlb_tbl_map_single(dev,
                        __phys_to_dma(dev, io_tlb_start),
                        0, size, DMA_FROM_DEVICE, attrs);
    if (phys_addr == SWIOTLB_MAP_ERROR)
        goto out_warn;

    *dma_handle = __phys_to_dma(dev, phys_addr);
    if (!dma_coherent_ok(dev, *dma_handle, size))
        goto out_unmap;

    memset(phys_to_virt(phys_addr), 0, size);
    return phys_to_virt(phys_addr);

out_unmap:
    dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
             (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
             (unsigned long long)*dma_handle);

    /*
     * DMA_TO_DEVICE to avoid memcpy in unmap_single.
     * DMA_ATTR_SKIP_CPU_SYNC is optional.
     */
    swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
                             DMA_ATTR_SKIP_CPU_SYNC);
out_warn:
    if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
        dev_warn(dev, "swiotlb: coherent allocation failed, size=%zu\n",
                 size);
        dump_stack();
    }
    return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig12767.91%210.00%
Linus Torvalds136.95%15.00%
Suresh B. Siddha105.35%15.00%
Alexander Duyck63.21%315.00%
Jesse Barnes63.21%15.00%
Jan Beulich63.21%15.00%
FUJITA Tomonori63.21%210.00%
Randy Dunlap31.60%15.00%
Joerg Roedel21.07%15.00%
David Mosberger-Tang21.07%15.00%
Jean Delvare10.53%15.00%
Becky Bruce10.53%15.00%
Jeremy Fitzhardinge10.53%15.00%
Konrad Rzeszutek Wilk10.53%15.00%
Michel Dänzer10.53%15.00%
Takashi Iwai10.53%15.00%
Total187100.00%20100.00%


static bool swiotlb_free_buffer(struct device *dev, size_t size,
                                dma_addr_t dma_addr)
{
    phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);

    WARN_ON_ONCE(irqs_disabled());

    if (!is_swiotlb_buffer(phys_addr))
        return false;

    /*
     * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
     * DMA_ATTR_SKIP_CPU_SYNC is optional.
     */
    swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
                             DMA_ATTR_SKIP_CPU_SYNC);
    return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig3354.10%112.50%
Linus Torvalds1016.39%112.50%
FUJITA Tomonori914.75%337.50%
David Brownell58.20%112.50%
Suresh B. Siddha34.92%112.50%
David Mosberger-Tang11.64%112.50%
Total61100.00%8100.00%

#endif
static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
             int do_panic)
{
    if (swiotlb_force == SWIOTLB_NO_FORCE)
        return;

    /*
     * Ran out of IOMMU space for this operation. This is very bad.
     * Unfortunately the drivers cannot handle this operation properly,
     * unless they check for dma_mapping_error (most don't).
     * When the mapping is small enough return a static buffer to limit
     * the damage, or panic when the transfer is too big.
     */
    dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
                        size);

    if (size <= io_tlb_overflow || !do_panic)
        return;

    if (dir == DMA_BIDIRECTIONAL)
        panic("DMA: Random memory could be DMA accessed\n");
    if (dir == DMA_FROM_DEVICE)
        panic("DMA: Random memory could be DMA written\n");
    if (dir == DMA_TO_DEVICE)
        panic("DMA: Random memory could be DMA read\n");
}

Contributors

PersonTokensPropCommitsCommitProp
Andi Kleen5061.73%116.67%
Casey Dahlin1619.75%116.67%
Geert Uytterhoeven1113.58%233.33%
Tony Luck22.47%116.67%
Konrad Rzeszutek Wilk22.47%116.67%
Total81100.00%6100.00%

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
                            unsigned long attrs)
{
    phys_addr_t map, phys = page_to_phys(page) + offset;
    dma_addr_t dev_addr = phys_to_dma(dev, phys);

    BUG_ON(dir == DMA_NONE);
    /*
     * If the address happens to be in the device's DMA window,
     * we can safely return the device addr and not worry about bounce
     * buffering it.
     */
    if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
        return dev_addr;

    trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

    /* Oh well, have to allocate and map a bounce buffer. */
    map = map_single(dev, phys, size, dir, attrs);
    if (map == SWIOTLB_MAP_ERROR) {
        swiotlb_full(dev, size, dir, 1);
        return __phys_to_dma(dev, io_tlb_overflow_buffer);
    }

    dev_addr = __phys_to_dma(dev, map);

    /* Ensure that the address returned is DMA'ble */
    if (dma_capable(dev, dev_addr, size))
        return dev_addr;

    attrs |= DMA_ATTR_SKIP_CPU_SYNC;
    swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

    return __phys_to_dma(dev, io_tlb_overflow_buffer);
}

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori5228.73%626.09%
Linus Torvalds3820.99%14.35%
Andi Kleen2916.02%14.35%
Alexander Duyck2513.81%417.39%
Zoltan Kiss116.08%14.35%
David Mosberger-Tang94.97%14.35%
Eric Sesterhenn / Snakebyte31.66%14.35%
Christoph Hellwig31.66%14.35%
Krzysztof Kozlowski21.10%14.35%
Jeremy Fitzhardinge21.10%14.35%
Geert Uytterhoeven21.10%14.35%
Arthur Kepner21.10%14.35%
Jan Beulich10.55%14.35%
Becky Bruce10.55%14.35%
Tony Luck10.55%14.35%
Total181100.00%23100.00%
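
A minimal driver-side sketch (editorial, not from this file; the function name example_map_rx_buffer is illustrative): drivers never call swiotlb_map_page() directly, they call dma_map_page() from <linux/dma-mapping.h>, which dispatches here when the swiotlb dma_map_ops are in effect for the device.

static int example_map_rx_buffer(struct device *dev, struct page *page,
                                 dma_addr_t *handle)
{
    /* May bounce through the swiotlb pool if the page is not DMA'ble. */
    *handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, *handle))
        return -ENOMEM;    /* e.g. the bounce pool was exhausted */
    return 0;
}

The matching teardown would be dma_unmap_page(dev, *handle, PAGE_SIZE, DMA_FROM_DEVICE), which copies the bounced data back for DMA_FROM_DEVICE mappings.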

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, enum dma_data_direction dir,
                         unsigned long attrs)
{
    phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

    BUG_ON(dir == DMA_NONE);

    if (is_swiotlb_buffer(paddr)) {
        swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
        return;
    }

    if (dir != DMA_FROM_DEVICE)
        return;

    /*
     * phys_to_virt doesn't work with highmem pages but we could
     * call dma_mark_clean() with a highmem page here. However, we
     * are fine since dma_mark_clean() is null on POWERPC. We can
     * make dma_mark_clean() take a physical address if necessary.
     */
    dma_mark_clean(phys_to_virt(paddr), size);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds3946.99%18.33%
FUJITA Tomonori1315.66%325.00%
David Mosberger-Tang910.84%18.33%
Becky Bruce910.84%216.67%
Alexander Duyck67.23%18.33%
Konrad Rzeszutek Wilk33.61%216.67%
Eric Sesterhenn / Snakebyte33.61%18.33%
Jan Beulich11.20%18.33%
Total83100.00%12100.00%


void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs)
{
    unmap_single(hwdev, dev_addr, size, dir, attrs);
}

Contributors

PersonTokensPropCommitsCommitProp
Becky Bruce3389.19%133.33%
Krzysztof Kozlowski25.41%133.33%
Alexander Duyck25.41%133.33%
Total37100.00%3100.00%

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so. At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
    phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

    BUG_ON(dir == DMA_NONE);

    if (is_swiotlb_buffer(paddr)) {
        swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
        return;
    }

    if (dir != DMA_FROM_DEVICE)
        return;

    dma_mark_clean(phys_to_virt(paddr), size);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton4757.32%17.14%
FUJITA Tomonori1214.63%321.43%
Becky Bruce78.54%214.29%
John W. Linville67.32%214.29%
Konrad Rzeszutek Wilk56.10%321.43%
Eric Sesterhenn / Snakebyte33.66%17.14%
Jan Beulich11.22%17.14%
Linus Torvalds11.22%17.14%
Total82100.00%14100.00%


void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir)
{
    swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds2060.61%116.67%
John W. Linville618.18%233.33%
David Mosberger-Tang412.12%116.67%
FUJITA Tomonori26.06%116.67%
Andrew Morton13.03%116.67%
Total33100.00%6100.00%


void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir)
{
    swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

Contributors

PersonTokensPropCommitsCommitProp
John W. Linville2472.73%240.00%
Linus Torvalds618.18%120.00%
FUJITA Tomonori26.06%120.00%
David Mosberger-Tang13.03%120.00%
Total33100.00%5100.00%

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface. Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     enum dma_data_direction dir, unsigned long attrs)
{
    struct scatterlist *sg;
    int i;

    BUG_ON(dir == DMA_NONE);

    for_each_sg(sgl, sg, nelems, i) {
        phys_addr_t paddr = sg_phys(sg);
        dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

        if (swiotlb_force == SWIOTLB_FORCE ||
            !dma_capable(hwdev, dev_addr, sg->length)) {
            phys_addr_t map = map_single(hwdev, sg_phys(sg),
                                         sg->length, dir, attrs);
            if (map == SWIOTLB_MAP_ERROR) {
                /* Don't panic here, we expect map_sg users
                   to do proper error handling. */
                swiotlb_full(hwdev, sg->length, dir, 0);
                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, attrs);
                sg_dma_len(sgl) = 0;
                return 0;
            }
            sg->dma_address = __phys_to_dma(hwdev, map);
        } else
            sg->dma_address = dev_addr;
        sg_dma_len(sg) = sg->length;
    }
    return nelems;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds4423.04%26.67%
Andi Kleen4121.47%26.67%
Jens Axboe178.90%13.33%
Jan Beulich126.28%26.67%
FUJITA Tomonori115.76%620.00%
Becky Bruce115.76%13.33%
Alexander Duyck94.71%310.00%
David Mosberger-Tang84.19%13.33%
Arthur Kepner63.14%13.33%
Eunbong Song 송은봉63.14%13.33%
Jeremy Fitzhardinge52.62%26.67%
Björn Helgaas52.62%13.33%
Ian Campbell42.09%13.33%
Eric Sesterhenn / Snakebyte31.57%13.33%
Tony Luck31.57%13.33%
Krzysztof Kozlowski21.05%13.33%
Geert Uytterhoeven21.05%13.33%
Christoph Hellwig10.52%13.33%
Jesse Barnes10.52%13.33%
Total191100.00%30100.00%
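
A minimal scatter-gather sketch from the driver side (editorial, not from this file; example_map_sg is an illustrative name): drivers reach swiotlb_map_sg_attrs() through dma_map_sg(), and afterwards read each segment's bus address and length with sg_dma_address()/sg_dma_len().

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nents)
{
    struct scatterlist *sg;
    int i, mapped;

    mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    if (!mapped)
        return -ENOMEM;    /* nothing was mapped, e.g. bounce pool full */

    for_each_sg(sgl, sg, mapped, i)
        pr_debug("seg %d: %pad + %u\n", i,
                 &sg_dma_address(sg), sg_dma_len(sg));
    return mapped;
}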

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                       int nelems, enum dma_data_direction dir,
                       unsigned long attrs)
{
    struct scatterlist *sg;
    int i;

    BUG_ON(dir == DMA_NONE);

    for_each_sg(sgl, sg, nelems, i)
        unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds3043.48%110.00%
Jens Axboe1724.64%110.00%
David Mosberger-Tang57.25%110.00%
Eric Sesterhenn / Snakebyte34.35%110.00%
Eunbong Song 송은봉34.35%110.00%
Arthur Kepner34.35%110.00%
FUJITA Tomonori22.90%110.00%
Krzysztof Kozlowski22.90%110.00%
Björn Helgaas22.90%110.00%
Alexander Duyck22.90%110.00%
Total69100.00%10100.00%

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir,
                enum dma_sync_target target)
{
    struct scatterlist *sg;
    int i;

    for_each_sg(sgl, sg, nelems, i)
        swiotlb_sync_single(hwdev, sg->dma_address,
                            sg_dma_len(sg), dir, target);
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton2946.03%110.00%
Jens Axboe1726.98%110.00%
John W. Linville69.52%220.00%
Konrad Rzeszutek Wilk46.35%220.00%
Eunbong Song 송은봉34.76%110.00%
Jeremy Fitzhardinge23.17%110.00%
Becky Bruce11.59%110.00%
Linus Torvalds11.59%110.00%
Total63100.00%10100.00%


void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, enum dma_data_direction dir)
{
    swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds1851.43%228.57%
John W. Linville1234.29%228.57%
FUJITA Tomonori25.71%114.29%
David Mosberger-Tang25.71%114.29%
Andrew Morton12.86%114.29%
Total35100.00%7100.00%


void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, enum dma_data_direction dir)
{
    swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

Contributors

PersonTokensPropCommitsCommitProp
John W. Linville2365.71%240.00%
Linus Torvalds925.71%120.00%
FUJITA Tomonori25.71%120.00%
David Mosberger-Tang12.86%120.00%
Total35100.00%5100.00%


int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
    return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
}

Contributors

PersonTokensPropCommitsCommitProp
Andrew Morton1040.00%120.00%
Andi Kleen728.00%120.00%
FUJITA Tomonori520.00%120.00%
Jeremy Fitzhardinge28.00%120.00%
Christoph Hellwig14.00%120.00%
Total25100.00%5100.00%

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
    return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}

Contributors

PersonTokensPropCommitsCommitProp
David Mosberger-Tang1664.00%342.86%
Tony Luck520.00%114.29%
Jeremy Fitzhardinge28.00%114.29%
Jan Beulich14.00%114.29%
Christoph Hellwig14.00%114.29%
Total25100.00%7100.00%

#ifdef CONFIG_DMA_DIRECT_OPS
void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                    gfp_t gfp, unsigned long attrs)
{
    void *vaddr;

    /* temporary workaround: */
    if (gfp & __GFP_NOWARN)
        attrs |= DMA_ATTR_NO_WARN;

    /*
     * Don't print a warning when the first allocation attempt fails.
     * swiotlb_alloc_coherent() will print a warning when the DMA memory
     * allocation ultimately failed.
     */
    gfp |= __GFP_NOWARN;

    vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
    if (!vaddr)
        vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
    return vaddr;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig81100.00%2100.00%
Total81100.00%2100.00%


void swiotlb_free(struct device *dev, size_t size, void *vaddr,
                  dma_addr_t dma_addr, unsigned long attrs)
{
    if (!swiotlb_free_buffer(dev, size, dma_addr))
        dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig49100.00%2100.00%
Total49100.00%2100.00%

const struct dma_map_ops swiotlb_dma_ops = {
    .mapping_error          = swiotlb_dma_mapping_error,
    .alloc                  = swiotlb_alloc,
    .free                   = swiotlb_free,
    .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
    .sync_single_for_device = swiotlb_sync_single_for_device,
    .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
    .sync_sg_for_device     = swiotlb_sync_sg_for_device,
    .map_sg                 = swiotlb_map_sg_attrs,
    .unmap_sg               = swiotlb_unmap_sg_attrs,
    .map_page               = swiotlb_map_page,
    .unmap_page             = swiotlb_unmap_page,
    .dma_supported          = dma_direct_supported,
};
#endif /* CONFIG_DMA_DIRECT_OPS */
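
A sketch only (editorial, not copied from any architecture; example_arch_setup_dma and the global dma_ops pointer here are illustrative assumptions): a platform that wants these ops typically reserves the pool at boot and then installs the table so that dma_map_*() calls for its devices dispatch through swiotlb.

void __init example_arch_setup_dma(void)
{
    swiotlb_init(1);             /* reserve the bounce pool, print the banner */
    dma_ops = &swiotlb_dma_ops;  /* route the generic DMA API through swiotlb */
}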

Overall Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds79819.41%32.34%
FUJITA Tomonori52112.67%2217.19%
Christoph Hellwig43010.46%118.59%
Alexander Duyck2636.40%97.03%
Alex Williamson2536.15%10.78%
Becky Bruce1924.67%75.47%
Konrad Rzeszutek Wilk1884.57%75.47%
Andi Kleen1844.48%21.56%
Jan Beulich1654.01%64.69%
David Mosberger-Tang1453.53%64.69%
Jeremy Fitzhardinge1353.28%43.12%
Tom Lendacky1253.04%32.34%
Yinghai Lu1192.89%43.12%
John W. Linville1182.87%21.56%
Andrew Morton902.19%32.34%
Geert Uytterhoeven661.61%32.34%
Jens Axboe511.24%10.78%
Ian Campbell421.02%32.34%
Eric Sesterhenn / Snakebyte260.63%10.78%
Jesse Barnes200.49%10.78%
Keir Fraser170.41%10.78%
Casey Dahlin160.39%10.78%
Björn Helgaas160.39%21.56%
Tony Luck150.36%21.56%
Zoltan Kiss140.34%10.78%
Suresh B. Siddha130.32%10.78%
Santosh Shilimkar120.29%10.78%
Eunbong Song 송은봉120.29%10.78%
Arthur Kepner110.27%10.78%
Krzysztof Kozlowski80.19%10.78%
Christian König70.17%10.78%
Stefano Stabellini60.15%10.78%
David Brownell50.12%10.78%
Thomas Gleixner40.10%10.78%
Yang Hongyang40.10%10.78%
Tejun Heo30.07%10.78%
Thierry Reding30.07%10.78%
Randy Dunlap30.07%10.78%
Joerg Roedel20.05%10.78%
Nikita Yushchenko20.05%10.78%
Takashi Iwai10.02%10.78%
Jean Delvare10.02%10.78%
Michel Dänzer10.02%10.78%
Alexandre Courbot10.02%10.78%
Paul Gortmaker10.02%10.78%
Uwe Kleine-König10.02%10.78%
André Goddard Rosa10.02%10.78%
Total4111100.00%128100.00%