cregit-Linux: how code gets into the kernel

Release 4.14 arch/blackfin/kernel/dma-mapping.c

/*
 * Dynamic DMA mapping support
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/bitmap.h>


static spinlock_t dma_page_lock;

static unsigned long *dma_page;

static unsigned int dma_pages;

static unsigned long dma_base;

static unsigned long dma_size;

static unsigned int dma_initialized;


static void dma_alloc_init(unsigned long start, unsigned long end) { spin_lock_init(&dma_page_lock); dma_initialized = 0; dma_page = (unsigned long *)__get_free_page(GFP_KERNEL); memset(dma_page, 0, PAGE_SIZE); dma_base = PAGE_ALIGN(start); dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start); dma_pages = dma_size >> PAGE_SHIFT; memset((void *)dma_base, 0, DMA_UNCACHED_REGION); dma_initialized = 1; printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__, dma_page, dma_pages, dma_base); }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu9897.03%125.00%
Barry Song10.99%125.00%
Michael Hennerich10.99%125.00%
Harvey Harrison10.99%125.00%
Total101100.00%4100.00%


static inline unsigned int get_pages(size_t size) { return ((size - 1) >> PAGE_SHIFT) + 1; }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu24100.00%1100.00%
Total24100.00%1100.00%


static unsigned long __alloc_dma_pages(unsigned int pages) { unsigned long ret = 0, flags; unsigned long start; if (dma_initialized == 0) dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend); spin_lock_irqsave(&dma_page_lock, flags); start = bitmap_find_next_zero_area(dma_page, dma_pages, 0, pages, 0); if (start < dma_pages) { ret = dma_base + (start << PAGE_SHIFT); bitmap_set(dma_page, start, pages); } spin_unlock_irqrestore(&dma_page_lock, flags); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu6969.70%133.33%
Akinobu Mita2626.26%133.33%
Michael Hennerich44.04%133.33%
Total99100.00%3100.00%


static void __free_dma_pages(unsigned long addr, unsigned int pages) { unsigned long page = (addr - dma_base) >> PAGE_SHIFT; unsigned long flags; if ((page + pages) > dma_pages) { printk(KERN_ERR "%s: freeing outside range.\n", __func__); BUG(); } spin_lock_irqsave(&dma_page_lock, flags); bitmap_clear(dma_page, page, pages); spin_unlock_irqrestore(&dma_page_lock, flags); }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu6988.46%125.00%
Akinobu Mita56.41%125.00%
Michael Hennerich33.85%125.00%
Harvey Harrison11.28%125.00%
Total78100.00%4100.00%


static void *bfin_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { void *ret; ret = (void *)__alloc_dma_pages(get_pages(size)); if (ret) { memset(ret, 0, size); *dma_handle = virt_to_phys(ret); } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu6491.43%133.33%
Christoph Hellwig45.71%133.33%
Krzysztof Kozlowski22.86%133.33%
Total70100.00%3100.00%


static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs) { __free_dma_pages((unsigned long)vaddr, get_pages(size)); }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu3384.62%133.33%
Christoph Hellwig410.26%133.33%
Krzysztof Kozlowski25.13%133.33%
Total39100.00%3100.00%

/*
 * Streaming DMA mappings
 */
void __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) { __dma_sync_inline(addr, size, dir); }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu1562.50%133.33%
Barry Song833.33%133.33%
Sonic Zhang14.17%133.33%
Total24100.00%3100.00%

EXPORT_SYMBOL(__dma_sync);
static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents, enum dma_data_direction direction, unsigned long attrs) { struct scatterlist *sg; int i; for_each_sg(sg_list, sg, nents, i) { sg->dma_address = (dma_addr_t) sg_virt(sg); if (attrs & DMA_ATTR_SKIP_CPU_SYNC) continue; __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); } return nents; }

Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu4047.62%222.22%
Sonic Zhang2529.76%222.22%
Alexander Duyck78.33%111.11%
Christoph Hellwig44.76%111.11%
Barry Song33.57%111.11%
Jens Axboe33.57%111.11%
Krzysztof Kozlowski22.38%111.11%
Total84100.00%9100.00%


static void bfin_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list, int nelems, enum dma_data_direction direction) { struct scatterlist *sg; int i; for_each_sg(sg_list, sg, nelems, i) { sg->dma_address = (dma_addr_t) sg_virt(sg); __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); } }

Contributors

PersonTokensPropCommitsCommitProp
Barry Song2941.43%125.00%
Bryan Wu2332.86%125.00%
Sonic Zhang1622.86%125.00%
Christoph Hellwig22.86%125.00%
Total70100.00%4100.00%


static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, unsigned long attrs) { dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) _dma_sync(handle, size, dir); return handle; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig5683.58%133.33%
Alexander Duyck913.43%133.33%
Krzysztof Kozlowski22.99%133.33%
Total67100.00%3100.00%


static inline void bfin_dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir) { _dma_sync(handle, size, dir); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig31100.00%1100.00%
Total31100.00%1100.00%

const struct dma_map_ops bfin_dma_ops = { .alloc = bfin_dma_alloc, .free = bfin_dma_free, .map_page = bfin_dma_map_page, .map_sg = bfin_dma_map_sg, .sync_single_for_device = bfin_dma_sync_single_for_device, .sync_sg_for_device = bfin_dma_sync_sg_for_device, }; EXPORT_SYMBOL(bfin_dma_ops);

Overall Contributors

PersonTokensPropCommitsCommitProp
Bryan Wu48861.77%211.76%
Christoph Hellwig13817.47%15.88%
Barry Song445.57%15.88%
Sonic Zhang425.32%317.65%
Akinobu Mita344.30%15.88%
Alexander Duyck162.03%15.88%
Michael Hennerich91.14%15.88%
Krzysztof Kozlowski81.01%15.88%
Jens Axboe30.38%15.88%
Paul Gortmaker30.38%15.88%
Harvey Harrison20.25%15.88%
Robin Getz10.13%15.88%
Adrian Bunk10.13%15.88%
Bart Van Assche10.13%15.88%
Total790100.00%17100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.