cregit-Linux how code gets into the kernel

Release 4.14 arch/microblaze/kernel/dma.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>


#define NOT_COHERENT_CACHE


/*
 * Allocate a coherent DMA buffer and report its bus address via *dma_handle.
 *
 * On non-coherent-cache configurations the uncached consistent-memory
 * allocator is used; otherwise pages are taken from the device's NUMA
 * node, zeroed, and the physical address is returned directly (the bus
 * address equals the physical address on this platform).
 */
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	struct page *pg;
	void *vaddr;
	int nid = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	pg = alloc_pages_node(nid, flag, get_order(size));
	if (!pg)
		return NULL;

	vaddr = page_address(pg);
	memset(vaddr, 0, size);
	*dma_handle = virt_to_phys(vaddr);

	return vaddr;
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Simek11496.61%250.00%
Andrzej Pietrasiewicz21.69%125.00%
Krzysztof Kozlowski21.69%125.00%
Total118100.00%4100.00%


/*
 * Release a buffer previously obtained from dma_direct_alloc_coherent().
 * The non-coherent path returns it to the consistent-memory allocator;
 * otherwise the backing pages are simply freed.
 */
static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	unsigned long addr = (unsigned long)vaddr;

	free_pages(addr, get_order(size));
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Simek4992.45%360.00%
Krzysztof Kozlowski23.77%120.00%
Andrzej Pietrasiewicz23.77%120.00%
Total53100.00%5100.00%


/*
 * Map a scatter-gather list for DMA.  Each entry's bus address is its
 * physical address; the CPU cache is synced per entry unless the caller
 * opted out with DMA_ATTR_SKIP_CPU_SYNC.  Returns the entry count.
 */
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_sync(sg_phys(sg), sg->length, direction);
	}

	return nents;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Simek7086.42%233.33%
Alexander Duyck78.64%116.67%
Krzysztof Kozlowski22.47%116.67%
Geliang Tang11.23%116.67%
Eli Billauer11.23%116.67%
Total81100.00%6100.00%


/* Every DMA mask is acceptable for this direct-mapped implementation. */
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Simek17100.00%1100.00%
Total17100.00%1100.00%


/*
 * Map a single page (plus offset) for DMA and return its bus address,
 * which is simply the physical address.  The cache is synced first
 * unless DMA_ATTR_SKIP_CPU_SYNC was requested.
 */
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset, size_t size,
					     enum dma_data_direction direction,
					     unsigned long attrs)
{
	dma_addr_t bus_addr = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_sync(bus_addr, size, direction);

	return bus_addr;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Simek5079.37%350.00%
Alexander Duyck914.29%116.67%
Eli Billauer23.17%116.67%
Krzysztof Kozlowski23.17%116.67%
Total63100.00%6100.00%


/*
 * Unmap a previously mapped page.  No address translation is needed —
 * dma_address is already a physical address here — so all that remains
 * is the optional cache maintenance on the unmap side.
 */
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address, size_t size,
					 enum dma_data_direction direction,
					 unsigned long attrs)
{
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;

	__dma_sync(dma_address, size, direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Simek3373.33%350.00%
Alexander Duyck920.00%116.67%
Krzysztof Kozlowski24.44%116.67%
Eli Billauer12.22%116.67%
Total45100.00%6100.00%


/*
 * Hand a single mapping back to the CPU.  Cache maintenance is only
 * needed when the device may have written the region (DMA_FROM_DEVICE);
 * flushing is pointless otherwise.
 */
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
						  dma_addr_t dma_handle,
						  size_t size,
						  enum dma_data_direction direction)
{
	if (direction != DMA_FROM_DEVICE)
		return;

	__dma_sync(dma_handle, size, direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Eli Billauer38100.00%1100.00%
Total38100.00%1100.00%


/*
 * Hand a single mapping back to the device.  Cache maintenance is only
 * needed when the CPU may have dirtied the region (DMA_TO_DEVICE);
 * invalidating is pointless if the device won't read it.
 */
static inline void dma_direct_sync_single_for_device(struct device *dev,
						     dma_addr_t dma_handle,
						     size_t size,
						     enum dma_data_direction direction)
{
	if (direction != DMA_TO_DEVICE)
		return;

	__dma_sync(dma_handle, size, direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Eli Billauer38100.00%1100.00%
Total38100.00%1100.00%


/*
 * Sync an entire scatter-gather list for CPU access.  Only meaningful
 * for DMA_FROM_DEVICE transfers; each entry is synced at its recorded
 * dma_address and length.
 */
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
					      struct scatterlist *sgl,
					      int nents,
					      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction != DMA_FROM_DEVICE)
		return;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync(sg->dma_address, sg->length, direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Eli Billauer62100.00%1100.00%
Total62100.00%1100.00%


/*
 * Sync an entire scatter-gather list for device access.  Only meaningful
 * for DMA_TO_DEVICE transfers; each entry is synced at its recorded
 * dma_address and length.
 */
static inline void dma_direct_sync_sg_for_device(struct device *dev,
						 struct scatterlist *sgl,
						 int nents,
						 enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction != DMA_TO_DEVICE)
		return;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync(sg->dma_address, sg->length, direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Eli Billauer62100.00%1100.00%
Total62100.00%1100.00%


/*
 * mmap() a coherent buffer into userspace.
 *
 * The requested window (vm_pgoff + vma length, in pages) must lie inside
 * the buffer; otherwise -ENXIO.  On non-coherent-cache configurations the
 * mapping is made non-cached and the pfn is looked up through the
 * consistent allocator; otherwise a plain virt-to-pfn conversion is used.
 * Without an MMU this operation is unsupported.
 */
static int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long vma_npages = vma_pages(vma);
	unsigned long buf_npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pgoff = vma->vm_pgoff;
	unsigned long pfn;

	/* reject windows that start or run past the end of the buffer */
	if (pgoff >= buf_npages || vma_npages > (buf_npages - pgoff))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif

	return remap_pfn_range(vma, vma->vm_start, pfn + pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Lars-Peter Clausen14395.97%125.00%
Thomas Meyer32.01%125.00%
Krzysztof Kozlowski21.34%125.00%
Michal Simek10.67%125.00%
Total149100.00%4100.00%

const struct dma_map_ops dma_direct_ops = { .alloc = dma_direct_alloc_coherent, .free = dma_direct_free_coherent, .mmap = dma_direct_mmap_coherent, .map_sg = dma_direct_map_sg, .dma_supported = dma_direct_dma_supported, .map_page = dma_direct_map_page, .unmap_page = dma_direct_unmap_page, .sync_single_for_cpu = dma_direct_sync_single_for_cpu, .sync_single_for_device = dma_direct_sync_single_for_device, .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, .sync_sg_for_device = dma_direct_sync_sg_for_device, }; EXPORT_SYMBOL(dma_direct_ops); /* Number of entries preallocated for DMA-API debugging */ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
/* Boot-time hook: set up DMA-API debugging with the preallocated pool. */
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Michal Simek17100.00%1100.00%
Total17100.00%1100.00%

fs_initcall(dma_init);

Overall Contributors

PersonTokensPropCommitsCommitProp
Michal Simek41649.35%736.84%
Eli Billauer22426.57%210.53%
Lars-Peter Clausen14817.56%15.26%
Alexander Duyck252.97%15.26%
Krzysztof Kozlowski121.42%15.26%
Andrzej Pietrasiewicz60.71%15.26%
Paul Gortmaker30.36%15.26%
Tejun Heo30.36%15.26%
Thomas Meyer30.36%15.26%
Bart Van Assche10.12%15.26%
Geliang Tang10.12%15.26%
Greg Kroah-Hartman10.12%15.26%
Total843100.00%19100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.