cregit-Linux: how code gets into the kernel

Release 4.14: arch/mips/mm/dma-default.c

Directory: arch/mips/mm
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */

enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);

int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
        coherentio = IO_COHERENCE_ENABLED;
        pr_info("Hardware DMA cache coherency (command line)\n");
        return 0;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Steven J. Hill             22      95.65%   1        50.00%
Paul Burton                1       4.35%    1        50.00%
Total                      23      100.00%  2        100.00%

early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
        coherentio = IO_COHERENCE_DISABLED;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Steven J. Hill             22      95.65%   1        50.00%
Paul Burton                1       4.35%    1        50.00%
Total                      23      100.00%  2        100.00%

early_param("nocoherentio", setnocoherentio);
#endif
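
Both switches are consumed from the kernel command line: early_param() arranges for the handlers above to run during parse_early_param(), before ordinary __setup() parameters. A hypothetical bootloader invocation, for illustration only:

/* e.g. from U-Boot: setenv bootargs "console=ttyS0 nocoherentio" */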

static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
{
        return pfn_to_page(
                plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Franck Bui-Huu             14      46.67%   1        33.33%
Dezhong Diao               9       30.00%   1        33.33%
Kevin Cernekee             7       23.33%   1        33.33%
Total                      30      100.00%  3        100.00%

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems, the
 * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
        if (plat_device_is_coherent(dev))
                return false;

        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
                return true;

        default:
                /*
                 * Presence of MAARs suggests that the CPU supports
                 * speculatively prefetching data, and therefore requires
                 * the post-DMA flush/invalidate.
                 */
                return cpu_has_maar;
        }
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Paul Burton                25      54.35%   1        20.00%
Ralf Bächle                18      39.13%   2        40.00%
Jim Quinlan                2       4.35%    1        20.00%
Jerin Jacob                1       2.17%    1        20.00%
Total                      46      100.00%  5        100.00%

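The check above matters on the streaming-DMA paths further down. A minimal driver-side sketch (device, buffer, and length names are invented; only the generic DMA API is used, which dispatches into the mips_dma_* callbacks below):

/* Hypothetical receive path: the device DMAs into buf, the CPU reads it
 * only after the unmap. */
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

if (dma_mapping_error(dev, handle))
        return -ENOMEM;
/* ... program 'handle' into the device and wait for completion ... */
dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
/* On R10000/R12000/BMIPS5000, or CPUs with MAARs, the unmap lands in
 * mips_dma_unmap_page() below, where cpu_needs_post_dma_flush() forces an
 * extra invalidate before the CPU reads buf. */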

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        gfp_t dma_flag;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
        if (dev == NULL)
                dma_flag = __GFP_DMA;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
        if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
                dma_flag = __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
        if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
        if (dev == NULL ||
            dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
                dma_flag = __GFP_DMA;
        else
#endif
                dma_flag = 0;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp | dma_flag;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Ralf Bächle                134     73.63%   2        28.57%
Andrew Morton              29      15.93%   2        28.57%
Matt Redfearn              12      6.59%    1        14.29%
James Hogan                6       3.30%    1        14.29%
Al Viro                    1       0.55%    1        14.29%
Total                      182     100.00%  7        100.00%

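The zone picked above is driven entirely by the device's coherent_dma_mask. A hedged sketch of the driver-side setup that feeds this logic (the 30-bit limit is an invented example):

/* A device that can only address the low 1 GiB: with CONFIG_ZONE_DMA set,
 * massage_gfp_flags() then steers its coherent allocations to __GFP_DMA. */
if (dma_set_coherent_mask(dev, DMA_BIT_MASK(30)))
        dev_warn(dev, "no usable DMA addressing limits\n");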

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        void *ret;
        struct page *page = NULL;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        gfp = massage_gfp_flags(dev, gfp);

        if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 gfp);
        if (!page)
                page = alloc_pages(gfp, get_order(size));

        if (!page)
                return NULL;

        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = plat_map_dma_mem(dev, ret, size);
        if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
            !plat_device_is_coherent(dev)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Zubair Lutfullah Kakakhel  61      34.66%   1        9.09%
Andrew Morton              57      32.39%   1        9.09%
Ralf Bächle                39      22.16%   2        18.18%
Christoph Hellwig          7       3.98%    1        9.09%
Qais Yousef                3       1.70%    1        9.09%
David Daney                2       1.14%    1        9.09%
Lucas Stach                2       1.14%    1        9.09%
Krzysztof Kozlowski        2       1.14%    1        9.09%
Andrzej Pietrasiewicz      2       1.14%    1        9.09%
Al Viro                    1       0.57%    1        9.09%
Total                      176     100.00%  11       100.00%

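Drivers reach this function through dma_alloc_coherent() rather than calling it directly. A minimal usage sketch (the ring-buffer names and size are hypothetical):

dma_addr_t ring_dma;
void *ring;

/* Returns a CPU pointer (possibly remapped uncached via UNCAC_ADDR above)
 * and the bus address to hand to the device. */
ring = dma_alloc_coherent(dev, SZ_64K, &ring_dma, GFP_KERNEL);
if (!ring)
        return -ENOMEM;
/* ... give ring_dma to the hardware ... */
dma_free_coherent(dev, SZ_64K, ring, ring_dma);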

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long addr = (unsigned long) vaddr;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        page = virt_to_page((void *) addr);

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              45      37.82%   1        10.00%
Zubair Lutfullah Kakakhel  42      35.29%   1        10.00%
David Daney                9       7.56%    3        30.00%
Ralf Bächle                8       6.72%    1        10.00%
Christoph Hellwig          7       5.88%    1        10.00%
Kevin Cernekee             4       3.36%    1        10.00%
Andrzej Pietrasiewicz      2       1.68%    1        10.00%
Krzysztof Kozlowski        2       1.68%    1        10.00%
Total                      119     100.00%  10       100.00%


static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs)
{
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)cpu_addr;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
        int ret = -ENXIO;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        pfn = page_to_pfn(virt_to_page((void *)addr));

        if (attrs & DMA_ATTR_WRITE_COMBINE)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }

        return ret;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Alex Smith                 199     97.55%   1        33.33%
Krzysztof Kozlowski        4       1.96%    1        33.33%
Vladimir Murzin            1       0.49%    1        33.33%
Total                      204     100.00%  3        100.00%

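A coherent buffer allocated as above can be exported to user space through this hook. A hedged sketch of a driver's mmap file operation (the foo_dev structure and its fields are invented):

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct foo_dev *fd = file->private_data;

        /* Reaches mips_dma_mmap() through the dma_map_ops table below. */
        return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
                                 fd->dma_handle, fd->size);
}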

static inline void __dma_sync_virtual(void *addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;

        default:
                BUG();
        }
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              58      79.45%   1        50.00%
Dezhong Diao               15      20.55%   1        50.00%
Total                      73      100.00%  2        100.00%

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        size_t left = size;

        do {
                size_t len = left;

                if (PageHighMem(page)) {
                        void *addr;

                        if (offset + len > PAGE_SIZE) {
                                if (offset >= PAGE_SIZE) {
                                        page += offset >> PAGE_SHIFT;
                                        offset &= ~PAGE_MASK;
                                }
                                len = PAGE_SIZE - offset;
                        }

                        addr = kmap_atomic(page);
                        __dma_sync_virtual(addr + offset, len, direction);
                        kunmap_atomic(addr);
                } else
                        __dma_sync_virtual(page_address(page) + offset,
                                           size, direction);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Dezhong Diao               138     100.00%  1        100.00%
Total                      138     100.00%  1        100.00%


static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
        size_t size, enum dma_data_direction direction, unsigned long attrs)
{
        if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(dma_addr_to_page(dev, dma_addr),
                           dma_addr & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Ralf Bächle                25      33.78%   2        18.18%
Andrew Morton              21      28.38%   1        9.09%
Alexander Duyck            7       9.46%    1        9.09%
Kevin Cernekee             6       8.11%    2        18.18%
Dezhong Diao               6       8.11%    1        9.09%
David Daney                6       8.11%    2        18.18%
Krzysztof Kozlowski        2       2.70%    1        9.09%
Jim Quinlan                1       1.35%    1        9.09%
Total                      74      100.00%  11       100.00%


static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
        int nents, enum dma_data_direction direction, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
                sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
                                  sg->offset;
        }

        return nents;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              48      42.11%   1        11.11%
Dezhong Diao               16      14.04%   1        11.11%
Akinobu Mita               15      13.16%   1        11.11%
Jayachandran C             13      11.40%   1        11.11%
Alexander Duyck            7       6.14%    1        11.11%
Ralf Bächle                7       6.14%    1        11.11%
David Daney                4       3.51%    1        11.11%
Krzysztof Kozlowski        2       1.75%    1        11.11%
Thomas Bogendoerfer        2       1.75%    1        11.11%
Total                      114     100.00%  9        100.00%

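A sketch of the caller's side of the scatter/gather path (the fragment buffers and lengths are hypothetical):

/* Two-fragment transmit through the scatterlist API. */
struct scatterlist sgl[2];

sg_init_table(sgl, 2);
sg_set_buf(&sgl[0], hdr, hdr_len);
sg_set_buf(&sgl[1], payload, payload_len);

if (!dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE))
        return -EIO;
/* ... program sg_dma_address()/sg_dma_len() of each entry into the device ... */
dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);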

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
        unsigned long attrs)
{
        if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(page, offset, size, direction);

        return plat_map_dma_mem_page(dev, page) + offset;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              34      50.75%   1        14.29%
David Daney                12      17.91%   1        14.29%
Dezhong Diao               7       10.45%   1        14.29%
Alexander Duyck            7       10.45%   1        14.29%
Atsushi Nemoto             3       4.48%    1        14.29%
Ralf Bächle                2       2.99%    1        14.29%
Krzysztof Kozlowski        2       2.99%    1        14.29%
Total                      67      100.00%  7        100.00%


static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        int nhwentries, enum dma_data_direction direction,
        unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nhwentries, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
                    direction != DMA_TO_DEVICE)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              34      34.34%   1        10.00%
Ralf Bächle                20      20.20%   1        10.00%
Akinobu Mita               15      15.15%   1        10.00%
Dezhong Diao               7       7.07%    1        10.00%
Alexander Duyck            7       7.07%    1        10.00%
Kevin Cernekee             6       6.06%    1        10.00%
David Daney                6       6.06%    2        20.00%
Thiemo Seufer              2       2.02%    1        10.00%
Krzysztof Kozlowski        2       2.02%    1        10.00%
Total                      99      100.00%  10       100.00%


static void mips_dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (cpu_needs_post_dma_flush(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              25      48.08%   2        25.00%
Ralf Bächle                14      26.92%   2        25.00%
Dezhong Diao               8       15.38%   1        12.50%
David Daney                2       3.85%    1        12.50%
Kevin Cernekee             2       3.85%    1        12.50%
Jim Quinlan                1       1.92%    1        12.50%
Total                      52      100.00%  8        100.00%


static void mips_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (!plat_device_is_coherent(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              25      52.08%   1        16.67%
Ralf Bächle                9       18.75%   1        16.67%
Dezhong Diao               8       16.67%   1        16.67%
David Daney                2       4.17%    1        16.67%
Kevin Cernekee             2       4.17%    1        16.67%
Thomas Bogendoerfer        2       4.17%    1        16.67%
Total                      48      100.00%  6        100.00%

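The two sync hooks pair up when the CPU inspects a long-lived streaming mapping between transfers. A hedged fragment (handle, buf, and len are hypothetical, carried over from an earlier dma_map_single()):

u32 status;

dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
/* The CPU may now read buf; on the affected CPUs the call above
 * invalidated any speculatively filled cachelines first. */
status = le32_to_cpu(*(__le32 *)buf);
dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
/* The device may DMA into buf again. */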

static void mips_dma_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (cpu_needs_post_dma_flush(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
        plat_post_dma_flush(dev);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              33      44.00%   1        12.50%
Akinobu Mita               20      26.67%   1        12.50%
Jayachandran C             7       9.33%    1        12.50%
Ralf Bächle                6       8.00%    2        25.00%
Dezhong Diao               4       5.33%    1        12.50%
Jens Axboe                 3       4.00%    1        12.50%
David Daney                2       2.67%    1        12.50%
Total                      75      100.00%  8        100.00%


static void mips_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (!plat_device_is_coherent(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              33      46.48%   1        14.29%
Akinobu Mita               20      28.17%   1        14.29%
Jayachandran C             8       11.27%   1        14.29%
Dezhong Diao               4       5.63%    1        14.29%
Jens Axboe                 3       4.23%    1        14.29%
David Daney                2       2.82%    1        14.29%
Ralf Bächle                1       1.41%    1        14.29%
Total                      71      100.00%  7        100.00%


static int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              9       52.94%   1        20.00%
FUJITA Tomonori            5       29.41%   1        20.00%
Paul Burton                1       5.88%    1        20.00%
David Daney                1       5.88%    1        20.00%
Felix Fietkau              1       5.88%    1        20.00%
Total                      17      100.00%  5        100.00%

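This implementation never reports a failure, but portable callers must still check the result, since other architectures (and CONFIG_DMA_API_DEBUG) can return real errors. The usual pattern:

dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

if (dma_mapping_error(dev, handle))     /* always false here, not elsewhere */
        return -ENOMEM;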

static int mips_dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              13      59.09%   1        20.00%
David Daney                6       27.27%   2        40.00%
Ralf Bächle                2       9.09%    1        20.00%
Paul Burton                1       4.55%    1        20.00%
Total                      22      100.00%  5        100.00%


void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync_virtual(vaddr, size, direction);
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              25      55.56%   1        16.67%
Ralf Bächle                17      37.78%   3        50.00%
Thomas Bogendoerfer        2       4.44%    1        16.67%
Dezhong Diao               1       2.22%    1        16.67%
Total                      45      100.00%  6        100.00%

EXPORT_SYMBOL(dma_cache_sync);

static const struct dma_map_ops mips_default_dma_map_ops = {
        .alloc = mips_dma_alloc_coherent,
        .free = mips_dma_free_coherent,
        .mmap = mips_dma_mmap,
        .map_page = mips_dma_map_page,
        .unmap_page = mips_dma_unmap_page,
        .map_sg = mips_dma_map_sg,
        .unmap_sg = mips_dma_unmap_sg,
        .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
        .sync_single_for_device = mips_dma_sync_single_for_device,
        .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
        .sync_sg_for_device = mips_dma_sync_sg_for_device,
        .mapping_error = mips_dma_mapping_error,
        .dma_supported = mips_dma_supported
};

const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}

Contributors

Person                     Tokens  Prop     Commits  CommitProp
David Daney                17      100.00%  1        100.00%
Total                      17      100.00%  1        100.00%

fs_initcall(mips_dma_init);
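
For orientation: the generic dma_* helpers reach this table through the architecture's get_dma_ops(), which on MIPS returns mips_dma_map_ops. A simplified, non-verbatim sketch of the dispatch done by the inlines in include/linux/dma-mapping.h:

/* Roughly what dma_map_single() expands to (debug hooks omitted). */
const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr = ops->map_page(dev, virt_to_page(ptr),
                                offset_in_page(ptr), size, dir, 0);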

Overall Contributors

Person                     Tokens  Prop     Commits  CommitProp
Andrew Morton              509     26.73%   3        5.36%
Ralf Bächle                320     16.81%   11       19.64%
Dezhong Diao               227     11.92%   1        1.79%
Alex Smith                 204     10.71%   1        1.79%
David Daney                154     8.09%    3        5.36%
Zubair Lutfullah Kakakhel  106     5.57%    1        1.79%
Steven J. Hill             72      3.78%    1        1.79%
Akinobu Mita               70      3.68%    1        1.79%
Paul Burton                44      2.31%    4        7.14%
Alexander Duyck            28      1.47%    1        1.79%
Jayachandran C             28      1.47%    2        3.57%
Kevin Cernekee             27      1.42%    2        3.57%
Krzysztof Kozlowski        16      0.84%    1        1.79%
Franck Bui-Huu             14      0.74%    1        1.79%
Christoph Hellwig          14      0.74%    1        1.79%
Matt Redfearn              12      0.63%    1        1.79%
Jens Axboe                 8       0.42%    2        3.57%
Thomas Bogendoerfer        6       0.32%    3        5.36%
Andrzej Pietrasiewicz      6       0.32%    1        1.79%
James Hogan                6       0.32%    1        1.79%
FUJITA Tomonori            5       0.26%    1        1.79%
Jim Quinlan                4       0.21%    1        1.79%
Felix Fietkau              4       0.21%    2        3.57%
Tejun Heo                  3       0.16%    1        1.79%
Atsushi Nemoto             3       0.16%    1        1.79%
Qais Yousef                3       0.16%    1        1.79%
Al Viro                    2       0.11%    1        1.79%
Lucas Stach                2       0.11%    1        1.79%
Thiemo Seufer              2       0.11%    1        1.79%
Bart Van Assche            2       0.11%    1        1.79%
Jerin Jacob                1       0.05%    1        1.79%
Paul Gortmaker             1       0.05%    1        1.79%
Vladimir Murzin            1       0.05%    1        1.79%
Total                      1904    100.00%  56       100.00%
Created with cregit.