cregit-Linux: how code gets into the kernel

Release 4.16: arch/powerpc/kernel/dma.c

/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/iommu.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
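
To make the offset mechanism concrete, here is a minimal sketch of how platform code could apply it, assuming the 4.16-era powerpc helpers set_dma_offset() and get_dma_offset() from <asm/dma-mapping.h>; the fixup function and the 2 GiB offset value are invented for illustration:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical platform fixup (sketch): devices on this imaginary bus
 * see system RAM starting at bus address 0x80000000.
 */
static void example_fixup_dma_offset(struct device *dev)
{
        set_dma_offset(dev, 0x80000000ull);
}

/* The direct ops below then fold the offset into every bus address: */
static dma_addr_t example_phys_to_bus(struct device *dev, phys_addr_t pa)
{
        return pa + get_dma_offset(dev);
}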


static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
        u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
        struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
        if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
                pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

        return pfn;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Scott Wood                    70   97.22%        1      33.33%
Christoph Hellwig              1    1.39%        1      33.33%
Bart Van Assche                1    1.39%        1      33.33%
Total                         72  100.00%        3     100.00%
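
For concreteness, the limit works out as follows for a common configuration (a worked example, assuming 4 KiB pages, i.e. PAGE_SHIFT == 12; all values invented):

/*
 * coherent_dma_mask = 0xffffffff (32-bit), PAGE_SHIFT = 12:
 *   pfn = (0xffffffff >> 12) + 1 = 0xfffff + 1 = 0x100000
 * so the device can reach the first 0x100000 page frames (4 GiB).
 * Under swiotlb, max_direct_dma_addr = 0x80000000 would clamp this
 * to 0x80000000 >> 12 = 0x80000 frames (2 GiB).
 */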


static int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

        /* Limit fits in the mask, we are good */
        if (mask >= limit)
                return 1;

#ifdef CONFIG_FSL_SOC
        /*
         * Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
         * that will have to be refined if/when they support iommus
         */
        return 1;
#endif
        /* Sorry ... */
        return 0;
#else
        return 1;
#endif
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        61   98.39%        1      50.00%
Christoph Hellwig              1    1.61%        1      50.00%
Total                         62  100.00%        2     100.00%
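
A quick sanity check of the limit test above (a worked example with invented values):

/*
 * memblock_end_of_DRAM() = 0x100000000 (RAM ends at 4 GiB), offset 0:
 *   limit = 0 + (0x100000000 - 1) = 0xffffffff
 * A 32-bit mask (0xffffffff) satisfies mask >= limit, so the direct
 * ops work; a smaller mask is rejected, except on FSL_SOC where the
 * ZONE_DMA/ZONE_DMA32 fallback in the allocator gets a chance.
 */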


void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t flag,
                                 unsigned long attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
        u64 pfn = get_pfn_limit(dev);
        int zone;

        /*
         * This code should be OK on other platforms, but we have drivers that
         * don't set coherent_dma_mask. As a workaround we just ifdef it. This
         * whole routine needs some serious cleanup.
         */

        zone = dma_pfn_limit_to_zone(pfn);
        if (zone < 0) {
                dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
                        __func__, pfn);
                return NULL;
        }

        switch (zone) {
        case ZONE_DMA:
                flag |= GFP_DMA;
                break;
#ifdef CONFIG_ZONE_DMA32
        case ZONE_DMA32:
                flag |= GFP_DMA32;
                break;
#endif
        }
#endif /* CONFIG_FSL_SOC */

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = __pa(ret) + get_dma_offset(dev);

        return ret;
#endif
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        87   41.04%        4      23.53%
Scott Wood                    68   32.08%        2      11.76%
Becky Bruce                   21    9.91%        3      17.65%
Andrew Morton                 16    7.55%        1       5.88%
Michael Ellerman              12    5.66%        3      17.65%
Jeremy Kerr                    3    1.42%        1       5.88%
Krzysztof Kozlowski            2    0.94%        1       5.88%
Andrzej Pietrasiewicz          2    0.94%        1       5.88%
Christoph Hellwig              1    0.47%        1       5.88%
Total                        212  100.00%       17     100.00%
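
The FSL_SOC zone selection above can be hard to follow in the abstract; roughly, with invented values and dma_pfn_limit_to_zone() behaviour assumed:

/*
 * A device with a 31-bit coherent mask gives pfn = 0x80000 (2 GiB).
 * If dma_pfn_limit_to_zone(0x80000) returns ZONE_DMA, the allocation
 * adds GFP_DMA, so alloc_pages_node() only hands back pages the
 * device can actually address.
 */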


void __dma_nommu_free_coherent(struct device *dev, size_t size,
                               void *vaddr, dma_addr_t dma_handle,
                               unsigned long attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        30   57.69%        1      16.67%
Becky Bruce                   14   26.92%        1      16.67%
Andrew Morton                  3    5.77%        1      16.67%
Andrzej Pietrasiewicz          2    3.85%        1      16.67%
Krzysztof Kozlowski            2    3.85%        1      16.67%
Christoph Hellwig              1    1.92%        1      16.67%
Total                         52  100.00%        6     100.00%


static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
                                      dma_addr_t *dma_handle, gfp_t flag,
                                      unsigned long attrs)
{
        struct iommu_table *iommu;

        /*
         * The coherent mask may be smaller than the real mask, check if
         * we can really use the direct ops
         */
        if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_nommu_alloc_coherent(dev, size, dma_handle,
                                                  flag, attrs);

        /* Ok we can't ... do we have an iommu ? If not, fail */
        iommu = get_iommu_table_base(dev);
        if (!iommu)
                return NULL;

        /* Try to use the iommu */
        return iommu_alloc_coherent(dev, iommu, size, dma_handle,
                                    dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        92   94.85%        1      33.33%
Christoph Hellwig              3    3.09%        1      33.33%
Krzysztof Kozlowski            2    2.06%        1      33.33%
Total                         97  100.00%        3     100.00%


static void dma_nommu_free_coherent(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t dma_handle,
                                    unsigned long attrs)
{
        struct iommu_table *iommu;

        /* See comments in dma_nommu_alloc_coherent() */
        if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
                return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
                                                 attrs);

        /* Maybe we used an iommu ... */
        iommu = get_iommu_table_base(dev);

        /*
         * If we hit that we should have never allocated in the first
         * place so how come we are freeing ?
         */
        if (WARN_ON(!iommu))
                return;
        iommu_free_coherent(iommu, size, vaddr, dma_handle);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        79   92.94%        1      33.33%
Christoph Hellwig              4    4.71%        1      33.33%
Krzysztof Kozlowski            2    2.35%        1      33.33%
Total                         85  100.00%        3     100.00%
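
From a driver's perspective, the two callbacks above sit behind the generic DMA API: dma_alloc_coherent() and dma_free_coherent() dispatch to dma_nommu_alloc_coherent()/dma_nommu_free_coherent() for devices using dma_nommu_ops. A minimal sketch of a hypothetical driver allocating and releasing a descriptor ring:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver: one page of coherent memory for a DMA ring. */
static int example_alloc_ring(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);

        if (!ring)
                return -ENOMEM;
        /* ... program ring_dma into the device, touch ring from the CPU ... */
        dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
        return 0;
}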


int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, dma_addr_t handle, size_t size,
                            unsigned long attrs)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Marek Szyprowski              97   97.00%        1      33.33%
Krzysztof Kozlowski            2    2.00%        1      33.33%
Christoph Hellwig              1    1.00%        1      33.33%
Total                        100  100.00%        3     100.00%
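
This hook is reached through dma_mmap_coherent(); a hypothetical char-device mmap handler might use it as follows (a sketch; the example_dev structure and its fields are invented):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct example_dev {            /* hypothetical driver state */
        struct device   *dev;
        void            *cpu_addr;
        dma_addr_t      dma_handle;
};

/* Export a previously allocated coherent buffer to user space. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct example_dev *ed = file->private_data;

        return dma_mmap_coherent(ed->dev, vma, ed->cpu_addr, ed->dma_handle,
                                 vma->vm_end - vma->vm_start);
}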


static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
                            int nents, enum dma_data_direction direction,
                            unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        41   42.27%        2      15.38%
Jens Axboe                    17   17.53%        2      15.38%
Andrew Morton                 17   17.53%        1       7.69%
Alexander Duyck                7    7.22%        1       7.69%
Michael Ellerman               4    4.12%        1       7.69%
Jeremy Kerr                    3    3.09%        1       7.69%
Krzysztof Kozlowski            2    2.06%        1       7.69%
Stephen Rothwell               2    2.06%        1       7.69%
Mark Nelson                    2    2.06%        1       7.69%
Becky Bruce                    1    1.03%        1       7.69%
Christoph Hellwig              1    1.03%        1       7.69%
Total                         97  100.00%       13     100.00%
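
Drivers reach this callback through dma_map_sg(); a minimal sketch of a hypothetical two-page transfer (on this path each entry's dma_address becomes sg_phys(sg) plus the device offset, with a cache flush unless DMA_ATTR_SKIP_CPU_SYNC is set):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical driver: map two pages for a device read. */
static int example_map_two_pages(struct device *dev,
                                 struct page *p0, struct page *p1)
{
        struct scatterlist sgl[2];
        int mapped;

        sg_init_table(sgl, 2);
        sg_set_page(&sgl[0], p0, PAGE_SIZE, 0);
        sg_set_page(&sgl[1], p1, PAGE_SIZE, 0);

        mapped = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
        if (!mapped)
                return -EIO;
        /* ... hand sg_dma_address(&sgl[i]) to the device ... */
        dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
        return 0;
}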


static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction direction,
                               unsigned long attrs)
{
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        18   69.23%        1      20.00%
Andrew Morton                  3   11.54%        1      20.00%
Mark Nelson                    2    7.69%        1      20.00%
Krzysztof Kozlowski            2    7.69%        1      20.00%
Christoph Hellwig              1    3.85%        1      20.00%
Total                         26  100.00%        5     100.00%


static u64 dma_nommu_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Milton D. Miller II           47   97.92%        1      50.00%
Christoph Hellwig              1    2.08%        1      50.00%
Total                         48  100.00%        2     100.00%
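
The mask arithmetic above rounds the highest reachable bus address up to a contiguous all-ones mask; a worked example (values invented):

/*
 * RAM ends at 0x120000000 and the device offset is 0, so
 * end = 0x120000000 and fls64(end) = 33:
 *   mask  = 1ULL << (33 - 1) = 0x100000000
 *   mask += mask - 1;          mask is now 0x1ffffffff
 * i.e. a 33-bit mask, the smallest all-ones mask covering 'end'.
 */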


static inline dma_addr_t dma_nommu_map_page(struct device *dev,
                                            struct page *page,
                                            unsigned long offset,
                                            size_t size,
                                            enum dma_data_direction dir,
                                            unsigned long attrs)
{
        BUG_ON(dir == DMA_NONE);

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync_page(page, offset, size, dir);

        return page_to_phys(page) + offset + get_dma_offset(dev);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Becky Bruce                   60   83.33%        2      40.00%
Alexander Duyck                9   12.50%        1      20.00%
Krzysztof Kozlowski            2    2.78%        1      20.00%
Christoph Hellwig              1    1.39%        1      20.00%
Total                         72  100.00%        5     100.00%


static inline void dma_nommu_unmap_page(struct device *dev,
                                        dma_addr_t dma_address,
                                        size_t size,
                                        enum dma_data_direction direction,
                                        unsigned long attrs)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Becky Bruce2288.00%133.33%
Krzysztof Kozlowski28.00%133.33%
Christoph Hellwig14.00%133.33%
Total25100.00%3100.00%
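
These two callbacks implement the single-buffer streaming API; a minimal sketch of a hypothetical driver pushing one buffer through dma_map_page()/dma_unmap_page():

#include <linux/dma-mapping.h>

/* Hypothetical driver: stream one buffer to the device. */
static int example_stream(struct device *dev, struct page *page, size_t len)
{
        dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -EIO;
        /* ... start the transfer and wait for it to complete ... */
        dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
        return 0;
}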

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_nommu_sync_sg(struct device *dev,
                                     struct scatterlist *sgl, int nents,
                                     enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

Contributors

PersonTokensPropCommitsCommitProp
Becky Bruce5998.33%150.00%
Christoph Hellwig11.67%150.00%
Total60100.00%2100.00%


static inline void dma_nommu_sync_single(struct device *dev,
                                         dma_addr_t dma_handle, size_t size,
                                         enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Becky Bruce                   33   97.06%        1      50.00%
Christoph Hellwig              1    2.94%        1      50.00%
Total                         34  100.00%        2     100.00%
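
On non-coherent platforms a long-lived streaming mapping has to be handed back and forth between CPU and device explicitly; a sketch of the usual pattern in a hypothetical driver (dma_sync_single_for_cpu()/dma_sync_single_for_device() dispatch to dma_nommu_sync_single() above):

#include <linux/dma-mapping.h>

/* Hypothetical driver: reuse one streaming mapping across transfers. */
static void example_reuse_mapping(struct device *dev, dma_addr_t dma,
                                  void *buf, size_t len)
{
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
        /* ... the CPU may now read buf safely ... */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
        /* ... the device may now DMA into the buffer again ... */
}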

#endif

const struct dma_map_ops dma_nommu_ops = {
        .alloc                          = dma_nommu_alloc_coherent,
        .free                           = dma_nommu_free_coherent,
        .mmap                           = dma_nommu_mmap_coherent,
        .map_sg                         = dma_nommu_map_sg,
        .unmap_sg                       = dma_nommu_unmap_sg,
        .dma_supported                  = dma_nommu_dma_supported,
        .map_page                       = dma_nommu_map_page,
        .unmap_page                     = dma_nommu_unmap_page,
        .get_required_mask              = dma_nommu_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_nommu_sync_single,
        .sync_single_for_device         = dma_nommu_sync_single,
        .sync_sg_for_cpu                = dma_nommu_sync_sg,
        .sync_sg_for_device             = dma_nommu_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_nommu_ops);
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask)) {
                /*
                 * We need to special case the direct DMA ops which can
                 * support a fallback for coherent allocations. There
                 * is no dma_op->set_coherent_mask() so we have to do
                 * things the hard way:
                 */
                if (get_dma_ops(dev) != &dma_nommu_ops ||
                    get_iommu_table_base(dev) == NULL ||
                    !dma_iommu_dma_supported(dev, mask))
                        return -EIO;
        }

        dev->coherent_dma_mask = mask;
        return 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        63   98.44%        1      50.00%
Christoph Hellwig              1    1.56%        1      50.00%
Total                         64  100.00%        2     100.00%

EXPORT_SYMBOL(dma_set_coherent_mask);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_controller *phb = pci_bus_to_host(pdev->bus);
                if (phb->controller_ops.dma_set_mask)
                        return phb->controller_ops.dma_set_mask(pdev, dma_mask);
        }

        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Daniel Axtens                 51   46.79%        1      33.33%
Benjamin Herrenschmidt        35   32.11%        1      33.33%
Christoph Hellwig             23   21.10%        1      33.33%
Total                        109  100.00%        3     100.00%

EXPORT_SYMBOL(dma_set_mask);
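
Drivers typically call both setters at probe time, falling back from 64-bit to 32-bit addressing when the wider mask is refused; a minimal sketch of the common pattern in a hypothetical probe routine:

#include <linux/dma-mapping.h>

/* Hypothetical probe: try 64-bit DMA first, then fall back to 32-bit. */
static int example_probe_dma(struct device *dev)
{
        if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
            dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
                if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
                    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
                        return -EIO;    /* no usable DMA addressing */
        }
        return 0;
}
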
u64 __dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Milton D. Miller II           56   96.55%        2      50.00%
Bart Van Assche                1    1.72%        1      25.00%
Gavin Shan                     1    1.72%        1      25.00%
Total                         58  100.00%        4     100.00%


u64 dma_get_required_mask(struct device *dev)
{
        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (dev_is_pci(dev)) {
                struct pci_dev *pdev = to_pci_dev(dev);
                struct pci_controller *phb = pci_bus_to_host(pdev->bus);
                if (phb->controller_ops.dma_get_required_mask)
                        return phb->controller_ops.dma_get_required_mask(pdev);
        }

        return __dma_get_required_mask(dev);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Andrew Donnellan              49   62.03%        1      50.00%
Gavin Shan                    30   37.97%        1      50.00%
Total                         79  100.00%        2     100.00%

EXPORT_SYMBOL_GPL(dma_get_required_mask);
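
A driver can consult dma_get_required_mask() to decide whether enabling an expensive 64-bit addressing mode is worthwhile; a one-line sketch (the helper name is hypothetical):

#include <linux/dma-mapping.h>

/* Only worth enabling 64-bit addressing if RAM extends above 4 GiB. */
static bool example_wants_64bit(struct device *dev)
{
        return dma_get_required_mask(dev) > DMA_BIT_MASK(32);
}
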
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Anton Blanchard               22   56.41%        1      50.00%
FUJITA Tomonori               17   43.59%        1      50.00%
Total                         39  100.00%        2     100.00%

fs_initcall(dma_init);

Overall Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt       544   35.30%       10      21.28%
Becky Bruce                  246   15.96%        5      10.64%
Scott Wood                   141    9.15%        2       4.26%
Milton D. Miller II          112    7.27%        2       4.26%
Marek Szyprowski             101    6.55%        1       2.13%
Christoph Hellwig             58    3.76%        3       6.38%
Andrew Morton                 53    3.44%        1       2.13%
Daniel Axtens                 51    3.31%        1       2.13%
Andrew Donnellan              49    3.18%        1       2.13%
FUJITA Tomonori               32    2.08%        3       6.38%
Gavin Shan                    31    2.01%        1       2.13%
Anton Blanchard               28    1.82%        1       2.13%
Krzysztof Kozlowski           18    1.17%        1       2.13%
Jens Axboe                    17    1.10%        2       4.26%
Michael Ellerman              16    1.04%        3       6.38%
Alexander Duyck               16    1.04%        1       2.13%
Jeremy Kerr                    6    0.39%        1       2.13%
Andrzej Pietrasiewicz          6    0.39%        1       2.13%
Mark Nelson                    4    0.26%        1       2.13%
Tejun Heo                      3    0.19%        1       2.13%
Paul Gortmaker                 3    0.19%        1       2.13%
Bart Van Assche                3    0.19%        2       4.26%
Stephen Rothwell               2    0.13%        1       2.13%
Yinghai Lu                     1    0.06%        1       2.13%
Total                       1541  100.00%       47     100.00%
Created with cregit.