
Release 4.11 arch/avr32/mm/dma-coherent.c

/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/addrspace.h>


void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}

Contributors

Person              Tokens  Prop      Commits  Commit Prop
Håvard Skinnemoen   69      89.61%    1        33.33%
Ralf Bächle         8       10.39%    2        66.67%
Total               77      100.00%   3        100.00%

EXPORT_SYMBOL(dma_cache_sync);
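
For reference, dma_cache_sync() is the driver-visible half of this file: the DMA-API documentation of this era describes it as the maintenance call for memory obtained with dma_alloc_noncoherent(), where the driver owns cache coherency. A minimal hypothetical sketch (the device, buffer size, and the transfer itself are invented for illustration):

/* Hypothetical driver fragment, illustration only. */
static void example_tx(struct device *dev)
{
	dma_addr_t handle;
	void *buf = dma_alloc_noncoherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);

	if (!buf)
		return;

	memset(buf, 0xa5, PAGE_SIZE);	/* CPU fills the buffer... */

	/* ...then writes the dirty lines back so the device sees them */
	dma_cache_sync(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);

	/* ... start the transfer using 'handle' ... */

	dma_free_noncoherent(dev, PAGE_SIZE, buf, handle);
}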
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}

Contributors

Person              Tokens  Prop      Commits  Commit Prop
Håvard Skinnemoen   145     100.00%   2        100.00%
Total               145     100.00%   2        100.00%

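The rounding dance in __dma_alloc() is easiest to see with concrete numbers; a worked example, assuming 4 KiB pages:

/*
 * Worked example (illustration only, PAGE_SIZE == 4096 assumed):
 *
 *   request: size = 10000 bytes
 *   PAGE_ALIGN(10000) -> 12288	(3 pages)
 *   get_order(12288)  -> 2	(alloc_pages() only hands out
 *				 power-of-two blocks: 4 pages)
 *
 * split_page(page, 2) turns the order-2 block into four independent
 * order-0 pages, so the trailing loop (free = page + 3, end = page + 4)
 * can return the unused fourth page with __free_page().
 */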

static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}

Contributors

Person              Tokens  Prop      Commits  Commit Prop
Håvard Skinnemoen   50      100.00%   1        100.00%
Total               50      100.00%   1        100.00%


static void *avr32_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;
	phys = page_to_phys(page);

	if (attrs & DMA_ATTR_WRITE_COMBINE) {
		/* Now, map the page into P3 with write-combining turned on */
		*handle = phys;
		return __ioremap(phys, size, _PAGE_BUFFER);
	} else {
		return phys_to_uncached(phys);
	}
}

Contributors

Person               Tokens  Prop      Commits  Commit Prop
Håvard Skinnemoen    53      55.79%    1        33.33%
Christoph Hellwig    38      40.00%    1        33.33%
Krzysztof Kozlowski  4       4.21%     1        33.33%
Total                95      100.00%   3        100.00%

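From a driver's point of view this path is reached through the generic allocator; a hypothetical caller wanting a write-combined buffer (for a frame buffer, say; the helper name is invented) might look like this:

/* Hypothetical caller, illustration only. */
static void *alloc_wc_buffer(struct device *dev, size_t len, dma_addr_t *bus)
{
	/*
	 * DMA_ATTR_WRITE_COMBINE steers avr32_dma_alloc() into the
	 * __ioremap(..., _PAGE_BUFFER) branch, giving the CPU a
	 * write-combining mapping instead of a plain uncached one.
	 */
	return dma_alloc_attrs(dev, len, bus, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}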

static void avr32_dma_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	struct page *page;

	if (attrs & DMA_ATTR_WRITE_COMBINE) {
		iounmap(cpu_addr);

		page = phys_to_page(handle);
	} else {
		void *addr = phys_to_cached(uncached_to_phys(cpu_addr));

		pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
			 cpu_addr, (unsigned long)handle, (unsigned)size);

		BUG_ON(!virt_addr_valid(addr));
		page = virt_to_page(addr);
	}

	__dma_free(dev, size, page, handle);
}

Contributors

Person               Tokens  Prop      Commits  Commit Prop
Håvard Skinnemoen    75      68.18%    1        33.33%
Christoph Hellwig    31      28.18%    1        33.33%
Krzysztof Kozlowski  4       3.64%     1        33.33%
Total                110     100.00%   3        100.00%


static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	void *cpu_addr = page_address(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_cache_sync(dev, cpu_addr, size, direction);
	return virt_to_bus(cpu_addr);
}

Contributors

Person               Tokens  Prop      Commits  Commit Prop
Christoph Hellwig    33      48.53%    1        25.00%
Håvard Skinnemoen    24      35.29%    1        25.00%
Alexander Duyck      9       13.24%    1        25.00%
Krzysztof Kozlowski  2       2.94%     1        25.00%
Total                68      100.00%   4        100.00%

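avr32_dma_map_page() is what the streaming helpers land on; a typical hypothetical receive path (names and error handling invented for illustration):

/* Hypothetical driver fragment, illustration only. */
static int example_rx(struct device *dev, void *data, size_t len)
{
	dma_addr_t bus = dma_map_single(dev, data, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, bus))
		return -ENOMEM;

	/*
	 * avr32_dma_map_page() has already invalidated the cached lines
	 * (DMA_FROM_DEVICE), so the device can DMA into the buffer
	 * without stale dirty lines being written back over its data.
	 */
	/* ... program the device with 'bus' and wait for completion ... */
	return 0;
}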

static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		char *virt;

		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
		virt = sg_virt(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		dma_cache_sync(dev, virt, sg->length, direction);
	}

	return nents;
}

Contributors

Person               Tokens  Prop      Commits  Commit Prop
Christoph Hellwig    76      78.35%    1        25.00%
Håvard Skinnemoen    12      12.37%    1        25.00%
Alexander Duyck      7       7.22%     1        25.00%
Krzysztof Kozlowski  2       2.06%     1        25.00%
Total                97      100.00%   4        100.00%

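A matching hypothetical scatter-gather caller (the fixed-size table and page-granular fragments are invented):

/* Hypothetical caller, illustration only; assumes n <= 8. */
static int example_map_fragments(struct device *dev, struct page **pages,
				 int n)
{
	struct scatterlist sgl[8];
	int i;

	sg_init_table(sgl, n);
	for (i = 0; i < n; i++)
		sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);

	/* fills sgl[i].dma_address and syncs the cache per entry */
	return dma_map_sg(dev, sgl, n, DMA_TO_DEVICE) ? 0 : -ENOMEM;
}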

static void avr32_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

Contributors

Person              Tokens  Prop      Commits  Commit Prop
Christoph Hellwig   25      71.43%    1        50.00%
Håvard Skinnemoen   10      28.57%    1        50.00%
Total               35      100.00%   2        100.00%


static void avr32_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i)
		dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}

Contributors

Person              Tokens  Prop      Commits  Commit Prop
Christoph Hellwig   39      68.42%    1        50.00%
Håvard Skinnemoen   18      31.58%    1        50.00%
Total               57      100.00%   2        100.00%

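The two sync_*_for_device hooks exist so a driver can re-use a streaming mapping after the CPU has touched the buffer again; hypothetically:

/* Hypothetical reuse of an existing mapping, illustration only. */
static void example_retransmit(struct device *dev, dma_addr_t bus,
			       void *buf, size_t len)
{
	memset(buf, 0, len);	/* CPU rewrote the buffer... */

	/* ...so clean the cache again before the device re-reads it */
	dma_sync_single_for_device(dev, bus, len, DMA_TO_DEVICE);

	/* ... kick off the next transfer using 'bus' ... */
}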
const struct dma_map_ops avr32_dma_ops = {
	.alloc			= avr32_dma_alloc,
	.free			= avr32_dma_free,
	.map_page		= avr32_dma_map_page,
	.map_sg			= avr32_dma_map_sg,
	.sync_single_for_device	= avr32_dma_sync_single_for_device,
	.sync_sg_for_device	= avr32_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(avr32_dma_ops);
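
This exported table is what the generic dma_* wrappers dispatch through; the arch glue returns it from get_arch_dma_ops(), along these lines (a sketch of arch/avr32/include/asm/dma-mapping.h, not a verbatim quote). Callbacks left NULL here, such as unmap_page and the sync_*_for_cpu pair, are simply skipped by the generic wrappers; on this port all cache maintenance happens before the device accesses memory, so nothing is needed on the CPU-side transitions.

/* Sketch of the arch glue, illustration only. */
extern const struct dma_map_ops avr32_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	return &avr32_dma_ops;
}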

Overall Contributors

Person               Tokens  Prop      Commits  Commit Prop
Håvard Skinnemoen    474     58.37%    3        27.27%
Christoph Hellwig    295     36.33%    1        9.09%
Alexander Duyck      16      1.97%     1        9.09%
Krzysztof Kozlowski  12      1.48%     1        9.09%
Ralf Bächle          8       0.99%     2        18.18%
Tejun Heo            3       0.37%     1        9.09%
Paul Gortmaker       3       0.37%     1        9.09%
Bart Van Assche      1       0.12%     1        9.09%
Total                812     100.00%   11       100.00%