Contributors: 16
Author                 Tokens  Token %   Commits  Commit %
Christoph Hellwig         122   40.67%         5    20.00%
Laura Abbott               79   26.33%         1     4.00%
Marek Szyprowski           26    8.67%         3    12.00%
Yosry Ahmed                16    5.33%         1     4.00%
Russell King               11    3.67%         3    12.00%
Andrey Smirnov              9    3.00%         1     4.00%
Robin Murphy                6    2.00%         1     4.00%
Eric Auger                  6    2.00%         1     4.00%
Hiroshi Doyu                6    2.00%         1     4.00%
Greg Kroah-Hartman          5    1.67%         2     8.00%
Jon Medhurst (Tixy)         4    1.33%         1     4.00%
Akinobu Mita                3    1.00%         1     4.00%
Geliang Tang                3    1.00%         1     4.00%
gaoxu                       2    0.67%         1     4.00%
David Rientjes              1    0.33%         1     4.00%
Linus Torvalds              1    0.33%         1     4.00%
Total                     300                 25

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-map-ops.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !(area->flags & VM_DMA_COHERENT))
		return NULL;
	WARN(area->flags != VM_DMA_COHERENT,
	     "unexpected flags in area: %p\n", cpu_addr);
	return area->pages;
}
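
/*
 * Illustrative sketch, not part of the original file: a ->get_sgtable()
 * style helper could use dma_common_find_pages() to recover the backing
 * pages of a remapped, non-contiguous buffer and export them as a
 * scatterlist. The helper name is hypothetical; sg_alloc_table_from_pages()
 * comes from <linux/scatterlist.h>, which is assumed to be reachable here.
 */
static int example_get_remapped_sgtable(struct sg_table *sgt, void *cpu_addr,
					size_t size, gfp_t gfp)
{
	struct page **pages = dma_common_find_pages(cpu_addr);

	if (!pages)
		return -ENXIO;	/* not a VM_DMA_COHERENT remapping */
	return sg_alloc_table_from_pages(sgt, pages,
					 PAGE_ALIGN(size) >> PAGE_SHIFT,
					 0, size, gfp);
}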

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			 pgprot_t prot, const void *caller)
{
	void *vaddr;

	vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
		     VM_DMA_COHERENT, prot);
	if (vaddr)
		find_vm_area(vaddr)->pages = pages;
	return vaddr;
}
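
/*
 * Illustrative sketch, not part of the original file: building a buffer
 * from individually allocated pages and making it virtually contiguous
 * with dma_common_pages_remap(). Note that on success the pages array is
 * stored in the vm_area (see dma_common_find_pages() above), so it must
 * stay allocated until dma_common_free_remap() is called; only the error
 * path frees it here. The helper name and the plain PAGE_KERNEL
 * protection are illustrative choices.
 */
static void *example_remap_page_array(size_t size)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < count; i++) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!pages[i])
			goto err_free;
	}

	vaddr = dma_common_pages_remap(pages, size, PAGE_KERNEL,
				       __builtin_return_address(0));
	if (vaddr)
		return vaddr;

err_free:
	while (i--)
		__free_page(pages[i]);
	kvfree(pages);
	return NULL;
}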

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			pgprot_t prot, const void *caller)
{
	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);
	vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
	kvfree(pages);

	return vaddr;
}
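
/*
 * Illustrative sketch, not part of the original file: remapping a
 * physically contiguous allocation with uncached attributes, roughly what
 * a non-coherent DMA allocator does. alloc_pages()/get_order() and the
 * use of pgprot_dmacoherent() (from <linux/pgtable.h>) are illustrative
 * assumptions; real callers normally take the pgprot from dma_pgprot()
 * and the pages from CMA or the direct allocator.
 */
static void *example_remap_contiguous(size_t size)
{
	struct page *page;
	void *vaddr;

	size = PAGE_ALIGN(size);
	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	vaddr = dma_common_contiguous_remap(page, size,
			pgprot_dmacoherent(PAGE_KERNEL),
			__builtin_return_address(0));
	if (!vaddr)
		__free_pages(page, get_order(size));
	return vaddr;
}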

/*
 * Unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !(area->flags & VM_DMA_COHERENT)) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	vunmap(cpu_addr);
}
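
/*
 * Illustrative sketch, not part of the original file: tearing down a
 * mapping created by the contiguous sketch above. The remapped address
 * cannot be fed to virt_to_page(), so the caller must remember the
 * backing page (passed in here) and release it only after the vmap area
 * is gone. The helper name is hypothetical.
 */
static void example_free_contiguous(void *cpu_addr, struct page *page,
				    size_t size)
{
	dma_common_free_remap(cpu_addr, PAGE_ALIGN(size));
	__free_pages(page, get_order(PAGE_ALIGN(size)));
}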