Contributors: 11
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Christoph Hellwig |
181 |
62.41% |
1 |
6.25% |
Kefeng Wang |
32 |
11.03% |
3 |
18.75% |
Baoquan He |
26 |
8.97% |
3 |
18.75% |
Christophe Leroy |
19 |
6.55% |
2 |
12.50% |
Arnd Bergmann |
11 |
3.79% |
1 |
6.25% |
Thierry Reding |
7 |
2.41% |
1 |
6.25% |
Håvard Skinnemoen |
7 |
2.41% |
1 |
6.25% |
Adrian Bunk |
3 |
1.03% |
1 |
6.25% |
Huang Ying |
2 |
0.69% |
1 |
6.25% |
Greg Kroah-Hartman |
1 |
0.34% |
1 |
6.25% |
Paul Gortmaker |
1 |
0.34% |
1 |
6.25% |
Total |
290 |
|
16 |
|
// SPDX-License-Identifier: GPL-2.0
/*
* Re-map IO memory to kernel address space so that we can access it.
* This is needed for high PCI addresses that aren't mapped in the
* 640k-1MB IO memory area on PC's
*
* (C) Copyright 1995 1996 Linus Torvalds
*/
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/ioremap.h>
/*
 * generic_ioremap_prot - default implementation backing ioremap_prot()
 * @phys_addr: start of the physical range (need not be page aligned)
 * @size: length of the range in bytes
 * @prot: page protection bits for the new mapping
 *
 * Maps @size bytes starting at @phys_addr into vmalloc space with the
 * given protection.  Returns the virtual address corresponding to
 * @phys_addr (sub-page offset preserved), or NULL on failure.
 */
void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long va, page_off;
	phys_addr_t end;

	/* Early platform code may call in before the slab allocator is up. */
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	/* Reject empty ranges and ranges that wrap the physical space. */
	end = phys_addr + size - 1;
	if (!size || end < phys_addr)
		return NULL;

	/*
	 * Round the request out to whole pages.  Keep the subtraction form
	 * rather than masking: PAGE_MASK is unsigned long and could truncate
	 * a wider phys_addr_t on 32-bit configurations.
	 */
	page_off = phys_addr & (~PAGE_MASK);
	phys_addr -= page_off;
	size = PAGE_ALIGN(size + page_off);

	area = __get_vm_area_caller(size, VM_IOREMAP, IOREMAP_START,
				    IOREMAP_END, __builtin_return_address(0));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;

	va = (unsigned long)area->addr;
	if (ioremap_page_range(va, va + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	/* Hand back the caller's original sub-page offset. */
	return (void __iomem *)(va + page_off);
}
#ifndef ioremap_prot
/*
 * Default ioremap_prot(): wrap the raw protection bits in pgprot_t and
 * defer to the generic implementation.  Architectures provide their own
 * version by defining ioremap_prot.
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	pgprot_t pgprot = __pgprot(prot);

	return generic_ioremap_prot(phys_addr, size, pgprot);
}
EXPORT_SYMBOL(ioremap_prot);
#endif
/*
 * generic_iounmap - default implementation backing iounmap()
 * @addr: virtual address previously returned by ioremap_prot()
 *
 * Rounds @addr down to its page boundary (undoing the sub-page offset
 * added at map time) and tears the mapping down, but only if the address
 * actually lies inside the ioremap region.
 */
void generic_iounmap(volatile void __iomem *addr)
{
	void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);

	if (is_ioremap_addr(page_addr))
		vunmap(page_addr);
}
#ifndef iounmap
/*
 * Default iounmap(): pure pass-through to the generic implementation.
 * Architectures provide their own version by defining iounmap.
 */
void iounmap(volatile void __iomem *addr)
{
	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
#endif