Contributors: 13
Author |
Tokens |
Token Proportion |
Commits |
Commit Proportion |
Christophe Leroy |
79 |
25.99% |
10 |
34.48% |
Benjamin Herrenschmidt |
72 |
23.68% |
4 |
13.79% |
Paul Mackerras |
68 |
22.37% |
1 |
3.45% |
Nicholas Piggin |
37 |
12.17% |
2 |
6.90% |
Anton Blanchard |
13 |
4.28% |
2 |
6.90% |
Andrew Morton |
11 |
3.62% |
3 |
10.34% |
Olaf Hering |
10 |
3.29% |
1 |
3.45% |
Ryan Roberts |
6 |
1.97% |
1 |
3.45% |
Dan J Williams |
3 |
0.99% |
1 |
3.45% |
Linus Torvalds (pre-git) |
2 |
0.66% |
1 |
3.45% |
Aneesh Kumar K.V |
1 |
0.33% |
1 |
3.45% |
Linus Torvalds |
1 |
0.33% |
1 |
3.45% |
Thomas Gleixner |
1 |
0.33% |
1 |
3.45% |
Total |
304 |
|
29 |
|
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
/*
 * ioremap_bot: boundary of the ioremap virtual address region.
 * NOTE(review): appears to track the allocation limit used by the
 * ioremap machinery; initialized by arch setup code elsewhere — confirm.
 */
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);
/*
 * ioremap() - map a physical range as uncached I/O memory.
 *
 * Thin wrapper: builds a non-cached kernel protection and hands the
 * real work to __ioremap_caller(), recording our immediate caller
 * for bookkeeping/debug.
 */
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, pgprot_noncached(PAGE_KERNEL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
/*
 * ioremap_wc() - map a physical range using the write-combine
 * non-cached protection variant (pgprot_noncached_wc).
 *
 * Delegates to __ioremap_caller() with the immediate caller recorded.
 */
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, pgprot_noncached_wc(PAGE_KERNEL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
/*
 * ioremap_coherent() - map a physical range with the cached kernel
 * protection (pgprot_cached).
 *
 * Delegates to __ioremap_caller() with the immediate caller recorded.
 * Not exported, unlike ioremap()/ioremap_wc().
 */
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, pgprot_cached(PAGE_KERNEL),
				__builtin_return_address(0));
}
/*
 * ioremap_prot() - map a physical range with caller-supplied protection.
 *
 * The protection is round-tripped through a pte so that a writeable
 * mapping can also be marked dirty (writeable implies dirty for
 * kernel addresses).
 */
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, pgprot_t prot)
{
	pte_t pte = __pte(pgprot_val(prot));

	/* writeable implies dirty for kernel addresses */
	pte = pte_write(pte) ? pte_mkdirty(pte) : pte;

	return __ioremap_caller(addr, size, pte_pgprot(pte),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
/*
 * early_ioremap_range() - map [pa, pa + size) at virtual [ea, ea + size)
 * one page at a time via map_kernel_page().
 *
 * The protection always has execute permission stripped (pgprot_nx).
 * Returns 0 on success or the first map_kernel_page() error; on error,
 * pages mapped so far are NOT torn down (see the existing
 * "Should clean up" note).
 */
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
			unsigned long size, pgprot_t prot)
{
	/* pgprot_nx(prot) is loop-invariant; compute it once. */
	pgprot_t nxprot = pgprot_nx(prot);
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, nxprot);

		if (WARN_ON_ONCE(err)) /* Should clean up */
			return err;
	}

	return 0;
}