Release 4.14: arch/x86/mm/highmem_32.c
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>
void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Andrew Morton | 30 | 88.24% | 2 | 66.67% |
Dave Hansen | 4 | 11.76% | 1 | 33.33% |
Total | 34 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Andrew Morton | 28 | 87.50% | 1 | 50.00% |
Dave Hansen | 4 | 12.50% | 1 | 50.00% |
Total | 32 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(kunmap);
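For context, here is a minimal usage sketch of the kmap()/kunmap() pair (not part of highmem_32.c): the helper name zero_highmem_page() is invented for this illustration, while alloc_page(), memset() and __free_page() are the standard kernel primitives.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: zero a page that may live in highmem. */
static int zero_highmem_page(void)
{
        struct page *page = alloc_page(GFP_HIGHUSER);
        void *vaddr;

        if (!page)
                return -ENOMEM;

        vaddr = kmap(page);     /* may sleep, so process context only */
        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);           /* takes the struct page, not the vaddr */

        __free_page(page);
        return 0;
}

Because kmap() may sleep (hence the might_sleep() above), it must never be used from interrupt or other atomic context; kunmap() enforces the same rule with its in_interrupt() check.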
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap: no
 * global lock is needed, and kmap (unlike kmap_atomic) must perform a global
 * TLB invalidation when its pool of mappings wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate only for short, tight code paths.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        unsigned long vaddr;
        int idx, type;

        preempt_disable();
        pagefault_disable();

        if (!PageHighMem(page))
                return page_address(page);

        type = kmap_atomic_idx_push();
        idx = type + KM_TYPE_NR * smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte - idx)));
        set_pte(kmap_pte - idx, mk_pte(page, prot));
        arch_flush_lazy_mmu_mode();

        return (void *)vaddr;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Andrew Morton | 81 | 77.88% | 3 | 33.33% |
Peter Zijlstra | 9 | 8.65% | 2 | 22.22% |
Jeremy Fitzhardinge | 4 | 3.85% | 1 | 11.11% |
Dave Hansen | 4 | 3.85% | 1 | 11.11% |
David Hildenbrand | 3 | 2.88% | 1 | 11.11% |
Konrad Rzeszutek Wilk | 3 | 2.88% | 1 | 11.11% |
Total | 104 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(kmap_atomic_prot);
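A hedged sketch of calling kmap_atomic_prot() directly with a non-default protection. The helper name fill_uncached() and the choice of PAGE_KERNEL_NOCACHE are assumptions made for illustration only; they do not come from this file, and real code would also have to consider cache aliasing with the page's other mappings.

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/pgtable.h>

/* Hypothetical helper: fill a page through an uncached atomic mapping. */
static void fill_uncached(struct page *page, u8 pattern)
{
        void *vaddr = kmap_atomic_prot(page, PAGE_KERNEL_NOCACHE);

        memset(vaddr, pattern, PAGE_SIZE);
        kunmap_atomic(vaddr);   /* wrapper around __kunmap_atomic() below */
}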
void *kmap_atomic(struct page *page)
{
        return kmap_atomic_prot(page, kmap_prot);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Jeremy Fitzhardinge | 14 | 73.68% | 1 | 33.33% |
Peter Zijlstra | 4 | 21.05% | 1 | 33.33% |
Américo Wang | 1 | 5.26% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(kmap_atomic);
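A hedged usage sketch of the atomic pair: the code between kmap_atomic() and kunmap_atomic() must neither sleep nor fault, and nested mappings are released in reverse order because the per-CPU slots managed by kmap_atomic_idx_push()/kmap_atomic_idx_pop() form a small stack. copy_page_atomic() is a made-up name for this illustration.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy one (possibly highmem) page to another. */
static void copy_page_atomic(struct page *dst, struct page *src)
{
        void *d = kmap_atomic(dst);
        void *s = kmap_atomic(src);

        memcpy(d, s, PAGE_SIZE);

        /* Release in reverse order of mapping: the kmap slots form a stack. */
        kunmap_atomic(s);
        kunmap_atomic(d);
}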
/*
* This is the same as kmap_atomic() but can map memory that doesn't
* have a struct page associated with it.
*/
void *kmap_atomic_pfn(unsigned long pfn)
{
        return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Peter Zijlstra | 13 | 72.22% | 1 | 50.00% |
Jeremy Fitzhardinge | 5 | 27.78% | 1 | 50.00% |
Total | 18 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
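A hedged sketch of kmap_atomic_pfn() on a frame that has no struct page (for example a reserved or device range). read_pfn_word() is a name invented here; real drivers would more often reach this through higher-level mapping helpers.

#include <linux/highmem.h>
#include <linux/types.h>

/* Hypothetical helper: read the first 32-bit word of an arbitrary pfn. */
static u32 read_pfn_word(unsigned long pfn)
{
        void *vaddr = kmap_atomic_pfn(pfn);
        u32 val = *(u32 *)vaddr;

        kunmap_atomic(vaddr);   /* the fixmap slot is torn down as usual */
        return val;
}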
void __kunmap_atomic(void *kvaddr)
{
        unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

        if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
            vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
                int idx, type;

                type = kmap_atomic_idx();
                idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
                WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
                /*
                 * Force other mappings to Oops if they try to access this
                 * pte without first remapping it.  Keeping stale mappings
                 * around is also a bad idea, in case the page changes
                 * cacheability attributes or becomes a protected page in a
                 * hypervisor.
                 */
                kpte_clear_flush(kmap_pte - idx, vaddr);
                kmap_atomic_idx_pop();
                arch_flush_lazy_mmu_mode();
        }
#ifdef CONFIG_DEBUG_HIGHMEM
        else {
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
        }
#endif

        pagefault_enable();
        preempt_enable();
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Peter Zijlstra | 49 | 39.84% | 3 | 33.33% |
Andrew Morton | 43 | 34.96% | 2 | 22.22% |
Jeremy Fitzhardinge | 23 | 18.70% | 1 | 11.11% |
David Hildenbrand | 3 | 2.44% | 1 | 11.11% |
Konrad Rzeszutek Wilk | 3 | 2.44% | 1 | 11.11% |
Zachary Amsden | 2 | 1.63% | 1 | 11.11% |
Total | 123 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(__kunmap_atomic);
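Callers normally do not invoke __kunmap_atomic() directly: they use the kunmap_atomic() wrapper from include/linux/highmem.h, which in kernels of this era looks roughly like the following (quoted from memory, so treat the exact text as an approximation).

#define kunmap_atomic(addr)                                     \
do {                                                            \
        BUILD_BUG_ON(__same_type((addr), struct page *));       \
        __kunmap_atomic(addr);                                  \
} while (0)

The BUILD_BUG_ON() catches the common mistake of passing the struct page instead of the kernel virtual address returned by kmap_atomic().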
void __init set_highmem_pages_init(void)
{
        struct zone *zone;
        int nid;

        /*
         * Explicitly reset zone->managed_pages because set_highmem_pages_init()
         * is invoked before free_all_bootmem().
         */
        reset_all_zones_managed_pages();

        for_each_zone(zone) {
                unsigned long zone_start_pfn, zone_end_pfn;

                if (!is_highmem(zone))
                        continue;

                zone_start_pfn = zone->zone_start_pfn;
                zone_end_pfn = zone_start_pfn + zone->spanned_pages;

                nid = zone_to_nid(zone);
                printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
                       zone->name, nid, zone_start_pfn, zone_end_pfn);

                add_highpages_with_active_regions(nid, zone_start_pfn,
                                                  zone_end_pfn);
        }
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Pekka J Enberg | 83 | 95.40% | 1 | 50.00% |
Jiang Liu | 4 | 4.60% | 1 | 50.00% |
Total | 87 | 100.00% | 2 | 100.00% |
Overall Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Andrew Morton | 185 | 40.04% | 5 | 23.81% |
Peter Zijlstra | 102 | 22.08% | 3 | 14.29% |
Pekka J Enberg | 87 | 18.83% | 1 | 4.76% |
Jeremy Fitzhardinge | 46 | 9.96% | 2 | 9.52% |
Dave Hansen | 12 | 2.60% | 1 | 4.76% |
Jiang Liu | 7 | 1.52% | 1 | 4.76% |
David Hildenbrand | 6 | 1.30% | 1 | 4.76% |
Konrad Rzeszutek Wilk | 6 | 1.30% | 1 | 4.76% |
Eric Anholt | 3 | 0.65% | 1 | 4.76% |
Zachary Amsden | 2 | 0.43% | 1 | 4.76% |
Alexey Dobriyan | 2 | 0.43% | 1 | 4.76% |
Américo Wang | 2 | 0.43% | 1 | 4.76% |
Figo.zhang | 1 | 0.22% | 1 | 4.76% |
Paul Gortmaker | 1 | 0.22% | 1 | 4.76% |
Total | 462 | 100.00% | 21 | 100.00% |