cregit-Linux: how code gets into the kernel

Release 4.14 arch/x86/mm/highmem_32.c

Directory: arch/x86/mm
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>


void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Andrew Morton          30      88.24%   2        66.67%
Dave Hansen            4       11.76%   1        33.33%
Total                  34      100.00%  3        100.00%

EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Andrew Morton          28      87.50%   1        50.00%
Dave Hansen            4       12.50%   1        50.00%
Total                  32      100.00%  2        100.00%

EXPORT_SYMBOL(kunmap);
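A minimal usage sketch of the sleeping interface above, assuming a caller in process context that already holds a reference to the page; the helper name and buffer arguments are hypothetical.

/* Hypothetical helper: copy a buffer into a (possibly highmem) page.
 * kmap() may sleep, so this is only valid in process context. */
static void copy_buf_to_page(struct page *page, const void *buf, size_t len)
{
	void *vaddr = kmap(page);	/* lowmem pages just return page_address() */

	memcpy(vaddr, buf, len);
	kunmap(page);			/* no-op for lowmem, drops the kmap otherwise */
}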
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Andrew Morton          81      77.88%   3        33.33%
Peter Zijlstra         9       8.65%    2        22.22%
Jeremy Fitzhardinge    4       3.85%    1        11.11%
Dave Hansen            4       3.85%    1        11.11%
David Hildenbrand      3       2.88%    1        11.11%
Konrad Rzeszutek Wilk  3       2.88%    1        11.11%
Total                  104     100.00%  9        100.00%

EXPORT_SYMBOL(kmap_atomic_prot);
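A sketch of how a caller might use kmap_atomic_prot() to request a non-default protection; the uncached mapping and the helper name are assumptions for illustration, not something this file prescribes.

/* Hypothetical helper: write a status word through an uncached atomic
 * mapping. pgprot_noncached(PAGE_KERNEL) stands in for whatever pgprot
 * the caller actually needs. */
static void write_status_uncached(struct page *page, u32 status)
{
	u32 *vaddr = kmap_atomic_prot(page, pgprot_noncached(PAGE_KERNEL));

	*vaddr = status;
	kunmap_atomic(vaddr);		/* pairs with kmap_atomic_prot() */
}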
void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Jeremy Fitzhardinge    14      73.68%   1        33.33%
Peter Zijlstra         4       21.05%   1        33.33%
Américo Wang           1       5.26%    1        33.33%
Total                  19      100.00%  3        100.00%

EXPORT_SYMBOL(kmap_atomic);
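A short sketch of the atomic variant: nothing between kmap_atomic() and kunmap_atomic() may sleep, so the mapping is held only across a brief operation. The zero-fill helper is hypothetical.

/* Hypothetical helper: zero one page without sleeping. Preemption and
 * pagefaults are disabled for the lifetime of the mapping. */
static void zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}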
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         13      72.22%   1        50.00%
Jeremy Fitzhardinge    5       27.78%   1        50.00%
Total                  18      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
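A sketch of the PFN-based variant for memory with no struct page (for example a reserved or device range); the PFN source, offset, and helper name are assumptions.

/* Hypothetical helper: read a 32-bit word from a PFN that has no struct
 * page backing it. The offset must stay within the page, and the unmap
 * still goes through kunmap_atomic() on the returned virtual address. */
static u32 read_u32_from_pfn(unsigned long pfn, unsigned int offset)
{
	void *vaddr = kmap_atomic_pfn(pfn);
	u32 val = *(u32 *)(vaddr + offset);

	kunmap_atomic(vaddr);
	return val;
}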
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it. Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
	preempt_enable();
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         49      39.84%   3        33.33%
Andrew Morton          43      34.96%   2        22.22%
Jeremy Fitzhardinge    23      18.70%   1        11.11%
David Hildenbrand      3       2.44%    1        11.11%
Konrad Rzeszutek Wilk  3       2.44%    1        11.11%
Zachary Amsden         2       1.63%    1        11.11%
Total                  123     100.00%  9        100.00%

EXPORT_SYMBOL(__kunmap_atomic);
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Pekka J Enberg         83      95.40%   1        50.00%
Jiang Liu              4       4.60%    1        50.00%
Total                  87      100.00%  2        100.00%


Overall Contributors

Person                 Tokens  Prop     Commits  CommitProp
Andrew Morton          185     40.04%   5        23.81%
Peter Zijlstra         102     22.08%   3        14.29%
Pekka J Enberg         87      18.83%   1        4.76%
Jeremy Fitzhardinge    46      9.96%    2        9.52%
Dave Hansen            12      2.60%    1        4.76%
Jiang Liu              7       1.52%    1        4.76%
David Hildenbrand      6       1.30%    1        4.76%
Konrad Rzeszutek Wilk  6       1.30%    1        4.76%
Eric Anholt            3       0.65%    1        4.76%
Zachary Amsden         2       0.43%    1        4.76%
Alexey Dobriyan        2       0.43%    1        4.76%
Américo Wang           2       0.43%    1        4.76%
Figo.zhang             1       0.22%    1        4.76%
Paul Gortmaker         1       0.22%    1        4.76%
Total                  462     100.00%  21       100.00%