cregit-Linux: how code gets into the kernel

Release 4.14 arch/x86/mm/ioremap.c

Directory: arch/x86/mm
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */

int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Thomas Gleixner          28       31.11%    3         33.33%
Andi Kleen               25       27.78%    1         11.11%
Venkatesh Pallipadi      14       15.56%    2         22.22%
Toshi Kani               13       14.44%    1         11.11%
Juergen Gross            7        7.78%     1         11.11%
Harvey Harrison          3        3.33%     1         11.11%
Total                    90       100.00%   9         100.00%


static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	return 0;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Roland Dreier            61       100.00%   1         100.00%
Total                    61       100.00%   1         100.00%

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Andi Kleen               112      21.92%    5         11.90%
Venkatesh Pallipadi      88       17.22%    5         11.90%
Juergen Gross            63       12.33%    2         4.76%
Thomas Gleixner          52       10.18%    9         21.43%
Xiaotian Feng            33       6.46%     1         2.38%
Pekka Paalanen           32       6.26%     2         4.76%
Toshi Kani               30       5.87%     2         4.76%
Roland Dreier            17       3.33%     1         2.38%
Randy Dunlap             17       3.33%     1         2.38%
Suresh B. Siddha         14       2.74%     2         4.76%
Tim Gardner              10       1.96%     1         2.38%
Ingo Molnar              9        1.76%     3         7.14%
Kenji Kaneshige          9        1.76%     1         2.38%
Christoph Lameter        8        1.57%     1         2.38%
Laura Abbott             7        1.37%     1         2.38%
H. Peter Anvin           4        0.78%     1         2.38%
Håvard Skinnemoen        2        0.39%     1         2.38%
Al Viro                  2        0.39%     1         2.38%
Mike Travis              1        0.20%     1         2.38%
Linus Torvalds           1        0.20%     1         2.38%
Total                    511      100.00%   42        100.00%
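
To make the "non-page-aligned mappings" note above concrete, here is a minimal user-space sketch of the same alignment math with hypothetical numbers. It assumes 4KB pages and uses a simplified PAGE_MASK; the kernel additionally masks with PHYSICAL_PAGE_MASK to clip bits above the physical address width.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1ULL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical request: 0x100 bytes at a non-page-aligned address */
	uint64_t phys_addr = 0xfebc1a04ULL;
	uint64_t size      = 0x100;
	uint64_t last_addr = phys_addr + size - 1;

	uint64_t offset = phys_addr & ~PAGE_MASK;	/* 0xa04 */

	phys_addr &= PAGE_MASK;				/* 0xfebc1000 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;	/* 0x1000: one page */

	/* The caller receives the mapped base plus the sub-page offset */
	printf("map %#llx for %#llx bytes, return base + %#llx\n",
	       (unsigned long long)phys_addr,
	       (unsigned long long)size,
	       (unsigned long long)offset);
	return 0;
}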

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Andi Kleen               20       55.56%    2         25.00%
Christoph Lameter        6        16.67%    1         12.50%
Juergen Gross            5        13.89%    1         12.50%
Suresh B. Siddha         2        5.56%     1         12.50%
Linus Torvalds           1        2.78%     1         12.50%
Al Viro                  1        2.78%     1         12.50%
Luis R. Rodriguez        1        2.78%     1         12.50%
Total                    36       100.00%   8         100.00%
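
As a usage illustration (not part of this file): ioremap_nocache() is typically paired with request_mem_region() and undone with iounmap(). A minimal sketch follows; the device name, BAR address, and register offset are hypothetical.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

#define MYDEV_BAR_PHYS 0xfebc0000UL	/* hypothetical MMIO BAR address */
#define MYDEV_BAR_LEN  0x1000UL
#define MYDEV_REG_CTRL 0x04		/* hypothetical control register */

static void __iomem *mydev_regs;

static int mydev_map(void)
{
	/* Claim the region so other drivers cannot grab it */
	if (!request_mem_region(MYDEV_BAR_PHYS, MYDEV_BAR_LEN, "mydev"))
		return -EBUSY;

	/* UC- mapping: accesses go straight to the device, uncached */
	mydev_regs = ioremap_nocache(MYDEV_BAR_PHYS, MYDEV_BAR_LEN);
	if (!mydev_regs) {
		release_mem_region(MYDEV_BAR_PHYS, MYDEV_BAR_LEN);
		return -ENOMEM;
	}

	writel(0x1, mydev_regs + MYDEV_REG_CTRL);	/* hypothetical enable */
	return 0;
}

static void mydev_unmap(void)
{
	iounmap(mydev_regs);
	release_mem_region(MYDEV_BAR_PHYS, MYDEV_BAR_LEN);
}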

EXPORT_SYMBOL(ioremap_nocache);

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Luis R. Rodriguez        35       100.00%   1         100.00%
Total                    35       100.00%   1         100.00%

EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Venkatesh Pallipadi      22       75.86%    2         50.00%
Christoph Lameter        6        20.69%    1         25.00%
Juergen Gross            1        3.45%     1         25.00%
Total                    29       100.00%   4         100.00%
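
A hedged sketch of the write-combining case: mapping a hypothetical framebuffer aperture with ioremap_wc() so that back-to-back pixel stores can be merged into bursts. All names below are illustrative.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static void __iomem *fb_base;

static int fb_map(resource_size_t fb_phys, unsigned long fb_len)
{
	/* WC mapping: stores may be buffered and combined by the CPU */
	fb_base = ioremap_wc(fb_phys, fb_len);
	return fb_base ? 0 : -ENOMEM;
}

static void fb_fill(u32 pixel, unsigned long count)
{
	unsigned long i;

	for (i = 0; i < count; i++)
		writel(pixel, fb_base + i * sizeof(u32));

	/* Order the combined stores before e.g. a subsequent doorbell write */
	wmb();
}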

EXPORT_SYMBOL(ioremap_wc);

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0));
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Toshi Kani               21       72.41%    1         50.00%
Venkatesh Pallipadi      8        27.59%    1         50.00%
Total                    29       100.00%   2         100.00%

EXPORT_SYMBOL(ioremap_wt);
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Thomas Gleixner          21       72.41%    1         25.00%
Christoph Lameter        6        20.69%    1         25.00%
Linus Torvalds           1        3.45%     1         25.00%
Juergen Gross            1        3.45%     1         25.00%
Total                    29       100.00%   4         100.00%

EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Rik Van Riel             33       84.62%    1         50.00%
Juergen Gross            6        15.38%    1         50.00%
Total                    39       100.00%   2         100.00%

EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the global
	 * lists until we're done with it. cpa takes care of the direct
	 * mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Andi Kleen               112      66.27%    6         37.50%
Thomas Gleixner          22       13.02%    3         18.75%
Venkatesh Pallipadi      16       9.47%     1         6.25%
Tom Lendacky             10       5.92%     1         6.25%
Pekka Paalanen           4        2.37%     1         6.25%
Al Viro                  3        1.78%     3         18.75%
JoonSoo Kim              2        1.18%     1         6.25%
Total                    169      100.00%   16        100.00%

EXPORT_SYMBOL(iounmap);
int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Toshi Kani               19       79.17%    1         33.33%
Borislav Petkov          4        16.67%    1         33.33%
Jan Beulich              1        4.17%     1         33.33%
Total                    24       100.00%   3         100.00%


int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Toshi Kani               9        64.29%    1         33.33%
Borislav Petkov          4        28.57%    1         33.33%
Jan Beulich              1        7.14%     1         33.33%
Total                    14       100.00%   3         100.00%

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Venkatesh Pallipadi      27       50.00%    1         20.00%
Ingo Molnar              21       38.89%    2         40.00%
Tom Lendacky             5        9.26%     1         20.00%
Thierry Reding           1        1.85%     1         20.00%
Total                    54       100.00%   5         100.00%


void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Venkatesh Pallipadi      27       93.10%    1         33.33%
Tom Lendacky             1        3.45%     1         33.33%
Thierry Reding           1        3.45%     1         33.33%
Total                    29       100.00%   3         100.00%

/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             115      100.00%   2         100.00%
Total                    115      100.00%   2         100.00%

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             119      100.00%   1         100.00%
Total                    119      100.00%   1         100.00%

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);

		paddr_next = data->next;
		len = data->len;

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             112      100.00%   1         100.00%
Total                    112      100.00%   1         100.00%

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));

		paddr_next = data->next;
		len = data->len;

		early_memunmap(data, sizeof(*data));

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             115      100.00%   1         100.00%
Total                    115      100.00%   1         100.00%

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!sme_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (memremap_is_setup_data(phys_addr, size) ||
	    memremap_is_efi_data(phys_addr, size) ||
	    memremap_should_map_decrypted(phys_addr, size))
		return false;

	return true;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             72       100.00%   1         100.00%
Total                    72       100.00%   1         100.00%
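
For illustration, a sketch of how the MEMREMAP_ENC/MEMREMAP_DEC flags tested above arrive here: they are passed in by a memremap() caller. The pmem region below is hypothetical.

#include <linux/io.h>
#include <linux/types.h>

/*
 * With SME active, a plain MEMREMAP_WB remap of RAM is mapped encrypted
 * by default; MEMREMAP_DEC forces a decrypted mapping, e.g. for
 * persistent memory whose contents must survive a key change on reboot.
 */
static void *map_pmem_decrypted(resource_size_t pmem_phys, size_t len)
{
	return memremap(pmem_phys, len, MEMREMAP_WB | MEMREMAP_DEC);
}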

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	if (!sme_active())
		return prot;

	if (early_memremap_is_setup_data(phys_addr, size) ||
	    memremap_is_efi_data(phys_addr, size) ||
	    memremap_should_map_decrypted(phys_addr, size))
		prot = pgprot_decrypted(prot);
	else
		prot = pgprot_encrypted(prot);

	return prot;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             66       100.00%   1         100.00%
Total                    66       100.00%   1         100.00%


bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             23       100.00%   1         100.00%
Total                    23       100.00%   1         100.00%

#ifdef CONFIG_ARCH_USE_MEMREMAP_PROT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             24       100.00%   1         100.00%
Total                    24       100.00%   1         100.00%

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             37       100.00%   1         100.00%
Total                    37       100.00%   1         100.00%

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             24       100.00%   1         100.00%
Total                    24       100.00%   1         100.00%

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	/* Be sure the write-protect PAT entry is set for write-protect */
	if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
		return NULL;

	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             37       100.00%   1         100.00%
Total                    37       100.00%   1         100.00%
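
A sketch of a hypothetical early-boot caller of early_memremap_decrypted(), assuming the usual declarations are in scope: data handed over unencrypted by a bootloader has to be mapped decrypted before the regular memremap() machinery is up.

/* Hypothetical early consumer of an unencrypted bootloader blob */
static void __init parse_boot_blob(resource_size_t blob_phys, unsigned long len)
{
	void *blob = early_memremap_decrypted(blob_phys, len);

	if (!blob)
		return;
	/* ... consume the blob ... */
	early_memunmap(blob, len);
}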

#endif	/* CONFIG_ARCH_USE_MEMREMAP_PROT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Ian Campbell             36       49.32%    1         20.00%
Thomas Gleixner          13       17.81%    1         20.00%
Kirill A. Shutemov       12       16.44%    1         20.00%
Jeremy Fitzhardinge      11       15.07%    1         20.00%
Andrew Lutomirski        1        1.37%     1         20.00%
Total                    73       100.00%   5         100.00%


static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Thomas Gleixner          16       69.57%    1         50.00%
Ian Campbell             7        30.43%    1         50.00%
Total                    23       100.00%   2         100.00%


bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Jeremy Fitzhardinge      32       100.00%   1         100.00%
Total                    32       100.00%   1         100.00%


void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Thomas Gleixner          98       49.00%    1         14.29%
Andrew Lutomirski        51       25.50%    1         14.29%
Jan Beulich              28       14.00%    1         14.29%
Ian Campbell             20       10.00%    2         28.57%
Mark Salter              2        1.00%     1         14.29%
Wang Chen                1        0.50%     1         14.29%
Total                    200      100.00%   7         100.00%


void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Thomas Gleixner          63       74.12%    1         25.00%
Ian Campbell             19       22.35%    1         25.00%
Jeremy Fitzhardinge      2        2.35%     1         25.00%
Masami Hiramatsu         1        1.18%     1         25.00%
Total                    85       100.00%   4         100.00%
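
For context, a sketch of a hypothetical early-boot user of the fixmap slots set up above, assuming the usual declarations are in scope: early_ioremap() hands out bm_pte-backed mappings until the vmalloc-based ioremap() becomes available.

/* Hypothetical early peek at a firmware table */
static void __init peek_firmware_table(resource_size_t table_phys)
{
	void __iomem *p = early_ioremap(table_phys, PAGE_SIZE);

	if (!p)
		return;
	/* ... read the table via readl()/memcpy_fromio() ... */
	early_iounmap(p, PAGE_SIZE);
}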


Overall Contributors

Person                   Tokens   Prop      Commits   Commit Prop
Tom Lendacky             785      32.26%    5         5.26%
Thomas Gleixner          333      13.69%    12        12.63%
Andi Kleen               298      12.25%    12        12.63%
Venkatesh Pallipadi      215      8.84%     8         8.42%
Toshi Kani               95       3.90%     4         4.21%
Ian Campbell             89       3.66%     2         2.11%
Juergen Gross            83       3.41%     2         2.11%
Roland Dreier            78       3.21%     1         1.05%
Andrew Lutomirski        52       2.14%     2         2.11%
Jeremy Fitzhardinge      52       2.14%     6         6.32%
Luis R. Rodriguez        42       1.73%     2         2.11%
Pekka Paalanen           39       1.60%     2         2.11%
Rik Van Riel             38       1.56%     1         1.05%
Ingo Molnar              36       1.48%     8         8.42%
Xiaotian Feng            33       1.36%     1         1.05%
Jan Beulich              30       1.23%     2         2.11%
Christoph Lameter        26       1.07%     1         1.05%
Randy Dunlap             17       0.70%     1         1.05%
Suresh B. Siddha         16       0.66%     2         2.11%
Kirill A. Shutemov       12       0.49%     1         1.05%
Tim Gardner              10       0.41%     1         1.05%
Kenji Kaneshige          9        0.37%     1         1.05%
Borislav Petkov          8        0.33%     2         2.11%
Laura Abbott             8        0.33%     2         2.11%
Al Viro                  6        0.25%     3         3.16%
H. Peter Anvin           4        0.16%     1         1.05%
Harvey Harrison          3        0.12%     1         1.05%
Linus Torvalds           3        0.12%     1         1.05%
Håvard Skinnemoen        2        0.08%     1         1.05%
Thierry Reding           2        0.08%     1         1.05%
Wanpeng Li               2        0.08%     1         1.05%
JoonSoo Kim              2        0.08%     1         1.05%
Mark Salter              2        0.08%     1         1.05%
Mike Travis              1        0.04%     1         1.05%
Wang Chen                1        0.04%     1         1.05%
Masami Hiramatsu         1        0.04%     1         1.05%
Total                    2433     100.00%   95        100.00%