cregit-Linux: how code gets into the kernel

Release 4.14 arch/x86/mm/iomap_32.c

Directory: arch/x86/mm
/*
 * Copyright © 2008 Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 */

#include <asm/iomap.h>
#include <asm/pat.h>
#include <linux/export.h>
#include <linux/highmem.h>


/*
 * Tell whether the physical range [base, base + size) can be mapped at all.
 * Returns 1 if a mapping is possible, 0 otherwise.
 */
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
{
#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
	/* There is no way to map greater than 1 << 32 address without PAE */
	return base + size <= 0x100000000ULL;
#else
	return 1;
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Venkatesh Pallipadi2866.67%250.00%
Andrew Morton1126.19%125.00%
Ingo Molnar37.14%125.00%
Total42100.00%4100.00%


/*
 * Reserve a write-combining memtype for [base, base + size) and hand back,
 * via @prot, the page protection bits to use when mapping that range.
 *
 * Returns 0 on success, -EINVAL when the range cannot be mapped at all,
 * or the error returned by the memtype reservation.
 */
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int err;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	err = io_reserve_memtype(base, base + size, &pcm);
	if (err)
		return err;

	/* io_reserve_memtype() can adjust pcm; use the mode it settled on. */
	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Venkatesh Pallipadi6788.16%150.00%
Juergen Gross911.84%150.00%
Total76100.00%2100.00%

EXPORT_SYMBOL_GPL(iomap_create_wc);

/* Release the memtype reservation taken by iomap_create_wc() for this range. */
void iomap_free(resource_size_t base, unsigned long size)
{
	io_free_memtype(base, base + size);
}

Contributors

PersonTokensPropCommitsCommitProp
Venkatesh Pallipadi2095.24%150.00%
Peter Zijlstra14.76%150.00%
Total21100.00%2100.00%

EXPORT_SYMBOL_GPL(iomap_free);

/*
 * Map @pfn into a per-CPU fixmap slot with protections @prot and return the
 * kernel virtual address.  Runs with preemption and pagefaults disabled; the
 * mapping stays valid until the matching unmap re-enables them.
 */
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	unsigned long vaddr;
	int slot, kmtype;

	preempt_disable();
	pagefault_disable();

	kmtype = kmap_atomic_idx_push();
	slot = kmtype + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + slot);
	set_pte(kmap_pte - slot, pfn_pte(pfn, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

Contributors

PersonTokensPropCommitsCommitProp
Keith Packard6181.33%116.67%
Peter Zijlstra810.67%116.67%
David Hildenbrand34.00%116.67%
Ingo Molnar11.33%116.67%
Akinobu Mita11.33%116.67%
Eric Anholt11.33%116.67%
Total75100.00%6100.00%

/*
 * Map 'pfn' using protections 'prot'.
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * On non-PAT systems, translate any non-WB request to UC- in case
	 * the caller set the PWT bit in prot directly without going through
	 * pgprot_writecombine().  UC- resolves to uncached when the MTRR is
	 * UC or WC, which captures the caller's real intent: "WC if the
	 * MTRR allows it, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	return (void __force __iomem *)kmap_atomic_prot_pfn(pfn, prot);
}

Contributors

PersonTokensPropCommitsCommitProp
Keith Packard1935.19%114.29%
Eric Anholt1222.22%114.29%
Juergen Gross916.67%114.29%
Francisco Jerez712.96%114.29%
Borislav Petkov47.41%114.29%
Akinobu Mita23.70%114.29%
Luis R. Rodriguez11.85%114.29%
Total54100.00%7100.00%

EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

/*
 * Tear down a mapping created by iomap_atomic_prot_pfn() and re-enable
 * pagefaults and preemption.  Addresses outside the fixmap kmap window are
 * left alone (nothing to clear), but the disable counts are still dropped.
 */
void iounmap_atomic(void __iomem *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int kmtype = kmap_atomic_idx();
		int slot = kmtype + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + slot));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remap it.  Keeping stale mappings around
		 * is a bad idea also, in case the page changes cacheability
		 * attributes or becomes a protected page in a hypervisor.
		 */
		kpte_clear_flush(kmap_pte - slot, vaddr);
		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}

Contributors

PersonTokensPropCommitsCommitProp
Keith Packard4749.47%120.00%
Peter Zijlstra4446.32%240.00%
David Hildenbrand33.16%120.00%
Francisco Jerez11.05%120.00%
Total95100.00%5100.00%

/* Exported (GPL-only) so modules can pair it with iomap_atomic_prot_pfn(). */
EXPORT_SYMBOL_GPL(iounmap_atomic);

Overall Contributors

PersonTokensPropCommitsCommitProp
Keith Packard14336.02%15.56%
Venkatesh Pallipadi12130.48%211.11%
Peter Zijlstra5413.60%211.11%
Juergen Gross184.53%15.56%
Eric Anholt164.03%15.56%
Andrew Morton112.77%15.56%
Francisco Jerez82.02%15.56%
Ingo Molnar82.02%211.11%
Akinobu Mita61.51%316.67%
David Hildenbrand61.51%15.56%
Borislav Petkov41.01%15.56%
Paul Gortmaker10.25%15.56%
Luis R. Rodriguez10.25%15.56%
Total397100.00%18100.00%
Directory: arch/x86/mm
Information contained on this website is provided for historical purposes only and does not indicate or represent copyright ownership.
Created with cregit.