Contributors: 11

| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Russell King | 63 | 34.62% | 5 | 27.78% |
| Peter Zijlstra | 37 | 20.33% | 2 | 11.11% |
| Catalin Marinas | 25 | 13.74% | 3 | 16.67% |
| Hyok S. Choi | 25 | 13.74% | 1 | 5.56% |
| Vishal Moola (Oracle) | 20 | 10.99% | 1 | 5.56% |
| Will Deacon | 3 | 1.65% | 1 | 5.56% |
| Matthew Wilcox | 3 | 1.65% | 1 | 5.56% |
| Thomas Gleixner | 2 | 1.10% | 1 | 5.56% |
| Andrew Morton | 2 | 1.10% | 1 | 5.56% |
| Anders Roxell | 1 | 0.55% | 1 | 5.56% |
| Aneesh Kumar K.V | 1 | 0.55% | 1 | 5.56% |
| Total | 182 | | 18 | |

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU
#include <linux/pagemap.h>
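
/*
 * Without an MMU there is no TLB, so flushing is a no-op; the (void) cast
 * only marks the argument as used.
 */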
#define tlb_flush(tlb)	((void) tlb)
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/tlbflush.h>
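
/*
 * Called by the generic mmu_gather code to release a page table page once
 * it is safe to do so; free_page_and_swap_cache() frees the page along with
 * any swap cache still associated with it.
 */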
static inline void __tlb_remove_table(void *_table)
{
	free_page_and_swap_cache((struct page *)_table);
}
#include <asm-generic/tlb.h>
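
/*
 * Free a PTE page table page: run its destructor, then hand the ptdesc to
 * the mmu_gather so the underlying page is only released after the TLB has
 * been flushed.
 */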
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
	struct ptdesc *ptdesc = page_ptdesc(pte);
	pagetable_pte_dtor(ptdesc);
#ifndef CONFIG_ARM_LPAE
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr = (addr & PMD_MASK) + SZ_1M;
	__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif
	tlb_remove_ptdesc(tlb, ptdesc);
}
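
/*
 * Free a PMD page table page. Only LPAE allocates a separate page for the
 * PMD level; with the classic 2-level page tables there is nothing to do.
 */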
static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	struct ptdesc *ptdesc = virt_to_ptdesc(pmdp);
	pagetable_pmd_dtor(ptdesc);
	tlb_remove_ptdesc(tlb, ptdesc);
#endif
}
#endif /* CONFIG_MMU */
#endif
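
For readers puzzling over the classic-MMU range adjustment in __pte_free_tlb() above, here is a minimal user-space sketch of the same arithmetic. The EX_* constants and the main() driver are illustrative stand-ins (assuming 4 KB pages and the 2 MB pmd span of the classic, non-LPAE ARM MMU), not part of the kernel header.

#include <stdio.h>

/* Illustrative stand-ins for the kernel constants (classic ARM, non-LPAE). */
#define EX_PAGE_SIZE	0x1000UL	/* 4 KB pages */
#define EX_PMD_SIZE	0x200000UL	/* one Linux pmd entry spans 2 MB */
#define EX_PMD_MASK	(~(EX_PMD_SIZE - 1))
#define EX_SZ_1M	0x100000UL

int main(void)
{
	unsigned long addr = 0x00334000UL;	/* any address mapped by the pte page */

	/*
	 * Same arithmetic as __pte_free_tlb(): step to the 1 MB boundary in
	 * the middle of the 2 MB pmd span, then cover one page on either side
	 * of it, so the gathered flush range touches both 1 MB halves.
	 */
	unsigned long mid   = (addr & EX_PMD_MASK) + EX_SZ_1M;
	unsigned long start = mid - EX_PAGE_SIZE;
	unsigned long end   = start + 2 * EX_PAGE_SIZE;

	printf("flush range: [%#lx, %#lx)\n", start, end);	/* [0x2ff000, 0x301000) */
	return 0;
}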