Release 4.11 arch/arm/include/asm/tlb.h
/*
* arch/arm/include/asm/tlb.h
*
* Copyright (C) 2002 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Experimentation shows that on a StrongARM, it appears to be faster
* to use the "invalidate whole tlb" rather than "invalidate single
* tlb" for this.
*
* This appears to be true both for the process fork+exit case and for
* the munmap-large-area case.
*/
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H
#include <asm/cacheflush.h>
#ifndef CONFIG_MMU
#include <linux/pagemap.h>
#define tlb_flush(tlb) ((void) tlb)
#include <asm-generic/tlb.h>
#else /* !CONFIG_MMU */
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#define MMU_GATHER_BUNDLE 8
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
static inline void __tlb_remove_table(void *_table)
{
        free_page_and_swap_cache((struct page *)_table);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steve Capper | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
struct mmu_table_batch {
        struct rcu_head rcu;
        unsigned int nr;
        void *tables[0];
};
#define MAX_TABLE_BATCH \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
#else
#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
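As a rough sizing check for MAX_TABLE_BATCH above (the structure-layout figures here are assumptions for illustration, not taken from this file):

/*
 * Sizing sketch, assuming 4 KiB pages and a 32-bit ARM layout in which
 * struct rcu_head is two pointers (8 bytes) and unsigned int is 4 bytes:
 * sizeof(struct mmu_table_batch) == 12, so
 * MAX_TABLE_BATCH == (4096 - 12) / sizeof(void *) == 1021 table pointers
 * per RCU batch.
 */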
/*
* TLB handling. This allows us to remove pages from the page
* tables, and efficiently handle the TLB issues.
*/
struct mmu_gather {
        struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        struct mmu_table_batch *batch;
        unsigned int need_flush;
#endif
        unsigned int fullmm;
        struct vm_area_struct *vma;
        unsigned long start, end;
        unsigned long range_start;
        unsigned long range_end;
        unsigned int nr;
        unsigned int max;
        struct page **pages;
        struct page *local[MMU_GATHER_BUNDLE];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
/*
 * This is unnecessarily complex. There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas. See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL. Additionally, page tables will be freed.
 *  3. Unmapping argument pages. See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
        if (tlb->fullmm || !tlb->vma)
                flush_tlb_mm(tlb->mm);
        else if (tlb->range_end > 0) {
                flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 68 | 100.00% | 1 | 100.00% |
Total | 68 | 100.00% | 1 | 100.00% |
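The three cases above are driven from generic mm code rather than from this header. Below is a condensed, hypothetical sketch of case 1 (unmapping part of one VMA); example_unmap_range() and lookup_pte() are invented names used only to show the call sequence, and the real driver (which also clears the ptes and gathers the underlying pages) lives in mm/memory.c:

/* Illustrative sketch only, not part of this header. */
static void example_unmap_range(struct mm_struct *mm,
                                struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;
        unsigned long addr;

        tlb_gather_mmu(&tlb, mm, start, end);   /* fullmm == 0 for a range */
        tlb_start_vma(&tlb, vma);               /* records vma, resets the range */

        for (addr = start; addr < end; addr += PAGE_SIZE) {
                pte_t *ptep = lookup_pte(mm, addr);     /* placeholder helper */

                if (ptep)
                        tlb_remove_tlb_entry(&tlb, ptep, addr); /* widens the flush range */
        }

        tlb_end_vma(&tlb, vma);                 /* flushes the recorded range */
        tlb_finish_mmu(&tlb, start, end);       /* frees gathered pages and the gather */
}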
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
        if (!tlb->fullmm) {
                if (addr < tlb->range_start)
                        tlb->range_start = addr;
                if (addr + PAGE_SIZE > tlb->range_end)
                        tlb->range_end = addr + PAGE_SIZE;
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 57 | 100.00% | 1 | 100.00% |
Total | 57 | 100.00% | 1 | 100.00% |
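A worked example of how the recorded range grows (addresses and a 4 KiB page size assumed purely for illustration):

/*
 * After tlb_add_flush(tlb, 0x2000) and tlb_add_flush(tlb, 0x5000),
 * range_start == 0x2000 and range_end == 0x6000, so the eventual
 * flush_tlb_range() in tlb_flush() covers [0x2000, 0x6000).
 */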
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
        unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

        if (addr) {
                tlb->pages = (void *)addr;
                tlb->max = PAGE_SIZE / sizeof(struct page *);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 15 | 55.56% | 1 | 33.33% |
Steve Capper | 10 | 37.04% | 1 | 33.33% |
Linus Torvalds | 2 | 7.41% | 1 | 33.33% |
Total | 27 | 100.00% | 3 | 100.00% |
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        free_pages_and_swap_cache(tlb->pages, tlb->nr);
        tlb->nr = 0;
        if (tlb->pages == tlb->local)
                __tlb_alloc_page(tlb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 17 | 38.64% | 1 | 33.33% |
Peter Zijlstra | 14 | 31.82% | 1 | 33.33% |
Linus Torvalds | 13 | 29.55% | 1 | 33.33% |
Total | 44 | 100.00% | 3 | 100.00% |
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 20 | 90.91% | 1 | 33.33% |
Russell King | 1 | 4.55% | 1 | 33.33% |
Peter Zijlstra | 1 | 4.55% | 1 | 33.33% |
Total | 22 | 100.00% | 3 | 100.00% |
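One point worth making explicit about the ordering above:

/*
 * Note: tlb_flush_mmu_tlbonly() runs before tlb_flush_mmu_free(), so the
 * stale TLB entries are gone before free_pages_and_swap_cache() returns
 * the gathered pages to the allocator for reuse.
 */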
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        tlb->fullmm = !(start | (end+1));
        tlb->start = start;
        tlb->end = end;
        tlb->vma = NULL;
        tlb->max = ARRAY_SIZE(tlb->local);
        tlb->pages = tlb->local;
        tlb->nr = 0;
        __tlb_alloc_page(tlb);

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 37 | 35.24% | 3 | 50.00% |
Peter Zijlstra | 29 | 27.62% | 1 | 16.67% |
Linus Torvalds | 28 | 26.67% | 1 | 16.67% |
Steve Capper | 11 | 10.48% | 1 | 16.67% |
Total | 105 | 100.00% | 6 | 100.00% |
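The fullmm assignment above folds "flush the whole mm" into a single test on the range the caller passes; a worked check with call values shown for illustration:

/*
 * Full-mm teardown (exit_mmap() style):  start == 0, end == ~0UL,
 *   so end + 1 == 0 and !(0 | 0) == 1  ->  fullmm is set.
 * Range unmap:  e.g. start == 0x8000, end == 0x10000,
 *   so end + 1 != 0 and the expression evaluates to 0.
 */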
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        if (tlb->pages != tlb->local)
                free_pages((unsigned long)tlb->pages, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 26 | 50.00% | 2 | 33.33% |
Peter Zijlstra | 20 | 38.46% | 1 | 16.67% |
Hugh Dickins | 4 | 7.69% | 2 | 33.33% |
Andrew Morton | 2 | 3.85% | 1 | 16.67% |
Total | 52 | 100.00% | 6 | 100.00% |
/*
* Memorize the range for the TLB flush.
*/
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
        tlb_add_flush(tlb, addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aaro Koskinen | 16 | 59.26% | 1 | 25.00% |
Russell King | 11 | 40.74% | 3 | 75.00% |
Total | 27 | 100.00% | 4 | 100.00% |
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
        tlb_remove_tlb_entry(tlb, ptep, address)
/*
 * For TLB VMA handling, these can be optimised away when we're doing a
 * full MM flush. When we're doing a munmap, the vmas are adjusted to
 * cover only the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm) {
                flush_cache_range(vma, vma->vm_start, vma->vm_end);
                tlb->vma = vma;
                tlb->range_start = TASK_SIZE;
                tlb->range_end = 0;
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 43 | 75.44% | 3 | 75.00% |
Aaro Koskinen | 14 | 24.56% | 1 | 25.00% |
Total | 57 | 100.00% | 4 | 100.00% |
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
        if (!tlb->fullmm)
                tlb_flush(tlb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 28 | 96.55% | 3 | 75.00% |
Aaro Koskinen | 1 | 3.45% | 1 | 25.00% |
Total | 29 | 100.00% | 4 | 100.00% |
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->pages[tlb->nr++] = page;
        VM_WARN_ON(tlb->nr > tlb->max);
        if (tlb->nr == tlb->max)
                return true;
        return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 38 | 67.86% | 2 | 50.00% |
Russell King | 14 | 25.00% | 1 | 25.00% |
Peter Zijlstra | 4 | 7.14% | 1 | 25.00% |
Total | 56 | 100.00% | 4 | 100.00% |
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (__tlb_remove_page(tlb, page))
                tlb_flush_mmu(tlb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 24 | 77.42% | 1 | 25.00% |
Russell King | 6 | 19.35% | 2 | 50.00% |
Aneesh Kumar K.V | 1 | 3.23% | 1 | 25.00% |
Total | 31 | 100.00% | 4 | 100.00% |
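A hypothetical caller-side sketch of the batching behaviour (example_release_pages() is an invented name, not part of this header):

static void example_release_pages(struct mmu_gather *tlb,
                                  struct page **pages, unsigned int nr)
{
        unsigned int i;

        /*
         * Each page is queued in tlb->pages[]; whenever __tlb_remove_page()
         * reports the batch as full, tlb_remove_page() drains it through
         * tlb_flush_mmu() before continuing.
         */
        for (i = 0; i < nr; i++)
                tlb_remove_page(tlb, pages[i]);
}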
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        return __tlb_remove_page(tlb, page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        return tlb_remove_page(tlb, page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                  unsigned long addr)
{
        pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
        tlb_add_flush(tlb, addr);
#else
        /*
         * With the classic ARM MMU, a pte page has two corresponding pmd
         * entries, each covering 1MB.
         */
        addr &= PMD_MASK;
        tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
        tlb_add_flush(tlb, addr + SZ_1M);
#endif

        tlb_remove_entry(tlb, pte);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 37 | 52.86% | 2 | 40.00% |
Catalin Marinas | 18 | 25.71% | 1 | 20.00% |
Will Deacon | 14 | 20.00% | 1 | 20.00% |
Steve Capper | 1 | 1.43% | 1 | 20.00% |
Total | 70 | 100.00% | 5 | 100.00% |
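A worked example of the classic-MMU branch (the address and the 2 MiB PMD_SIZE of the 2-level layout are assumed for illustration):

/*
 * With addr == 0x12345678, addr &= PMD_MASK gives 0x12200000. The two
 * tlb_add_flush() calls then record 0x122ff000 and 0x12300000, one page
 * in each 1MB half, so the eventual flush_tlb_range() spans both 1MB
 * regions that were mapped through the freed pte page.
 */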
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
                                  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
        tlb_add_flush(tlb, addr);
        tlb_remove_entry(tlb, virt_to_page(pmdp));
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Catalin Marinas | 41 | 97.62% | 1 | 50.00% |
Steve Capper | 1 | 2.38% | 1 | 50.00% |
Total | 42 | 100.00% | 2 | 100.00% |
static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
        tlb_add_flush(tlb, addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Catalin Marinas | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
                                                     unsigned int page_size)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_MMU */
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Russell King | 455 | 40.59% | 10 | 33.33% |
Peter Zijlstra | 161 | 14.36% | 1 | 3.33% |
Steve Capper | 133 | 11.86% | 1 | 3.33% |
Aneesh Kumar K.V | 128 | 11.42% | 5 | 16.67% |
Catalin Marinas | 87 | 7.76% | 3 | 10.00% |
Linus Torvalds | 69 | 6.16% | 2 | 6.67% |
Aaro Koskinen | 40 | 3.57% | 1 | 3.33% |
Hyok S. Choi | 15 | 1.34% | 1 | 3.33% |
Will Deacon | 14 | 1.25% | 1 | 3.33% |
Andrew Morton | 11 | 0.98% | 2 | 6.67% |
Benjamin Herrenschmidt | 4 | 0.36% | 1 | 3.33% |
Hugh Dickins | 4 | 0.36% | 2 | 6.67% |
Total | 1121 | 100.00% | 30 | 100.00% |