Release 4.14 arch/ia64/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
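To make the ordering in that template concrete, here is a minimal, self-contained userspace model (not kernel code): the mm/vma structures, the "_model" hook names, and the stub bodies are hypothetical stand-ins that only print the order in which the real hooks would fire.

/* Hypothetical userspace model of the hook-call template above. */
#include <stdio.h>

struct mm  { const char *name; };
struct vma { unsigned long start, end; };

static void tlb_gather_mmu_model(struct mm *mm)   { printf("tlb_gather_mmu(%s)\n", mm->name); }
static void tlb_start_vma_model(struct vma *v)    { printf(" tlb_start_vma [%lx-%lx)\n", v->start, v->end); }
static void tlb_remove_tlb_entry_model(unsigned long addr) { printf("  tlb_remove_tlb_entry @%lx\n", addr); }
static void tlb_remove_page_model(unsigned long addr)      { printf("  tlb_remove_page     @%lx\n", addr); }
static void tlb_end_vma_model(struct vma *v)      { (void)v; printf(" tlb_end_vma\n"); }
static void tlb_finish_mmu_model(void)            { printf("tlb_finish_mmu\n"); }

int main(void)
{
	struct mm mm = { "demo-mm" };
	struct vma v = { 0x1000, 0x4000 };
	unsigned long addr;

	tlb_gather_mmu_model(&mm);			/* start unmap for address space MM */
	tlb_start_vma_model(&v);
	for (addr = v.start; addr < v.end; addr += 0x1000) {
		tlb_remove_tlb_entry_model(addr);	/* steps (2)+(3): clear PTE, queue TLB flush */
		tlb_remove_page_model(addr);		/* step (4): page is only freed after the flush */
	}
	tlb_end_vma_model(&v);
	tlb_finish_mmu_model();				/* flush the TLB, then release the gathered pages */
	return 0;
}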
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
*/
#define IA64_GATHER_BUNDLE 8
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};
struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record for a translation register (tr) entry */
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
 * region register macros
 */
#define RR_TO_VE(val) (((val) >> 0) & 0x0000000000000001)
#define RR_VE(val) (((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK 0x0000000000000001L
#define RR_VE_SHIFT 0
#define RR_TO_PS(val) (((val) >> 2) & 0x000000000000003f)
#define RR_PS(val) (((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK 0x00000000000000fcL
#define RR_PS_SHIFT 2
#define RR_RID_MASK 0x00000000ffffff00L
#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
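To make the bit layout of these region-register fields concrete, the following userspace snippet decodes a sample value with the same shift/mask definitions. The sample value is invented purely for illustration, and the interpretation of the ps field as the log2 page size is an assumption stated in the comments.

/* Userspace illustration of the region-register field macros (sample value is hypothetical). */
#include <stdio.h>

#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)

int main(void)
{
	/* rid = 0x1234, ps = 16 (assumed log2 page size, i.e. 64KB), ve = 1 */
	unsigned long rr = (0x1234UL << 8) | (16UL << 2) | 1UL;

	printf("ve  = %lu\n", RR_TO_VE(rr));	/* VHPT walker enable bit */
	printf("ps  = %lu (page size = %lu bytes)\n", RR_TO_PS(rr), 1UL << RR_TO_PS(rr));
	printf("rid = 0x%lx\n", RR_TO_RID(rr));	/* region ID */
	return 0;
}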
static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 118 | 96.72% | 5 | 71.43% |
Linus Torvalds | 2 | 1.64% | 1 | 14.29% |
Andrew Morton | 2 | 1.64% | 1 | 14.29% |
Total | 122 | 100.00% | 7 | 100.00% |
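The flush-strategy decision above (full-mm flush, whole-TLB flush for very large or region-crossing ranges, otherwise a ranged flush) can be modelled in isolation. In this sketch REGION_NUMBER is assumed to be the top three bits of the 64-bit address, which is how ia64 carves its address space into regions; the enum and helper names are invented for the illustration.

/* Standalone model of the flush-strategy choice in ia64_tlb_flush_mmu_tlbonly(). */
#include <stdio.h>

#define MODEL_REGION_NUMBER(addr)	(((unsigned long)(addr)) >> 61)	/* assumed: top 3 bits */

enum flush_kind { FLUSH_MM, FLUSH_ALL, FLUSH_RANGE };

static enum flush_kind pick_flush(int fullmm, unsigned long start, unsigned long end)
{
	if (fullmm)
		return FLUSH_MM;		/* exit()/execve(): tear down the whole address space */
	if (end - start >= 1024*1024*1024*1024UL ||
	    MODEL_REGION_NUMBER(start) != MODEL_REGION_NUMBER(end - 1))
		return FLUSH_ALL;		/* >1TB or crosses a region boundary: flush everything */
	return FLUSH_RANGE;			/* common case: ranged flush (plus the VHPT range) */
}

int main(void)
{
	printf("%d\n", pick_flush(1, 0, 0));			/* FLUSH_MM */
	printf("%d\n", pick_flush(0, 0x1000, 0x2000));		/* FLUSH_RANGE */
	printf("%d\n", pick_flush(0, 0, 2UL << 40));		/* FLUSH_ALL: 2TB range */
	return 0;
}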
static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 44 | 69.84% | 1 | 50.00% |
Linus Torvalds | 19 | 30.16% | 1 | 50.00% |
Total | 63 | 100.00% | 2 | 100.00% |
/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;

	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 48 | 90.57% | 1 | 33.33% |
David Mosberger-Tang | 3 | 5.66% | 1 | 33.33% |
Andrew Morton | 2 | 3.77% | 1 | 33.33% |
Total | 53 | 100.00% | 3 | 100.00% |
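The gather buffer starts out as the small on-stack local[] bundle of IA64_GATHER_BUNDLE pointers and is upgraded to a whole page of pointers only if __tlb_alloc_page() can get one; when the GFP_NOWAIT allocation fails, batching simply continues with the eight-entry bundle. Below is a hedged userspace model of that fallback; the struct name, the simulate_failure flag, and the 16KB page size are assumptions made for the demo.

/* Model of the local-bundle vs. allocated-page fallback (field names mirror the real struct). */
#include <stdio.h>
#include <stdlib.h>

#define IA64_GATHER_BUNDLE	8
#define MODEL_PAGE_SIZE		16384UL		/* assumed page size, purely for the model */

struct gather_model {
	unsigned int max;
	void **pages;
	void *local[IA64_GATHER_BUNDLE];
};

static void model_alloc_page(struct gather_model *tlb, int simulate_failure)
{
	void *addr = simulate_failure ? NULL : malloc(MODEL_PAGE_SIZE);

	if (addr) {				/* upgrade to a page-sized array of page pointers */
		tlb->pages = addr;
		tlb->max = MODEL_PAGE_SIZE / sizeof(void *);
	}					/* on failure: keep using the small on-stack bundle */
}

int main(void)
{
	struct gather_model tlb = { .max = IA64_GATHER_BUNDLE };

	tlb.pages = tlb.local;
	model_alloc_page(&tlb, 1);		/* allocation "fails": still 8 slots */
	printf("after failed alloc: max = %u\n", tlb.max);
	model_alloc_page(&tlb, 0);		/* allocation succeeds: a page full of pointers */
	printf("after good alloc:   max = %u\n", tlb.max);
	if (tlb.pages != tlb.local)
		free(tlb.pages);
	return 0;
}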
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 33 | 36.67% | 4 | 50.00% |
Linus Torvalds | 28 | 31.11% | 1 | 12.50% |
Peter Zijlstra | 26 | 28.89% | 1 | 12.50% |
Martin Hicks | 2 | 2.22% | 1 | 12.50% |
MinChan Kim | 1 | 1.11% | 1 | 12.50% |
Total | 90 | 100.00% | 8 | 100.00% |
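One subtle detail above is the full-mm test: !(start | (end+1)) is non-zero only when start == 0 and end == ~0UL, i.e. when the caller asked to tear down the entire address space. A tiny hedged check of that identity (the helper name is invented for the demo):

/* Demonstrates that !(start | (end + 1)) detects the (0, ~0UL) "whole address space" range. */
#include <assert.h>
#include <stdio.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));	/* end + 1 wraps to 0 only for end == ~0UL */
}

int main(void)
{
	assert(is_fullmm(0, ~0UL) == 1);	/* exit()/execve(): full flush */
	assert(is_fullmm(0, 0x10000) == 0);	/* ordinary partial unmap */
	assert(is_fullmm(0x4000, ~0UL) == 0);	/* non-zero start: not a full flush */
	printf("fullmm detection behaves as expected\n");
	return 0;
}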
/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 31 | 44.29% | 1 | 16.67% |
Peter Zijlstra | 20 | 28.57% | 1 | 16.67% |
MinChan Kim | 14 | 20.00% | 2 | 33.33% |
Hugh Dickins | 3 | 4.29% | 1 | 16.67% |
Andrew Morton | 2 | 2.86% | 1 | 16.67% |
Total | 70 | 100.00% | 6 | 100.00% |
/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 32 | 39.02% | 2 | 33.33% |
Aneesh Kumar K.V | 26 | 31.71% | 2 | 33.33% |
Peter Zijlstra | 22 | 26.83% | 1 | 16.67% |
Andrew Morton | 2 | 2.44% | 1 | 16.67% |
Total | 82 | 100.00% | 6 | 100.00% |
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 14 | 56.00% | 1 | 50.00% |
Peter Zijlstra | 11 | 44.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 30 | 96.77% | 1 | 50.00% |
Aneesh Kumar K.V | 1 | 3.23% | 1 | 50.00% |
Total | 31 | 100.00% | 2 | 100.00% |
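Putting __tlb_remove_page() and tlb_remove_page() together: pages are queued until the gather array is full, at which point the true return value forces a flush-and-free before more pages can be queued. The following userspace sketch models that trigger with a deliberately tiny batch; the struct, counters, and names are invented for the illustration.

/* Model of the "flush when the gather array fills up" behaviour of tlb_remove_page(). */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_MAX 3				/* tiny batch so the flush trigger is easy to see */

struct batch_model {
	unsigned int nr;
	unsigned int flushes;
	const void *pages[MODEL_MAX];
};

static bool model_remove_page(struct batch_model *tlb, const void *page)
{
	tlb->pages[tlb->nr++] = page;
	return tlb->nr == MODEL_MAX;		/* true: caller must flush before queueing more */
}

static void model_flush_mmu(struct batch_model *tlb)
{
	tlb->flushes++;				/* real code: flush TLBs, then free the queued pages */
	tlb->nr = 0;
}

int main(void)
{
	struct batch_model tlb = { 0 };
	char fake_pages[7];
	int i;

	for (i = 0; i < 7; i++) {
		if (model_remove_page(&tlb, &fake_pages[i]))
			model_flush_mmu(&tlb);
	}

	printf("flushes = %u, still queued = %u\n", tlb.flushes, tlb.nr);	/* 2 flushes, 1 queued */
	return 0;
}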
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 41 | 95.35% | 1 | 50.00% |
Andrew Morton | 2 | 4.65% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
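The start_addr/end_addr pair simply tracks the span of addresses whose PTEs have been cleared since the last flush: start_addr is reset to ~0UL after each flush, latched to the first removed address, and end_addr is pushed one page past the most recent removal. A small model of that bookkeeping follows; the struct name and the 16KB page size are assumptions made for the demo.

/* Model of the address-range bookkeeping done by __tlb_remove_tlb_entry(). */
#include <stdio.h>

#define MODEL_PAGE_SIZE 16384UL		/* assumed page size for the demo */

struct range_model {
	unsigned long start_addr;
	unsigned long end_addr;
};

static void model_remove_tlb_entry(struct range_model *tlb, unsigned long address)
{
	if (tlb->start_addr == ~0UL)		/* first entry removed since the last flush */
		tlb->start_addr = address;
	tlb->end_addr = address + MODEL_PAGE_SIZE;
}

int main(void)
{
	struct range_model tlb = { .start_addr = ~0UL };

	model_remove_tlb_entry(&tlb, 0x200000000UL);
	model_remove_tlb_entry(&tlb, 0x200000000UL + MODEL_PAGE_SIZE);
	model_remove_tlb_entry(&tlb, 0x200000000UL + 5 * MODEL_PAGE_SIZE);

	/* The eventual flush covers [start_addr, end_addr): first page .. one past the last. */
	printf("flush range: [0x%lx, 0x%lx)\n", tlb.start_addr, tlb.end_addr);
	return 0;
}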
#define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Aneesh Kumar K.V | 15 | 100.00% | 1 | 100.00% |
Total | 15 | 100.00% | 1 | 100.00% |
#define pte_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pte_free_tlb(tlb, ptep, address);	\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pmd_free_tlb(tlb, ptep, address);	\
} while (0)

#define pud_free_tlb(tlb, pudp, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pud_free_tlb(tlb, pudp, address);	\
} while (0)
#endif /* _ASM_IA64_TLB_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Mosberger-Tang | 425 | 41.10% | 9 | 30.00% |
Peter Zijlstra | 173 | 16.73% | 1 | 3.33% |
Linus Torvalds | 141 | 13.64% | 2 | 6.67% |
Aneesh Kumar K.V | 116 | 11.22% | 5 | 16.67% |
Xiantao Zhang | 109 | 10.54% | 1 | 3.33% |
MinChan Kim | 15 | 1.45% | 2 | 6.67% |
Andrew Morton | 15 | 1.45% | 2 | 6.67% |
Jack Steiner | 10 | 0.97% | 1 | 3.33% |
Nicholas Piggin | 9 | 0.87% | 1 | 3.33% |
Benjamin Herrenschmidt | 9 | 0.87% | 1 | 3.33% |
Hugh Dickins | 4 | 0.39% | 1 | 3.33% |
Jesse Barnes | 3 | 0.29% | 1 | 3.33% |
Tony Luck | 2 | 0.19% | 1 | 3.33% |
Martin Hicks | 2 | 0.19% | 1 | 3.33% |
Greg Kroah-Hartman | 1 | 0.10% | 1 | 3.33% |
Total | 1034 | 100.00% | 30 | 100.00% |