cregit-Linux: how code gets into the kernel

Release 4.14 arch/ia64/include/asm/tlb.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *      (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *          (this is a no-op on ia64).
 *      (2) Clear the relevant portions of the page-table
 *      (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *      (4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *      tlb <- tlb_gather_mmu(mm, start, end);          // start unmap for address space MM
 *      {
 *        for each vma that needs a shootdown do {
 *          tlb_start_vma(tlb, vma);
 *            for each page-table-entry PTE that needs to be removed do {
 *              tlb_remove_tlb_entry(tlb, pte, address);
 *              if (pte refers to a normal page) {
 *                tlb_remove_page(tlb, page);
 *              }
 *            }
 *          tlb_end_vma(tlb, vma);
 *        }
 *      }
 *      tlb_finish_mmu(tlb, start, end);        // finish unmap for address space MM
 */
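The template above, written out as a minimal C sketch. This is not part of the header: the vma/pte iteration is elided into comments, and the arch_ entry points and struct mmu_gather are the ones defined later in this file.

static void sketch_unmap_range(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	arch_tlb_gather_mmu(&tlb, mm, start, end);	/* start unmap for MM */
	/* for each vma that needs a shootdown:				    */
	/*	tlb_start_vma(&tlb, vma);		  (no-op on ia64)   */
	/*	for each PTE to be removed:				    */
	/*		tlb_remove_tlb_entry(&tlb, ptep, addr);		    */
	/*		if the PTE maps a normal page:			    */
	/*			tlb_remove_page(&tlb, page);  (deferred)    */
	/*	tlb_end_vma(&tlb, vma);			  (no-op on ia64)   */
	arch_tlb_finish_mmu(&tlb, start, end, false);	/* flush, then free */
}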
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */

#define	IA64_GATHER_BUNDLE	8


struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};


struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record for a TR entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
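A hedged usage sketch for the translation-register API above (not part of the header): ia64_itr_entry() pins a mapping into a TR slot so it can never be evicted from the TLB, and ia64_ptr_entry() purges it again. The mask encoding (0x1 = instruction TR, 0x2 = data TR, 0x3 = both) and the negative-on-failure return convention are assumptions here.

static int sketch_pin_data_mapping(u64 va, u64 pte, u64 log_size)
{
	int slot;

	/* try to pin the mapping into a free data translation register */
	slot = ia64_itr_entry(0x2, va, pte, log_size);
	if (slot < 0)
		return slot;		/* assumed: no free TR slot */

	/* ... the mapping now survives any TLB flush ... */

	ia64_ptr_entry(0x2, slot);	/* purge it when no longer needed */
	return 0;
}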

/*
 * region register macros
 */

#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)

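A worked example for the region-register accessors above (illustration only, arbitrary values): a region register packs the VHPT-walker enable bit (bit 0), the log2 preferred page size (bits 2-7), and the region id (bits 8-31). The sketch_make_rr() helper is hypothetical; the header defines no RR_RID() packing macro, so the shift is written out.

static inline u64 sketch_make_rr(u64 rid, u64 log_page_size, u64 ve)
{
	/* rid lives at bits 8..31 (RR_RID_MASK) */
	return ((rid << 8) & RR_RID_MASK) | RR_PS(log_page_size) | RR_VE(ve);
}

/*
 * sketch_make_rr(0x1234, 16, 1) == 0x123441, and then:
 *	RR_TO_RID(0x123441) == 0x1234	(region id)
 *	RR_TO_PS(0x123441)  == 16	(preferred page size 1 << 16 = 64KB)
 *	RR_TO_VE(0x123441)  == 1	(VHPT walker enabled)
 */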

static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang       118   96.72%        5      71.43%
Linus Torvalds               2    1.64%        1      14.29%
Andrew Morton                2    1.64%        1      14.29%
Total                      122  100.00%        7     100.00%
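A note on the cross-region test in ia64_tlb_flush_mmu_tlbonly(): the ia64 virtual address space is split into eight 2^61-byte regions selected by the top three address bits, which is what REGION_NUMBER() extracts. Even a short range can therefore hit the flush_tlb_all() path (illustrative values below):

/*
 *	start = 0x1fffffffffff8000  ->  REGION_NUMBER(start)   == 0
 *	end   = 0x2000000000008000  ->  REGION_NUMBER(end - 1) == 1
 *
 * The range is only 64KB long, but it straddles a region boundary,
 * so the code above falls back to flushing the entire TLB.
 */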


static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang        44   69.84%        1      50.00%
Linus Torvalds              19   30.16%        1      50.00%
Total                       63  100.00%        2     100.00%

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Linus Torvalds              42  100.00%        1     100.00%
Total                       42  100.00%        1     100.00%


static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Peter Zijlstra              48   90.57%        1      33.33%
David Mosberger-Tang         3    5.66%        1      33.33%
Andrew Morton                2    3.77%        1      33.33%
Total                       53  100.00%        3     100.00%
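Taken together with the comment above IA64_GATHER_BUNDLE, this is what gives the gather its graceful degradation. A hypothetical illustration (sketch_gather_capacity() is not part of the header, and it references arch_tlb_gather_mmu(), defined below):

static inline void sketch_gather_capacity(struct mmu_gather *tlb)
{
	/* right after arch_tlb_gather_mmu() (defined below): */
	WARN_ON(tlb->pages != tlb->local);		/* on-stack array */
	WARN_ON(tlb->max != IA64_GATHER_BUNDLE);	/* 8 slots */

	/* __tlb_remove_page() calls this on the first queued page; on
	 * success the batch grows to a whole page of pointers, on failure
	 * (GFP_NOWAIT under memory pressure) the 8-slot array is kept and
	 * the TLB is simply flushed more often */
	__tlb_alloc_page(tlb);
	WARN_ON(tlb->pages != tlb->local &&
		tlb->max != PAGE_SIZE / sizeof(void *));
}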


static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang        33   36.67%        4      50.00%
Linus Torvalds              28   31.11%        1      12.50%
Peter Zijlstra              26   28.89%        1      12.50%
Martin Hicks                 2    2.22%        1      12.50%
MinChan Kim                  1    1.11%        1      12.50%
Total                       90  100.00%        8     100.00%

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang        31   44.29%        1      16.67%
Peter Zijlstra              20   28.57%        1      16.67%
MinChan Kim                 14   20.00%        2      33.33%
Hugh Dickins                 3    4.29%        1      16.67%
Andrew Morton                2    2.86%        1      16.67%
Total                       70  100.00%        6     100.00%

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang        32   39.02%        2      33.33%
Aneesh Kumar K.V            26   31.71%        2      33.33%
Peter Zijlstra              22   26.83%        1      16.67%
Andrew Morton                2    2.44%        1      16.67%
Total                       82  100.00%        6     100.00%


static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Linus Torvalds              25  100.00%        1     100.00%
Total                       25  100.00%        1     100.00%


static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Linus Torvalds              17  100.00%        1     100.00%
Total                       17  100.00%        1     100.00%


static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang        14   56.00%        1      50.00%
Peter Zijlstra              11   44.00%        1      50.00%
Total                       25  100.00%        2     100.00%


static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Peter Zijlstra              30   96.77%        1      50.00%
Aneesh Kumar K.V             1    3.23%        1      50.00%
Total                       31  100.00%        2     100.00%


static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Aneesh Kumar K.V            28  100.00%        1     100.00%
Total                       28  100.00%        1     100.00%


static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Aneesh Kumar K.V            28  100.00%        1     100.00%
Total                       28  100.00%        1     100.00%

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang        41   95.35%        1      50.00%
Andrew Morton                2    4.65%        1      50.00%
Total                       43  100.00%        2     100.00%
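How the flush window accumulates (illustrative addresses): __tlb_remove_tlb_entry() records the first address it sees and keeps extending end_addr past the most recent one, so a single ranged flush later covers every entry removed:

/*
 *	__tlb_remove_tlb_entry(tlb, pte0, 0x2000);  start_addr = 0x2000
 *	__tlb_remove_tlb_entry(tlb, pte1, 0x4000);  end_addr   = 0x4000 + PAGE_SIZE
 *	__tlb_remove_tlb_entry(tlb, pte2, 0x6000);  end_addr   = 0x6000 + PAGE_SIZE
 *
 * tlb_flush_mmu() then flushes [0x2000, 0x6000 + PAGE_SIZE) in one go.
 */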

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)	do { } while (0)
#define tlb_end_vma(tlb, vma)	do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

Contributors

Person                  Tokens     Prop  Commits  CommitProp
Aneesh Kumar K.V            15  100.00%        1     100.00%
Total                       15  100.00%        1     100.00%

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */

Overall Contributors

Person                  Tokens     Prop  Commits  CommitProp
David Mosberger-Tang       425   41.10%        9      30.00%
Peter Zijlstra             173   16.73%        1       3.33%
Linus Torvalds             141   13.64%        2       6.67%
Aneesh Kumar K.V           116   11.22%        5      16.67%
Xiantao Zhang              109   10.54%        1       3.33%
MinChan Kim                 15    1.45%        2       6.67%
Andrew Morton               15    1.45%        2       6.67%
Jack Steiner                10    0.97%        1       3.33%
Nicholas Piggin              9    0.87%        1       3.33%
Benjamin Herrenschmidt       9    0.87%        1       3.33%
Hugh Dickins                 4    0.39%        1       3.33%
Jesse Barnes                 3    0.29%        1       3.33%
Tony Luck                    2    0.19%        1       3.33%
Martin Hicks                 2    0.19%        1       3.33%
Greg Kroah-Hartman           1    0.10%        1       3.33%
Total                     1034  100.00%       30     100.00%