cregit-Linux how code gets into the kernel

Release 4.14 arch/sh/include/asm/tlb.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_TLB_H

#define __ASM_SH_TLB_H

#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/pagemap.h>

#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */

struct mmu_gather {
	struct mm_struct	*mm;		/* address space being torn down */
	unsigned int		fullmm;		/* non-zero: whole mm is going away */
	unsigned long		start, end;	/* VA range gathered for flushing */
};


static inline void init_tlb_gather(struct mmu_gather *tlb) { tlb->start = TASK_SIZE; tlb->end = 0; if (tlb->fullmm) { tlb->start = 0; tlb->end = TASK_SIZE; } }

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt44100.00%1100.00%
Total44100.00%1100.00%


static inline void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) { tlb->mm = mm; tlb->start = start; tlb->end = end; tlb->fullmm = !(start | (end+1)); init_tlb_gather(tlb); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds2844.44%125.00%
Paul Mundt2844.44%125.00%
Peter Zijlstra69.52%125.00%
MinChan Kim11.59%125.00%
Total63100.00%4100.00%


static inline void arch_tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end, bool force) { if (tlb->fullmm || force) flush_tlb_mm(tlb->mm); /* keep the page table cache within bounds */ check_pgt_cache(); }

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt3685.71%133.33%
MinChan Kim614.29%266.67%
Total42100.00%3100.00%


static inline void tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address) { if (tlb->start > address) tlb->start = address; if (tlb->end < address + PAGE_SIZE) tlb->end = address + PAGE_SIZE; }

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt52100.00%1100.00%
Total52100.00%1100.00%

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \ tlb_remove_tlb_entry(tlb, ptep, address) /* * In the case of tlb vma handling, we can optimise these away in the * case where we're doing a full MM flush. When we're doing a munmap, * the vmas are adjusted to only cover the region to be torn down. */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (!tlb->fullmm) flush_cache_range(vma, vma->vm_start, vma->vm_end); }

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt37100.00%1100.00%
Total37100.00%1100.00%


static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { if (!tlb->fullmm && tlb->end) { flush_tlb_range(vma, tlb->start, tlb->end); init_tlb_gather(tlb); } }

Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt48100.00%1100.00%
Total48100.00%1100.00%


static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds11100.00%1100.00%
Total11100.00%1100.00%


static inline void tlb_flush_mmu_free(struct mmu_gather *tlb) { }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds11100.00%1100.00%
Total11100.00%1100.00%


static inline void tlb_flush_mmu(struct mmu_gather *tlb) { }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra11100.00%1100.00%
Total11100.00%1100.00%


static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) { free_page_and_swap_cache(page); return false; /* avoid calling tlb_flush_mmu */ }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra2596.15%150.00%
Aneesh Kumar K.V13.85%150.00%
Total26100.00%2100.00%


static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) { __tlb_remove_page(tlb, page); }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra1875.00%150.00%
Paul Mundt625.00%150.00%
Total24100.00%2100.00%


static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { return __tlb_remove_page(tlb, page); }

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V28100.00%1100.00%
Total28100.00%1100.00%


static inline void tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size) { return tlb_remove_page(tlb, page); }

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V28100.00%1100.00%
Total28100.00%1100.00%

/* Self-define advertises to asm-generic/tlb.h that SH provides this hook. */
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
/* No action is needed on SH when the page size being unmapped changes. */
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb, unsigned int page_size) { }

Contributors

PersonTokensPropCommitsCommitProp
Aneesh Kumar K.V15100.00%1100.00%
Total15100.00%1100.00%

#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp) #define tlb_migrate_finish(mm) do { } while (0) #if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64) extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t); extern void tlb_unwire_entry(void); #else
static inline void tlb_wire_entry(struct vm_area_struct *vma , unsigned long addr, pte_t pte) { BUG(); }

Contributors

PersonTokensPropCommitsCommitProp
Matt Fleming22100.00%1100.00%
Total22100.00%1100.00%


static inline void tlb_unwire_entry(void) { BUG(); }

Contributors

PersonTokensPropCommitsCommitProp
Matt Fleming12100.00%1100.00%
Total12100.00%1100.00%

#endif #else /* CONFIG_MMU */ #define tlb_start_vma(tlb, vma) do { } while (0) #define tlb_end_vma(tlb, vma) do { } while (0) #define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0) #define tlb_flush(tlb) do { } while (0) #include <asm-generic/tlb.h> #endif /* CONFIG_MMU */ #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_TLB_H */

Overall Contributors

PersonTokensPropCommitsCommitProp
Paul Mundt39358.05%420.00%
Aneesh Kumar K.V9013.29%420.00%
Matt Fleming639.31%210.00%
Peter Zijlstra608.86%15.00%
Linus Torvalds537.83%315.00%
MinChan Kim71.03%210.00%
Benjamin Herrenschmidt60.89%15.00%
Nobuhiro Iwamatsu30.44%15.00%
Greg Kroah-Hartman10.15%15.00%
David Howells10.15%15.00%
Total677100.00%20100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.