arch/um/include/asm/tlb.h (Linux release 4.14)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __UM_TLB_H
#define __UM_TLB_H
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/percpu.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
/* struct mmu_gather is an opaque type used by the mm code for passing
 * around any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct *mm;
        unsigned int need_flush; /* Really unmapped some ptes? */
        unsigned long start;
        unsigned long end;
        unsigned int fullmm; /* non-zero means full mm flush */
};

static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
                                          unsigned long address)
{
        if (tlb->start > address)
                tlb->start = address;
        if (tlb->end < address + PAGE_SIZE)
                tlb->end = address + PAGE_SIZE;
}

static inline void init_tlb_gather(struct mmu_gather *tlb)
{
        tlb->need_flush = 0;

        tlb->start = TASK_SIZE;
        tlb->end = 0;

        if (tlb->fullmm) {
                tlb->start = 0;
                tlb->end = TASK_SIZE;
        }
}

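The inverted empty-range convention above (start primed to TASK_SIZE, end to 0) is what lets __tlb_remove_tlb_entry() grow the range with plain min/max comparisons: the first recorded page always wins both tests. A minimal userspace model of that arithmetic, with made-up PAGE_SIZE and TASK_SIZE values (illustrative only, not kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define TASK_SIZE (1UL << 47)

struct gather { unsigned long start, end; };

static void record(struct gather *g, unsigned long address)
{
        if (g->start > address)
                g->start = address;
        if (g->end < address + PAGE_SIZE)
                g->end = address + PAGE_SIZE;
}

int main(void)
{
        struct gather g = { .start = TASK_SIZE, .end = 0 }; /* "empty" */

        record(&g, 0x7000);
        record(&g, 0x3000);
        printf("flush range [%#lx, %#lx)\n", g.start, g.end); /* [0x3000, 0x8000) */
        return 0;
}
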
static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                    unsigned long start, unsigned long end)
{
        tlb->mm = mm;
        tlb->start = start;
        tlb->end = end;
        /* start == 0 && end == ~0UL is the sentinel for a full-mm teardown */
        tlb->fullmm = !(start | (end + 1));

        init_tlb_gather(tlb);
}

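The bit trick in the fullmm test is worth spelling out: !(start | (end + 1)) is non-zero exactly when start == 0 and end == ~0UL, the (0, -1) pair the generic mm code passes when the whole address space is being torn down. A quick standalone check (illustrative only, not kernel code):

#include <stdio.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
        return !(start | (end + 1));
}

int main(void)
{
        printf("%d\n", is_fullmm(0, ~0UL));      /* 1: full-mm sentinel */
        printf("%d\n", is_fullmm(0, 0x7fffUL));  /* 0: bounded range */
        printf("%d\n", is_fullmm(0x1000, ~0UL)); /* 0: non-zero start */
        return 0;
}
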
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end);

static inline void
tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
        flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
}

static inline void
tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        init_tlb_gather(tlb);
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
        if (!tlb->need_flush)
                return;

        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

/* arch_tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
                    unsigned long start, unsigned long end, bool force)
{
        if (force) {
                tlb->start = start;
                tlb->end = end;
                tlb->need_flush = 1;
        }
        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();
}

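Putting the pieces together: need_flush gates the whole operation, so a shootdown that never unmapped a present pte costs no TLB invalidate, while the force path in arch_tlb_finish_mmu() can override that. A compact userspace model of the control flow (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct gather { bool need_flush; };

static void finish(struct gather *g, bool force)
{
        if (force)
                g->need_flush = true;
        if (!g->need_flush) {
                puts("nothing unmapped: flush skipped");
                return;
        }
        puts("ranged TLB flush issued");
        g->need_flush = false; /* re-primed, as tlb_flush_mmu_free() does */
}

int main(void)
{
        struct gather g = { .need_flush = false };

        finish(&g, false);   /* skipped */
        g.need_flush = true; /* as tlb_remove_tlb_entry() would set it */
        finish(&g, false);   /* issued */
        finish(&g, true);    /* forced: issued even with nothing recorded */
        return 0;
}
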
/* tlb_remove_page
 *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs.
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;
        free_page_and_swap_cache(page);
        return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        __tlb_remove_page(tlb, page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
                                          struct page *page, int page_size)
{
        /* page_size is unused here: every size funnels into the same path */
        return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
                                        struct page *page, int page_size)
{
        return tlb_remove_page(tlb, page);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb->need_flush = 1;				\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

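The do { ... } while (0) wrapper is the standard idiom for making a multi-statement macro behave as a single statement; without it the macro breaks when used as the body of an if that has an else. A standalone demonstration (MARK_AND_RECORD is a made-up macro mirroring the shape of tlb_remove_tlb_entry, illustrative only):

#include <stdio.h>

#define MARK_AND_RECORD(flag, count)		\
	do {					\
		(flag) = 1;			\
		(count)++;			\
	} while (0)

int main(void)
{
	int flag = 0, count = 0, pte_present = 1;

	if (pte_present)
		MARK_AND_RECORD(flag, count); /* parses as one statement */
	else
		puts("nothing to record");

	printf("flag=%d count=%d\n", flag, count);
	return 0;
}
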
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
                                                     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define tlb_migrate_finish(mm) do {} while (0)
#endif