cregit-Linux how code gets into the kernel

Release 4.17 arch/arm64/include/asm/tlbflush.h

/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */

#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"                                  \
                   ALTERNATIVE("nop\n                   nop",                  \
                               "dsb ish\n               tlbi " #op,            \
                               ARM64_WORKAROUND_REPEAT_TLBI,                   \
                               CONFIG_QCOM_FALKOR_ERRATUM_1009)                \
                            : : )


#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"                              \
                   ALTERNATIVE("nop\n                   nop",                  \
                               "dsb ish\n               tlbi " #op ", %0",     \
                               ARM64_WORKAROUND_REPEAT_TLBI,                   \
                               CONFIG_QCOM_FALKOR_ERRATUM_1009)                \
                            : : "r" (arg))


#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)


#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
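
For illustration only (this note is editorial, not part of the header): the trailing ", 1, 0" arguments let __TLBI_N() select the right variant depending on whether a register operand was supplied.

/*
 * Illustrative expansion (editor's example):
 *
 *	__tlbi(vmalle1is);		-> __TLBI_N(vmalle1is, 1, 0)
 *					-> __TLBI_0(vmalle1is, 1)
 *					-> "tlbi vmalle1is" with no operand
 *
 *	__tlbi(vale1is, addr);		-> __TLBI_N(vale1is, addr, 1, 0)
 *					-> __TLBI_1(vale1is, addr)
 *					-> "tlbi vale1is, %0" with addr in a register
 */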


#define __tlbi_user(op, arg) do {                                            \
        if (arm64_kernel_unmapped_at_el0())                                     \
                __tlbi(op, (arg) | USER_ASID_FLAG);                             \
} while (0)

/* This macro creates a properly formatted VA operand for the TLBI */

#define __TLBI_VADDR(addr, asid)				\
	({                                                      \
                unsigned long __ta = (addr) >> 12;              \
                __ta &= GENMASK_ULL(43, 0);                     \
                __ta |= (unsigned long)(asid) << 48;            \
                __ta;                                           \
        })
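
As a rough standalone sketch of the operand layout this macro produces (bits [43:0] hold the virtual address shifted right by 12, bits [63:48] hold the ASID), the following ordinary userspace C program mirrors the same bit manipulation; tlbi_vaddr() and MASK_43_0 are hypothetical stand-ins, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Hand-expanded equivalent of the kernel's GENMASK_ULL(43, 0). */
#define MASK_43_0	((1ULL << 44) - 1)

/* Stand-in for __TLBI_VADDR(addr, asid). */
static uint64_t tlbi_vaddr(uint64_t addr, uint64_t asid)
{
	uint64_t ta = addr >> 12;	/* page number of the virtual address */

	ta &= MASK_43_0;		/* keep bits [43:0] of the page number */
	ta |= asid << 48;		/* place the ASID in bits [63:48] */
	return ta;
}

int main(void)
{
	/* Example: virtual address 0x0000ffff8000 in ASID 0x2a. */
	printf("operand = %#llx\n",
	       (unsigned long long)tlbi_vaddr(0x0000ffff8000ULL, 0x2aULL));
	/* Prints: operand = 0x2a0000000ffff8 (ASID 0x2a, page 0xffff8). */
	return 0;
}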

/*
 *      TLB Management
 *      ==============
 *
 *      The TLB specific code is expected to perform whatever tests it needs
 *      to determine if it should invalidate the TLB for each call.  Start
 *      addresses are inclusive and end addresses are exclusive; it is safe to
 *      round these addresses down.
 *
 *      flush_tlb_all()
 *
 *              Invalidate the entire TLB.
 *
 *      flush_tlb_mm(mm)
 *
 *              Invalidate all TLB entries in a particular address space.
 *              - mm    - mm_struct describing address space
 *
 *      flush_tlb_range(vma,start,end)
 *
 *              Invalidate a range of TLB entries in the specified address
 *              space.
 *              - vma   - vm_area_struct describing the address space
 *              - start - start address (may not be aligned)
 *              - end   - end address (exclusive, may not be aligned)
 *
 *      flush_tlb_page(vma,vaddr)
 *
 *              Invalidate the specified page in the specified address space.
 *              - vma   - vm_area_struct describing the address space
 *              - vaddr - virtual address (may not be aligned)
 *
 *      flush_kern_tlb_page(kaddr)
 *
 *              Invalidate the TLB entry for the specified page.  The address
 *              will be in the kernel's virtual memory space.  Current uses
 *              only require the D-TLB to be invalidated.
 *              - kaddr - Kernel virtual memory address
 */
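
For orientation, a hedged sketch of how a caller might choose between these hooks; the two example functions below are hypothetical and only illustrate the pattern the comment describes (a single-page update uses flush_tlb_page(), a multi-page update uses flush_tlb_range()):

/* Hypothetical callers, for illustration only. */
static inline void example_change_one_pte(struct vm_area_struct *vma,
					  unsigned long addr)
{
	/* ... modify the PTE mapping 'addr' under the appropriate locks ... */
	flush_tlb_page(vma, addr);		/* one page changed */
}

static inline void example_unmap_many_ptes(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	/* ... clear every PTE in [start, end) ... */
	flush_tlb_range(vma, start, end);	/* a whole range changed */
}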

/*
 * Flush the entire TLB on the local CPU only (non-broadcast TLBI,
 * non-shareable barriers).
 */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Will Deacon                22   81.48%         1       50.00%
Mark Rutland                5   18.52%         1       50.00%
Total                      27  100.00%         2      100.00%


/*
 * Flush the entire TLB on all CPUs in the inner-shareable domain.
 */
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Catalin Marinas            16   59.26%         1       33.33%
Will Deacon                 6   22.22%         1       33.33%
Mark Rutland                5   18.52%         1       33.33%
Total                      27  100.00%         3      100.00%


/*
 * Flush all entries tagged with this address space's ASID.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Catalin Marinas            25   50.00%         1       20.00%
Will Deacon                13   26.00%         2       40.00%
Mark Rutland                7   14.00%         1       20.00%
Philip Elcan                5   10.00%         1       20.00%
Total                      50  100.00%         5      100.00%


/*
 * Flush the last-level (leaf) entry for a single page in the given VMA's
 * address space.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Catalin Marinas            32   57.14%         1       20.00%
Will Deacon                13   23.21%         2       40.00%
Mark Rutland                7   12.50%         1       20.00%
Philip Elcan                4    7.14%         1       20.00%
Total                      56  100.00%         5      100.00%

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Steve Capper               65   44.83%         1       14.29%
Catalin Marinas            38   26.21%         3       42.86%
Will Deacon                18   12.41%         1       14.29%
Mark Rutland               14    9.66%         1       14.29%
Philip Elcan               10    6.90%         1       14.29%
Total                     145  100.00%         7      100.00%
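
A quick check of the arithmetic in __flush_tlb_range() (assuming 4 KiB pages, i.e. PAGE_SHIFT = 12): MAX_TLB_RANGE is 1024UL << 12 = 4 MiB, so a range covering more than 1024 pages falls back to a single flush_tlb_mm() instead of issuing thousands of broadcast TLBIs. Inside the loop, start and end have already been passed through __TLBI_VADDR(), i.e. shifted right by 12, so the step 1 << (PAGE_SHIFT - 12) advances the operand by exactly one page: 1 for 4 KiB pages, or 16 for 64 KiB pages (PAGE_SHIFT = 16), where MAX_TLB_RANGE correspondingly becomes 64 MiB.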


static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Catalin Marinas            31  100.00%         1      100.00%
Total                      31  100.00%         1      100.00%


static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Steve Capper               48   51.61%         1       14.29%
Catalin Marinas            22   23.66%         3       42.86%
Philip Elcan               14   15.05%         1       14.29%
Mark Rutland                7    7.53%         1       14.29%
Mark Salter                 2    2.15%         1       14.29%
Total                      93  100.00%         7      100.00%

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);
}

Contributors

Person                 Tokens     Prop   Commits   CommitProp
Catalin Marinas            31   63.27%         1       25.00%
Will Deacon                 7   14.29%         1       25.00%
Mark Rutland                7   14.29%         1       25.00%
Philip Elcan                4    8.16%         1       25.00%
Total                      49  100.00%         4      100.00%
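
One detail worth noting about __flush_tlb_pgtable() above: it uses the vae1is encoding rather than the vale1is form seen in flush_tlb_page(). The contrast, for illustration:

/*
 * Editor's note (illustrative, same operand in both cases):
 *
 *	__tlbi(vale1is, addr);	invalidates only the last-level (leaf) entry;
 *				cached intermediate walk entries may survive
 *	__tlbi(vae1is, addr);	invalidates entries from any level, which is
 *				what is needed when a pgd/pud/pmd page is freed
 */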

#endif	/* !__ASSEMBLY__ */

#endif	/* __ASM_TLBFLUSH_H */

Overall Contributors

Person                   Tokens     Prop   Commits   CommitProp
Catalin Marinas             221   38.84%         5       35.71%
Steve Capper                113   19.86%         1        7.14%
Will Deacon                  91   15.99%         3       21.43%
Mark Rutland                 91   15.99%         1        7.14%
Philip Elcan                 48    8.44%         1        7.14%
Christopher Covington         2    0.35%         1        7.14%
Mark Salter                   2    0.35%         1        7.14%
Vladimir Murzin               1    0.18%         1        7.14%
Total                       569  100.00%        14      100.00%
Created with cregit.