Release 4.14: arch/x86/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLB_H
#define _ASM_X86_TLB_H
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb)                                                  \
{                                                                       \
        if (!tlb->fullmm && !tlb->need_flush_all)                       \
                flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
        else                                                            \
                flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);   \
}
#include <asm-generic/tlb.h>
/*
 * While x86 architecture in general requires an IPI to perform TLB
 * shootdown, enablement code for several hypervisors overrides
 * .flush_tlb_others hook in pv_mmu_ops and implements it by issuing
 * a hypercall. To keep software pagetable walkers safe in this case we
 * switch to RCU based table free (HAVE_RCU_TABLE_FREE). See the comment
 * below 'ifdef CONFIG_HAVE_RCU_TABLE_FREE' in include/asm-generic/tlb.h
 * for more details.
 */
static inline void __tlb_remove_table(void *table)
{
        free_page_and_swap_cache(table);
}
Contributors (__tlb_remove_table)
Person | Tokens | Token Share | Commits | Commit Share |
Vitaly Kuznetsov | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
#endif /* _ASM_X86_TLB_H */
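The comment above __tlb_remove_table() describes why x86 uses RCU based page table freeing here: once a hypervisor's enablement code replaces the IPI based remote TLB flush with a hypercall, software page table walkers can no longer rely on the flush IPI to serialize against page table freeing. The following is a minimal, hedged sketch of what such enablement code looks like in the 4.14 era; it is not part of arch/x86/include/asm/tlb.h. The names myhv_flush_tlb_others() and myhv_hypercall_flush() are hypothetical, while pv_mmu_ops.flush_tlb_others, struct flush_tlb_info and native_flush_tlb_others() are the real 4.14 hooks this pattern is built on.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <asm/paravirt.h>
#include <asm/tlbflush.h>

/*
 * Hypothetical hypercall wrapper: a real guest would hand the target
 * cpumask and the flushed range to its hypervisor here.  Returns false
 * when the hypercall cannot be used so the caller can fall back to IPIs.
 */
static bool myhv_hypercall_flush(const struct cpumask *cpus,
                                 const struct flush_tlb_info *info)
{
        return false;   /* placeholder: pretend the hypercall is unavailable */
}

/* Replacement for the IPI based native_flush_tlb_others(). */
static void myhv_flush_tlb_others(const struct cpumask *cpus,
                                  const struct flush_tlb_info *info)
{
        if (cpumask_empty(cpus))
                return;

        if (!myhv_hypercall_flush(cpus, info))
                native_flush_tlb_others(cpus, info);
}

static void __init myhv_setup_mmu_ops(void)
{
        /*
         * Once this hook no longer sends IPIs, freeing page tables has to
         * wait for an RCU grace period instead, which is why x86 selects
         * HAVE_RCU_TABLE_FREE and provides __tlb_remove_table() above.
         */
        pv_mmu_ops.flush_tlb_others = myhv_flush_tlb_others;
}

Hyper-V's enablement code in arch/x86/hyperv/mmu.c, for instance, follows the same try-the-hypercall, fall-back-to-native_flush_tlb_others() pattern in this release.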
Overall Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Thomas Gleixner | 44 | 65.67% | 2 | 28.57% |
Vitaly Kuznetsov | 17 | 25.37% | 1 | 14.29% |
H. Peter Anvin | 3 | 4.48% | 1 | 14.29% |
Greg Kroah-Hartman | 1 | 1.49% | 1 | 14.29% |
Alex Shi | 1 | 1.49% | 1 | 14.29% |
Dave Hansen | 1 | 1.49% | 1 | 14.29% |
Total | 67 | 100.00% | 7 | 100.00% |