cregit-Linux: how code gets into the kernel

Release 4.13 arch/powerpc/include/asm/mmu_context.h

#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
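
The preregistration API above pins a range of userspace memory once, so that later (including real-mode) TCE updates can translate userspace addresses to host physical addresses without faulting. As an illustration only, here is a hypothetical caller running a get/translate/put cycle; the 0-on-success return convention for mm_iommu_mapped_inc() is an assumption, not something this header specifies:

/* Hypothetical illustration of the preregistration lifecycle.
 * Assumes the conventional 0-on-success return for these calls. */
static long example_pin_and_translate(struct mm_struct *mm,
				      unsigned long ua,
				      unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long hpa;
	long ret;

	ret = mm_iommu_get(mm, ua, entries, &mem);	/* pin + register */
	if (ret)
		return ret;

	ret = mm_iommu_mapped_inc(mem);			/* hold a mapping reference */
	if (!ret) {
		ret = mm_iommu_ua_to_hpa(mem, ua, &hpa);
		mm_iommu_mapped_dec(mem);
	}

	mm_iommu_put(mm, mem);				/* drop the registration */
	return ret;
}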
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);

static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Aneesh Kumar K.V              43  100.00%        2     100.00%
Total                         43  100.00%        2     100.00%

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt         8  100.00%        1     100.00%
Total                          8  100.00%        1     100.00%
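
The hash context id allocator declared above pairs allocation with teardown. A minimal, hypothetical sketch of that pairing (error handling elided; storing the id in mm->context.id, as on Book3S-64, is an assumption for illustration):

/* Hypothetical sketch: pair hash__alloc_context_id() with __destroy_context().
 * Assumes mm->context.id is where the id lives, as on Book3S-64. */
static int example_init_context(struct mm_struct *mm)
{
	int id = hash__alloc_context_id();	/* negative errno on failure */

	if (id < 0)
		return id;
	mm->context.id = id;
	return 0;
}

static void example_teardown_context(struct mm_struct *mm)
{
	__destroy_context(mm->context.id);
}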

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        11  100.00%        1     100.00%
Total                         11  100.00%        1     100.00%

#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin loading
		 * translations for next.
		 *
		 * When using the radix MMU that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the read side the barrier is in pte_xchg(), which orders
		 * the store to the PTE vs the load of mm_cpumask.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all altivec streams before changing the HW context */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        64   38.79%        4      28.57%
Anton Blanchard               35   21.21%        2      14.29%
Tseng-Hui (Frank) Lin         26   15.76%        1       7.14%
Andrew Morton                 16    9.70%        2      14.29%
Paul Mackerras                13    7.88%        1       7.14%
Rusty Russell                  4    2.42%        1       7.14%
Olof Johansson                 3    1.82%        1       7.14%
Aneesh Kumar K.V               3    1.82%        1       7.14%
David Gibson                   1    0.61%        1       7.14%
Total                        165  100.00%       14     100.00%
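
The barrier comment in switch_mm_irqs_off() above describes a pairing that is easier to see laid out per CPU. The following diagram is an illustration of that contract, not code from this file:

/*
 * Illustration of the ordering contract (not from this file):
 *
 *	CPU A: switching to next		CPU B: updating a PTE of next
 *	------------------------		-----------------------------
 *	store: set A in mm_cpumask(next)	store: new PTE value
 *	smp_mb()				smp_mb() (inside pte_xchg())
 *	load:  context id / slb_cache		load:  mm_cpumask(next)
 *
 * Either CPU A observes the new PTE once it starts loading translations,
 * or CPU B observes CPU A in mm_cpumask and includes it in the flush.
 */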


static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
David Gibson                  45  100.00%        1     100.00%
Total                         45  100.00%        1     100.00%

#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Andrew Morton                 36   90.00%        1      50.00%
Anton Blanchard                4   10.00%        1      50.00%
Total                         40  100.00%        2     100.00%
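
As the comment above activate_mm() notes, it runs after current->mm has been repointed. A condensed, illustrative view of such a caller (hypothetical; not verbatim fs/exec.c, which also takes locks and drops the old mm):

/* Condensed illustration of an exec-style caller (hypothetical):
 * install the new mm, then activate it so this CPU sees the new mappings. */
static void example_install_mm(struct mm_struct *new_mm)
{
	struct mm_struct *old_mm = current->mm;

	current->mm = new_mm;
	activate_mm(old_mm, new_mm);
}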

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Benjamin Herrenschmidt        30  100.00%        2     100.00%
Total                         30  100.00%        2     100.00%


static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) { }

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Laurent Dufour                16  100.00%        1     100.00%
Total                         16  100.00%        1     100.00%


static inline void arch_exit_mmap(struct mm_struct *mm) { }

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Laurent Dufour                11  100.00%        1     100.00%
Total                         11  100.00%        1     100.00%


static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Laurent Dufour                51  100.00%        1     100.00%
Total                         51  100.00%        1     100.00%


static inline void arch_bprm_mm_init(struct mm_struct *mm, struct vm_area_struct *vma) { }

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Laurent Dufour                16  100.00%        1     100.00%
Total                         16  100.00%        1     100.00%


static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Dave Hansen                   25  100.00%        3     100.00%
Total                         25  100.00%        3     100.00%

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */

Overall Contributors

Person                    Tokens     Prop  Commits  CommitProp
Alexey Kardashevskiy         195   21.08%        4      10.53%
Benjamin Herrenschmidt       183   19.78%        5      13.16%
Laurent Dufour                94   10.16%        1       2.63%
Aneesh Kumar K.V              89    9.62%        3       7.89%
Anton Blanchard               74    8.00%        4      10.53%
Andrew Morton                 64    6.92%        3       7.89%
Tseng-Hui (Frank) Lin         64    6.92%        1       2.63%
David Gibson                  50    5.41%        1       2.63%
Alexander Graf                31    3.35%        2       5.26%
Dave Hansen                   25    2.70%        3       7.89%
Paul Mackerras                19    2.05%        3       7.89%
Balbir Singh                  11    1.19%        1       2.63%
Linus Torvalds                 9    0.97%        1       2.63%
Arnd Bergmann                  6    0.65%        1       2.63%
Rusty Russell                  4    0.43%        1       2.63%
Olof Johansson                 3    0.32%        1       2.63%
Alexey Dobriyan                2    0.22%        1       2.63%
Viresh Kumar                   1    0.11%        1       2.63%
Michael Ellerman               1    0.11%        1       2.63%
Total                        925  100.00%       38     100.00%
Created with cregit.