Release 4.13: arch/powerpc/include/asm/mmu_context.h
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>
/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;
extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
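
The mm_iommu_* family above implements userspace memory preregistration for in-kernel TCE (DMA window) tables. A minimal, hypothetical sketch of the calling pattern, loosely modelled on the VFIO SPAPR TCE driver; the function name and flow here are illustrative and not part of this header:

static long preregister_example(struct mm_struct *mm,
				unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem;
	unsigned long hpa;
	long ret;

	/* pin 'entries' pages at userspace address 'ua' and register them */
	ret = mm_iommu_get(mm, ua, entries, &mem);
	if (ret)
		return ret;

	/* translate a userspace address in the region to a host physical address */
	ret = mm_iommu_ua_to_hpa(mem, ua, &hpa);

	/* drop our reference; the region is unpinned once its count reaches zero */
	mm_iommu_put(mm, mem);
	return ret;
}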
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);

static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}
extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
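
A hedged sketch of how the hash context-id allocator above is meant to be used; the wrapper function is hypothetical, and in the kernel proper init_new_context()/destroy_context() perform this dance:

static int context_id_example(struct mm_struct *mm)
{
	int id;

	id = hash__alloc_context_id();	/* allocate a free context id */
	if (id < 0)
		return id;		/* allocation failed, id space exhausted */

	mm->context.id = id;
	/* ... mm is used with this context id ... */
	__destroy_context(id);		/* return the id to the allocator */
	return 0;
}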
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
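
use_cop()/drop_cop() manage the ACOP (available coprocessor) mask consumed by the icswx instruction. A hypothetical driver-side sketch; COP_TYPE is an illustrative coprocessor type number, not a constant defined by this header:

#define COP_TYPE	42	/* illustrative coprocessor type */

static int cop_example(void)
{
	int ret;

	/* grant the current mm access to coprocessor type COP_TYPE */
	ret = use_cop(1UL << COP_TYPE, current->mm);
	if (ret)
		return ret;

	/* ... issue icswx requests to the coprocessor ... */

	/* revoke access again */
	drop_cop(1UL << COP_TYPE, current->mm);
	return 0;
}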
/*
 * switch_mm is the entry point called from the architecture-independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm_irqs_off(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent operation which allows this CPU to begin loading
		 * translations for next.
		 *
		 * When using the radix MMU that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
		 * copy_mm_to_paca().
		 *
		 * On the read side the barrier is in pte_xchg(), which orders
		 * the store to the PTE vs the load of mm_cpumask.
		 */
		smp_mb();

		new_on_cpu = true;
	}

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	if (new_on_cpu)
		radix_kvm_prefetch_workaround(next);

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}
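
The cpumask/barrier comment in the function above describes a pairing with pte_xchg(); the ordering it establishes can be visualised as follows (illustrative diagram, not code from this file):

/*
 *   CPU taking mm on (switch_mm_irqs_off)   CPU inspecting mm_cpumask
 *   --------------------------------------  --------------------------------
 *   cpumask_set_cpu(cpu, mm_cpumask(next))  pte_xchg()  (store to the PTE)
 *   smp_mb()                                (barrier inside pte_xchg())
 *   load context id / slb_cache             load mm_cpumask for TLB flushing
 */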
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
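
switch_mm() is what the architecture-independent code ends up invoking; the sketch below is condensed from the generic use_mm() pattern in mm/mmu_context.c (simplified here, e.g. reference counting is omitted) to show how a kernel thread adopts a user mm:

static void use_mm_sketch(struct task_struct *tsk, struct mm_struct *mm)
{
	struct mm_struct *active_mm;

	task_lock(tsk);
	active_mm = tsk->active_mm;
	tsk->mm = mm;
	tsk->active_mm = mm;
	switch_mm(active_mm, mm, tsk);	/* hardware now translates through mm */
	task_unlock(tsk);
}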
#define switch_mm_irqs_off switch_mm_irqs_off
#define deactivate_mm(tsk,mm) do { } while (0)
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}
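
arch_unmap() above exists so that an munmap() covering the VDSO clears mm->context.vdso_base, preventing later use of the now-unmapped trampoline. An illustrative consumer-side check, assuming a hypothetical helper name not defined by this header:

static inline bool vdso_still_mapped(struct mm_struct *mm)
{
	/* vdso_base is zeroed by arch_unmap() once the VDSO range is gone */
	return mm->context.vdso_base != 0;
}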
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
Overall Contributors
Person                 | Tokens | Token % | Commits | Commit %
-----------------------|--------|---------|---------|---------
Alexey Kardashevskiy   |    195 |  21.08% |       4 |   10.53%
Benjamin Herrenschmidt |    183 |  19.78% |       5 |   13.16%
Laurent Dufour         |     94 |  10.16% |       1 |    2.63%
Aneesh Kumar K.V       |     89 |   9.62% |       3 |    7.89%
Anton Blanchard        |     74 |   8.00% |       4 |   10.53%
Andrew Morton          |     64 |   6.92% |       3 |    7.89%
Tseng-Hui (Frank) Lin  |     64 |   6.92% |       1 |    2.63%
David Gibson           |     50 |   5.41% |       1 |    2.63%
Alexander Graf         |     31 |   3.35% |       2 |    5.26%
Dave Hansen            |     25 |   2.70% |       3 |    7.89%
Paul Mackerras         |     19 |   2.05% |       3 |    7.89%
Balbir Singh           |     11 |   1.19% |       1 |    2.63%
Linus Torvalds         |      9 |   0.97% |       1 |    2.63%
Arnd Bergmann          |      6 |   0.65% |       1 |    2.63%
Rusty Russell          |      4 |   0.43% |       1 |    2.63%
Olof Johansson         |      3 |   0.32% |       1 |    2.63%
Alexey Dobriyan        |      2 |   0.22% |       1 |    2.63%
Viresh Kumar           |      1 |   0.11% |       1 |    2.63%
Michael Ellerman       |      1 |   0.11% |       1 |    2.63%
Total                  |    925 | 100.00% |      38 |  100.00%