cregit-Linux how code gets into the kernel

Release 4.11 arch/s390/include/asm/mmu_context.h

/*
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H

#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>


static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm) { spin_lock_init(&mm->context.pgtable_lock); INIT_LIST_HEAD(&mm->context.pgtable_list); spin_lock_init(&mm->context.gmap_lock); INIT_LIST_HEAD(&mm->context.gmap_list); cpumask_clear(&mm->context.cpu_attach_mask); atomic_set(&mm->context.flush_count, 0); mm->context.gmap_asce = 0; mm->context.flush_mm = 0; #ifdef CONFIG_PGSTE mm->context.alloc_pgste = page_table_allocate_pgste; mm->context.has_pgste = 0; mm->context.use_skey = 0; #endif switch (mm->context.asce_limit) { case 1UL << 42: /* * forked 3-level task, fall through to set new asce with new * mm->pgd */ case 0: /* context created by exec, set asce limit to 4TB */ mm->context.asce_limit = STACK_TOP_MAX; mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; break; case 1UL << 53: /* forked 4-level task, set new asce with new mm->pgd */ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; break; case 1UL << 31: /* forked 2-level compat task, set new asce with new mm->pgd */ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; /* pgd_alloc() did not increase mm->nr_pmds */ mm_inc_nr_pmds(mm); } crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); return 0; }

Contributors

Person                   | Tokens | Prop    | Commits | Commit Prop
Martin Schwidefsky       | 158    | 63.45%  | 10      | 66.67%
Gerald Schaefer          | 69     | 27.71%  | 1       | 6.67%
Christian Bornträger     | 8      | 3.21%   | 1       | 6.67%
Dominik Dingel           | 8      | 3.21%   | 2       | 13.33%
Linus Torvalds (pre-git) | 6      | 2.41%   | 1       | 6.67%
Total                    | 249    | 100.00% | 15      | 100.00%

/* No per-mm context teardown is required on s390; this is a no-op. */
#define destroy_context(mm) do { } while (0)
/*
 * Install the user-space ASCE of @mm into lowcore, and into control
 * register 7 when the task currently runs with the user mm_segment
 * (ar4 set).  Marks CIF_ASCE_PRIMARY so CR1 is reloaded on exit to
 * user space.
 */
static inline void set_user_asce(struct mm_struct *mm)
{
	S390_lowcore.user_asce = mm->context.asce;
	if (current->thread.mm_segment.ar4)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	set_cpu_flag(CIF_ASCE_PRIMARY);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin Schwidefsky3062.50%660.00%
Linus Torvalds (pre-git)1327.08%110.00%
Andrew Morton36.25%110.00%
Heiko Carstens12.08%110.00%
Gerald Schaefer12.08%110.00%
Total48100.00%10100.00%


/*
 * Drop the user-space ASCE: point lowcore's user_asce at the kernel
 * ASCE and load it into both control registers 1 and 7 so no user
 * address space remains attached.
 */
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}

Contributors

PersonTokensPropCommitsCommitProp
Heiko Carstens2358.97%133.33%
Martin Schwidefsky1641.03%266.67%
Total39100.00%3100.00%


/*
 * Ensure control register 1 holds the kernel ASCE.  Reads CR1 first and
 * only reloads it when it differs, then flags CIF_ASCE_PRIMARY so the
 * user ASCE is restored on the way back to user space.
 */
static inline void load_kernel_asce(void)
{
	unsigned long cr1_asce;

	__ctl_store(cr1_asce, 1, 1);
	if (cr1_asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE_PRIMARY);
}

Contributors

PersonTokensPropCommitsCommitProp
Heiko Carstens4291.30%240.00%
Martin Schwidefsky48.70%360.00%
Total46100.00%5100.00%


/*
 * Switch the CPU from address space @prev to @next.  The ordering below
 * is deliberate: the CPU is added to @next's attach mask before the old
 * ASCE is replaced, and removed from @prev's mask only afterwards, so a
 * concurrent TLB flusher never misses this CPU.  The actual user ASCE is
 * loaded later, in finish_arch_post_lock_switch()/set_user_asce().
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	S390_lowcore.user_asce = next->context.asce;
	if (prev == next)
		return;
	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(next));
	/* Clear old ASCE by loading the kernel ASCE. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin Schwidefsky9694.12%763.64%
Rusty Russell21.96%19.09%
Heiko Carstens21.96%19.09%
Gerald Schaefer10.98%19.09%
Linus Torvalds (pre-git)10.98%19.09%
Total102100.00%11100.00%

#define finish_arch_post_lock_switch finish_arch_post_lock_switch
/*
 * Runs after the scheduler has dropped the runqueue lock.  Restores the
 * kernel ASCE, waits for any in-flight TLB flush on the new mm to
 * finish, performs a deferred mm flush if one was requested, and finally
 * restores the task's address-space mode.
 */
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	load_kernel_asce();
	if (mm) {
		preempt_disable();
		/* Wait until concurrent flushers are done with this mm. */
		while (atomic_read(&mm->context.flush_count))
			cpu_relax();
		if (mm->context.flush_mm)
			__tlb_flush_mm(mm);
		preempt_enable();
	}
	set_fs(current->thread.mm_segment);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin Schwidefsky7698.70%583.33%
Linus Torvalds (pre-git)11.30%116.67%
Total77100.00%6100.00%

/*
 * Fix: the cregit extraction fused two preprocessor directives onto one
 * physical line, which makes the second #define part of the first
 * macro's replacement text.  Each directive must be on its own line.
 * Both remain no-ops: s390 needs no lazy-TLB or deactivate hooks.
 */
#define enter_lazy_tlb(mm,tsk) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)
/*
 * Activate @next for the current task: do the full mm switch and then
 * immediately install the user ASCE (unlike a normal context switch,
 * where set_user_asce() is deferred).
 */
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
	set_user_asce(next);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)2580.65%133.33%
Martin Schwidefsky516.13%133.33%
Adrian Bunk13.23%133.33%
Total31100.00%3100.00%


/* mm duplication hook (fork): nothing architecture-specific to do. */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Martin Schwidefsky16100.00%2100.00%
Total16100.00%2100.00%


/* mm teardown hook (exit_mmap): nothing architecture-specific to do. */
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Martin Schwidefsky11100.00%1100.00%
Total11100.00%1100.00%


/* munmap hook: no architecture-specific work needed on s390. */
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Dave Hansen24100.00%1100.00%
Total24100.00%1100.00%


/* exec-time mm init hook: no architecture-specific work needed. */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Dave Hansen16100.00%1100.00%
Total16100.00%1100.00%


/*
 * VMA access-permission hook (protection keys etc.).  s390 imposes no
 * additional restrictions here, so every access is permitted.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	/* by default, allow everything */
	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Dave Hansen25100.00%3100.00%
Total25100.00%3100.00%


/*
 * PTE access-permission hook.  No per-PTE restrictions exist on s390,
 * so every access is permitted.
 */
static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Dave Hansen17100.00%1100.00%
Total17100.00%1100.00%

#endif /* __S390_MMU_CONTEXT_H */

Overall Contributors

Person                   | Tokens | Prop    | Commits | Commit Prop
Martin Schwidefsky       | 430    | 56.95%  | 18      | 45.00%
Dave Hansen              | 82     | 10.86%  | 4       | 10.00%
Gerald Schaefer          | 75     | 9.93%   | 2       | 5.00%
Heiko Carstens           | 69     | 9.14%   | 3       | 7.50%
Linus Torvalds (pre-git) | 60     | 7.95%   | 1       | 2.50%
Linus Torvalds           | 10     | 1.32%   | 2       | 5.00%
Dominik Dingel           | 8      | 1.06%   | 2       | 5.00%
Christian Bornträger     | 8      | 1.06%   | 1       | 2.50%
Andrew Morton            | 4      | 0.53%   | 2       | 5.00%
Ingo Molnar              | 3      | 0.40%   | 1       | 2.50%
Jeremy Fitzhardinge      | 2      | 0.26%   | 1       | 2.50%
Rusty Russell            | 2      | 0.26%   | 1       | 2.50%
Adrian Bunk              | 1      | 0.13%   | 1       | 2.50%
David Howells            | 1      | 0.13%   | 1       | 2.50%
Total                    | 755    | 100.00% | 40      | 100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.