Release 4.14 arch/um/include/asm/mmu_context.h
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>
extern void uml_setup_stubs(struct mm_struct *mm);
/*
 * Needed since we do not use the asm-generic/mm_hooks.h:
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        uml_setup_stubs(mm);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Dave Hansen | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
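UML opts out of the empty generic hooks in include/asm-generic/mm_hooks.h because it needs arch_dup_mmap() to map its stub pages into every new address space. The hook is driven from the fork path; the fragment below is a simplified, hypothetical sketch (not the kernel's actual code) of the shape of that call site in kernel/fork.c:dup_mmap(), shown only to illustrate when uml_setup_stubs() ends up running for a child process.
/*
 * Simplified sketch, not the real dup_mmap(): after the parent's VMAs
 * have been copied into the child's mm on fork(), the arch hook runs.
 * On UML that is arch_dup_mmap() above, which maps the stub pages.
 */
static int dup_mmap_sketch(struct mm_struct *mm, struct mm_struct *oldmm)
{
        /* ... copy vm_area_structs and page tables from oldmm into mm ... */
        arch_dup_mmap(oldmm, mm);       /* UML: uml_setup_stubs(mm) */
        return 0;
}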
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
                              struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Dave Hansen | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Dave Hansen | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                                             bool write, bool execute, bool foreign)
{
        /* by default, allow everything */
        return true;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Dave Hansen | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
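arch_vma_access_permitted() lets an architecture veto an access that the generic VMA permission bits would otherwise allow (x86 uses it for protection keys); UML simply permits everything. For context, here is a rough, hypothetical sketch of how the core mm consults the hook, loosely modelled on vma_permits_fault() in mm/gup.c (names simplified, not the exact code):
/*
 * Rough sketch, loosely modelled on mm/gup.c:vma_permits_fault().
 * get_user_pages()-style accesses are data accesses, so execute is
 * false; on UML the arch hook always returns true.
 */
static bool vma_permits_access_sketch(struct vm_area_struct *vma, bool write)
{
        vm_flags_t needed = write ? VM_WRITE : VM_READ;

        if (!(vma->vm_flags & needed))
                return false;
        /* arch veto, e.g. protection keys on x86; always true on UML */
        return arch_vma_access_permitted(vma, write, false, false);
}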
/*
 * end asm-generic/mm_hooks.h functions
 */
#define deactivate_mm(tsk,mm) do { } while (0)
extern void force_flush_all(void);
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
        /*
         * This is called by fs/exec.c and sys_unshare()
         * when the new ->mm is used for the first time.
         */
        __switch_mm(&new->context.id);
        down_write(&new->mmap_sem);
        uml_setup_stubs(new);
        up_write(&new->mmap_sem);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Jeff Dike | 20 | 40.82% | 3 | 42.86% |
Al Viro | 17 | 34.69% | 1 | 14.29% |
Benjamin LaHaise | 11 | 22.45% | 2 | 28.57% |
Oleg Nesterov | 1 | 2.04% | 1 | 14.29% |
Total | 49 | 100.00% | 7 | 100.00% |
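As the comment says, activate_mm() is reached when a brand-new mm becomes current for the first time: exec_mmap() in fs/exec.c and the unshare() path. The sketch below is a hypothetical, heavily simplified version of the exec-time call site, only to show where the UML-specific __switch_mm() and stub setup happen:
/*
 * Heavily simplified sketch of fs/exec.c:exec_mmap() (error handling,
 * old-mm teardown and most locking omitted). The new mm built for the
 * exec'd image is installed and activate_mm() makes it live; on UML
 * that switches the host address space and maps the stub pages.
 */
static int exec_mmap_sketch(struct mm_struct *new_mm)
{
        struct task_struct *tsk = current;
        struct mm_struct *old_mm = tsk->mm;

        task_lock(tsk);
        tsk->mm = new_mm;
        tsk->active_mm = new_mm;
        activate_mm(old_mm, new_mm);    /* UML: __switch_mm() + uml_setup_stubs() */
        task_unlock(tsk);
        /* ... drop the reference on old_mm ... */
        return 0;
}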
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (prev != next) {
                cpumask_clear_cpu(cpu, mm_cpumask(prev));
                cpumask_set_cpu(cpu, mm_cpumask(next));
                if (next != &init_mm)
                        __switch_mm(&next->context.id);
        }
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Jeff Dike | 59 | 80.82% | 4 | 66.67% |
Rusty Russell | 14 | 19.18% | 2 | 33.33% |
Total | 73 | 100.00% | 6 | 100.00% |
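switch_mm() is what the scheduler uses on every task switch: it keeps mm_cpumask() in sync and, for anything other than init_mm, asks the host to switch to the address space identified by next->context.id. Kernel threads have no mm of their own and only see enter_lazy_tlb(), which is a no-op here. The fragment below is a simplified, hypothetical sketch of that scheduler logic (the real code lives in kernel/sched/core.c:context_switch()):
/*
 * Simplified sketch of the scheduler's use of these hooks, not the
 * actual code. Kernel threads borrow the previous task's active_mm
 * and only get enter_lazy_tlb(); user tasks get a real switch_mm(),
 * which on UML changes the host address space via __switch_mm().
 */
static void context_switch_sketch(struct task_struct *prev, struct task_struct *next)
{
        struct mm_struct *mm = next->mm;
        struct mm_struct *oldmm = prev->active_mm;

        if (!mm) {                              /* kernel thread */
                next->active_mm = oldmm;
                enter_lazy_tlb(oldmm, next);    /* no-op on UML */
        } else {
                switch_mm(oldmm, mm, next);
        }
}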
static inline void enter_lazy_tlb(struct mm_struct *mm,
                                  struct task_struct *tsk)
{
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Jeff Dike | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#endif
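init_new_context() and destroy_context() bracket the life of an mm: the first sets up the UML context (the host address-space id used by __switch_mm()) when an mm is created, the second tears it down when the mm is freed; both are defined out of line in the UML arch code rather than in this header. Below is a minimal, hypothetical sketch of the generic call sites, assuming the usual mm-creation/mm-drop flow in kernel/fork.c (not the actual code):
/*
 * Minimal sketch of where the two out-of-line hooks are called from,
 * assuming the usual kernel/fork.c flow; not the actual code.
 */
static struct mm_struct *mm_init_sketch(struct task_struct *p, struct mm_struct *mm)
{
        if (init_new_context(p, mm))    /* UML: set up context.id for the host */
                return NULL;
        return mm;
}

static void mm_drop_sketch(struct mm_struct *mm)
{
        destroy_context(mm);            /* UML: release the host address space */
        /* ... free the mm_struct itself ... */
}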
Overall Contributors
Person | Tokens | Token % | Commits | Commit % |
Jeff Dike | 152 | 49.67% | 6 | 35.29% |
Dave Hansen | 89 | 29.08% | 2 | 11.76% |
Al Viro | 20 | 6.54% | 2 | 11.76% |
Benjamin LaHaise | 18 | 5.88% | 2 | 11.76% |
Rusty Russell | 14 | 4.58% | 2 | 11.76% |
Linus Torvalds | 9 | 2.94% | 1 | 5.88% |
Ingo Molnar | 3 | 0.98% | 1 | 5.88% |
Oleg Nesterov | 1 | 0.33% | 1 | 5.88% |
Total | 306 | 100.00% | 17 | 100.00% |