Contributors: 12
| Author | Tokens | Token Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Jeff Dike | 151 | 52.43% | 5 | 23.81% |
| Dave Hansen | 67 | 23.26% | 2 | 9.52% |
| Benjamin LaHaise | 18 | 6.25% | 2 | 9.52% |
| Rusty Russell | 14 | 4.86% | 2 | 9.52% |
| Al Viro | 12 | 4.17% | 2 | 9.52% |
| Linus Torvalds | 9 | 3.12% | 1 | 4.76% |
| Michel Lespinasse | 6 | 2.08% | 2 | 9.52% |
| Thomas Gleixner | 4 | 1.39% | 1 | 4.76% |
| Ingo Molnar | 3 | 1.04% | 1 | 4.76% |
| Alex Dewar | 2 | 0.69% | 1 | 4.76% |
| Oleg Nesterov | 1 | 0.35% | 1 | 4.76% |
| Johannes Berg | 1 | 0.35% | 1 | 4.76% |
| Total | 288 | | 21 | |
```c
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

#include <asm/mmu.h>

extern void uml_setup_stubs(struct mm_struct *mm);

/*
* Needed since we do not use the asm-generic/mm_hooks.h:
*/
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        /* A freshly duplicated mm (e.g. at fork()) needs its own stub pages */
        uml_setup_stubs(mm);
        return 0;
}

extern void arch_exit_mmap(struct mm_struct *mm);

static inline void arch_unmap(struct mm_struct *mm,
                              unsigned long start, unsigned long end)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
                                             bool write, bool execute,
                                             bool foreign)
{
        /* by default, allow everything */
        return true;
}

/*
 * end asm-generic/mm_hooks.h functions
 */

#define deactivate_mm(tsk, mm)  do { } while (0)

extern void force_flush_all(void);

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
        /*
         * This is called by fs/exec.c and sys_unshare()
         * when the new ->mm is used for the first time.
         */
        __switch_mm(&new->context.id);
        mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
        uml_setup_stubs(new);
        mmap_write_unlock(new);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

        if (prev != next) {
                /*
                 * Track which CPUs each mm is live on, then switch the
                 * underlying address space unless we are merely entering
                 * the kernel's init_mm.
                 */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));
                cpumask_set_cpu(cpu, mm_cpumask(next));
                if (next != &init_mm)
                        __switch_mm(&next->context.id);
        }
}

static inline void enter_lazy_tlb(struct mm_struct *mm,
                                  struct task_struct *tsk)
{
}

extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

#endif /* __UM_MMU_CONTEXT_H */
```
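
To make the `switch_mm()` logic above concrete, here is a minimal stand-alone user-space sketch of the same pattern. Everything in it is an illustrative assumption rather than the kernel API: the struct fields, `switch_mm_sketch()`, and the `printf` standing in for `__switch_mm()` are hypothetical stand-ins that only mirror the `prev != next` check, the per-CPU mask bookkeeping, and the `init_mm` special case.

```c
/* Hypothetical sketch of the switch_mm() pattern; these types and
 * names are illustrative stand-ins, not the kernel's own API. */
#include <stdio.h>

struct mm_context { int id; };
struct mm_struct {
        struct mm_context context;
        unsigned long cpu_mask;         /* bit n set => live on CPU n */
};

static struct mm_struct init_mm = { { 0 }, 0 };

static void switch_mm_sketch(struct mm_struct *prev,
                             struct mm_struct *next, int cpu)
{
        if (prev != next) {
                prev->cpu_mask &= ~(1UL << cpu);  /* cpumask_clear_cpu() */
                next->cpu_mask |= 1UL << cpu;     /* cpumask_set_cpu() */
                if (next != &init_mm)             /* init_mm: bookkeeping only */
                        printf("__switch_mm(id=%d) on cpu %d\n",
                               next->context.id, cpu);
        }
}

int main(void)
{
        struct mm_struct a = { { 1 }, 0 }, b = { { 2 }, 0 };

        switch_mm_sketch(&a, &a, 0);        /* same mm: nothing happens */
        switch_mm_sketch(&a, &b, 0);        /* real switch */
        switch_mm_sketch(&b, &init_mm, 0);  /* mask updated, no __switch_mm */
        return 0;
}
```

Running the sketch prints a switch only for the `a` to `b` transition, matching the header's behavior of skipping redundant switches and of updating only the CPU masks when entering `init_mm`.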