Contributors: 12

Author                    Tokens  Token Proportion  Commits  Commit Proportion
------------------------  ------  ----------------  -------  -----------------
Linus Torvalds (pre-git)     173            44.70%        1              6.25%
Helge Deller                 103            26.61%        4             25.00%
Matthew Wilcox                43            11.11%        1              6.25%
Kyle McMartin                 32             8.27%        2             12.50%
Nicholas Piggin               13             3.36%        1              6.25%
John David Anglin              7             1.81%        1              6.25%
Andrew Morton                  6             1.55%        1              6.25%
Jeremy Fitzhardinge            3             0.78%        1              6.25%
Alexey Dobriyan                3             0.78%        1              6.25%
Linus Torvalds                 2             0.52%        1              6.25%
Greg Kroah-Hartman             1             0.26%        1              6.25%
Arun Sharma                    1             0.26%        1              6.25%
------------------------  ------  ----------------  -------  -----------------
Total                        387           100.00%       16            100.00%


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

/* On PA-RISC, we actually have enough contexts to justify an allocator
 * for them.  prumpf */

extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);

#define init_new_context init_new_context
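/*
 * A new address space gets its own space ID.  The mm is brand new
 * at this point, so it must have exactly one user.
 */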
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_users) != 1);

	mm->context.space_id = alloc_sid();
	return 0;
}

#define destroy_context destroy_context
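/* Return the space ID when the address space is torn down. */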
static inline void
destroy_context(struct mm_struct *mm)
{
	free_sid(mm->context.space_id);
	mm->context.space_id = 0;
}

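/*
 * Convert a space ID into the value for the protection ID register
 * (%cr8).  Bit 0 of a protection ID is the write-disable bit, so the
 * ID itself starts at bit 1.  With a non-zero SPACEID_SHIFT (64-bit),
 * the stored space ID is pre-shifted left by SPACEID_SHIFT, so both
 * branches evaluate to the raw space ID shifted left by one.
 */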
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
	return context.space_id << 1;
#else
	return context.space_id >> (SPACEID_SHIFT - 1);
#endif
}

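/*
 * Make a context current on this CPU: load its space ID into the user
 * space register and the matching protection ID into %cr8.
 */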
static inline void load_context(mm_context_t context)
{
	mtsp(context.space_id, SR_USER);
	mtctl(__space_to_prot(context), 8);
}

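/*
 * Switch this CPU over to the next address space.  The caller must
 * have interrupts disabled.
 */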
static inline void switch_mm_irqs_off(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
		/*
		 * Put the physical address of page_table_lock into %cr28
		 * (tr4) for the TLB fault handlers.
		 */
		spinlock_t *pgd_lock = &next->page_table_lock;
		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
		/* pgd physical address for the TLB fault handlers, in %cr25 */
		mtctl(__pa(next->pgd), 25);
		load_context(next->context);
	}
}

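/*
 * Interrupt-safe wrapper around switch_mm_irqs_off() that also skips
 * the switch entirely when the address space is unchanged.
 */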
static inline void switch_mm(struct mm_struct *prev,
		struct mm_struct *next, struct task_struct *tsk)
{
	unsigned long flags;

	if (prev == next)
		return;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * activate_mm() is our one chance to allocate a space ID
	 * for a new mm created in the exec path.  There is also
	 * some lazy TLB code, which is currently dead, but since
	 * we only allocate a space ID if one has not been allocated
	 * already, we should be OK.
	 */

	BUG_ON(next == &init_mm); /* Should never happen */

	if (next->context.space_id == 0)
		next->context.space_id = alloc_sid();

	switch_mm(prev, next, current);
}

#include <asm-generic/mmu_context.h>

#endif /* __PARISC_MMU_CONTEXT_H */