Contributors: 19
Author Tokens Token Proportion Commits Commit Proportion
Anton Blanchard 133 22.89% 9 21.43%
Paul Mackerras 127 21.86% 5 11.90%
Michael Ellerman 86 14.80% 3 7.14%
David Howells 65 11.19% 1 2.38%
Christophe Leroy 32 5.51% 1 2.38%
Cyril Bur 30 5.16% 3 7.14%
Linus Torvalds (pre-git) 30 5.16% 3 7.14%
Nicholas Piggin 23 3.96% 4 9.52%
Kevin Hao 12 2.07% 1 2.38%
Bharat Bhushan 7 1.20% 1 2.38%
Linus Torvalds 7 1.20% 1 2.38%
Michael Neuling 6 1.03% 1 2.38%
Vitaly Bordug 5 0.86% 2 4.76%
Daniel Axtens 4 0.69% 2 4.76%
Sukadev Bhattiprolu 4 0.69% 1 2.38%
Leonidas Da Silva Barbosa 3 0.52% 1 2.38%
Magali Lemes 3 0.52% 1 2.38%
Johannes Berg 3 0.52% 1 2.38%
Greg Kroah-Hartman 1 0.17% 1 2.38%
Total 581 42


/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SWITCH_TO_H
#define _ASM_POWERPC_SWITCH_TO_H

#include <linux/sched.h>
#include <asm/reg.h>

/* Forward declarations: only pointers to these are used in this header. */
struct thread_struct;
struct task_struct;
struct pt_regs;

/*
 * Low-level context-switch primitive.  __switch_to() returns the task that
 * was running before the switch; the generic switch_to() macro stores it
 * into "last" for the scheduler.
 */
extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

/* Inner switch on thread_struct level; called with prev/next thread state. */
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

/* Load a new set of BookE debug registers (definition elsewhere). */
extern void switch_booke_debug_regs(struct debug_reg *new_debug);

/* Emulate an AltiVec instruction from an exception frame; returns status. */
extern int emulate_altivec(struct pt_regs *);

#ifdef CONFIG_PPC_BOOK3S_64
void restore_math(struct pt_regs *regs);
#else
/* Only Book3S-64 has a restore_math() implementation; no-op elsewhere. */
static inline void restore_math(struct pt_regs *regs)
{
}
#endif

/* Restore transactional-memory related state on exception return. */
void restore_tm_state(struct pt_regs *regs);

/* Flush all live register state (FP/VEC/...) of a task to its thread_struct,
 * or give it up entirely (definitions elsewhere). */
extern void flush_all_to_thread(struct task_struct *);
extern void giveup_all(struct task_struct *);

#ifdef CONFIG_PPC_FPU
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void save_fpu(struct task_struct *);
/* End a kernel FP section: clear MSR_FP again via msr_check_and_clear(). */
static inline void disable_kernel_fp(void)
{
	msr_check_and_clear(MSR_FP);
}
#else
/* No FPU configured: FP save/flush are no-ops ... */
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
/* ... and any call to enable_kernel_fp() is a compile-time error.
 * NOTE(review): unlike the ALTIVEC/VSX sections below, no disable_kernel_fp()
 * stub is provided here — presumably no !CONFIG_PPC_FPU caller needs it. */
static inline void enable_kernel_fp(void)
{
	BUILD_BUG();
}
#endif

#ifdef CONFIG_ALTIVEC
extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void save_altivec(struct task_struct *);
/* End a kernel AltiVec section: clear MSR_VEC again. */
static inline void disable_kernel_altivec(void)
{
	msr_check_and_clear(MSR_VEC);
}
#else
/* No AltiVec configured: save/giveup are no-ops ...
 * NOTE(review): this branch stubs __giveup_altivec() while the CONFIG_ALTIVEC
 * branch declares giveup_altivec() (no underscores) — the double-underscore
 * variant is presumably declared elsewhere for the enabled case; verify. */
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
/* ... and enable/disable are compile-time errors if ever referenced. */
static inline void enable_kernel_altivec(void)
{
	BUILD_BUG();
}

static inline void disable_kernel_altivec(void)
{
	BUILD_BUG();
}
#endif

#ifdef CONFIG_VSX
extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);
/* End a kernel VSX section: VSX use implies FP and VEC, so clear all three. */
static inline void disable_kernel_vsx(void)
{
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
#else
/* No VSX configured: any reference to these is a compile-time error. */
static inline void enable_kernel_vsx(void)
{
	BUILD_BUG();
}

static inline void disable_kernel_vsx(void)
{
	BUILD_BUG();
}
#endif

#ifdef CONFIG_SPE
extern void enable_kernel_spe(void);
extern void flush_spe_to_thread(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void __giveup_spe(struct task_struct *);
/* End a kernel SPE section: clear MSR_SPE again. */
static inline void disable_kernel_spe(void)
{
	msr_check_and_clear(MSR_SPE);
}
#else
/* No SPE configured: giving up SPE state is a no-op. */
static inline void __giveup_spe(struct task_struct *t) { }
#endif

/*
 * Clear all Event-Based Branch (EBB) and related PMU SPR state cached in a
 * task's thread_struct.  EBB perf events are not inherited across fork, so
 * a child must start with this state zeroed.  Only Book3S-64 carries these
 * fields; elsewhere this is a no-op.
 */
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/* EBB perf events are not inherited, so clear all EBB state. */
	t->thread.ebbrr = 0;
	t->thread.ebbhr = 0;
	t->thread.bescr = 0;
	t->thread.mmcr2 = 0;
	t->thread.mmcr0 = 0;
	t->thread.siar = 0;
	t->thread.sdar = 0;
	t->thread.sier = 0;
	t->thread.used_ebb = 0;
#endif
}

/* KVM hooks: save current user register / SPR state (definitions elsewhere). */
void kvmppc_save_user_regs(void);
void kvmppc_save_current_sprs(void);

/* Assign a Thread-ID register (TIDR) value to a task; returns status. */
extern int set_thread_tidr(struct task_struct *t);

#endif /* _ASM_POWERPC_SWITCH_TO_H */