Contributors: 18
Author Tokens Token Proportion Commits Commit Proportion
Ingo Molnar 123 33.70% 4 14.81%
Rusty Russell 58 15.89% 2 7.41%
Glauber de Oliveira Costa 57 15.62% 1 3.70%
Tom Lendacky 26 7.12% 1 3.70%
Juergen Gross 20 5.48% 3 11.11%
Thomas Gleixner 17 4.66% 2 7.41%
Peter Zijlstra 14 3.84% 2 7.41%
David Howells 12 3.29% 1 3.70%
Andrew Morton 9 2.47% 1 3.70%
Boris Ostrovsky 6 1.64% 1 3.70%
Andi Kleen 5 1.37% 1 3.70%
Nick Desaulniers 4 1.10% 2 7.41%
Denys Vlasenko 3 0.82% 1 3.70%
Steven Rostedt 3 0.82% 1 3.70%
Joe Perches 3 0.82% 1 3.70%
H. Peter Anvin 2 0.55% 1 3.70%
Linus Torvalds (pre-git) 2 0.55% 1 3.70%
Greg Kroah-Hartman 1 0.27% 1 3.70%
Total 365 27


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
/*
 * Read the current EFLAGS register by pushing it onto the stack and
 * popping it into a register/memory operand.  Used as the native
 * (non-paravirt) implementation of arch_local_save_flags().
 *
 * Note the unusual "extern inline" (not "static"): an out-of-line copy
 * is emitted elsewhere so this symbol can be referenced by address
 * (e.g. by paravirt patching) — do not change to static.
 */
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}

/*
 * Disable interrupt delivery on the local CPU (clear EFLAGS.IF via "cli").
 * The "memory" clobber acts as a compiler barrier so memory accesses are
 * not reordered across the interrupt-state change.
 */
static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

/*
 * Enable interrupt delivery on the local CPU (set EFLAGS.IF via "sti").
 * The "memory" clobber acts as a compiler barrier so memory accesses are
 * not reordered across the interrupt-state change.
 */
static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

/*
 * Enable interrupts and halt in one go.  "sti" takes effect only after
 * the following instruction (the STI interrupt shadow), so "sti; hlt"
 * has no window in which an interrupt could be taken before the halt —
 * a pending interrupt will reliably wake the CPU from "hlt".
 *
 * CPU buffers are cleared first (MDS mitigation — see
 * mds_idle_clear_cpu_buffers()) so stale data does not linger while idle.
 */
static __always_inline void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}

/*
 * Halt the CPU without touching the interrupt flag.  With interrupts
 * disabled this halts until NMI/SMI/INIT/reset; with them enabled, until
 * the next interrupt.  CPU buffers are cleared first (MDS mitigation).
 */
static __always_inline void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}

/*
 * Decode a saved EFLAGS value: interrupts are disabled iff the IF bit
 * is clear.  Returns non-zero when @flags describes a disabled state.
 */
static __always_inline int native_irqs_disabled_flags(unsigned long flags)
{
	return (flags & X86_EFLAGS_IF) == 0;
}

/*
 * Snapshot EFLAGS, then mask interrupts on the local CPU.  The returned
 * snapshot is meant to be handed back to native_local_irq_restore().
 */
static __always_inline unsigned long native_local_irq_save(void)
{
	unsigned long saved_flags = native_save_fl();

	native_irq_disable();
	return saved_flags;
}

static __always_inline void native_local_irq_restore(unsigned long flags)
{
	if (!native_irqs_disabled_flags(flags))
		native_irq_enable();
}

#endif

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

/*
 * Native (non-CONFIG_PARAVIRT_XXL) build: read EFLAGS directly from
 * the hardware via native_save_fl().
 */
static __always_inline unsigned long arch_local_save_flags(void)
{
	unsigned long fl = native_save_fl();

	return fl;
}

/*
 * Native build: disable local interrupts with "cli".
 * (Under CONFIG_PARAVIRT_XXL this comes from <asm/paravirt.h> instead.)
 */
static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}

/*
 * Native build: enable local interrupts with "sti".
 * (Under CONFIG_PARAVIRT_XXL this comes from <asm/paravirt.h> instead.)
 */
static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete, so "sti; hlt" cannot lose a wakeup interrupt:
 */
static __always_inline void arch_safe_halt(void)
{
	native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shutdown the processor (halts without changing EFLAGS.IF):
 */
static __always_inline void halt(void)
{
	native_halt();
}

/*
 * For spinlocks, etc: capture the current EFLAGS and then disable
 * interrupts.  Pair with arch_local_irq_restore() to undo.
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
	unsigned long saved = arch_local_save_flags();

	arch_local_irq_disable();

	return saved;
}
#else

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Assembly-side helper: save EFLAGS into %rax (native, non-paravirt). */
#define SAVE_FLAGS		pushfq; popq %rax
#endif

#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
/*
 * Decode a saved EFLAGS value from arch_local_save_flags()/
 * arch_local_irq_save(): non-zero when IF is clear, i.e. interrupts
 * were disabled.
 */
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & X86_EFLAGS_IF) == 0;
}

/*
 * Non-zero when interrupts are currently disabled on this CPU,
 * determined from the live EFLAGS value.
 */
static __always_inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
	if (!arch_irqs_disabled_flags(flags))
		arch_local_irq_enable();
}
#endif /* !__ASSEMBLY__ */

#endif