Contributors: 12

Author               Tokens  Token %  Commits  Commit %
Ingo Molnar             156   52.70%        6    26.09%
Peter Zijlstra           98   33.11%        5    21.74%
Jeremy Fitzhardinge      20    6.76%        1     4.35%
Thomas Gleixner           5    1.69%        2     8.70%
Rafael J. Wysocki         5    1.69%        2     8.70%
David Howells             4    1.35%        1     4.35%
Arjan van de Ven          2    0.68%        1     4.35%
Nicholas Piggin           2    0.68%        1     4.35%
Mel Gorman                1    0.34%        1     4.35%
Tim Chen                  1    0.34%        1     4.35%
Kirill V Tkhai            1    0.34%        1     4.35%
Greg Kroah-Hartman        1    0.34%        1     4.35%
Total                   296                23


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_IDLE_H
#define _LINUX_SCHED_IDLE_H

#include <linux/sched.h>

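/*
 * Idle types used by the scheduler's load balancer to describe the
 * state of the CPU on whose behalf balancing runs. __CPU_NOT_IDLE is
 * deliberately 0 so that "some flavor of idle" can be tested as a
 * plain truth value; the underscore prefix discourages using it
 * directly.
 */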
enum cpu_idle_type {
	__CPU_NOT_IDLE = 0,
	CPU_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

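/*
 * wake_up_if_idle() kicks @cpu out of its idle loop, but only if it
 * is actually idle. On UP builds there is no remote CPU to wake, so
 * it degenerates to a no-op.
 */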
#ifdef CONFIG_SMP
extern void wake_up_if_idle(int cpu);
#else
static inline void wake_up_if_idle(int cpu) { }
#endif

/*
 * Idle-thread-specific helpers to manage the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG

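/*
 * When the instrumented bitops header is in scope, use the raw
 * arch_*() bitops instead of set_bit()/clear_bit(): these flag flips
 * happen on the low-level idle path, where sanitizer instrumentation
 * (KASAN/KCSAN) must stay out of the way.
 */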
#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H

static __always_inline void __current_set_polling(void)
{
	arch_set_bit(TIF_POLLING_NRFLAG,
		     (unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	arch_clear_bit(TIF_POLLING_NRFLAG,
		       (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline void __current_set_polling(void)
{
	set_bit(TIF_POLLING_NRFLAG,
		(unsigned long *)(&current_thread_info()->flags));
}

static __always_inline void __current_clr_polling(void)
{
	clear_bit(TIF_POLLING_NRFLAG,
		  (unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */

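/*
 * Set the polling bit, then check whether NEED_RESCHED was already
 * set. A true return means a reschedule is pending and the caller
 * must not enter the polling idle wait.
 */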
static __always_inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * pairs with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

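/*
 * Clear the polling bit, then check whether NEED_RESCHED arrived
 * while we were still advertising polling; the barrier pairing is
 * the same as above, for the opposite transition.
 */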
static __always_inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED;
	 * pairs with the barrier in resched_curr().
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else /* !TIF_POLLING_NRFLAG */
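/*
 * Without TIF_POLLING_NRFLAG the architecture always wakes idle CPUs
 * by IPI, so there is no polling bit to manage; only the
 * NEED_RESCHED test remains.
 */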
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif /* TIF_POLLING_NRFLAG */

static __always_inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling
	 * bit. Once the bit is cleared, every new TIF_NEED_RESCHED is
	 * delivered by IPI, and the IPI handler, scheduler_ipi(), will
	 * also fold the flag into the preempt count.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}

#endif /* _LINUX_SCHED_IDLE_H */
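
For context, the canonical consumer of these helpers is a polling idle
state. The sketch below models the pattern used by poll_idle() in
drivers/cpuidle/poll_state.c; it is a simplified illustration only, the
function name example_poll_idle() is hypothetical, and the real code
adds a time limit and cpuidle bookkeeping that are omitted here.

#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/processor.h>	/* cpu_relax() */

static void example_poll_idle(void)
{
	/*
	 * Advertise polling and test NEED_RESCHED in one step: after
	 * this, a remote resched_curr() either sees our polling bit
	 * (and skips the IPI) or we see its NEED_RESCHED below.
	 */
	if (!current_set_polling_and_test()) {
		while (!need_resched())
			cpu_relax();	/* wakeup arrives as a flag, not an IPI */
	}

	/*
	 * Stop advertising polling: from here on, reschedule requests
	 * are delivered by IPI again. This also folds any pending
	 * NEED_RESCHED into the preempt count.
	 */
	current_clr_polling();
}

On the producer side, the barriers above pair with resched_curr() in
kernel/sched/core.c, whose set_nr_and_not_polling() helper sets
TIF_NEED_RESCHED and tests the polling bit in a single atomic
fetch_or(). The sketch below is a deliberately split-apart illustration
of that handshake, assuming TIF_POLLING_NRFLAG is defined; the helper
name is hypothetical, and the real code uses one fetch_or() precisely
so the store and the test cannot be separated.

/* Hypothetical, non-atomic illustration of set_nr_and_not_polling(). */
static bool example_resched_needs_ipi(struct task_struct *p)
{
	set_tsk_need_resched(p);	/* atomically store TIF_NEED_RESCHED */
	smp_mb__after_atomic();		/* pairs with the barriers above */

	/* If @p is not polling, only smp_send_reschedule() can reach it. */
	return !test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}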