cregit-Linux: how code gets into the kernel

Release 4.14 arch/x86/include/asm/qspinlock.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H

#define _ASM_X86_QSPINLOCK_H

#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>


#define	queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */

/*
 * Native unlock: a smp_store_release() on the least-significant byte
 * of the lock word clears _Q_LOCKED_VAL and publishes the critical
 * section to the next waiter.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release((u8 *)lock, 0);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Waiman Long     22      95.65%   1        50.00%
Peter Zijlstra  1       4.35%    1        50.00%
Total           23      100.00%  2        100.00%

#ifdef CONFIG_PARAVIRT_SPINLOCKS
/* Native and paravirt slowpath entry points, defined in kernel/locking/. */
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
/* Dispatch the contended-lock slowpath through the paravirt ops table. */
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Peter Zijlstra  22      100.00%  1        100.00%
Total           22      100.00%  1        100.00%


/* Paravirt build: unlock goes through the pv ops indirection. */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Peter Zijlstra  17      100.00%  1        100.00%
Total           17      100.00%  1        100.00%

#define vcpu_is_preempted vcpu_is_preempted
/* Ask the hypervisor (via pv ops) whether @cpu's vCPU is preempted. */
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Peter Zijlstra  15      93.75%   1        50.00%
Waiman Long     1       6.25%    1        50.00%
Total           16      100.00%  2        100.00%

#else
/* Non-paravirt build: unlock is the plain native store-release. */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Peter Zijlstra  17      100.00%  1        100.00%
Total           17      100.00%  1        100.00%

#endif #ifdef CONFIG_PARAVIRT #define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock) { if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) return false; /* * On hypervisors without PARAVIRT_SPINLOCKS support we fall * back to a Test-and-Set spinlock, because fair locks have * horrible lock 'holder' preemption issues. */ do { while (atomic_read(&lock->val) != 0) cpu_relax(); } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0); return true; }

Contributors

Person          Tokens  Prop     Commits  CommitProp
Peter Zijlstra  62      100.00%  2        100.00%
Total           62      100.00%  2        100.00%

#endif /* CONFIG_PARAVIRT */ #include <asm-generic/qspinlock.h> #endif /* _ASM_X86_QSPINLOCK_H */

Overall Contributors

Person              Tokens  Prop     Commits  CommitProp
Peter Zijlstra      204     82.26%   5        62.50%
Waiman Long         43      17.34%   2        25.00%
Greg Kroah-Hartman  1       0.40%    1        12.50%
Total               248     100.00%  8        100.00%
The information on this website is provided for historical purposes only and does not indicate or represent copyright ownership.
Created with cregit.