cregit-Linux: how code gets into the kernel

Release 4.12: include/linux/spinlock_api_smp.h

Directory: include/linux
#ifndef __LINUX_SPINLOCK_API_SMP_H

#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);


#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))


void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);

void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);
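These out-of-line functions sit behind the generic spin_lock()/spin_unlock() wrappers: spin_lock() expands to raw_spin_lock(), which on SMP builds calls _raw_spin_lock() unless the corresponding CONFIG_INLINE_* option below substitutes the inline variant. A minimal caller-side sketch (the demo_* names are hypothetical, not part of this header):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */
static int demo_counter;		/* hypothetical shared state */

static void demo_update(void)
{
	/* spin_lock() -> raw_spin_lock() -> _raw_spin_lock() on SMP builds */
	spin_lock(&demo_lock);
	demo_counter++;
	spin_unlock(&demo_lock);
}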

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif


static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Thomas Gleixner        24   52.17%        4      80.00%
Heiko Carstens         22   47.83%        1      20.00%
Total                  46  100.00%        5     100.00%
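__raw_spin_trylock() disables preemption, attempts the acquisition, and re-enables preemption only if the lock was not obtained, so preemption stays off exactly while the lock is held. A hedged sketch of the caller-side pattern via spin_trylock() (the demo_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_stats_lock);	/* hypothetical */
static unsigned long demo_dropped;		/* hypothetical */

static void demo_try_account(void)
{
	/* spin_trylock() returns nonzero on success and never spins */
	if (spin_trylock(&demo_stats_lock)) {
		demo_dropped++;
		spin_unlock(&demo_stats_lock);
	}
	/* on contention, fall through without blocking */
}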

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * do_raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Thomas Gleixner        43   65.15%        4      80.00%
Heiko Carstens         23   34.85%        1      20.00%
Total                  66  100.00%        5     100.00%
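__raw_spin_lock_irqsave() saves and disables local interrupts before disabling preemption and acquiring the lock, which makes the lock safe to take from both process and interrupt context. A hedged caller-side sketch using the spin_lock_irqsave() wrapper (the demo_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_fifo_lock);	/* hypothetical */
static int demo_fifo_head;		/* hypothetical */

static void demo_push(void)
{
	unsigned long flags;

	/* local interrupts stay disabled for the whole critical section */
	spin_lock_irqsave(&demo_fifo_lock, flags);
	demo_fifo_head++;
	spin_unlock_irqrestore(&demo_fifo_lock, flags);
}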


static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         23   57.50%        1      20.00%
Thomas Gleixner        17   42.50%        4      80.00%
Total                  40  100.00%        5     100.00%


static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         23   56.10%        1      16.67%
Thomas Gleixner        12   29.27%        4      66.67%
Peter Zijlstra          6   14.63%        1      16.67%
Total                  41  100.00%        6     100.00%
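__raw_spin_lock_bh() disables bottom halves (softirq processing) on the local CPU before taking the lock, so a softirq that uses the same lock cannot deadlock against a process-context holder on that CPU. A hedged caller-side sketch via spin_lock_bh() (the demo_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_rx_lock);	/* hypothetical */
static int demo_rx_pending;		/* hypothetical */

static void demo_drain_rx(void)
{
	/* softirqs stay disabled on this CPU until spin_unlock_bh() */
	spin_lock_bh(&demo_rx_lock);
	demo_rx_pending = 0;
	spin_unlock_bh(&demo_rx_lock);
}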


static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         23   62.16%        1      20.00%
Thomas Gleixner        14   37.84%        4      80.00%
Total                  37  100.00%        5     100.00%

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         26   83.87%        1      20.00%
Thomas Gleixner         5   16.13%        4      80.00%
Total                  31  100.00%        5     100.00%


static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         36   90.00%        1      20.00%
Thomas Gleixner         4   10.00%        4      80.00%
Total                  40  100.00%        5     100.00%


static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         30   88.24%        1      20.00%
Thomas Gleixner         4   11.76%        4      80.00%
Total                  34  100.00%        5     100.00%


static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         27   77.14%        1      16.67%
Peter Zijlstra          4   11.43%        1      16.67%
Thomas Gleixner         4   11.43%        4      66.67%
Total                  35  100.00%        6     100.00%


static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
	return 0;
}

Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens         41   75.93%        1      20.00%
Peter Zijlstra         10   18.52%        1      20.00%
Thomas Gleixner         3    5.56%        3      60.00%
Total                  54  100.00%        5     100.00%
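__raw_spin_trylock_bh() combines the two patterns above: bottom halves are disabled first, then re-enabled immediately if the lock could not be taken. A hedged caller-side sketch via spin_trylock_bh() (the demo_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_txq_lock);	/* hypothetical */
static int demo_txq_len;		/* hypothetical */

static void demo_flush_if_uncontended(void)
{
	/* returns nonzero with BHs disabled and the lock held */
	if (spin_trylock_bh(&demo_txq_lock)) {
		demo_txq_len = 0;
		spin_unlock_bh(&demo_txq_lock);
	}
}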

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */

Overall Contributors

Person             Tokens     Prop  Commits  CommitProp
Heiko Carstens        326   42.01%        2      14.29%
Thomas Gleixner       235   30.28%        4      28.57%
Ingo Molnar           152   19.59%        2      14.29%
Peter Zijlstra         36    4.64%        2      14.29%
Arjan van de Ven       12    1.55%        1       7.14%
Josh Triplett          11    1.42%        1       7.14%
Raghavendra K T         3    0.39%        1       7.14%
Chen Gang S             1    0.13%        1       7.14%
Total                 776  100.00%       14     100.00%
Created with cregit.