
Release 4.10: arch/sparc/include/asm/spinlock_64.h

/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/processor.h>
#include <asm/barrier.h>

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* Because we play games to save cycles in the non-contention case, we
 * need to be extra careful about branch targets into the "spinning"
 * code.  They live in their own section, but the newer V9 branches
 * have a shorter range than the traditional 32-bit sparc branch
 * variants.  The rule is that the branches that go into and out of
 * the spinner sections must be pre-V9 branches.
 */


#define arch_spin_is_locked(lp)	((lp)->lock != 0)


static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
peter zijlstra          19   86.36%        1      33.33%
sam ravnborg             2    9.09%        1      33.33%
thomas gleixner          1    4.55%        1      33.33%
Total                   22  100.00%        3     100.00%
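
The arch_spin_unlock_wait() routine above simply waits, with acquire ordering, until the lock byte reads as zero. The sketch below is a rough, hypothetical approximation of what the smp_cond_load_acquire() call boils down to with the generic v4.10 fallback; the demo_* name is illustrative and not part of the kernel.

#include <linux/spinlock.h>	/* arch_spinlock_t, READ_ONCE(), barriers */
#include <asm/processor.h>	/* cpu_relax() */

/* Hypothetical illustration only: spin until the lock byte is observed
 * as zero, then enforce acquire ordering so that later loads and stores
 * are not reordered before the observed unlock.
 */
static inline void demo_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (READ_ONCE(lock->lock))
		cpu_relax();
	smp_acquire__after_ctrl_dep();
}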


static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            15   83.33%        1      25.00%
thomas gleixner          2   11.11%        2      50.00%
david s. miller          1    5.56%        1      25.00%
Total                   18  100.00%        4     100.00%


static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            22   88.00%        1      25.00%
thomas gleixner          2    8.00%        2      50.00%
david s. miller          1    4.00%        1      25.00%
Total                   25  100.00%        4     100.00%


static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            13   81.25%        1      25.00%
thomas gleixner          2   12.50%        2      50.00%
david s. miller          1    6.25%        1      25.00%
Total                   16  100.00%        4     100.00%
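
Taken together, arch_spin_lock(), arch_spin_trylock() and arch_spin_unlock() are what the generic spin_lock()/spin_trylock()/spin_unlock() API lands on for sparc64. The sketch below shows typical use through that generic layer; it is a minimal illustration in kernel context, and the demo_* identifiers are hypothetical.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical data protected by a spinlock (illustration only). */
static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_counter;

static void demo_increment(void)
{
	/* Unconditional acquire: may spin in the .subsection 2 slow path
	 * of arch_spin_lock() until the ldstub wins the lock byte.
	 */
	spin_lock(&demo_lock);
	demo_counter++;
	spin_unlock(&demo_lock);	/* stb %g0 clears the lock byte */
}

static bool demo_increment_if_free(void)
{
	/* Conditional acquire: arch_spin_trylock() issues a single ldstub
	 * and reports whether it observed the lock byte as zero.
	 */
	if (!spin_trylock(&demo_lock))
		return false;
	demo_counter++;
	spin_unlock(&demo_lock);
	return true;
}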


static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            21   87.50%        1      25.00%
thomas gleixner          2    8.33%        2      50.00%
david s. miller          1    4.17%        1      25.00%
Total                   24  100.00%        4     100.00%

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            16   80.00%        1      20.00%
tobias klauser           1    5.00%        1      20.00%
david s. miller          1    5.00%        1      20.00%
heiko carstens           1    5.00%        1      20.00%
thomas gleixner          1    5.00%        1      20.00%
Total                   20  100.00%        5     100.00%


static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov		0, %0\n"
"	add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 mov		1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            18   81.82%        1      20.00%
tobias klauser           1    4.55%        1      20.00%
david s. miller          1    4.55%        1      20.00%
thomas gleixner          1    4.55%        1      20.00%
heiko carstens           1    4.55%        1      20.00%
Total                   22  100.00%        5     100.00%


static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	sub		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            16   80.00%        1      20.00%
tobias klauser           1    5.00%        1      20.00%
david s. miller          1    5.00%        1      20.00%
heiko carstens           1    5.00%        1      20.00%
thomas gleixner          1    5.00%        1      20.00%
Total                   20  100.00%        5     100.00%


static inline void arch_write_lock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            22   84.62%        1      20.00%
thomas gleixner          1    3.85%        1      20.00%
heiko carstens           1    3.85%        1      20.00%
tobias klauser           1    3.85%        1      20.00%
david s. miller          1    3.85%        1      20.00%
Total                   26  100.00%        5     100.00%


static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	stw		%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            12   75.00%        1      20.00%
thomas gleixner          1    6.25%        1      20.00%
heiko carstens           1    6.25%        1      20.00%
david s. miller          1    6.25%        1      20.00%
tobias klauser           1    6.25%        1      20.00%
Total                   16  100.00%        5     100.00%


static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg            27   87.10%        1      20.00%
david s. miller          1    3.23%        1      20.00%
tobias klauser           1    3.23%        1      20.00%
heiko carstens           1    3.23%        1      20.00%
thomas gleixner          1    3.23%        1      20.00%
Total                   31  100.00%        5     100.00%

#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)

#define arch_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw)	(!(rw)->lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */
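
The rwlock primitives above keep the whole lock state in one 32-bit word: the reader count in the low bits and a writer-held flag in bit 31 (the 0x80000000UL mask tested by arch_write_can_lock()). Below is a minimal, hypothetical sketch of how they are normally reached through the generic read_lock()/write_lock() wrappers; the demo_* names are illustrative only and not part of this header.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical shared data guarded by a reader/writer lock. */
static DEFINE_RWLOCK(demo_rwlock);
static int demo_value;

static int demo_read_value(void)
{
	int v;

	/* Readers bump the count with the cas loop in arch_read_lock();
	 * any number of readers may enter while the writer bit is clear.
	 */
	read_lock(&demo_rwlock);
	v = demo_value;
	read_unlock(&demo_rwlock);
	return v;
}

static bool demo_set_value(int v)
{
	/* arch_write_trylock() succeeds only if the whole word was zero
	 * (no readers, no writer) and the 0x80000000UL mask could be
	 * swapped in with cas.
	 */
	if (!write_trylock(&demo_rwlock))
		return false;
	demo_value = v;
	write_unlock(&demo_rwlock);	/* stw %g0 clears the whole word */
	return true;
}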

Overall Contributors

Person              Tokens     Prop  Commits  CommitProp
sam ravnborg           238   73.23%        1       9.09%
peter zijlstra          25    7.69%        1       9.09%
thomas gleixner         23    7.08%        4      36.36%
robin holt              14    4.31%        1       9.09%
david s. miller         11    3.38%        2      18.18%
heiko carstens           8    2.46%        1       9.09%
tobias klauser           6    1.85%        1       9.09%
Total                  325  100.00%       11     100.00%