Release 4.11 arch/sparc/include/asm/spinlock_64.h
/* spinlock.h: 64-bit Sparc spinlock support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/barrier.h>
/* To get debugging spinlocks which detect and catch
* deadlock situations, set CONFIG_DEBUG_SPINLOCK
* and rebuild your kernel.
*/
/* Because we play games to save cycles in the non-contention case, we
* need to be extra careful about branch targets into the "spinning"
* code. They live in their own section, but the newer V9 branches
* have a shorter range than the traditional 32-bit sparc branch
* variants. The rule is that the branches that go into and out of
* the spinner sections must be pre-V9 branches.
*/
/* Lock-held test: the lock byte is non-zero while held (ldstub in the
 * lock paths writes 0xff into it; unlock stores zero).  This is a plain
 * racy read — only a snapshot, not a synchronizing operation.
 */
#define arch_spin_is_locked(lp) ((lp)->lock != 0)
/* Busy-wait until the lock is observed free (lock byte == 0) without
 * acquiring it.  smp_cond_load_acquire() spins reading lock->lock until
 * the condition (!VAL) holds and provides ACQUIRE ordering, so accesses
 * after this call cannot be reordered before the lock is seen unlocked.
 */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
smp_cond_load_acquire(&lock->lock, !VAL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 19 | 86.36% | 1 | 33.33% |
Sam Ravnborg | 2 | 9.09% | 1 | 33.33% |
Thomas Gleixner | 1 | 4.55% | 1 | 33.33% |
Total | 22 | 100.00% | 3 | 100.00% |
/* Acquire the spinlock, spinning until it is free.
 *
 * Fast path: ldstub atomically writes 0xff into the lock byte and
 * returns the previous value in %0; a zero result means we took the
 * lock.  The contended path is placed in .subsection 2 to keep the hot
 * path compact; it spins with plain byte loads (ldub) until the lock
 * reads free, then branches back to retry the atomic ldstub.  Per the
 * file-top comment, branches into/out of the spinner section must stay
 * within pre-V9 branch range.  The "memory" clobber orders this asm
 * against surrounding compiler-visible accesses.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned long tmp;
__asm__ __volatile__(
/* try to grab the lock byte atomically */
"1: ldstub [%1], %0\n"
" brnz,pn %0, 2f\n"
" nop\n"
" .subsection 2\n"
/* out-of-line spin: read-only wait until the byte clears */
"2: ldub [%1], %0\n"
" brnz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 1b\n"
" .previous"
: "=&r" (tmp)
: "r" (lock)
: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 15 | 83.33% | 1 | 25.00% |
Thomas Gleixner | 2 | 11.11% | 2 | 50.00% |
David S. Miller | 1 | 5.56% | 1 | 25.00% |
Total | 18 | 100.00% | 4 | 100.00% |
/* Try once to acquire the spinlock without spinning.
 *
 * A single ldstub atomically sets the lock byte to 0xff and yields the
 * previous value; returns 1 if the byte was previously 0 (lock taken),
 * 0 if it was already held.  No retry loop — callers handle failure.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned long result;
__asm__ __volatile__(
" ldstub [%1], %0\n"
: "=r" (result)
: "r" (lock)
: "memory");
return (result == 0UL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 22 | 88.00% | 1 | 25.00% |
Thomas Gleixner | 2 | 8.00% | 2 | 50.00% |
David S. Miller | 1 | 4.00% | 1 | 25.00% |
Total | 25 | 100.00% | 4 | 100.00% |
/* Release the spinlock by storing zero into the lock byte.
 *
 * The "memory" clobber is a compiler barrier keeping prior accesses
 * from sinking below the store.  NOTE(review): no explicit hardware
 * barrier here — presumably the store ordering guarantees of sparc64's
 * memory model make one unnecessary; confirm against the architecture
 * manual before relying on this elsewhere.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__asm__ __volatile__(
" stb %%g0, [%0]"
: /* No outputs */
: "r" (lock)
: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 13 | 81.25% | 1 | 25.00% |
Thomas Gleixner | 2 | 12.50% | 2 | 50.00% |
David S. Miller | 1 | 6.25% | 1 | 25.00% |
Total | 16 | 100.00% | 4 | 100.00% |
/* Acquire the spinlock; while spinning, restore the caller's saved
 * interrupt state (@flags) so interrupts may be serviced during the
 * wait.
 *
 * Fast path is the same ldstub attempt as arch_spin_lock().  On
 * contention the out-of-line code saves the current %pil into %1,
 * writes @flags (%3) into %pil, spins with plain loads at label 3,
 * and on the way back to the retry (label 1) restores the saved %pil
 * in the branch delay slot — so the lock is always re-attempted with
 * interrupts disabled again.
 */
static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
"1: ldstub [%2], %0\n"
" brnz,pn %0, 2f\n"
" nop\n"
" .subsection 2\n"
/* save current PIL, drop to the caller's interrupt level */
"2: rdpr %%pil, %1\n"
" wrpr %3, %%pil\n"
"3: ldub [%2], %0\n"
" brnz,pt %0, 3b\n"
" nop\n"
" ba,pt %%xcc, 1b\n"
/* delay slot: restore the saved PIL before retrying */
" wrpr %1, %%pil\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r"(lock), "r"(flags)
: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 21 | 87.50% | 1 | 25.00% |
Thomas Gleixner | 2 | 8.33% | 2 | 50.00% |
David S. Miller | 1 | 4.17% | 1 | 25.00% |
Total | 24 | 100.00% | 4 | 100.00% |
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
/* Take the rwlock for reading.
 *
 * The lock word is a reader count with the top bit (0x80000000) used
 * as the writer flag; ldsw sign-extends, so a negative value means a
 * writer holds the lock.  Fast path: load, increment, and cas the new
 * count in; if the cas lost a race (old value changed), retry from 1.
 * If a writer is seen, spin out-of-line at label 2 with plain loads
 * until the word goes non-negative, then resume the cas attempt at 4.
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__ (
"1: ldsw [%2], %0\n"
" brlz,pn %0, 2f\n"
"4: add %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" .subsection 2\n"
/* writer active: wait read-only until it releases */
"2: ldsw [%2], %0\n"
" brlz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 4b\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 16 | 80.00% | 1 | 20.00% |
Tobias Klauser | 1 | 5.00% | 1 | 20.00% |
David S. Miller | 1 | 5.00% | 1 | 20.00% |
Thomas Gleixner | 1 | 5.00% | 1 | 20.00% |
Heiko Carstens | 1 | 5.00% | 1 | 20.00% |
Total | 20 | 100.00% | 5 | 100.00% |
/* Try to take the rwlock for reading; returns 1 on success, 0 if a
 * writer currently holds it.
 *
 * ldsw sign-extends the lock word, so negative means writer-held.  The
 * annulled branch (brlz,a) executes its delay slot only when taken,
 * loading 0 into %0 as the failure return before jumping to 2.
 * Otherwise the reader count is incremented via cas; a lost cas race
 * retries from 1 (it does not give up), and the bne delay slot sets
 * the success return value 1.
 */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
int tmp1, tmp2;
__asm__ __volatile__ (
"1: ldsw [%2], %0\n"
" brlz,a,pn %0, 2f\n"
" mov 0, %0\n"
" add %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" mov 1, %0\n"
"2:"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
: "memory");
return tmp1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 18 | 81.82% | 1 | 20.00% |
David S. Miller | 1 | 4.55% | 1 | 20.00% |
Heiko Carstens | 1 | 4.55% | 1 | 20.00% |
Thomas Gleixner | 1 | 4.55% | 1 | 20.00% |
Tobias Klauser | 1 | 4.55% | 1 | 20.00% |
Total | 22 | 100.00% | 5 | 100.00% |
/* Drop a read hold: atomically decrement the reader count.
 *
 * Classic load / compute / cas loop — if another CPU changed the word
 * between the lduw and the cas (cas returns the old memory value in
 * %1, compared against our expected %0), retry until the decrement
 * lands.
 */
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
unsigned long tmp1, tmp2;
__asm__ __volatile__(
"1: lduw [%2], %0\n"
" sub %0, 1, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%xcc, 1b\n"
" nop"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock)
: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 16 | 80.00% | 1 | 20.00% |
Tobias Klauser | 1 | 5.00% | 1 | 20.00% |
Heiko Carstens | 1 | 5.00% | 1 | 20.00% |
Thomas Gleixner | 1 | 5.00% | 1 | 20.00% |
David S. Miller | 1 | 5.00% | 1 | 20.00% |
Total | 20 | 100.00% | 5 | 100.00% |
/* Take the rwlock for writing.
 *
 * A writer needs the whole lock word to be zero (no readers, no other
 * writer); it then cas-installs the writer bit 0x80000000.  If the
 * word is busy, spin out-of-line at label 2 with plain loads until it
 * clears, then re-attempt the cas at label 4 (the or sits in the
 * fast-path branch delay slot, so it runs on both paths).  A lost cas
 * race retries from 1.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2;
mask = 0x80000000UL;
__asm__ __volatile__(
"1: lduw [%2], %0\n"
" brnz,pn %0, 2f\n"
"4: or %0, %3, %1\n"
" cas [%2], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" .subsection 2\n"
/* lock busy: wait read-only until the word drops to zero */
"2: lduw [%2], %0\n"
" brnz,pt %0, 2b\n"
" nop\n"
" ba,a,pt %%xcc, 4b\n"
" .previous"
: "=&r" (tmp1), "=&r" (tmp2)
: "r" (lock), "r" (mask)
: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 22 | 84.62% | 1 | 20.00% |
Thomas Gleixner | 1 | 3.85% | 1 | 20.00% |
Heiko Carstens | 1 | 3.85% | 1 | 20.00% |
David S. Miller | 1 | 3.85% | 1 | 20.00% |
Tobias Klauser | 1 | 3.85% | 1 | 20.00% |
Total | 26 | 100.00% | 5 | 100.00% |
/* Release a write hold: store zero over the whole lock word, clearing
 * the writer bit (the reader count is necessarily zero while a writer
 * holds the lock).  The "memory" clobber keeps the critical section's
 * accesses from sinking below the store at the compiler level.
 */
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
__asm__ __volatile__(
" stw %%g0, [%0]"
: /* no outputs */
: "r" (lock)
: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 12 | 75.00% | 1 | 20.00% |
David S. Miller | 1 | 6.25% | 1 | 20.00% |
Thomas Gleixner | 1 | 6.25% | 1 | 20.00% |
Heiko Carstens | 1 | 6.25% | 1 | 20.00% |
Tobias Klauser | 1 | 6.25% | 1 | 20.00% |
Total | 16 | 100.00% | 5 | 100.00% |
/* Try to take the rwlock for writing; returns 1 on success, 0 if the
 * lock word is non-zero (readers active or writer held).
 *
 * result starts at 0.  If the word is non-zero we bail straight to 2
 * with failure.  Otherwise cas-install the writer bit 0x80000000; a
 * lost cas race loops back to 1 and re-examines the word (so it only
 * gives up when the lock is genuinely busy).  result is set to 1 once
 * the cas succeeds.
 */
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
unsigned long mask, tmp1, tmp2, result;
mask = 0x80000000UL;
__asm__ __volatile__(
" mov 0, %2\n"
"1: lduw [%3], %0\n"
" brnz,pn %0, 2f\n"
" or %0, %4, %1\n"
" cas [%3], %0, %1\n"
" cmp %0, %1\n"
" bne,pn %%icc, 1b\n"
" nop\n"
" mov 1, %2\n"
"2:"
: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
: "r" (lock), "r" (mask)
: "memory");
return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 27 | 87.10% | 1 | 20.00% |
David S. Miller | 1 | 3.23% | 1 | 20.00% |
Thomas Gleixner | 1 | 3.23% | 1 | 20.00% |
Heiko Carstens | 1 | 3.23% | 1 | 20.00% |
Tobias Klauser | 1 | 3.23% | 1 | 20.00% |
Total | 31 | 100.00% | 5 | 100.00% |
/* The IRQ-flags rwlock variants ignore flags on sparc64. */
#define arch_read_lock_flags(p, f) arch_read_lock(p)
#define arch_write_lock_flags(p, f) arch_write_lock(p)
/* Racy snapshot checks: top bit of the lock word is the writer flag,
 * the low bits are the reader count.
 */
#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL))
#define arch_write_can_lock(rw) (!(rw)->lock)
/* Spin-wait relax hooks: just a CPU-friendly pause. */
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_SPINLOCK_H) */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sam Ravnborg | 238 | 73.23% | 1 | 9.09% |
Peter Zijlstra | 25 | 7.69% | 1 | 9.09% |
Thomas Gleixner | 23 | 7.08% | 4 | 36.36% |
Robin Holt | 14 | 4.31% | 1 | 9.09% |
David S. Miller | 11 | 3.38% | 2 | 18.18% |
Heiko Carstens | 8 | 2.46% | 1 | 9.09% |
Tobias Klauser | 6 | 1.85% | 1 | 9.09% |
Total | 325 | 100.00% | 11 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.