Linux 4.7: arch/sh/include/asm/spinlock.h
/*
* include/asm-sh/spinlock.h
*
* Copyright (C) 2002, 2003 Paul Mundt
* Copyright (C) 2006, 2007 Akio Idehara
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#ifndef __ASM_SH_SPINLOCK_H
#define __ASM_SH_SPINLOCK_H
/*
* The only locking implemented here uses SH-4A opcodes. For others,
* split this out as per atomic-*.h.
*/
#ifndef CONFIG_CPU_SH4A
#error "Need movli.l/movco.l for spinlocks"
#endif
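/*
 * Editor's note: movli.l/movco.l form a load-linked/store-conditional
 * pair. For readers more familiar with standard C, the acquire loop in
 * arch_spin_lock() below is roughly equivalent to this hedged,
 * standalone C11 sketch (hypothetical names, not part of the original
 * header; guarded out so it is never built):
 */
#if 0
#include <stdatomic.h>

typedef struct { atomic_int lock; } demo_spinlock_t;	/* 1 = unlocked */

static void demo_spin_lock(demo_spinlock_t *l)
{
	int old;

	do {
		old = 1;	/* expect "unlocked"; CAS plays the role of movli.l/movco.l */
	} while (!atomic_compare_exchange_weak_explicit(&l->lock, &old, 0,
							memory_order_acquire,
							memory_order_relaxed));
}
#endif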
/*
 * Your basic SMP spinlocks, allowing only a single CPU into the
 * critical section at any time.
 */
#define arch_spin_is_locked(x) ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(x) \
do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
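/*
 * Editor's note: the convention throughout this file is that a spinlock
 * value of 1 means "unlocked" and 0 means "held";
 * arch_spin_is_locked() above tests exactly that.
 */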
/*
 * Simple spin lock operations. There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	/*
	 * Spin until the movli.l/movco.l pair both succeeds and saw a
	 * positive (unlocked) old value; 0 is stored to take the lock.
	 */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
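/*
 * Editor's note: the "two variants" mentioned above are exposed by the
 * generic kernel API as spin_lock() versus spin_lock_irqsave(). Hedged
 * usage sketch (hypothetical caller, not part of the original header):
 */
#if 0
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_variants(void)
{
	unsigned long flags;

	spin_lock(&demo_lock);			/* local IRQs left as-is */
	spin_unlock(&demo_lock);

	spin_lock_irqsave(&demo_lock, flags);	/* local IRQs disabled */
	spin_unlock_irqrestore(&demo_lock, flags);
}
#endif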
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	/*
	 * Unconditionally store 0 via LL/SC and return the old value:
	 * a positive result means the lock was free and is now held.
	 */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}
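/*
 * Editor's note: since arch_spin_trylock() returns the value the lock
 * held before the store, a nonzero return means the lock was free and
 * is now ours. Hedged usage sketch (hypothetical caller, not part of
 * the original header):
 */
#if 0
static void demo_trylock_user(arch_spinlock_t *lock)
{
	if (arch_spin_trylock(lock)) {
		/* lock acquired: critical section here */
		arch_spin_unlock(lock);
	} else {
		/* contended: back off or do other work */
	}
}
#endif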
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks: any writer
 * needs to take an irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 */
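/*
 * Editor's note: a hedged sketch of the "mixed" pattern described above,
 * using the generic kernel rwlock API (hypothetical data and handlers,
 * not part of the original header):
 */
#if 0
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_RWLOCK(demo_rwlock);
static int demo_data;

/* Reader may run in interrupt context; no need to disable IRQs. */
static irqreturn_t demo_irq_handler(int irq, void *dev)
{
	read_lock(&demo_rwlock);
	/* ... read demo_data ... */
	read_unlock(&demo_rwlock);
	return IRQ_HANDLED;
}

/* Writer must be irq-safe, or a reader taken in an IRQ could deadlock. */
static void demo_update(int val)
{
	unsigned long flags;

	write_lock_irqsave(&demo_rwlock, flags);
	demo_data = val;
	write_unlock_irqrestore(&demo_rwlock, flags);
}
#endif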
/**
 * arch_read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((x)->lock > 0)
/**
 * arch_write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
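/*
 * Editor's note: the rwlock counter starts at RW_LOCK_BIAS when free.
 * Each reader subtracts 1 and a writer subtracts the whole bias, so:
 *
 *   lock == RW_LOCK_BIAS        -> free (write_can_lock)
 *   0 < lock < RW_LOCK_BIAS     -> held by (RW_LOCK_BIAS - lock) readers
 *   lock == 0                   -> held by a writer
 */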
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Wait for a positive count (no writer), then take one reader slot. */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/*
	 * Wait until the count is back at RW_LOCK_BIAS (no readers, no
	 * writer), then subtract the whole bias to claim write ownership.
	 */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	/*
	 * If the count is positive, take one reader slot; otherwise bail
	 * out at 2: without attempting the store. Nonzero return means
	 * the read lock was acquired.
	 */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	/*
	 * If the lock is free (count >= RW_LOCK_BIAS), subtract the bias
	 * to claim it; on the failure path the movco.l at 2: writes the
	 * unmodified value back, closing the LL/SC pair without effect.
	 */
	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SH_SPINLOCK_H */