Release 4.16 arch/s390/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
* S390 version
* Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/spinlock.h"
*/
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>
#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
extern int spin_retry;
#ifndef CONFIG_SMP
/*
 * On a non-SMP kernel there is only a single CPU, so the hypervisor
 * preemption hint is meaningless: report "not preempted" always.
 */
static inline bool arch_vcpu_is_preempted(int cpu)
{
	return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christian Bornträger | 13 | 100.00% | 1 | 100.00% |
Total | 13 | 100.00% | 1 | 100.00% |
#else
bool arch_vcpu_is_preempted(int cpu);
#endif
#define vcpu_is_preempted arch_vcpu_is_preempted
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* We make no fairness assumptions. They have a cost.
*
* (the type definitions are in asm/spinlock_types.h)
*/
void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);
static inline u32 arch_spin_lockval(int cpu)
{
return cpu + 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Philipp Hachtmann | 13 | 86.67% | 1 | 50.00% |
Martin Schwidefsky | 2 | 13.33% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	/* Zero owner value means nobody holds the lock. */
	return !lock.lock;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Heiko Carstens | 16 | 94.12% | 1 | 50.00% |
Philipp Hachtmann | 1 | 5.88% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
 * Test whether the spinlock is currently held.  READ_ONCE prevents the
 * compiler from caching or refetching the lock word; a non-zero value
 * is the owner cpu's lockval, i.e. the lock is taken.
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
return READ_ONCE(lp->lock) != 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Philipp Hachtmann | 18 | 85.71% | 1 | 25.00% |
Linus Torvalds (pre-git) | 1 | 4.76% | 1 | 25.00% |
Martin Schwidefsky | 1 | 4.76% | 1 | 25.00% |
Christian Bornträger | 1 | 4.76% | 1 | 25.00% |
Total | 21 | 100.00% | 4 | 100.00% |
/*
 * Single uncontended acquisition attempt: compare-and-swap the lock word
 * from 0 (free) to this cpu's SPINLOCK_LOCKVAL.  Returns non-zero on
 * success.  The barrier() keeps the compiler from moving memory accesses
 * across the lock attempt; the cmpxchg itself provides the hardware
 * ordering.  likely() biases code layout toward the uncontended path.
 */
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
barrier();
return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 14 | 46.67% | 3 | 42.86% |
Philipp Hachtmann | 10 | 33.33% | 2 | 28.57% |
Linus Torvalds (pre-git) | 5 | 16.67% | 1 | 14.29% |
Thomas Gleixner | 1 | 3.33% | 1 | 14.29% |
Total | 30 | 100.00% | 7 | 100.00% |
/*
 * Acquire the spinlock: one fast-path compare-and-swap, then fall back
 * to the out-of-line wait loop on contention.
 */
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (arch_spin_trylock_once(lp))
		return;
	arch_spin_lock_wait(lp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Philipp Hachtmann | 15 | 62.50% | 1 | 20.00% |
Martin Schwidefsky | 7 | 29.17% | 2 | 40.00% |
Linus Torvalds (pre-git) | 1 | 4.17% | 1 | 20.00% |
Thomas Gleixner | 1 | 4.17% | 1 | 20.00% |
Total | 24 | 100.00% | 5 | 100.00% |
/*
 * Acquire the spinlock; the caller's saved interrupt flags are accepted
 * for API compatibility but deliberately unused — the wait loop does not
 * need to re-enable interrupts on this architecture.
 */
static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
					unsigned long flags)
{
	if (arch_spin_trylock_once(lp))
		return;
	arch_spin_lock_wait(lp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hisashi Hifumi | 20 | 71.43% | 1 | 20.00% |
Philipp Hachtmann | 5 | 17.86% | 1 | 20.00% |
Thomas Gleixner | 2 | 7.14% | 2 | 40.00% |
Martin Schwidefsky | 1 | 3.57% | 1 | 20.00% |
Total | 28 | 100.00% | 5 | 100.00% |
#define arch_spin_lock_flags arch_spin_lock_flags
/*
 * Try to acquire the spinlock without sleeping.  Returns 1 on success,
 * otherwise defers to the out-of-line retry helper and returns its result.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
	if (arch_spin_trylock_once(lp))
		return 1;
	return arch_spin_trylock_retry(lp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 9 | 32.14% | 1 | 16.67% |
Philipp Hachtmann | 8 | 28.57% | 1 | 16.67% |
Martin Schwidefsky | 8 | 28.57% | 2 | 33.33% |
Thomas Gleixner | 3 | 10.71% | 2 | 33.33% |
Total | 28 | 100.00% | 6 | 100.00% |
/*
 * Release the spinlock by storing a zero halfword into the low half of
 * the 32-bit lock word (clearing the owner cpu's lockval).  The
 * ALTERNATIVE patches in a NIAI 7 instruction (0xb2fa0070) when facility
 * 49 is installed — presumably a cache-access hint ahead of the releasing
 * store; see the s390 Principles of Operation for NIAI semantics.
 * typecheck() statically asserts the lock word really is an int so the
 * halfword-index trick below stays valid.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
typecheck(int, lp->lock);
asm volatile(
ALTERNATIVE("", ".long 0xb2fa0070", 49)	/* NIAI 7 */
"	sth	%1,%0\n"
: "=Q" (((unsigned short *) &lp->lock)[1])
: "d" (0) : "cc", "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 9 | 36.00% | 4 | 40.00% |
Linus Torvalds (pre-git) | 7 | 28.00% | 1 | 10.00% |
Heiko Carstens | 4 | 16.00% | 1 | 10.00% |
Philipp Hachtmann | 2 | 8.00% | 1 | 10.00% |
Thomas Gleixner | 2 | 8.00% | 2 | 20.00% |
Vasily Gorbik | 1 | 4.00% | 1 | 10.00% |
Total | 25 | 100.00% | 10 | 100.00% |
/*
* Read-write spinlocks, allowing multiple readers
* but only one writer.
*
* NOTE! it is quite common to have readers in interrupts
* but no interrupt writers. For those circumstances we
* can "mix" irq-safe locks - any writer needs to get a
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*/
#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()
void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);
/*
 * Take the lock for reading: optimistically bump the reader count in the
 * low 16 bits.  If the previous value had any of the writer bits
 * (upper 16) set, a writer owns or is waiting for the lock, so enter the
 * out-of-line wait path.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	int prev;

	prev = __atomic_add(1, &rw->cnts);
	if (prev & 0xffff0000)
		arch_read_lock_wait(rw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 37 | 100.00% | 2 | 100.00% |
Total | 37 | 100.00% | 2 | 100.00% |
/*
 * Drop a reader: decrement the reader count.  The _barrier variant of the
 * atomic add orders the critical section's accesses before the release.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
__atomic_add_const_barrier(-1, &rw->cnts);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 22 | 100.00% | 2 | 100.00% |
Total | 22 | 100.00% | 2 | 100.00% |
/*
 * Take the lock for writing: a single 0 -> 0x30000 compare-and-swap claims
 * the lock when it is completely free (no readers, no writer).  On any
 * contention, fall back to the out-of-line writer wait loop.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	if (__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
		return;
	arch_write_lock_wait(rw);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 31 | 100.00% | 2 | 100.00% |
Total | 31 | 100.00% | 2 | 100.00% |
/*
 * Release the write lock by subtracting the writer value 0x30000; the
 * _barrier variant orders the critical section before the release.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__atomic_add_barrier(-0x30000, &rw->cnts);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 22 | 100.00% | 2 | 100.00% |
Total | 22 | 100.00% | 2 | 100.00% |
/*
 * Try to take the lock for reading without waiting.  Returns non-zero on
 * success.  Fails immediately when the sampled value shows writer activity
 * (upper 16 bits set); otherwise a compare-and-swap attempts to bump the
 * reader count from the sampled value — a concurrent change makes it fail.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int cnts;

	cnts = READ_ONCE(rw->cnts);
	if (cnts & 0xffff0000)
		return 0;
	return __atomic_cmpxchg_bool(&rw->cnts, cnts, cnts + 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 43 | 91.49% | 5 | 71.43% |
Philipp Hachtmann | 3 | 6.38% | 1 | 14.29% |
Thomas Gleixner | 1 | 2.13% | 1 | 14.29% |
Total | 47 | 100.00% | 7 | 100.00% |
/*
 * Try to take the lock for writing without waiting.  Returns non-zero on
 * success.  Bails out when the sampled lock word is non-zero (readers or a
 * writer present); otherwise attempt the 0 -> 0x30000 claiming cmpxchg.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	if (READ_ONCE(rw->cnts))
		return 0;
	return __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 37 | 94.87% | 4 | 66.67% |
Thomas Gleixner | 2 | 5.13% | 2 | 33.33% |
Total | 39 | 100.00% | 6 | 100.00% |
#endif /* __ASM_SPINLOCK_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Schwidefsky | 295 | 56.73% | 14 | 42.42% |
Philipp Hachtmann | 79 | 15.19% | 2 | 6.06% |
Linus Torvalds (pre-git) | 33 | 6.35% | 1 | 3.03% |
Christian Bornträger | 32 | 6.15% | 2 | 6.06% |
Hisashi Hifumi | 25 | 4.81% | 1 | 3.03% |
Heiko Carstens | 21 | 4.04% | 3 | 9.09% |
Thomas Gleixner | 15 | 2.88% | 4 | 12.12% |
Peter Zijlstra | 6 | 1.15% | 1 | 3.03% |
Will Deacon | 4 | 0.77% | 1 | 3.03% |
Linus Torvalds | 4 | 0.77% | 1 | 3.03% |
Vasily Gorbik | 4 | 0.77% | 1 | 3.03% |
Greg Kroah-Hartman | 1 | 0.19% | 1 | 3.03% |
Ingo Molnar | 1 | 0.19% | 1 | 3.03% |
Total | 520 | 100.00% | 33 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.