Contributors: 10

Author              Tokens  Token Proportion  Commits  Commit Proportion
Rich Felker            201           60.91%        1              6.25%
Paul Mundt              70           21.21%        3             18.75%
Peter Zijlstra          23            6.97%        1              6.25%
Thomas Gleixner         17            5.15%        4             25.00%
Andrew Morton           10            3.03%        2             12.50%
Ralf Baechle             3            0.91%        1              6.25%
Ingo Molnar              2            0.61%        1              6.25%
Maciej W. Rozycki        2            0.61%        1              6.25%
Kuninori Morimoto        1            0.30%        1              6.25%
Evgeniy Polyakov         1            0.30%        1              6.25%
Total                  330          100.00%       16            100.00%


/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Atomic compare-and-swap using the cas.l instruction: if *p == old,
 * *p is set to new; either way, the previous value of *p is returned
 * in the "new" operand, so the swap succeeded iff the return value
 * equals old. The "z" constraint forces p into r0, the only address
 * register cas.l accepts, and the instruction sets the T flag, hence
 * the "t" clobber.
 */
static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
	__asm__ __volatile__("cas.l %1,%0,@r0"
		: "+r"(new)
		: "r"(old), "z"(p)
		: "t", "memory" );
	return new;
}
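/*
 * For reference only, a rough C11-atomics equivalent of __sl_cas (a
 * sketch, not used by this header; __atomic_compare_exchange_n is the
 * GCC builtin, which updates "expected" with the current value on
 * failure, giving the same return-previous-value behaviour):
 *
 *	static inline unsigned __sl_cas_c11(volatile unsigned *p,
 *					    unsigned old, unsigned new)
 *	{
 *		unsigned expected = old;
 *		__atomic_compare_exchange_n(p, &expected, new, 0,
 *					    __ATOMIC_SEQ_CST,
 *					    __ATOMIC_SEQ_CST);
 *		return expected;	// == old iff the swap happened
 *	}
 */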

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

/* The lock word is 1 when free and 0 when held. */
#define arch_spin_is_locked(x)		((x)->lock <= 0)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	/*
	 * Take the lock by swapping the free value (1) for 0;
	 * __sl_cas() returns 0 until the swap succeeds.
	 */
	while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Release by restoring the free value; this cannot fail. */
	__sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	/* Nonzero iff the lock was free and is now ours. */
	return __sl_cas(&lock->lock, 1, 0);
}
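/*
 * Usage sketch (hypothetical caller; real code reaches these through
 * the generic spin_lock()/spin_unlock() wrappers, never directly):
 *
 *	arch_spinlock_t s = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&s);		// spins on CAS 1 -> 0
 *	// ... critical section ...
 *	arch_spin_unlock(&s);		// CAS 0 -> 1
 *
 *	if (arch_spin_trylock(&s)) {	// nonzero iff we got it
 *		// ... critical section ...
 *		arch_spin_unlock(&s);
 *	}
 */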

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks: any writer
 * needs to take an irq-safe write-lock, but readers can take non-irqsafe
 * read-locks, as sketched below.
 */
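/*
 * A sketch of that "mixed" pattern in terms of the generic rwlock API
 * layered on these primitives (hypothetical example, not part of this
 * header):
 *
 *	static DEFINE_RWLOCK(tbl_lock);
 *	unsigned long flags;
 *
 *	// Reader, safe even from interrupt context:
 *	read_lock(&tbl_lock);
 *	// ... lookup ...
 *	read_unlock(&tbl_lock);
 *
 *	// Writer must block interrupts, or an interrupt reader on
 *	// the same CPU could deadlock against it:
 *	write_lock_irqsave(&tbl_lock, flags);
 *	// ... update ...
 *	write_unlock_irqrestore(&tbl_lock, flags);
 */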

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned old;

	/*
	 * rw->lock counts free reader slots, starting at RW_LOCK_BIAS;
	 * claim one by decrementing a nonzero count.
	 */
	do {
		old = rw->lock;
	} while (!old || __sl_cas(&rw->lock, old, old - 1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned old;

	/* Return our reader slot by incrementing the count. */
	do {
		old = rw->lock;
	} while (__sl_cas(&rw->lock, old, old + 1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	/*
	 * A writer needs every reader slot: swap the full bias for 0,
	 * which succeeds only when no readers or writers are active.
	 */
	while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* Restore the full bias, freeing every reader slot again. */
	__sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned old;

	/* As arch_read_lock(), but give up instead of spinning on 0. */
	do {
		old = rw->lock;
	} while (old && __sl_cas(&rw->lock, old, old - 1) != old);
	return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	/* One shot at swapping the full bias for 0. */
	return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}
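/*
 * Trylock sketch (hypothetical caller; normally reached through the
 * generic read_trylock()/write_trylock() wrappers):
 *
 *	if (arch_write_trylock(&rw)) {
 *		// exclusive: lock went RW_LOCK_BIAS -> 0
 *		arch_write_unlock(&rw);
 *	} else if (arch_read_trylock(&rw)) {
 *		// shared: one reader slot claimed
 *		arch_read_unlock(&rw);
 *	}
 */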

#endif /* __ASM_SH_SPINLOCK_CAS_H */