Contributors: 10

Author                     Tokens  Token Proportion  Commits  Commit Proportion
Matthew Wilcox                217            39.53%        5             26.32%
Helge Deller                  174            31.69%        2             10.53%
Linus Torvalds (pre-git)       90            16.39%        1              5.26%
John David Anglin              23             4.19%        2             10.53%
Thomas Gleixner                19             3.46%        4             21.05%
James Bottomley                 9             1.64%        1              5.26%
Rolf Eike Beer                  6             1.09%        1              5.26%
Ingo Molnar                     6             1.09%        1              5.26%
Will Deacon                     4             0.73%        1              5.26%
Greg Kroah-Hartman              1             0.18%        1              5.26%
Total                         549                         19


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h>
#include <asm/spinlock_types.h>

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);
	smp_mb();
	return *a == 0;
}

static inline void arch_spin_lock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
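	/*
	 * __ldcw() returns 0 while someone else holds the lock.  Between
	 * failed attempts, spin on a plain read until the word goes
	 * non-zero, so the contended cache line is not written on every
	 * iteration.
	 */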
	while (__ldcw(a) == 0)
		while (*a == 0)
			cpu_relax();
}

static inline void arch_spin_lock_flags(arch_spinlock_t *x,
					 unsigned long flags)
{
	volatile unsigned int *a;
	unsigned long flags_dis;

	a = __ldcw_align(x);
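	/*
	 * Same spin loop as arch_spin_lock(), but the caller's interrupt
	 * state (@flags) is restored while busy-waiting, so interrupts
	 * are not kept disabled for the whole duration of the wait.
	 */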
	while (__ldcw(a) == 0) {
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		while (*a == 0)
			cpu_relax();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *x)
{
	volatile unsigned int *a;

	a = __ldcw_align(x);
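	/*
	 * All prior accesses must be ordered before the releasing store.
	 * On SMP a dummy __ldcw() doubles as the release barrier; on UP
	 * a plain mb() is sufficient.
	 */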
#ifdef CONFIG_SMP
	(void) __ldcw(a);
#else
	mb();
#endif
	*a = 1;
}

static inline int arch_spin_trylock(arch_spinlock_t *x)
{
	volatile unsigned int *a;
	int ret;

	a = __ldcw_align(x);
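	/*
	 * A single LDCW attempt: non-zero means the lock was free and is
	 * now ours; zero means someone else already holds it.
	 */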
	ret = __ldcw(a) != 0;

	return ret;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * The locking is unfair: writers can be starved indefinitely by readers.
 *
 * The rwlock state lives in @counter; all access to it is serialized
 * by the inner @lock_mutex spinlock.
 */
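
/*
 * Counter states (a sketch of the scheme implemented below):
 *   counter == __ARCH_RW_LOCK_UNLOCKED__      lock is free
 *   0 < counter < __ARCH_RW_LOCK_UNLOCKED__   held by one or more readers
 *   counter == 0                              held by a writer
 * Each reader decrements the counter on acquire and increments it on
 * release; a writer takes it from __ARCH_RW_LOCK_UNLOCKED__ to 0.
 */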

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * A counter of zero means a writer holds the lock exclusively,
	 * so deny the reader.  Otherwise grant the lock to this (first
	 * or subsequent) reader.
	 */
	if (rw->counter > 0) {
		rw->counter--;
		ret = 1;
	}

	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));

	/*
	 * If any readers hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
	 * deny the writer; only if the lock is completely unlocked is it
	 * granted to the writer.  Hence the claim that Linux rwlocks are
	 * unfair to writers: a writer can be starved indefinitely by
	 * readers.
	 */
	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
		rw->counter = 0;
		ret = 1;
	}
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);

	return ret;
}
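
/*
 * The blocking lock operations simply retry the trylock under
 * cpu_relax(); no fairness between readers and writers is attempted.
 */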

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (!arch_read_trylock(rw))
		cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (!arch_write_trylock(rw))
		cpu_relax();
}
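
/*
 * Even the unlock paths must take @lock_mutex: PA-RISC has no atomic
 * add/sub, so the counter update itself needs the inner spinlock.
 */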

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter++;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&(rw->lock_mutex));
	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
	arch_spin_unlock(&(rw->lock_mutex));
	local_irq_restore(flags);
}

#endif /* __ASM_SPINLOCK_H */