Release 4.18 arch/riscv/include/asm/spinlock.h
/*
* Copyright (C) 2015 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H
#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>
/*
* Simple spin lock operations. These provide no fairness guarantees.
*/
/* FIXME: Replace this with a ticket lock, like MIPS. */
/* Nonzero lock word means the lock is held; READ_ONCE prevents torn/cached reads. */
#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
/*
 * Release the spinlock by storing 0 to the lock word with release
 * ordering, so all critical-section accesses are visible to the next
 * owner before the lock is observed free.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 11 | 52.38% | 1 | 50.00% |
Andrea Parri | 10 | 47.62% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
/*
 * Make one attempt to take the lock.
 *
 * amoswap.w atomically stores 1 into the lock word and returns its
 * previous value in @busy: 0 means the lock was free and is now ours.
 * RISCV_ACQUIRE_BARRIER (from <asm/fence.h>) orders the successful
 * acquisition before subsequent critical-section accesses.
 *
 * Returns nonzero on success, 0 if the lock was already held.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		"	amoswap.w %0, %2, %1\n"	/* busy = atomic xchg(lock->lock, 1) */
		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");			/* compiler barrier for the critical section */

	return !busy;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 24 | 96.00% | 1 | 50.00% |
Andrea Parri | 1 | 4.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
/*
 * Acquire the spinlock, spinning until it is taken.
 *
 * Test-and-test-and-set: spin on plain reads while the lock looks held,
 * and only attempt the atomic trylock once it appears free.  Retry from
 * the read loop whenever the trylock race is lost.  No fairness is
 * guaranteed (see the FIXME above about moving to a ticket lock).
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	do {
		/* Cheap read-only spin until the lock appears free. */
		while (arch_spin_is_locked(lock))
			;
	} while (!arch_spin_trylock(lock));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
/***********************************************************/
/*
 * Take the rwlock for reading, spinning until no writer holds it.
 *
 * The lock word is a reader count; a writer parks -1 there (see
 * arch_write_lock), so a negative value means "writer active".  The
 * LR/SC sequence retries both while a writer is present and when the
 * store-conditional loses a race.
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"		/* load-reserve the lock word */
		" bltz %1, 1b\n"		/* negative => writer holds it; spin */
		" addi %1, %1, 1\n"		/* bump the reader count */
		" sc.w %1, %1, %0\n"		/* try to commit; nonzero on failure */
		" bnez %1, 1b\n"		/* lost the race: start over */
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 16 | 94.12% | 1 | 50.00% |
Andrea Parri | 1 | 5.88% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
 * Take the rwlock for writing, spinning until it is completely free.
 *
 * Writers need exclusive access, so the lock word must be exactly 0
 * (no readers, no writer) before we store the writer marker -1.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"		/* load-reserve the lock word */
		" bnez %1, 1b\n"		/* any holder at all: spin */
		" li %1, -1\n"			/* writer marker */
		" sc.w %1, %1, %0\n"		/* try to commit; nonzero on failure */
		" bnez %1, 1b\n"		/* lost the race: start over */
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 16 | 94.12% | 1 | 50.00% |
Andrea Parri | 1 | 5.88% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
 * Make one attempt to take the rwlock for reading.
 *
 * Like arch_read_lock(), but a writer being present (negative lock
 * word) bails out immediately instead of spinning; only an SC race
 * loss is retried.  Returns nonzero on success, 0 if a writer holds
 * the lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"		/* load-reserve the lock word */
		" bltz %1, 1f\n"		/* writer present: fail, skip barrier */
		" addi %1, %1, 1\n"		/* bump the reader count */
		" sc.w %1, %1, %0\n"		/* try to commit; nonzero on failure */
		" bnez %1, 1b\n"		/* SC race lost: retry */
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 20 | 95.24% | 1 | 50.00% |
Andrea Parri | 1 | 4.76% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
/*
 * Make one attempt to take the rwlock for writing.
 *
 * Like arch_write_lock(), but any holder (nonzero lock word) bails out
 * immediately instead of spinning; only an SC race loss is retried.
 * Returns nonzero on success, 0 if the lock is held by anyone.
 */
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		"1: lr.w %1, %0\n"		/* load-reserve the lock word */
		" bnez %1, 1f\n"		/* held by someone: fail, skip barrier */
		" li %1, -1\n"			/* writer marker */
		" sc.w %1, %1, %0\n"		/* try to commit; nonzero on failure */
		" bnez %1, 1b\n"		/* SC race lost: retry */
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 20 | 95.24% | 1 | 50.00% |
Andrea Parri | 1 | 4.76% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
/*
 * Drop a read hold on the rwlock by atomically decrementing the reader
 * count (amoadd of -1, result discarded into x0).  The preceding
 * RISCV_RELEASE_BARRIER (from <asm/fence.h>) orders critical-section
 * accesses before the release.
 */
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		RISCV_RELEASE_BARRIER
		" amoadd.w x0, %1, %0\n"	/* lock->lock += -1, atomically */
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 13 | 92.86% | 1 | 50.00% |
Andrea Parri | 1 | 7.14% | 1 | 50.00% |
Total | 14 | 100.00% | 2 | 100.00% |
/*
 * Drop the write hold: overwrite the -1 writer marker with 0 using a
 * release store, making critical-section writes visible before the
 * lock is observed free.
 */
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 11 | 52.38% | 1 | 50.00% |
Andrea Parri | 10 | 47.62% | 1 | 50.00% |
Total | 21 | 100.00% | 2 | 100.00% |
#endif /* _ASM_RISCV_SPINLOCK_H */
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Palmer Dabbelt | 190 | 86.76% | 2 | 66.67% |
Andrea Parri | 29 | 13.24% | 1 | 33.33% |
Total | 219 | 100.00% | 3 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.