Release 4.15 kernel/locking/rwsem-spinlock.c
// SPDX-License-Identifier: GPL-2.0
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};
int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->count != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 53 | 94.64% | 1 | 33.33% |
Thomas Gleixner | 2 | 3.57% | 1 | 33.33% |
Peter Zijlstra | 1 | 1.79% | 1 | 33.33% |
Total | 56 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(rwsem_is_locked);
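Note the trylock trick above: if wait_lock cannot be taken, another CPU is operating on the semaphore right now, so it is conservatively reported as locked (ret defaults to 1). The result is therefore a heuristic, best used in assertions. A minimal sketch of that pattern, assuming a hypothetical semaphore my_sem; DECLARE_RWSEM() and WARN_ON() are the real kernel helpers:

/* Hedged sketch: rwsem_is_locked() as a debug assertion, not a
 * locking decision.  "my_sem" and "assert_held_example" are
 * hypothetical names for illustration. */
static DECLARE_RWSEM(my_sem);

static void assert_held_example(void)
{
        down_read(&my_sem);
        WARN_ON(!rwsem_is_locked(&my_sem));   /* we hold it for read */
        up_read(&my_sem);
}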
/*
* initialise the semaphore
*/
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 44 | 57.14% | 1 | 20.00% |
Linus Torvalds | 29 | 37.66% | 1 | 20.00% |
Peter Zijlstra | 3 | 3.90% | 2 | 40.00% |
Thomas Gleixner | 1 | 1.30% | 1 | 20.00% |
Total | 77 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(__init_rwsem);
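Callers do not invoke __init_rwsem() directly; they use the init_rwsem() wrapper macro from <linux/rwsem.h>, which supplies the name string and a static lock class key for lockdep. A minimal sketch, assuming a hypothetical struct my_dev that embeds the semaphore:

#include <linux/rwsem.h>

struct my_dev {                            /* hypothetical container */
        struct rw_semaphore lock;
        int value;
};

static void my_dev_setup(struct my_dev *dev)
{
        init_rwsem(&dev->lock);            /* expands to __init_rwsem() with key */
        dev->value = 0;
}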
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wakewrite)
                        /* Wake up a writer. Note that we do not grant it the
                         * lock - it will have to acquire it when it runs. */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
        woken = 0;
        do {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                /*
                 * Make sure we do not wake up the next reader before
                 * setting the nil condition to grant the next reader;
                 * otherwise we could miss the wakeup on the other
                 * side and end up sleeping again. See the pairing
                 * in rwsem_down_read_failed().
                 */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (next == &sem->wait_list)
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        sem->count += woken;

 out:
        return sem;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 127 | 75.15% | 3 | 27.27% |
Christoph Hellwig | 20 | 11.83% | 1 | 9.09% |
Michel Lespinasse | 17 | 10.06% | 2 | 18.18% |
Andrew Morton | 1 | 0.59% | 1 | 9.09% |
Peter Zijlstra | 1 | 0.59% | 1 | 9.09% |
Nicholas Piggin | 1 | 0.59% | 1 | 9.09% |
Yuanhan Liu | 1 | 0.59% | 1 | 9.09% |
Davidlohr Bueso A | 1 | 0.59% | 1 | 9.09% |
Total | 169 | 100.00% | 11 | 100.00% |
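The count convention driving this function: count > 0 means that many readers hold the lock, 0 means free, -1 means one writer holds it. The wake loop grants read locks to every reader queued ahead of the first writer; a writer at the head is merely woken, not granted. A small userspace model of that policy, as a sketch (plain C with assert, not kernel code; all names here are hypothetical):

/* Hedged model of __rwsem_do_wake()'s grant policy. */
#include <assert.h>

enum wtype { WRITE, READ };

static int model_wake(int count, const enum wtype *queue, int n)
{
        int i = 0, woken = 0;

        assert(count == 0);              /* the active count reached zero */
        if (n && queue[0] == WRITE)
                return count;            /* writer woken, lock not granted */
        while (i < n && queue[i] == READ) {
                woken++;                 /* grant a read lock */
                i++;
        }
        return count + woken;
}

int main(void)
{
        enum wtype q[] = { READ, READ, WRITE, READ };

        assert(model_wake(0, q, 4) == 2);   /* two readers granted, writer blocks */
        return 0;
}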
/*
* wake a single writer
*/
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 44 | 97.78% | 2 | 66.67% |
Yuanhan Liu | 1 | 2.22% | 1 | 33.33% |
Total | 45 | 100.00% | 3 | 100.00% |
/*
* get a read lock on the semaphore
*/
int __sched __down_read_common(struct rw_semaphore *sem, int state)
{
        struct rwsem_waiter waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->count++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        /* set up my own style of waitqueue */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(current);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                if (signal_pending_state(state, current))
                        goto out_nolock;
                set_current_state(state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 out:
        return 0;

out_nolock:
        /*
         * We didn't take the lock, so there must be a writer, which is
         * either the owner or the first waiter of the sem.  If it's a
         * waiter, it will be woken by the current owner.  No need to
         * wake anybody else.
         */
        list_del(&waiter.list);
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        return -EINTR;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 103 | 52.55% | 3 | 27.27% |
Kirill Tkhai | 75 | 38.27% | 1 | 9.09% |
Kevin Hilman | 8 | 4.08% | 1 | 9.09% |
Davidlohr Bueso A | 4 | 2.04% | 2 | 18.18% |
Peter Zijlstra | 2 | 1.02% | 1 | 9.09% |
Thomas Gleixner | 2 | 1.02% | 1 | 9.09% |
Nicholas Piggin | 1 | 0.51% | 1 | 9.09% |
Michel Lespinasse | 1 | 0.51% | 1 | 9.09% |
Total | 196 | 100.00% | 11 | 100.00% |
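In this spinlock implementation the public down_read()/up_read() wrappers from <linux/rwsem.h> route to the functions above. A minimal reader-side sketch, reusing the hypothetical struct my_dev from the earlier example:

/* Hedged sketch of the usual reader-side pattern. */
static int my_dev_peek(struct my_dev *dev)
{
        int v;

        down_read(&dev->lock);     /* may sleep in TASK_UNINTERRUPTIBLE */
        v = dev->value;            /* shared data is stable here */
        up_read(&dev->lock);
        return v;
}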
void __sched __down_read(struct rw_semaphore *sem)
{
        __down_read_common(sem, TASK_UNINTERRUPTIBLE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill Tkhai | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
int __sched __down_read_killable(struct rw_semaphore *sem)
{
        return __down_read_common(sem, TASK_KILLABLE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kirill Tkhai | 18 | 94.74% | 1 | 50.00% |
Linus Torvalds | 1 | 5.26% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
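The killable variant sleeps in TASK_KILLABLE and returns -EINTR if a fatal signal arrives, so callers must check the return value before touching protected data. A hedged sketch, again using the hypothetical my_dev:

/* Hedged sketch: the killable acquire can fail. */
static int my_dev_peek_killable(struct my_dev *dev, int *out)
{
        if (down_read_killable(&dev->lock))
                return -EINTR;     /* fatal signal while sleeping */
        *out = dev->value;
        up_read(&dev->lock);
        return 0;
}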
/*
* trylock for reading -- returns 1 if successful, 0 if contention
*/
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->count++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 58 | 82.86% | 1 | 25.00% |
David Howells | 8 | 11.43% | 1 | 25.00% |
Thomas Gleixner | 2 | 2.86% | 1 | 25.00% |
Peter Zijlstra | 2 | 2.86% | 1 | 25.00% |
Total | 70 | 100.00% | 4 | 100.00% |
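As the header comment says, the trylock returns 1 on success and 0 on contention, and it never sleeps, which makes it usable where blocking is forbidden. A hedged sketch of the caller side (hypothetical my_dev again):

/* Hedged sketch: opportunistic read access without sleeping. */
static bool my_dev_peek_opportunistic(struct my_dev *dev, int *out)
{
        if (!down_read_trylock(&dev->lock))
                return false;      /* contended: caller must cope */
        *out = dev->value;
        up_read(&dev->lock);
        return true;
}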
/*
* get a write lock on the semaphore
*/
int __sched __down_write_common(struct rw_semaphore *sem, int state)
{
        struct rwsem_waiter waiter;
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * This is the key to write lock stealing: it allows the
                 * task already on a CPU to take the lock as soon as it
                 * becomes free, rather than putting itself to sleep and
                 * waiting for the system to wake it, or someone else at
                 * the head of the wait list, up.
                 */
                if (sem->count == 0)
                        break;
                if (signal_pending_state(state, current))
                        goto out_nolock;

                set_current_state(state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->count = -1;
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;

out_nolock:
        list_del(&waiter.list);
        if (!list_empty(&sem->wait_list) && sem->count >= 0)
                __rwsem_do_wake(sem, 0);
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return -EINTR;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 58 | 29.29% | 1 | 8.33% |
Yuanhan Liu | 52 | 26.26% | 1 | 8.33% |
Niklas Cassel | 42 | 21.21% | 1 | 8.33% |
Michal Hocko | 26 | 13.13% | 1 | 8.33% |
Kirill Tkhai | 7 | 3.54% | 1 | 8.33% |
Kevin Hilman | 6 | 3.03% | 1 | 8.33% |
Peter Zijlstra | 2 | 1.01% | 1 | 8.33% |
Davidlohr Bueso A | 2 | 1.01% | 2 | 16.67% |
Nicholas Piggin | 1 | 0.51% | 1 | 8.33% |
Thomas Gleixner | 1 | 0.51% | 1 | 8.33% |
Michel Lespinasse | 1 | 0.51% | 1 | 8.33% |
Total | 198 | 100.00% | 12 | 100.00% |
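Callers reach this path through the public down_write()/up_write() wrappers; note that a writer looping here can "steal" the lock the moment count drops to zero, even if it is not at the head of the wait list. A minimal writer-side sketch, reusing the hypothetical my_dev:

/* Hedged sketch of the writer-side pattern. */
static void my_dev_set(struct my_dev *dev, int v)
{
        down_write(&dev->lock);    /* exclusive: count becomes -1 */
        dev->value = v;
        up_write(&dev->lock);
}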
void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_common(sem, TASK_UNINTERRUPTIBLE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
int __sched __down_write_killable(struct rw_semaphore *sem)
{
        return __down_write_common(sem, TASK_KILLABLE);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Hocko | 18 | 94.74% | 1 | 50.00% |
Linus Torvalds | 1 | 5.26% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
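As on the read side, the killable write acquire returns -EINTR on a fatal signal and must be checked. A hedged sketch with the hypothetical my_dev:

/* Hedged sketch: killable exclusive acquire. */
static int my_dev_set_killable(struct my_dev *dev, int v)
{
        if (down_write_killable(&dev->lock))
                return -EINTR;     /* fatal signal while waiting */
        dev->value = v;
        up_write(&dev->lock);
        return 0;
}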
/*
* trylock for writing -- returns 1 if successful, 0 if contention
*/
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count == 0) {
                /* got the lock */
                sem->count = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 51 | 79.69% | 1 | 20.00% |
David Howells | 8 | 12.50% | 1 | 20.00% |
Peter Zijlstra | 2 | 3.12% | 1 | 20.00% |
Thomas Gleixner | 2 | 3.12% | 1 | 20.00% |
Yuanhan Liu | 1 | 1.56% | 1 | 20.00% |
Total | 64 | 100.00% | 5 | 100.00% |
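The write trylock succeeds only when the semaphore is completely free (count == 0) and, like its read counterpart, never sleeps. A hedged caller-side sketch (hypothetical my_dev):

/* Hedged sketch: update only if the lock is immediately available. */
static bool my_dev_set_if_uncontended(struct my_dev *dev, int v)
{
        if (!down_write_trylock(&dev->lock))
                return false;      /* someone holds it; try again later */
        dev->value = v;
        up_write(&dev->lock);
        return true;
}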
/*
* release a read lock on the semaphore
*/
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->count == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 48 | 81.36% | 1 | 25.00% |
David Howells | 8 | 13.56% | 1 | 25.00% |
Thomas Gleixner | 2 | 3.39% | 1 | 25.00% |
Peter Zijlstra | 1 | 1.69% | 1 | 25.00% |
Total | 59 | 100.00% | 4 | 100.00% |
/*
* release a write lock on the semaphore
*/
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->count = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 47 | 78.33% | 1 | 20.00% |
David Howells | 8 | 13.33% | 1 | 20.00% |
Thomas Gleixner | 2 | 3.33% | 1 | 20.00% |
Christoph Hellwig | 2 | 3.33% | 1 | 20.00% |
Peter Zijlstra | 1 | 1.67% | 1 | 20.00% |
Total | 60 | 100.00% | 5 | 100.00% |
/*
* downgrade a write lock into a read lock
* - just wake up any readers at the front of the queue
*/
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->count = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 49 | 81.67% | 1 | 25.00% |
David Howells | 8 | 13.33% | 1 | 25.00% |
Thomas Gleixner | 2 | 3.33% | 1 | 25.00% |
Peter Zijlstra | 1 | 1.67% | 1 | 25.00% |
Total | 60 | 100.00% | 4 | 100.00% |
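The canonical use of downgrade_write() is to publish an update under the exclusive lock and then keep reading without letting another writer slip in between. A hedged sketch, reusing the hypothetical my_dev; pr_info() is the standard kernel logging helper:

/* Hedged sketch: write, downgrade, then continue as a reader. */
static void my_dev_update_then_read(struct my_dev *dev, int v)
{
        down_write(&dev->lock);
        dev->value = v;                   /* exclusive update */
        downgrade_write(&dev->lock);      /* count goes from -1 to 1 */
        /* still hold a read lock; other readers may now join */
        pr_info("value is %d\n", dev->value);
        up_read(&dev->lock);
}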
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 485 | 40.82% | 3 | 10.34% |
Christoph Hellwig | 183 | 15.40% | 1 | 3.45% |
Kirill Tkhai | 118 | 9.93% | 2 | 6.90% |
Américo Wang | 63 | 5.30% | 2 | 6.90% |
Michal Hocko | 62 | 5.22% | 1 | 3.45% |
Yuanhan Liu | 56 | 4.71% | 1 | 3.45% |
Ingo Molnar | 48 | 4.04% | 3 | 10.34% |
Niklas Cassel | 42 | 3.54% | 1 | 3.45% |
David Howells | 40 | 3.37% | 1 | 3.45% |
Michel Lespinasse | 30 | 2.53% | 2 | 6.90% |
Thomas Gleixner | 16 | 1.35% | 1 | 3.45% |
Peter Zijlstra | 16 | 1.35% | 2 | 6.90% |
Kevin Hilman | 14 | 1.18% | 1 | 3.45% |
Davidlohr Bueso A | 7 | 0.59% | 4 | 13.79% |
Nicholas Piggin | 5 | 0.42% | 1 | 3.45% |
Greg Kroah-Hartman | 1 | 0.08% | 1 | 3.45% |
Paul Gortmaker | 1 | 0.08% | 1 | 3.45% |
Andrew Morton | 1 | 0.08% | 1 | 3.45% |
Total | 1188 | 100.00% | 29 | 100.00% |