Release 4.7 kernel/locking/rwsem-spinlock.c
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};
int rwsem_is_locked(struct rw_semaphore *sem)
{
	int ret = 1;
	unsigned long flags;

	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
		ret = (sem->count != 0);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	}
	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| americo wang | 53 | 94.64% | 1 | 33.33% |
| thomas gleixner | 2 | 3.57% | 1 | 33.33% |
| peter zijlstra | 1 | 1.79% | 1 | 33.33% |
| Total | 56 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(rwsem_is_locked);
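The trylock above lets rwsem_is_locked() be called even from contexts where wait_lock may already be held; if the trylock fails, the semaphore is reported as locked. Below is a minimal, hypothetical caller sketch (not part of this file) using it as a held-lock assertion; my_data and my_update are invented names.

/* Hypothetical sketch only: assert that the caller already holds the rwsem. */
struct my_data {
	struct rw_semaphore sem;
	int value;
};

static void my_update(struct my_data *d, int v)
{
	/* callers must hold d->sem (read or write); warn once if they do not */
	WARN_ON_ONCE(!rwsem_is_locked(&d->sem));
	d->value = v;
}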
/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| ingo molnar | 44 | 57.14% | 1 | 20.00% |
| linus torvalds | 29 | 37.66% | 1 | 20.00% |
| peter zijlstra | 3 | 3.90% | 2 | 40.00% |
| thomas gleixner | 1 | 1.30% | 1 | 20.00% |
| Total | 77 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(__init_rwsem);
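A short sketch of typical usage, not part of this file: callers normally initialise through the init_rwsem() wrapper in <linux/rwsem.h>, which supplies the lockdep name and a static lock_class_key and ends up in __init_rwsem() above. my_object and my_object_init are invented names.

/* Hypothetical sketch only: embedding and initialising an rwsem. */
struct my_object {
	struct rw_semaphore lock;
	struct list_head items;
};

static void my_object_init(struct my_object *obj)
{
	init_rwsem(&obj->lock);		/* wrapper around __init_rwsem() */
	INIT_LIST_HEAD(&obj->items);
}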
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wakewrite)
			/* Wake up a writer. Note that we do not grant it the
			 * lock - it will have to acquire it when it runs. */
			wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
	woken = 0;
	do {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/*
		 * Make sure we do not wakeup the next reader before
		 * setting the nil condition to grant the next reader;
		 * otherwise we could miss the wakeup on the other
		 * side and end up sleeping again. See the pairing
		 * in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (next == &sem->wait_list)
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	sem->count += woken;

 out:
	return sem;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| linus torvalds | 127 | 75.15% | 3 | 27.27% |
| christoph hellwig | 20 | 11.83% | 1 | 9.09% |
| michel lespinasse | 17 | 10.06% | 2 | 18.18% |
| yuanhan liu | 1 | 0.59% | 1 | 9.09% |
| andrew morton | 1 | 0.59% | 1 | 9.09% |
| nick piggin | 1 | 0.59% | 1 | 9.09% |
| davidlohr bueso | 1 | 0.59% | 1 | 9.09% |
| peter zijlstra | 1 | 0.59% | 1 | 9.09% |
| Total | 169 | 100.00% | 11 | 100.00% |
/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	wake_up_process(waiter->task);
	return sem;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| linus torvalds | 44 | 97.78% | 2 | 66.67% |
| yuanhan liu | 1 | 2.22% | 1 | 33.33% |
| Total | 45 | 100.00% | 3 | 100.00% |
/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	__set_task_state(tsk, TASK_RUNNING);
 out:
	;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| linus torvalds | 138 | 86.25% | 3 | 30.00% |
| kevin hilman | 10 | 6.25% | 1 | 10.00% |
| davidlohr bueso | 4 | 2.50% | 1 | 10.00% |
| thomas gleixner | 3 | 1.88% | 1 | 10.00% |
| peter zijlstra | 2 | 1.25% | 1 | 10.00% |
| michel lespinasse | 1 | 0.62% | 1 | 10.00% |
| nick piggin | 1 | 0.62% | 1 | 10.00% |
| ingo molnar | 1 | 0.62% | 1 | 10.00% |
| Total | 160 | 100.00% | 10 | 100.00% |
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		ret = 1;
	}

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| christoph hellwig | 58 | 82.86% | 1 | 25.00% |
| david howells | 8 | 11.43% | 1 | 25.00% |
| thomas gleixner | 2 | 2.86% | 1 | 25.00% |
| peter zijlstra | 2 | 2.86% | 1 | 25.00% |
| Total | 70 | 100.00% | 4 | 100.00% |
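A hypothetical caller sketch (invented names my_stats, my_try_report) showing the trylock convention through the public down_read_trylock()/up_read() wrappers: 1 means the read lock was taken, 0 means the caller should back off rather than block.

/* Hypothetical sketch only: opportunistic read access without blocking. */
struct my_stats {
	struct rw_semaphore sem;
	unsigned long hits;
};

static bool my_try_report(struct my_stats *st)
{
	if (!down_read_trylock(&st->sem))
		return false;		/* contended; caller retries later */
	pr_info("hits: %lu\n", st->hits);
	up_read(&st->sem);
	return true;
}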
/*
 * get a write lock on the semaphore
 */
int __sched __down_write_common(struct rw_semaphore *sem, int state)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* set up my own style of waitqueue */
	tsk = current;
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait for someone to release the lock */
	for (;;) {
		/*
		 * This is the key to write lock stealing: it lets the task
		 * already on the CPU take the lock straight away instead of
		 * going to sleep and waiting for a wakeup of itself or of
		 * someone else at the head of the wait list.
		 */
		if (sem->count == 0)
			break;
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto out;
		}
		set_task_state(tsk, state);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&sem->wait_lock, flags);
	}
	/* got the lock */
	sem->count = -1;
out:
	list_del(&waiter.list);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| linus torvalds | 71 | 41.76% | 1 | 12.50% |
| yuanhan liu | 52 | 30.59% | 1 | 12.50% |
| michal hocko | 36 | 21.18% | 1 | 12.50% |
| kevin hilman | 6 | 3.53% | 1 | 12.50% |
| peter zijlstra | 2 | 1.18% | 1 | 12.50% |
| nick piggin | 1 | 0.59% | 1 | 12.50% |
| thomas gleixner | 1 | 0.59% | 1 | 12.50% |
| michel lespinasse | 1 | 0.59% | 1 | 12.50% |
| Total | 170 | 100.00% | 8 | 100.00% |
void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| michal hocko | 18 | 100.00% | 1 | 100.00% |
| Total | 18 | 100.00% | 1 | 100.00% |
int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| michal hocko | 18 | 94.74% | 1 | 50.00% |
| linus torvalds | 1 | 5.26% | 1 | 50.00% |
| Total | 19 | 100.00% | 2 | 100.00% |
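A hypothetical caller sketch for the killable variant (invented names my_dev, my_dev_reset): down_write_killable() returns non-zero when a fatal signal interrupts the wait, and the caller must bail out because the lock was not acquired.

/* Hypothetical sketch only: write lock acquisition that a fatal signal can abort. */
struct my_dev {
	struct rw_semaphore sem;
	bool running;
};

static int my_dev_reset(struct my_dev *dev)
{
	if (down_write_killable(&dev->sem))
		return -EINTR;		/* killed while waiting; lock not held */
	dev->running = false;
	/* ... exclusive reset work ... */
	up_write(&dev->sem);
	return 0;
}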
/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count == 0) {
		/* got the lock */
		sem->count = -1;
		ret = 1;
	}

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| christoph hellwig | 51 | 79.69% | 1 | 20.00% |
| david howells | 8 | 12.50% | 1 | 20.00% |
| thomas gleixner | 2 | 3.12% | 1 | 20.00% |
| peter zijlstra | 2 | 3.12% | 1 | 20.00% |
| yuanhan liu | 1 | 1.56% | 1 | 20.00% |
| Total | 64 | 100.00% | 5 | 100.00% |
/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->count == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| linus torvalds | 48 | 81.36% | 1 | 25.00% |
| david howells | 8 | 13.56% | 1 | 25.00% |
| thomas gleixner | 2 | 3.39% | 1 | 25.00% |
| peter zijlstra | 1 | 1.69% | 1 | 25.00% |
| Total | 59 | 100.00% | 4 | 100.00% |
/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->count = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| linus torvalds | 47 | 78.33% | 1 | 20.00% |
| david howells | 8 | 13.33% | 1 | 20.00% |
| thomas gleixner | 2 | 3.33% | 1 | 20.00% |
| christoph hellwig | 2 | 3.33% | 1 | 20.00% |
| peter zijlstra | 1 | 1.67% | 1 | 20.00% |
| Total | 60 | 100.00% | 5 | 100.00% |
/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->count = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| christoph hellwig | 49 | 81.67% | 1 | 25.00% |
| david howells | 8 | 13.33% | 1 | 25.00% |
| thomas gleixner | 2 | 3.33% | 1 | 25.00% |
| peter zijlstra | 1 | 1.67% | 1 | 25.00% |
| Total | 60 | 100.00% | 4 | 100.00% |
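A hypothetical caller sketch of the downgrade pattern (invented names my_entry, my_table, my_publish) through the public wrappers: the writer publishes an entry, then downgrades so blocked readers can proceed while it keeps read access.

/* Hypothetical sketch only: publish under the write lock, then keep reading. */
struct my_entry {
	struct list_head node;
	int id;
};

struct my_table {
	struct rw_semaphore sem;
	struct list_head entries;
};

static void my_publish(struct my_table *t, struct my_entry *e)
{
	down_write(&t->sem);
	list_add_tail(&e->node, &t->entries);	/* exclusive update */
	downgrade_write(&t->sem);		/* waiting readers may now run */
	/* ... continue with read-only access to *t ... */
	up_read(&t->sem);
}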
Overall Contributors

| Person | Tokens | Prop | Commits | CommitProp |
|---|---|---|---|---|
| linus torvalds | 533 | 49.22% | 3 | 13.64% |
| christoph hellwig | 183 | 16.90% | 1 | 4.55% |
| michal hocko | 72 | 6.65% | 1 | 4.55% |
| americo wang | 63 | 5.82% | 2 | 9.09% |
| yuanhan liu | 56 | 5.17% | 1 | 4.55% |
| ingo molnar | 45 | 4.16% | 2 | 9.09% |
| david howells | 40 | 3.69% | 1 | 4.55% |
| michel lespinasse | 30 | 2.77% | 2 | 9.09% |
| thomas gleixner | 17 | 1.57% | 1 | 4.55% |
| kevin hilman | 16 | 1.48% | 1 | 4.55% |
| peter zijlstra | 16 | 1.48% | 2 | 9.09% |
| davidlohr bueso | 5 | 0.46% | 2 | 9.09% |
| nick piggin | 5 | 0.46% | 1 | 4.55% |
| andrew morton | 1 | 0.09% | 1 | 4.55% |
| paul gortmaker | 1 | 0.09% | 1 | 4.55% |
| Total | 1083 | 100.00% | 22 | 100.00% |