cregit-Linux: how code gets into the kernel

Release 4.15: kernel/locking/rwsem-spinlock.c

Directory: kernel/locking
// SPDX-License-Identifier: GPL-2.0
/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001   David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/export.h>


enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};


struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};


int rwsem_is_locked(struct rw_semaphore *sem)
{
	int ret = 1;
	unsigned long flags;

	if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
		ret = (sem->count != 0);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	}
	return ret;
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Américo Wang     53      94.64%   1        33.33%
Thomas Gleixner  2       3.57%    1        33.33%
Peter Zijlstra   1       1.79%    1        33.33%
Total            56      100.00%  3        100.00%

EXPORT_SYMBOL(rwsem_is_locked);
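
For orientation, a note not found in the file itself: in this spinlock-based variant, sem->count carries the entire lock state (0 is unlocked, a positive value counts active readers, -1 means write-locked), and rwsem_is_locked() is deliberately conservative, reporting "locked" whenever it cannot take wait_lock. A minimal sketch of a hypothetical caller (report_rwsem_state() is invented for illustration):

#include <linux/rwsem.h>
#include <linux/printk.h>

/* Hypothetical diagnostic helper; a sketch, not part of this file. */
static void report_rwsem_state(struct rw_semaphore *sem)
{
	/*
	 * A "locked" answer may be a false positive (wait_lock was
	 * contended), so treat it as a hint, never as a lock decision.
	 */
	if (rwsem_is_locked(sem))
		pr_info("rwsem appears held (readers or a writer)\n");
	else
		pr_info("rwsem appears free\n");
}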
/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = 0;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Ingo Molnar      44      57.14%   1        20.00%
Linus Torvalds   29      37.66%   1        20.00%
Peter Zijlstra   3       3.90%    2        40.00%
Thomas Gleixner  1       1.30%    1        20.00%
Total            77      100.00%  5        100.00%

EXPORT_SYMBOL(__init_rwsem);
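
Callers normally reach __init_rwsem() through the public init_rwsem() macro, which supplies the name and lockdep class key automatically. A hedged sketch, with struct my_cache invented for illustration:

#include <linux/rwsem.h>

struct my_cache {			/* hypothetical container */
	struct rw_semaphore lock;	/* guards nentries */
	int nentries;
};

static void my_cache_setup(struct my_cache *c)
{
	/*
	 * init_rwsem() expands to __init_rwsem() with a static
	 * lock_class_key, so all my_cache instances share one
	 * lockdep class.
	 */
	init_rwsem(&c->lock);
	c->nentries = 0;
}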
/*
 * handle the lock release when processes blocked on it that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wakewrite)
			/*
			 * Wake up a writer. Note that we do not grant it the
			 * lock - it will have to acquire it when it runs.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
	woken = 0;
	do {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
		/*
		 * Make sure we do not wakeup the next reader before
		 * setting the nil condition to grant the next reader;
		 * otherwise we could miss the wakeup on the other
		 * side and end up sleeping again. See the pairing
		 * in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (next == &sem->wait_list)
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	sem->count += woken;

out:
	return sem;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Linus Torvalds     127     75.15%   3        27.27%
Christoph Hellwig  20      11.83%   1        9.09%
Michel Lespinasse  17      10.06%   2        18.18%
Andrew Morton      1       0.59%    1        9.09%
Peter Zijlstra     1       0.59%    1        9.09%
Nicholas Piggin    1       0.59%    1        9.09%
Yuanhan Liu        1       0.59%    1        9.09%
Davidlohr Bueso A  1       0.59%    1        9.09%
Total              169     100.00%  11       100.00%

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	wake_up_process(waiter->task);

	return sem;
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Linus Torvalds  44      97.78%   2        66.67%
Yuanhan Liu     1       2.22%    1        33.33%
Total           45      100.00%  3        100.00%

/*
 * get a read lock on the semaphore
 */
int __sched __down_read_common(struct rw_semaphore *sem, int state)
{
	struct rwsem_waiter waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		goto out;
	}

	/* set up my own style of waitqueue */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(current);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait to be given the lock */
	for (;;) {
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current))
			goto out_nolock;
		set_current_state(state);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&sem->wait_lock, flags);
	}

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
out:
	return 0;

out_nolock:
	/*
	 * We didn't take the lock, so there is a writer, which is either
	 * the owner or the first waiter of the sem. If it's a waiter, it
	 * will be woken by the current owner. No need to wake anybody.
	 */
	list_del(&waiter.list);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	return -EINTR;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Linus Torvalds     103     52.55%   3        27.27%
Kirill Tkhai       75      38.27%   1        9.09%
Kevin Hilman       8       4.08%    1        9.09%
Davidlohr Bueso A  4       2.04%    2        18.18%
Peter Zijlstra     2       1.02%    1        9.09%
Thomas Gleixner    2       1.02%    1        9.09%
Nicholas Piggin    1       0.51%    1        9.09%
Michel Lespinasse  1       0.51%    1        9.09%
Total              196     100.00%  11       100.00%


void __sched __down_read(struct rw_semaphore *sem)
{
	__down_read_common(sem, TASK_UNINTERRUPTIBLE);
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
Kirill Tkhai  18      100.00%  1        100.00%
Total         18      100.00%  1        100.00%
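
__down_read() is where the public down_read() ends up on this implementation. A usage sketch for a hypothetical reader, reusing the invented struct my_cache from the earlier sketch:

#include <linux/rwsem.h>

static int my_cache_count(struct my_cache *c)
{
	int n;

	down_read(&c->lock);	/* sleeps in TASK_UNINTERRUPTIBLE if contended */
	n = c->nentries;	/* any number of readers may run here in parallel */
	up_read(&c->lock);

	return n;
}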


int __sched __down_read_killable(struct rw_semaphore *sem)
{
	return __down_read_common(sem, TASK_KILLABLE);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Kirill Tkhai    18      94.74%   1        50.00%
Linus Torvalds  1       5.26%    1        50.00%
Total           19      100.00%  2        100.00%
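
The killable variant lets a fatal signal abort the wait; the public down_read_killable() returns 0 on success and -EINTR otherwise. A sketch, again assuming the invented my_cache:

#include <linux/rwsem.h>
#include <linux/errno.h>

static int my_cache_count_killable(struct my_cache *c, int *out)
{
	if (down_read_killable(&c->lock))
		return -EINTR;	/* a fatal signal interrupted the wait */
	*out = c->nentries;
	up_read(&c->lock);
	return 0;
}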

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->count++;
		ret = 1;
	}

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Christoph Hellwig  58      82.86%   1        25.00%
David Howells      8       11.43%   1        25.00%
Thomas Gleixner    2       2.86%    1        25.00%
Peter Zijlstra     2       2.86%    1        25.00%
Total              70      100.00%  4        100.00%
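
Because the trylock never sleeps, it is safe in contexts that cannot block. A sketch of opportunistic use via the public down_read_trylock() (my_cache_peek() is invented):

#include <linux/rwsem.h>
#include <linux/types.h>

static bool my_cache_peek(struct my_cache *c, int *out)
{
	/* returns 1 on success, 0 on contention, exactly as above */
	if (!down_read_trylock(&c->lock))
		return false;
	*out = c->nentries;
	up_read(&c->lock);
	return true;
}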

/*
 * get a write lock on the semaphore
 */
int __sched __down_write_common(struct rw_semaphore *sem, int state)
{
	struct rwsem_waiter waiter;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* set up my own style of waitqueue */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* wait for someone to release the lock */
	for (;;) {
		/*
		 * This is the key to write lock stealing: it allows the
		 * task already on a CPU to take the lock as soon as it is
		 * free, rather than putting itself to sleep and waiting
		 * for the system to wake it or someone else at the head
		 * of the wait list.
		 */
		if (sem->count == 0)
			break;
		if (signal_pending_state(state, current))
			goto out_nolock;

		set_current_state(state);
		raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
		schedule();
		raw_spin_lock_irqsave(&sem->wait_lock, flags);
	}
	/* got the lock */
	sem->count = -1;
	list_del(&waiter.list);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;

out_nolock:
	list_del(&waiter.list);
	if (!list_empty(&sem->wait_list) && sem->count >= 0)
		__rwsem_do_wake(sem, 0);
	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return -EINTR;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Linus Torvalds     58      29.29%   1        8.33%
Yuanhan Liu        52      26.26%   1        8.33%
Niklas Cassel      42      21.21%   1        8.33%
Michal Hocko       26      13.13%   1        8.33%
Kirill Tkhai       7       3.54%    1        8.33%
Kevin Hilman       6       3.03%    1        8.33%
Peter Zijlstra     2       1.01%    1        8.33%
Davidlohr Bueso A  2       1.01%    2        16.67%
Nicholas Piggin    1       0.51%    1        8.33%
Thomas Gleixner    1       0.51%    1        8.33%
Michel Lespinasse  1       0.51%    1        8.33%
Total              198     100.00%  12       100.00%


void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
Michal Hocko  18      100.00%  1        100.00%
Total         18      100.00%  1        100.00%
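
Because __down_write_common() rechecks sem->count on every pass around its loop, a task already running can steal the lock ahead of sleepers on the wait list; the public down_write() wraps the uninterruptible variant. A writer-side sketch with the invented my_cache:

#include <linux/rwsem.h>

static void my_cache_add(struct my_cache *c)
{
	down_write(&c->lock);	/* exclusive; waits out readers and writers */
	c->nentries++;		/* sole owner here: sem->count == -1 */
	up_write(&c->lock);
}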


int __sched __down_write_killable(struct rw_semaphore *sem)
{
	return __down_write_common(sem, TASK_KILLABLE);
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
Michal Hocko    18      94.74%   1        50.00%
Linus Torvalds  1       5.26%    1        50.00%
Total           19      100.00%  2        100.00%

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->count == 0) {
		/* got the lock */
		sem->count = -1;
		ret = 1;
	}

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Christoph Hellwig  51      79.69%   1        20.00%
David Howells      8       12.50%   1        20.00%
Peter Zijlstra     2       3.12%    1        20.00%
Thomas Gleixner    2       3.12%    1        20.00%
Yuanhan Liu        1       1.56%    1        20.00%
Total              64      100.00%  5        100.00%
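
The write-side trylock mirrors the read-side one: the lock is taken only if it is completely free. A sketch via the public down_write_trylock() (my_cache_try_add() is invented):

#include <linux/rwsem.h>
#include <linux/types.h>

static bool my_cache_try_add(struct my_cache *c)
{
	if (!down_write_trylock(&c->lock))
		return false;	/* a reader or writer holds it */
	c->nentries++;
	up_write(&c->lock);
	return true;
}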

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->count == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

Contributors

Person           Tokens  Prop     Commits  CommitProp
Linus Torvalds   48      81.36%   1        25.00%
David Howells    8       13.56%   1        25.00%
Thomas Gleixner  2       3.39%    1        25.00%
Peter Zijlstra   1       1.69%    1        25.00%
Total            59      100.00%  4        100.00%

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->count = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Linus Torvalds     47      78.33%   1        20.00%
David Howells      8       13.33%   1        20.00%
Thomas Gleixner    2       3.33%    1        20.00%
Christoph Hellwig  2       3.33%    1        20.00%
Peter Zijlstra     1       1.67%    1        20.00%
Total              60      100.00%  5        100.00%

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	sem->count = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

Contributors

Person             Tokens  Prop     Commits  CommitProp
Christoph Hellwig  49      81.67%   1        25.00%
David Howells      8       13.33%   1        25.00%
Thomas Gleixner    2       3.33%    1        25.00%
Peter Zijlstra     1       1.67%    1        25.00%
Total              60      100.00%  4        100.00%
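
Downgrading turns exclusive ownership (count == -1) into a single read hold (count == 1) with no window where the lock is free, waking queued readers but not writers. A sketch of the publish-then-read pattern via the public downgrade_write(), with the invented my_cache:

#include <linux/rwsem.h>

static int my_cache_add_then_report(struct my_cache *c)
{
	int n;

	down_write(&c->lock);
	c->nentries++;
	downgrade_write(&c->lock);	/* keep data stable, let readers in */
	n = c->nentries;
	up_read(&c->lock);

	return n;
}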


Overall Contributors

Person              Tokens  Prop     Commits  CommitProp
Linus Torvalds      485     40.82%   3        10.34%
Christoph Hellwig   183     15.40%   1        3.45%
Kirill Tkhai        118     9.93%    2        6.90%
Américo Wang        63      5.30%    2        6.90%
Michal Hocko        62      5.22%    1        3.45%
Yuanhan Liu         56      4.71%    1        3.45%
Ingo Molnar         48      4.04%    3        10.34%
Niklas Cassel       42      3.54%    1        3.45%
David Howells       40      3.37%    1        3.45%
Michel Lespinasse   30      2.53%    2        6.90%
Thomas Gleixner     16      1.35%    1        3.45%
Peter Zijlstra      16      1.35%    2        6.90%
Kevin Hilman        14      1.18%    1        3.45%
Davidlohr Bueso A   7       0.59%    4        13.79%
Nicholas Piggin     5       0.42%    1        3.45%
Greg Kroah-Hartman  1       0.08%    1        3.45%
Paul Gortmaker      1       0.08%    1        3.45%
Andrew Morton       1       0.08%    1        3.45%
Total               1188    100.00%  29       100.00%
Created with cregit.