Release 4.15 kernel/locking/qrwlock.c
/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>
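
Both slowpaths below depend on the lock-word layout that the generic qrwlock header defines. As a reference sketch, assuming the include/asm-generic/qrwlock.h definitions from this release (they are not part of this file; consult the header for the authoritative values):

#define	_QW_WAITING	0x100		/* A writer is waiting	   */
#define	_QW_LOCKED	0x0ff		/* A writer holds the lock */
#define	_QW_WMASK	0x1ff		/* Writer mask		   */
#define	_QR_SHIFT	9		/* Reader count shift	   */
#define	_QR_BIAS	(1U << _QR_SHIFT)

In other words, lock->cnts packs the reader count in the bits above _QR_SHIFT and the writer state in the low bits, so cnts == _QW_WAITING means a writer is queued but no readers remain and no writer holds the lock.
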
/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
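
	/*
	 * Back out the reader-count increment taken speculatively in the
	 * fast path: this reader must wait its turn in the queue instead.
	 */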
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
Contributors

Person          | Tokens | Token % | Commits | Commit %
----------------|--------|---------|---------|---------
Waiman Long     |     67 |  72.83% |       3 |   50.00%
Will Deacon     |     23 |  25.00% |       2 |   33.33%
Davidlohr Bueso |      2 |   2.17% |       1 |   16.67%
Total           |     92 | 100.00% |       6 |  100.00%
EXPORT_SYMBOL(queued_read_lock_slowpath);
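
For context, the slowpath above runs only after the inline fast path has failed. A minimal sketch of that fast path, assuming the include/asm-generic/qrwlock.h implementation from this release (reproduced for illustration; it is not part of this file):

static inline void queued_read_lock(struct qrwlock *lock)
{
	u32 cnts;

	/* Speculatively bump the reader count ... */
	cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
	/* ... and keep the lock if no writer holds or waits for it. */
	if (likely(!(cnts & _QW_WMASK)))
		return;

	/* The slowpath will decrement the reader count, if necessary. */
	queued_read_lock_slowpath(lock);
}

The speculative _QR_BIAS added here is what the slowpath's atomic_sub() backs out for readers that must queue.
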
/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_add(_QW_WAITING, &lock->cnts);

	/* When no more readers or writers, set the locked flag */
	do {
		atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
	} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
					_QW_LOCKED) != _QW_WAITING);
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
Contributors

Person          | Tokens | Token % | Commits | Commit %
----------------|--------|---------|---------|---------
Waiman Long     |     83 |  79.81% |       2 |   28.57%
Will Deacon     |     19 |  18.27% |       4 |   57.14%
Davidlohr Bueso |      2 |   1.92% |       1 |   14.29%
Total           |    104 | 100.00% |       7 |  100.00%
EXPORT_SYMBOL(queued_write_lock_slowpath);
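
As on the read side, this slowpath is reached only when the inline fast path cannot claim an uncontended lock. A sketch, again assuming the generic header from this release:

static inline void queued_write_lock(struct qrwlock *lock)
{
	/* Optimize for the unlocked case: 0 -> _QW_LOCKED in one step. */
	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
		return;

	queued_write_lock_slowpath(lock);
}
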
Overall Contributors

Person          | Tokens | Token % | Commits | Commit %
----------------|--------|---------|---------|---------
Waiman Long     |    180 |  78.26% |       4 |   40.00%
Will Deacon     |     43 |  18.70% |       4 |   40.00%
Davidlohr Bueso |      4 |   1.74% |       1 |   10.00%
Babu Moger      |      3 |   1.30% |       1 |   10.00%
Total           |    230 | 100.00% |      10 |  100.00%
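
On architectures that select ARCH_USE_QUEUED_RWLOCKS, these slowpaths sit behind the ordinary rwlock_t API, so callers never invoke them directly. A minimal hypothetical usage sketch; example_lock, shared_counter, and both functions are illustrative names, not part of this file:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);
static int shared_counter;

void example_reader(void)
{
	int v;

	read_lock(&example_lock);	/* may fall into queued_read_lock_slowpath() */
	v = shared_counter;		/* readers may run concurrently */
	read_unlock(&example_lock);
	(void)v;
}

void example_writer(void)
{
	write_lock(&example_lock);	/* may fall into queued_write_lock_slowpath() */
	shared_counter++;		/* writers run exclusively */
	write_unlock(&example_lock);
}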