Contributors: 17

Author | Tokens | Token Proportion | Commits | Commit Proportion
Ingo Molnar | 373 | 81.26% | 5 | 20.83%
George Stark | 39 | 8.50% | 1 | 4.17%
Peter Zijlstra | 13 | 2.83% | 4 | 16.67%
Eric Paris | 6 | 1.31% | 1 | 4.17%
Randy Dunlap | 5 | 1.09% | 1 | 4.17%
Linus Torvalds | 4 | 0.87% | 1 | 4.17%
David Woodhouse | 3 | 0.65% | 1 | 4.17%
Linus Torvalds (pre-git) | 2 | 0.44% | 1 | 4.17%
Zhang Qiang | 2 | 0.44% | 1 | 4.17%
John Levon | 2 | 0.44% | 1 | 4.17%
Lance Roy | 2 | 0.44% | 1 | 4.17%
Jeremy Kerr | 2 | 0.44% | 1 | 4.17%
Thomas Gleixner | 2 | 0.44% | 1 | 4.17%
Christoph Hellwig | 1 | 0.22% | 1 | 4.17%
Chris Wilson | 1 | 0.22% | 1 | 4.17%
Paul Gortmaker | 1 | 0.22% | 1 | 4.17%
Andrew Morton | 1 | 0.22% | 1 | 4.17%
Total | 459 | | 24 |

/*
 * Debugging code for mutexes
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * lock debugging, locking tree, deadlock detection started by:
 *
 *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
 *  Released under the General Public License (GPL).
 */
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

#include "mutex.h"

/*
 * Must be called with lock->wait_lock held.
 */
void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
{
	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
	waiter->magic = waiter;
	INIT_LIST_HEAD(&waiter->list);
	waiter->ww_ctx = MUTEX_POISON_WW_CTX;
}

void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);
	DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
	DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
}

void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
	DEBUG_LOCKS_WARN_ON(!list_empty(&waiter->list));
	memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}

void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			    struct task_struct *task)
{
	lockdep_assert_held(&lock->wait_lock);

	/* Mark the current thread as blocked on the lock: */
	task->blocked_on = waiter;
}

void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
			       struct task_struct *task)
{
	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
	DEBUG_LOCKS_WARN_ON(waiter->task != task);
	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
	task->blocked_on = NULL;

	INIT_LIST_HEAD(&waiter->list);
	waiter->task = NULL;
}

void debug_mutex_unlock(struct mutex *lock)
{
	if (likely(debug_locks)) {
		DEBUG_LOCKS_WARN_ON(lock->magic != lock);
		DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
	}
}

void debug_mutex_init(struct mutex *lock, const char *name,
		      struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
#endif
	lock->magic = lock;
}

static void devm_mutex_release(void *res)
{
	mutex_destroy(res);
}

int __devm_mutex_init(struct device *dev, struct mutex *lock)
{
	return devm_add_action_or_reset(dev, devm_mutex_release, lock);
}
EXPORT_SYMBOL_GPL(__devm_mutex_init);
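
/*
 * Example (not part of the file above): a minimal sketch of how a driver
 * might reach __devm_mutex_init() through the devm_mutex_init() wrapper,
 * so that devm_mutex_release() runs mutex_destroy() automatically when
 * the device is detached. The foo_priv structure and foo_probe() function
 * are hypothetical; only the devm_mutex_init() call reflects the API above.
 */
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	struct mutex lock;	/* protects hypothetical driver state */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Initializes the mutex and registers mutex_destroy() for teardown. */
	ret = devm_mutex_init(&pdev->dev, &priv->lock);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}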

/***
 * mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void mutex_destroy(struct mutex *lock)
{
	DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
	lock->magic = NULL;
}
EXPORT_SYMBOL_GPL(mutex_destroy);
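
/*
 * Example (not part of the file above): a minimal sketch of the mutex
 * lifecycle that mutex_destroy() closes out. With CONFIG_DEBUG_MUTEXES
 * enabled, destroying a locked mutex or reusing a destroyed one trips the
 * DEBUG_LOCKS_WARN_ON() checks above. The bar_data structure and the
 * bar_setup()/bar_update()/bar_teardown() helpers are hypothetical.
 */
#include <linux/mutex.h>
#include <linux/slab.h>

struct bar_data {
	struct mutex lock;
	int value;
};

static struct bar_data *bar_setup(void)
{
	struct bar_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;
	/* Sets lock->magic via debug_mutex_init() when debugging is on. */
	mutex_init(&d->lock);
	return d;
}

static void bar_update(struct bar_data *d, int v)
{
	mutex_lock(&d->lock);
	d->value = v;
	mutex_unlock(&d->lock);
}

static void bar_teardown(struct bar_data *d)
{
	/* The mutex must be unlocked here; mutex_destroy() warns otherwise. */
	mutex_destroy(&d->lock);
	kfree(d);
}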