Release 4.15 kernel/locking/lockdep.c
/*
* kernel/lockdep.c
*
* Runtime locking correctness validator
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
*
* - lock inversion scenarios
* - circular lock dependencies
* - hardirq/softirq safe/unsafe locking bugs
*
* Bugs are reported even if the current locking scenario does not cause
* any deadlock at this point.
*
* I.e. if at any time in the past two locks were taken in a different order,
* even if it happened in another task, and even if those were different
* lock instances (but of the same class as this lock), this code will detect it.
*
* Thanks to Arjan van de Ven for coming up with the initial idea of
* mapping lock dependencies at runtime.
*/
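As a concrete illustration of the bug classes listed above: two code paths that take the same pair of locks in opposite order are reported as soon as both orders have been observed, even if the two paths never actually deadlock on this run. A minimal, hypothetical in-kernel sketch (all names are made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock_a);
static DEFINE_SPINLOCK(demo_lock_b);

static void demo_path_one(void)
{
        spin_lock(&demo_lock_a);
        spin_lock(&demo_lock_b);        /* dependency A -> B is recorded */
        spin_unlock(&demo_lock_b);
        spin_unlock(&demo_lock_a);
}

static void demo_path_two(void)
{
        spin_lock(&demo_lock_b);
        spin_lock(&demo_lock_a);        /* B -> A closes a cycle: lockdep
                                         * warns about a possible circular
                                         * dependency even though no deadlock
                                         * occurred this time. */
        spin_unlock(&demo_lock_a);
        spin_unlock(&demo_lock_b);
}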
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>
#include <asm/sections.h>
#include "lockdep_internals.h"
#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif
#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
/*
* lockdep_lock: protects the lockdep graph, the hashes and the
* class/list/hash allocators.
*
* This is one of the rare exceptions where it's justified
* to use a raw spinlock - we really don't want the spinlock
* code to recurse back into the lockdep code...
*/
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static int graph_lock(void)
{
arch_spin_lock(&lockdep_lock);
/*
* Make sure that if another CPU detected a bug while
* walking the graph we don't change it (while the other
* CPU is busy printing out stuff with the graph lock
* dropped already)
*/
if (!debug_locks) {
arch_spin_unlock(&lockdep_lock);
return 0;
}
/* prevent any recursions within lockdep from causing deadlocks */
current->lockdep_recursion++;
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 32 | 80.00% | 1 | 33.33% |
Steven Rostedt | 6 | 15.00% | 1 | 33.33% |
Thomas Gleixner | 2 | 5.00% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
static inline int graph_unlock(void)
{
if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
/*
* The lockdep graph lock isn't locked while we expect it to
* be, we're confused now, bye!
*/
return DEBUG_LOCKS_WARN_ON(1);
}
current->lockdep_recursion--;
arch_spin_unlock(&lockdep_lock);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 17 | 39.53% | 1 | 20.00% |
Jarek Poplawski | 16 | 37.21% | 1 | 20.00% |
Steven Rostedt | 5 | 11.63% | 1 | 20.00% |
Peter Zijlstra | 3 | 6.98% | 1 | 20.00% |
Thomas Gleixner | 2 | 4.65% | 1 | 20.00% |
Total | 43 | 100.00% | 5 | 100.00% |
/*
* Turn lock debugging off and return with 0 if it was off already,
* and also release the graph lock:
*/
static inline int debug_locks_off_graph_unlock(void)
{
int ret = debug_locks_off();
arch_spin_unlock(&lockdep_lock);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 23 | 95.83% | 1 | 50.00% |
Thomas Gleixner | 1 | 4.17% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
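graph_lock(), graph_unlock() and debug_locks_off_graph_unlock() form a small calling convention that the rest of this file follows: a graph writer takes the lock, backs off quietly when graph_lock() returns 0 (another CPU has already disabled lockdep), and on a fatal condition disables lockdep and drops the lock before printing anything. A condensed, hypothetical sketch of that convention (the function and message are made up; save_trace() and register_lock_class() below follow the same pattern):

static int demo_graph_writer(bool table_full)
{
        if (!graph_lock())
                return 0;               /* lockdep already switched off */

        if (table_full) {
                /* Turn lockdep off and drop the graph lock before printing. */
                if (!debug_locks_off_graph_unlock())
                        return 0;

                print_lockdep_off("BUG: demo table too low!");
                dump_stack();
                return 0;
        }

        /* ... mutate the graph, the hashes or the allocators here ... */

        graph_unlock();
        return 1;
}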
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
* All data structures here are protected by the global debug_lock.
*
* Mutex key structs only get allocated once, during bootup, and never
* get freed - this significantly simplifies the debugging code.
*/
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
if (!hlock->class_idx) {
/*
* Someone passed in garbage, we give up.
*/
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
return lock_classes + hlock->class_idx - 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dave Jones | 40 | 97.56% | 1 | 50.00% |
Peter Zijlstra | 1 | 2.44% | 1 | 50.00% |
Total | 41 | 100.00% | 2 | 100.00% |
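held_lock::class_idx stores the index into lock_classes[] biased by one, so that zero can mean "no class recorded"; hlock_class() above removes the bias, and print_lock() further down relies on the same convention. A tiny stand-alone user-space model of the encoding (hypothetical names):

#include <stdio.h>

#define DEMO_MAX_KEYS 8

struct demo_class { const char *name; };

static struct demo_class demo_classes[DEMO_MAX_KEYS] = {
        { "first_registered_class" },
};

/* A valid array index i is stored as i + 1; 0 means "not set". */
static unsigned int demo_encode(unsigned int array_idx)
{
        return array_idx + 1;
}

static struct demo_class *demo_decode(unsigned int class_idx)
{
        if (!class_idx)                 /* garbage or a released lock */
                return NULL;
        return demo_classes + class_idx - 1;
}

int main(void)
{
        unsigned int idx = demo_encode(0);

        printf("%s\n", demo_decode(idx)->name);         /* first_registered_class */
        printf("%s\n", demo_decode(0) ? "set" : "unset");
        return 0;
}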
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);
static inline u64 lockstat_clock(void)
{
return local_clock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 13 | 100.00% | 2 | 100.00% |
Total | 13 | 100.00% | 2 | 100.00% |
static int lock_point(unsigned long points[], unsigned long ip)
{
int i;
for (i = 0; i < LOCKSTAT_POINTS; i++) {
if (points[i] == 0) {
points[i] = ip;
break;
}
if (points[i] == ip)
break;
}
return i;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 63 | 96.92% | 2 | 66.67% |
Ingo Molnar | 2 | 3.08% | 1 | 33.33% |
Total | 65 | 100.00% | 3 | 100.00% |
static void lock_time_inc(struct lock_time *lt, u64 time)
{
if (time > lt->max)
lt->max = time;
if (time < lt->min || !lt->nr)
lt->min = time;
lt->total += time;
lt->nr++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 55 | 94.83% | 2 | 50.00% |
Ingo Molnar | 2 | 3.45% | 1 | 25.00% |
Frank Rowand | 1 | 1.72% | 1 | 25.00% |
Total | 58 | 100.00% | 4 | 100.00% |
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
if (!src->nr)
return;
if (src->max > dst->max)
dst->max = src->max;
if (src->min < dst->min || !dst->nr)
dst->min = src->min;
dst->total += src->total;
dst->nr += src->nr;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 43 | 52.44% | 1 | 50.00% |
Frank Rowand | 39 | 47.56% | 1 | 50.00% |
Total | 82 | 100.00% | 2 | 100.00% |
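lock_time_inc() folds one sample into a min/max/total/count accumulator (the !lt->nr test makes the first sample define the minimum), and lock_time_add() merges one accumulator into another, which is how lock_stats() below combines the per-CPU buckets. A small runnable user-space copy of the two helpers, with hypothetical names and plain integer types:

#include <stdio.h>

struct demo_lock_time {
        long long min, max, total;
        unsigned long nr;
};

static void demo_time_inc(struct demo_lock_time *lt, long long t)
{
        if (t > lt->max)
                lt->max = t;
        if (t < lt->min || !lt->nr)     /* !nr: first sample sets the min */
                lt->min = t;
        lt->total += t;
        lt->nr++;
}

static void demo_time_add(const struct demo_lock_time *src,
                          struct demo_lock_time *dst)
{
        if (!src->nr)                   /* empty source: nothing to merge */
                return;
        if (src->max > dst->max)
                dst->max = src->max;
        if (src->min < dst->min || !dst->nr)
                dst->min = src->min;
        dst->total += src->total;
        dst->nr += src->nr;
}

int main(void)
{
        struct demo_lock_time cpu0 = { 0 }, cpu1 = { 0 }, sum = { 0 };

        demo_time_inc(&cpu0, 5);
        demo_time_inc(&cpu0, 2);
        demo_time_inc(&cpu1, 9);

        demo_time_add(&cpu0, &sum);
        demo_time_add(&cpu1, &sum);

        /* Prints: min=2 max=9 total=16 nr=3 */
        printf("min=%lld max=%lld total=%lld nr=%lu\n",
               sum.min, sum.max, sum.total, sum.nr);
        return 0;
}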
struct lock_class_stats lock_stats(struct lock_class *class)
{
struct lock_class_stats stats;
int cpu, i;
memset(&stats, 0, sizeof(struct lock_class_stats));
for_each_possible_cpu(cpu) {
struct lock_class_stats *pcs =
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
stats.contention_point[i] += pcs->contention_point[i];
for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
stats.contending_point[i] += pcs->contending_point[i];
lock_time_add(&pcs->read_waittime, &stats.read_waittime);
lock_time_add(&pcs->write_waittime, &stats.write_waittime);
lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
stats.bounces[i] += pcs->bounces[i];
}
return stats;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 208 | 99.52% | 3 | 75.00% |
Tejun Heo | 1 | 0.48% | 1 | 25.00% |
Total | 209 | 100.00% | 4 | 100.00% |
void clear_lock_stats(struct lock_class *class)
{
int cpu;
for_each_possible_cpu(cpu) {
struct lock_class_stats *cpu_stats =
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
memset(cpu_stats, 0, sizeof(struct lock_class_stats));
}
memset(class->contention_point, 0, sizeof(class->contention_point));
memset(class->contending_point, 0, sizeof(class->contending_point));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 81 | 98.78% | 2 | 66.67% |
Tejun Heo | 1 | 1.22% | 1 | 33.33% |
Total | 82 | 100.00% | 3 | 100.00% |
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 22 | 88.00% | 1 | 33.33% |
Ingo Molnar | 2 | 8.00% | 1 | 33.33% |
Tejun Heo | 1 | 4.00% | 1 | 33.33% |
Total | 25 | 100.00% | 3 | 100.00% |
static void put_lock_stats(struct lock_class_stats *stats)
{
put_cpu_var(cpu_lock_stats);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 15 | 93.75% | 1 | 50.00% |
Tejun Heo | 1 | 6.25% | 1 | 50.00% |
Total | 16 | 100.00% | 2 | 100.00% |
static void lock_release_holdtime(struct held_lock *hlock)
{
struct lock_class_stats *stats;
u64 holdtime;
if (!lock_stat)
return;
holdtime = lockstat_clock() - hlock->holdtime_stamp;
stats = get_lock_stats(hlock_class(hlock));
if (hlock->read)
lock_time_inc(&stats->read_holdtime, holdtime);
else
lock_time_inc(&stats->write_holdtime, holdtime);
put_lock_stats(stats);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 73 | 96.05% | 2 | 66.67% |
Dave Jones | 3 | 3.95% | 1 | 33.33% |
Total | 76 | 100.00% | 3 | 100.00% |
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 11 | 100.00% | 1 | 100.00% |
Total | 11 | 100.00% | 1 | 100.00% |
#endif
/*
* We keep a global list of all lock classes. The list only grows,
* never shrinks. The list is only accessed with the lockdep
* spinlock lock held.
*/
LIST_HEAD(all_lock_classes);
/*
* The lockdep classes are in a hash-table as well, for fast lookup:
*/
#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct hlist_head classhash_table[CLASSHASH_SIZE];
/*
* We put the lock dependency chains into a hash-table as well, to cache
* their existence:
*/
#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
* The hash key of the lock dependency chains is a hash itself too:
* it's a hash of all locks taken up to that lock, including that lock.
* It's a 64-bit hash, because it's important for the keys to be
* unique.
*/
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
u32 k0 = key, k1 = key >> 32;
__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
return k0 | (u64)k1 << 32;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 40 | 90.91% | 1 | 50.00% |
Ingo Molnar | 4 | 9.09% | 1 | 50.00% |
Total | 44 | 100.00% | 2 | 100.00% |
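Per the comment above, the chain key is built incrementally: as locks are acquired, each held lock's class index is folded into the running 64-bit key, so two different acquisition orders hash to different chains. A minimal user-space model of that folding, with a stand-in mixer instead of __jhash_mix() and made-up names:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __jhash_mix(): any decent 32-bit mixer works for the demo. */
static void demo_mix(uint32_t idx, uint32_t *k0, uint32_t *k1)
{
        *k0 ^= idx * 0x9e3779b1u;
        *k0 = (*k0 << 13) | (*k0 >> 19);
        *k1 ^= *k0 * 0x85ebca6bu;
        *k1 = (*k1 << 7) | (*k1 >> 25);
}

/* Same shape as iterate_chain_key(): fold a 32-bit index into a 64-bit key. */
static uint64_t demo_iterate_chain_key(uint64_t key, uint32_t idx)
{
        uint32_t k0 = key, k1 = key >> 32;

        demo_mix(idx, &k0, &k1);

        return k0 | (uint64_t)k1 << 32;
}

int main(void)
{
        /* Two different acquisition orders of class indices 1, 2, 3. */
        uint32_t order_a[] = { 1, 2, 3 };
        uint32_t order_b[] = { 1, 3, 2 };
        uint64_t key_a = 0, key_b = 0;

        for (int i = 0; i < 3; i++) {
                key_a = demo_iterate_chain_key(key_a, order_a[i]);
                key_b = demo_iterate_chain_key(key_b, order_b[i]);
        }

        /* Different orders yield different keys (with overwhelming
         * probability), which is what the chain cache keys on. */
        printf("%llx vs %llx\n",
               (unsigned long long)key_a, (unsigned long long)key_b);
        return 0;
}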
void lockdep_off(void)
{
current->lockdep_recursion++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(lockdep_off);
void lockdep_on(void)
{
current->lockdep_recursion--;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 12 | 100.00% | 1 | 100.00% |
Total | 12 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(lockdep_on);
/*
* Debugging switches:
*/
#define VERBOSE 0
#define VERY_VERBOSE 0
#if VERBOSE
# define HARDIRQ_VERBOSE 1
# define SOFTIRQ_VERBOSE 1
#else
# define HARDIRQ_VERBOSE 0
# define SOFTIRQ_VERBOSE 0
#endif
#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
* Quick filtering for interesting events:
*/
static int class_filter(struct lock_class *class)
{
#if 0
/* Example */
if (class->name_version == 1 &&
!strcmp(class->name, "lockname"))
return 1;
if (class->name_version == 1 &&
!strcmp(class->name, "&struct->lockfield"))
return 1;
#endif
/* Filter everything else. 1 would be to allow everything else */
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 15 | 68.18% | 2 | 66.67% |
Andi Kleen | 7 | 31.82% | 1 | 33.33% |
Total | 22 | 100.00% | 3 | 100.00% |
#endif
static int verbose(struct lock_class *class)
{
#if VERBOSE
return class_filter(class);
#endif
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the graph_lock.
*/
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
static void print_lockdep_off(const char *bug_msg)
{
printk(KERN_DEBUG "%s\n", bug_msg);
printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dave Jones | 31 | 86.11% | 1 | 50.00% |
Andreas Gruenbacher | 5 | 13.89% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
static int save_trace(struct stack_trace *trace)
{
trace->nr_entries = 0;
trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
trace->entries = stack_trace + nr_stack_trace_entries;
trace->skip = 3;
save_stack_trace(trace);
/*
* Some daft arches put -1 at the end to indicate it's a full trace.
*
* <rant> this is buggy anyway, since it takes a whole extra entry so a
* complete trace that maxes out the entries provided will be reported
* as incomplete, friggin useless </rant>
*/
if (trace->nr_entries != 0 &&
trace->entries[trace->nr_entries-1] == ULONG_MAX)
trace->nr_entries--;
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
if (!debug_locks_off_graph_unlock())
return 0;
print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
dump_stack();
return 0;
}
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 77 | 65.25% | 2 | 28.57% |
Peter Zijlstra | 24 | 20.34% | 1 | 14.29% |
Tony Luck | 6 | 5.08% | 1 | 14.29% |
Andi Kleen | 6 | 5.08% | 1 | 14.29% |
Dave Jones | 5 | 4.24% | 2 | 28.57% |
Total | 118 | 100.00% | 7 | 100.00% |
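save_trace() carves each new backtrace out of the tail of the shared, tightly packed stack_trace[] array: it hands the caller all remaining room, records the trace, then shrinks the slice to what was actually used and advances nr_stack_trace_entries. A minimal user-space model of that packed-arena scheme (hypothetical names):

#include <stddef.h>
#include <stdio.h>

#define DEMO_MAX_ENTRIES 16

static unsigned long demo_pool[DEMO_MAX_ENTRIES];
static size_t demo_used;

/* Returns a pointer to the caller's slice, or NULL if the pool is full. */
static unsigned long *demo_save(const unsigned long *frames, size_t n,
                                size_t *saved)
{
        size_t room = DEMO_MAX_ENTRIES - demo_used;

        if (n > room)
                n = room;               /* truncate, like a full stack_trace[] */
        if (!n)
                return NULL;

        for (size_t i = 0; i < n; i++)
                demo_pool[demo_used + i] = frames[i];

        *saved = n;
        demo_used += n;                 /* nr_stack_trace_entries analogue */
        return demo_pool + demo_used - n;
}

int main(void)
{
        unsigned long a[] = { 0x10, 0x20, 0x30 };
        size_t n;
        unsigned long *slice = demo_save(a, 3, &n);

        printf("stored %zu entries at offset %td\n", n, slice - demo_pool);
        return 0;
}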
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* Various lockdep statistics:
*/
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif
/*
* Locking printouts:
*/
#define __USAGE(__STATE) \
[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
[LOCK_USED] = "INITIAL USE",
};
const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 31 | 91.18% | 1 | 50.00% |
Alexey Dobriyan | 3 | 8.82% | 1 | 50.00% |
Total | 34 | 100.00% | 2 | 100.00% |
static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
return 1UL << bit;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
char c = '.';
if (class->usage_mask & lock_flag(bit + 2))
c = '+';
if (class->usage_mask & lock_flag(bit)) {
c = '-';
if (class->usage_mask & lock_flag(bit + 2))
c = '?';
}
return c;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 37 | 50.00% | 1 | 33.33% |
Peter Zijlstra | 36 | 48.65% | 1 | 33.33% |
Nicholas Piggin | 1 | 1.35% | 1 | 33.33% |
Total | 74 | 100.00% | 3 | 100.00% |
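For each IRQ state, get_usage_char() condenses two usage bits into one glyph: bit is the LOCK_USED_IN_* bit ("ever acquired in that IRQ context") and, given the lock_usage_bit layout in lockdep.h (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ per state), bit + 2 is the matching LOCK_ENABLED_* bit ("ever acquired with that IRQ type enabled"). That yields '.', '-', '+' and '?' for neither, used-in only, enabled only, and both. A user-space rendering of the same mapping (hypothetical names):

#include <stdio.h>

/* Mirror of get_usage_char() for one IRQ state; bit 0 = USED_IN, bit 2 = ENABLED. */
static char demo_usage_char(unsigned long mask)
{
        char c = '.';

        if (mask & (1UL << 2))
                c = '+';                /* taken with this IRQ type enabled */
        if (mask & (1UL << 0)) {
                c = '-';                /* taken from this IRQ context */
                if (mask & (1UL << 2))
                        c = '?';        /* both: potential IRQ-state inversion */
        }
        return c;
}

int main(void)
{
        unsigned long masks[] = { 0x0, 0x1, 0x4, 0x5 };

        /* Prints '.', '-', '+', '?' in turn. */
        for (int i = 0; i < 4; i++)
                printf("mask=0x%lx -> '%c'\n", masks[i], demo_usage_char(masks[i]));
        return 0;
}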
void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
int i = 0;
#define LOCKDEP_STATE(__STATE) \
usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE
usage[i] = '\0';
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 37 | 88.10% | 2 | 50.00% |
Nicholas Piggin | 3 | 7.14% | 1 | 25.00% |
Ingo Molnar | 2 | 4.76% | 1 | 25.00% |
Total | 42 | 100.00% | 4 | 100.00% |
static void __print_lock_name(struct lock_class *class)
{
char str[KSYM_NAME_LEN];
const char *name;
name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
printk(KERN_CONT "%s", name);
} else {
printk(KERN_CONT "%s", name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
if (class->subclass)
printk(KERN_CONT "/%d", class->subclass);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 89 | 89.90% | 1 | 20.00% |
Steven Rostedt | 5 | 5.05% | 2 | 40.00% |
Dmitriy Vyukov | 4 | 4.04% | 1 | 20.00% |
Jarek Poplawski | 1 | 1.01% | 1 | 20.00% |
Total | 99 | 100.00% | 5 | 100.00% |
static void print_lock_name(struct lock_class *class)
{
char usage[LOCK_USAGE_CHARS];
get_usage_chars(class, usage);
printk(KERN_CONT " (");
__print_lock_name(class);
printk(KERN_CONT "){%s}", usage);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Steven Rostedt | 33 | 76.74% | 1 | 20.00% |
Ingo Molnar | 5 | 11.63% | 1 | 20.00% |
Dmitriy Vyukov | 2 | 4.65% | 1 | 20.00% |
Peter Zijlstra | 2 | 4.65% | 1 | 20.00% |
Nicholas Piggin | 1 | 2.33% | 1 | 20.00% |
Total | 43 | 100.00% | 5 | 100.00% |
static void print_lockdep_cache(struct lockdep_map *lock)
{
const char *name;
char str[KSYM_NAME_LEN];
name = lock->name;
if (!name)
name = __get_key_name(lock->key->subkeys, str);
printk(KERN_CONT "%s", name);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 52 | 96.30% | 1 | 33.33% |
Dmitriy Vyukov | 1 | 1.85% | 1 | 33.33% |
Jarek Poplawski | 1 | 1.85% | 1 | 33.33% |
Total | 54 | 100.00% | 3 | 100.00% |
static void print_lock(struct held_lock *hlock)
{
/*
* We can be called locklessly through debug_show_all_locks() so be
* extra careful, the hlock might have been released and cleared.
*/
unsigned int class_idx = hlock->class_idx;
/* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
barrier();
if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
printk(KERN_CONT "<RELEASED>\n");
return;
}
print_lock_name(lock_classes + class_idx - 1);
printk(KERN_CONT ", at: [<%p>] %pS\n",
(void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 39 | 50.65% | 1 | 25.00% |
Ingo Molnar | 21 | 27.27% | 1 | 25.00% |
Dmitriy Vyukov | 16 | 20.78% | 1 | 25.00% |
Dave Jones | 1 | 1.30% | 1 | 25.00% |
Total | 77 | 100.00% | 4 | 100.00% |
static void lockdep_print_held_locks(struct task_struct *curr)
{
int i, depth = curr->lockdep_depth;
if (!depth) {
printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
return;
}
printk("%d lock%s held by %s/%d:\n",
depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
print_lock(curr->held_locks + i);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ingo Molnar | 91 | 93.81% | 1 | 50.00% |
Pavel Emelyanov | 6 | 6.19% | 1 | 50.00% |
Total | 97 | 100.00% | 2 | 100.00% |
static void print_kernel_ident(void)
{
printk("%s %.*s %s\n", init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version,
print_tainted());
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 16 | 41.03% | 1 | 25.00% |
Ingo Molnar | 16 | 41.03% | 1 | 25.00% |
Ben Hutchings | 5 | 12.82% | 1 | 25.00% |
Andi Kleen | 2 | 5.13% | 1 | 25.00% |
Total | 39 | 100.00% | 4 | 100.00% |
static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
return class_filter(class);
#endif
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 21 | 84.00% | 1 | 50.00% |
Ingo Molnar | 4 | 16.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
/*
* Is this the address of a static object:
*/
#ifdef __KERNEL__
static int static_obj(void *obj)
{
unsigned long start = (unsigned long) &_stext,
end = (unsigned long) &_end,
addr = (unsigned long) obj;
/*
* static variable?
*/
if ((addr >= start) && (addr < end))
return 1;
if (arch_is_kernel_data(addr))
return 1;
/*
* in-kernel percpu var?
*/
if (is_kernel_percpu_address(addr))
return 1;
/*
* module static or percpu var?
*/
return is_module_address(addr) || is_module_percpu_address(addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 53 | 59.55% | 1 | 25.00% |
Lei Ming | 16 | 17.98% | 1 | 25.00% |
Tejun Heo | 10 | 11.24% | 1 | 25.00% |
Mike Frysinger | 10 | 11.24% | 1 | 25.00% |
Total | 89 | 100.00% | 4 | 100.00% |
#endif
/*
* To make lock name printouts unique, we calculate a unique
* class->name_version generation counter:
*/
static int count_matching_names(struct lock_class *new_class)
{
struct lock_class *class;
int count = 0;
if (!new_class->name)
return 0;
list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
if (new_class->key - new_class->subclass == class->key)
return class->name_version;
if (class->name && !strcmp(class->name, new_class->name))
count = max(count, class->name_version);
}
return count + 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 61 | 64.89% | 2 | 33.33% |
Ingo Molnar | 26 | 27.66% | 1 | 16.67% |
David S. Miller | 3 | 3.19% | 1 | 16.67% |
Jarek Poplawski | 3 | 3.19% | 1 | 16.67% |
Andi Kleen | 1 | 1.06% | 1 | 16.67% |
Total | 94 | 100.00% | 6 | 100.00% |
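Two classes that share a name string but have different keys get name_version 1 and 2, which is what makes __print_lock_name() above append "#2" to the second one. A small user-space model of the scan (ignoring the same-key early return, hypothetical names):

#include <stdio.h>
#include <string.h>

struct demo_class {
        const char *name;
        int name_version;
};

static struct demo_class registered[16];
static int nr_registered;

static int demo_count_matching_names(const char *new_name)
{
        int count = 0;

        if (!new_name)
                return 0;

        for (int i = 0; i < nr_registered; i++) {
                if (registered[i].name &&
                    !strcmp(registered[i].name, new_name) &&
                    registered[i].name_version > count)
                        count = registered[i].name_version;
        }
        return count + 1;
}

static void demo_register(const char *name)
{
        registered[nr_registered].name = name;
        registered[nr_registered].name_version =
                demo_count_matching_names(name);
        nr_registered++;
}

int main(void)
{
        demo_register("&dev->lock");    /* name_version 1 -> printed bare  */
        demo_register("&dev->lock");    /* name_version 2 -> printed "#2" */

        printf("%d %d\n", registered[0].name_version,
               registered[1].name_version);
        return 0;
}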
/*
* Register a lock's class in the hash-table, if the class is not present
* yet. Otherwise we look it up. We cache the result in the lock object
* itself, so actual lookup of the hash should be once per lock object.
*/
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
bool is_static = false;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
debug_locks_off();
printk(KERN_ERR
"BUG: looking up invalid subclass: %u\n", subclass);
printk(KERN_ERR
"turning off the locking correctness validator.\n");
dump_stack();
return NULL;
}
/*
* Static locks do not have their class-keys yet - for them the key
* is the lock object itself. If the lock is in the per cpu area,
* the canonical address of the lock (per cpu offset removed) is
* used.
*/
if (unlikely(!lock->key)) {
unsigned long can_addr, addr = (unsigned long)lock;
if (__is_kernel_percpu_address(addr, &can_addr))
lock->key = (void *)can_addr;
else if (__is_module_percpu_address(addr, &can_addr))
lock->key = (void *)can_addr;
else if (static_obj(lock))
lock->key = (void *)lock;
else
return ERR_PTR(-EINVAL);
is_static = true;
}
/*
* NOTE: the class-key must be unique. For dynamic locks, a static
* lock_class_key variable is passed in through the mutex_init()
* (or spin_lock_init()) call - which acts as the key. For static
* locks we use the lock object itself as the key.
*/
BUILD_BUG_ON(sizeof(struct lock_class_key) >
sizeof(struct lockdep_map));
key = lock->key->subkeys + subclass;
hash_head = classhashentry(key);
/*
* We do an RCU walk of the hash, see lockdep_free_key_range().
*/
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return NULL;
hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key) {
/*
* Huh! same key, different name? Did someone trample
* on some memory? We're most confused.
*/
WARN_ON_ONCE(class->name != lock->name);
return class;
}
}
return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 127 | 47.57% | 4 | 44.44% |
Thomas Gleixner | 96 | 35.96% | 1 | 11.11% |
Hitoshi Mitake | 34 | 12.73% | 1 | 11.11% |
Ingo Molnar | 7 | 2.62% | 1 | 11.11% |
Andrew Morton | 2 | 0.75% | 1 | 11.11% |
Jarek Poplawski | 1 | 0.37% | 1 | 11.11% |
Total | 267 | 100.00% | 9 | 100.00% |
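The comments above describe the two key flavours: a dynamically initialized lock gets its class key from the static struct lock_class_key that the init macro declares at the init site, while a statically allocated lock that never went through such a macro has lock->key == NULL and falls back to its own (canonical) address, provided static_obj() accepts it. A hypothetical sketch of the two cases, assuming lockdep is enabled (names are made up):

#include <linux/slab.h>
#include <linux/spinlock.h>

/*
 * Case 1: statically allocated lock, never run through an init macro.
 * Its dep_map key starts out NULL, so look_up_lock_class() falls back to
 * the lock's own address (static_obj() approves it, since it lives in
 * kernel .data), and the object itself becomes the class key.
 */
static DEFINE_SPINLOCK(demo_static_lock);

/*
 * Case 2: dynamically allocated lock. spin_lock_init() declares a static
 * struct lock_class_key at the init site; that static key, not the heap
 * address, identifies the class of every demo_obj->lock initialized here.
 */
struct demo_obj {
        spinlock_t lock;
};

static struct demo_obj *demo_obj_alloc(void)
{
        struct demo_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                spin_lock_init(&obj->lock);
        return obj;
}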
/*
* Register a lock's class in the hash-table, if the class is not present
* yet. Otherwise we look it up. We cache the result in the lock object
* itself, so actual lookup of the hash should be once per lock object.
*/
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lock_class(lock, subclass);
if (likely(!IS_ERR_OR_NULL(class)))
goto out_set_class_cache;
/*
* Debug-check: all keys must be persistent!
*/
if (IS_ERR(class)) {
debug_locks_off();
printk("INFO: trying to register non-static key.\n");
printk("the code is fine but needs lockdep annotation.\n");
printk("turning off the locking correctness validator.\n");
dump_stack();
return NULL;
}
key = lock->key->subkeys + subclass;
hash_head = classhashentry(key);
if (!graph_lock()) {
return NULL;
}
/*
* We have to do the hash-walk again, to avoid races
* with another CPU:
*/
hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key)
goto out_unlock_set;
}
/*
* Allocate a new key from the static array, and add it to
* the hash:
*/
if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
if (!debug_locks_off_graph_unlock()) {
return NULL;
}
print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
dump_stack();
return NULL;
}
class = lock_classes + nr_lock_classes++;
debug_atomic_inc(nr_unused_locks);
class->key = key;
class->name = lock->name;
class->subclass = subclass;
INIT_LIST_HEAD(&class->lock_entry);
INIT_LIST_HEAD(&class->locks_before);
INIT_LIST_HEAD(&class->locks_after);
class->name_version = count_matching_names(class);
/*
* We use RCU's safe list-add method to make
* parallel walking of the hash-list safe:
*/
hlist_add_head_rcu(&class->hash_entry, hash_head);
/*
* Add it to the global list of classes:
*/
list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
if (verbose(class)) {
graph_unlock();
printk("\nnew class %p: %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n"