cregit-Linux: how code gets into the kernel

Release 4.15, kernel/locking/lockdep.c
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
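For readers new to the validator, here is a minimal sketch (not part of this
file; the lock names are hypothetical) of the ordering inversion described
above. Once one context establishes the dependency lock_a -> lock_b and
another attempts lock_b -> lock_a, lockdep reports the circular dependency
even if the two contexts never actually deadlock during the run:

static DEFINE_SPINLOCK(lock_a);         /* hypothetical, for illustration */
static DEFINE_SPINLOCK(lock_b);

static void context_one(void)
{
        spin_lock(&lock_a);
        spin_lock(&lock_b);             /* records lock_a -> lock_b */
        spin_unlock(&lock_b);
        spin_unlock(&lock_a);
}

static void context_two(void)
{
        spin_lock(&lock_b);
        spin_lock(&lock_a);             /* lock_b -> lock_a: cycle reported */
        spin_unlock(&lock_a);
        spin_unlock(&lock_b);
}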

#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/nmi.h>

#include <asm/sections.h>

#include "lockdep_internals.h"


#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
        arch_spin_lock(&lockdep_lock);
        /*
         * Make sure that if another CPU detected a bug while
         * walking the graph we don't change it (while the other
         * CPU is busy printing out stuff with the graph lock
         * dropped already)
         */
        if (!debug_locks) {
                arch_spin_unlock(&lockdep_lock);
                return 0;
        }
        /* prevent any recursions within lockdep from causing deadlocks */
        current->lockdep_recursion++;
        return 1;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            32      80.00%   1        33.33%
Steven Rostedt         6       15.00%   1        33.33%
Thomas Gleixner        2       5.00%    1        33.33%
Total                  40      100.00%  3        100.00%


static inline int graph_unlock(void)
{
        if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
                /*
                 * The lockdep graph lock isn't locked while we expect it to
                 * be, we're confused now, bye!
                 */
                return DEBUG_LOCKS_WARN_ON(1);
        }

        current->lockdep_recursion--;
        arch_spin_unlock(&lockdep_lock);
        return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            17      39.53%   1        20.00%
Jarek Poplawski        16      37.21%   1        20.00%
Steven Rostedt         5       11.63%   1        20.00%
Peter Zijlstra         3       6.98%    1        20.00%
Thomas Gleixner        2       4.65%    1        20.00%
Total                  43      100.00%  5        100.00%
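Callers in this file pair the two helpers above as follows; a sketch of the
calling convention (the graph mutation step is elided):

        if (!graph_lock())
                return NULL;    /* debug_locks was cleared; back off */
        /* ... modify the dependency graph under lockdep_lock ... */
        graph_unlock();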

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
        int ret = debug_locks_off();

        arch_spin_unlock(&lockdep_lock);

        return ret;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            23      95.83%   1        50.00%
Thomas Gleixner        1       4.17%    1        50.00%
Total                  24      100.00%  2        100.00%

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
        if (!hlock->class_idx) {
                /*
                 * Someone passed in garbage, we give up.
                 */
                DEBUG_LOCKS_WARN_ON(1);
                return NULL;
        }
        return lock_classes + hlock->class_idx - 1;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Dave Jones             40      97.56%   1        50.00%
Peter Zijlstra         1       2.44%    1        50.00%
Total                  41      100.00%  2        100.00%

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
        return local_clock();
}

Contributors

Person                 Tokens  Prop      Commits  CommitProp
Peter Zijlstra         13      100.00%   2        100.00%
Total                  13      100.00%   2        100.00%


static int lock_point(unsigned long points[], unsigned long ip)
{
        int i;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                if (points[i] == 0) {
                        points[i] = ip;
                        break;
                }
                if (points[i] == ip)
                        break;
        }

        return i;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         63      96.92%   2        66.67%
Ingo Molnar            2       3.08%    1        33.33%
Total                  65      100.00%  3        100.00%


static void lock_time_inc(struct lock_time *lt, u64 time)
{
        if (time > lt->max)
                lt->max = time;

        if (time < lt->min || !lt->nr)
                lt->min = time;

        lt->total += time;
        lt->nr++;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         55      94.83%   2        50.00%
Ingo Molnar            2       3.45%    1        25.00%
Frank Rowand           1       1.72%    1        25.00%
Total                  58      100.00%  4        100.00%


static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
        if (!src->nr)
                return;

        if (src->max > dst->max)
                dst->max = src->max;

        if (src->min < dst->min || !dst->nr)
                dst->min = src->min;

        dst->total += src->total;
        dst->nr += src->nr;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         43      52.44%   1        50.00%
Frank Rowand           39      47.56%   1        50.00%
Total                  82      100.00%  2        100.00%


struct lock_class_stats lock_stats(struct lock_class *class)
{
        struct lock_class_stats stats;
        int cpu, i;

        memset(&stats, 0, sizeof(struct lock_class_stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];

                for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
                        stats.contending_point[i] += pcs->contending_point[i];

                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);

                lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
                lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

                for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
                        stats.bounces[i] += pcs->bounces[i];
        }

        return stats;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         208     99.52%   3        75.00%
Tejun Heo              1       0.48%    1        25.00%
Total                  209     100.00%  4        100.00%


void clear_lock_stats(struct lock_class *class)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct lock_class_stats *cpu_stats =
                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
        memset(class->contending_point, 0, sizeof(class->contending_point));
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         81      98.78%   2        66.67%
Tejun Heo              1       1.22%    1        33.33%
Total                  82      100.00%  3        100.00%


static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
        return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         22      88.00%   1        33.33%
Ingo Molnar            2       8.00%    1        33.33%
Tejun Heo              1       4.00%    1        33.33%
Total                  25      100.00%  3        100.00%


static void put_lock_stats(struct lock_class_stats *stats)
{
        put_cpu_var(cpu_lock_stats);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         15      93.75%   1        50.00%
Tejun Heo              1       6.25%    1        50.00%
Total                  16      100.00%  2        100.00%


static void lock_release_holdtime(struct held_lock *hlock)
{
        struct lock_class_stats *stats;
        u64 holdtime;

        if (!lock_stat)
                return;

        holdtime = lockstat_clock() - hlock->holdtime_stamp;

        stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
                lock_time_inc(&stats->write_holdtime, holdtime);
        put_lock_stats(stats);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         73      96.05%   2        66.67%
Dave Jones             3       3.95%    1        33.33%
Total                  76      100.00%  3        100.00%

#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}

Contributors

Person                 Tokens  Prop      Commits  CommitProp
Peter Zijlstra         11      100.00%   1        100.00%
Total                  11      100.00%   1        100.00%

#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
#define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)     (classhash_table + __classhashfn((key)))

static struct hlist_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS - 1)
#define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))

static struct hlist_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
static inline u64 iterate_chain_key(u64 key, u32 idx)
{
        u32 k0 = key, k1 = key >> 32;

        __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */

        return k0 | (u64)k1 << 32;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         40      90.91%   1        50.00%
Ingo Molnar            4       9.09%    1        50.00%
Total                  44      100.00%  2        100.00%
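As the comment above iterate_chain_key() explains, the chain key is a
running hash over every lock taken so far. A sketch of how it accumulates
(idx_a and idx_b are illustrative class indices, not names from this file):

        u64 chain_key = 0;      /* empty chain */

        /* take lock A (class index idx_a), then lock B (idx_b): */
        chain_key = iterate_chain_key(chain_key, idx_a);
        chain_key = iterate_chain_key(chain_key, idx_b);

        /*
         * chain_key now identifies the ordered sequence A, B and can be
         * fed to chainhashentry() to find the chain in chainhash_table.
         */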


void lockdep_off(void)
{
        current->lockdep_recursion++;
}

Contributors

Person                 Tokens  Prop      Commits  CommitProp
Ingo Molnar            12      100.00%   1        100.00%
Total                  12      100.00%   1        100.00%

EXPORT_SYMBOL(lockdep_off);
void lockdep_on(void)
{
        current->lockdep_recursion--;
}

Contributors

Person                 Tokens  Prop      Commits  CommitProp
Ingo Molnar            12      100.00%   1        100.00%
Total                  12      100.00%   1        100.00%
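The pair above lets code temporarily hide its locking from the validator,
for example around code that would otherwise recurse into lockdep. A sketch
of the (rarely appropriate) usage pattern:

        lockdep_off();
        /* locking in here is invisible to the validator */
        lockdep_on();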

EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE                 0
#define VERY_VERBOSE            0

#if VERBOSE
# define HARDIRQ_VERBOSE        1
# define SOFTIRQ_VERBOSE        1
#else
# define HARDIRQ_VERBOSE        0
# define SOFTIRQ_VERBOSE        0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
        /* Example */
        if (class->name_version == 1 &&
                        !strcmp(class->name, "lockname"))
                return 1;
        if (class->name_version == 1 &&
                        !strcmp(class->name, "&struct->lockfield"))
                return 1;
#endif
        /* Filter everything else. 1 would be to allow everything else */
        return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            15      68.18%   2        66.67%
Andi Kleen             7       31.82%   1        33.33%
Total                  22      100.00%  3        100.00%

#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
        return class_filter(class);
#endif
        return 0;
}

Contributors

Person                 Tokens  Prop      Commits  CommitProp
Ingo Molnar            25      100.00%   1        100.00%
Total                  25      100.00%   1        100.00%

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static void print_lockdep_off(const char *bug_msg)
{
        printk(KERN_DEBUG "%s\n", bug_msg);
        printk(KERN_DEBUG "turning off the locking correctness validator.\n");
#ifdef CONFIG_LOCK_STAT
        printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
#endif
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Dave Jones             31      86.11%   1        50.00%
Andreas Gruenbacher    5       13.89%   1        50.00%
Total                  36      100.00%  2        100.00%


static int save_trace(struct stack_trace *trace)
{
        trace->nr_entries = 0;
        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
        trace->entries = stack_trace + nr_stack_trace_entries;
        trace->skip = 3;

        save_stack_trace(trace);

        /*
         * Some daft arches put -1 at the end to indicate its a full trace.
         *
         * <rant> this is buggy anyway, since it takes a whole extra entry so a
         * complete trace that maxes out the entries provided will be reported
         * as incomplete, friggin useless </rant>
         */
        if (trace->nr_entries != 0 &&
            trace->entries[trace->nr_entries-1] == ULONG_MAX)
                trace->nr_entries--;

        trace->max_entries = trace->nr_entries;

        nr_stack_trace_entries += trace->nr_entries;

        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
                if (!debug_locks_off_graph_unlock())
                        return 0;

                print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
                dump_stack();

                return 0;
        }

        return 1;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            77      65.25%   2        28.57%
Peter Zijlstra         24      20.34%   1        14.29%
Tony Luck              6       5.08%    1        14.29%
Andi Kleen             6       5.08%    1        14.29%
Dave Jones             5       4.24%    2        28.57%
Total                  118     100.00%  7        100.00%

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)                                                \
        [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",      \
        [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",        \
        [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
        [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        [LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            31      91.18%   1        50.00%
Alexey Dobriyan        3       8.82%    1        50.00%
Total                  34      100.00%  2        100.00%


static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
        return 1UL << bit;
}

Contributors

Person                 Tokens  Prop      Commits  CommitProp
Peter Zijlstra         17      100.00%   1        100.00%
Total                  17      100.00%   1        100.00%


static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
        char c = '.';

        /* '+': this kind of irq was enabled while the lock was held */
        if (class->usage_mask & lock_flag(bit + 2))
                c = '+';
        /* '-': the lock was acquired in this irq context; '?': both */
        if (class->usage_mask & lock_flag(bit)) {
                c = '-';
                if (class->usage_mask & lock_flag(bit + 2))
                        c = '?';
        }

        return c;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            37      50.00%   1        33.33%
Peter Zijlstra         36      48.65%   1        33.33%
Nicholas Piggin        1       1.35%    1        33.33%
Total                  74      100.00%  3        100.00%


void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
        int i = 0;

#define LOCKDEP_STATE(__STATE)                                          \
        usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
        usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

        usage[i] = '\0';
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         37      88.10%   2        50.00%
Nicholas Piggin        3       7.14%    1        25.00%
Ingo Molnar            2       4.76%    1        25.00%
Total                  42      100.00%  4        100.00%
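A legend for the characters produced above, following the bit tests in
get_usage_char() (this summary is not code from this file). Each state
contributes one character for write use and one for read use:

        '.'     the lock was never acquired in that irq context, and that
                kind of irq was never enabled while the lock was held
        '-'     the lock was acquired in that irq context (the USED_IN bit)
        '+'     that kind of irq was enabled while the lock was held
                (the ENABLED bit, lock_flag(bit + 2) above)
        '?'     both of the above occurred

So a printout such as {-.-.} marks a lock that was write-acquired in both
hardirq and softirq context, never with those irqs enabled while held.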


static void __print_lock_name(struct lock_class *class)
{
        char str[KSYM_NAME_LEN];
        const char *name;

        name = class->name;
        if (!name) {
                name = __get_key_name(class->key, str);
                printk(KERN_CONT "%s", name);
        } else {
                printk(KERN_CONT "%s", name);
                if (class->name_version > 1)
                        printk(KERN_CONT "#%d", class->name_version);
                if (class->subclass)
                        printk(KERN_CONT "/%d", class->subclass);
        }
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            89      89.90%   1        20.00%
Steven Rostedt         5       5.05%    2        40.00%
Dmitriy Vyukov         4       4.04%    1        20.00%
Jarek Poplawski        1       1.01%    1        20.00%
Total                  99      100.00%  5        100.00%


static void print_lock_name(struct lock_class *class)
{
        char usage[LOCK_USAGE_CHARS];

        get_usage_chars(class, usage);

        printk(KERN_CONT " (");
        __print_lock_name(class);
        printk(KERN_CONT "){%s}", usage);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Steven Rostedt         33      76.74%   1        20.00%
Ingo Molnar            5       11.63%   1        20.00%
Dmitriy Vyukov         2       4.65%    1        20.00%
Peter Zijlstra         2       4.65%    1        20.00%
Nicholas Piggin        1       2.33%    1        20.00%
Total                  43      100.00%  5        100.00%


static void print_lockdep_cache(struct lockdep_map *lock)
{
        const char *name;
        char str[KSYM_NAME_LEN];

        name = lock->name;
        if (!name)
                name = __get_key_name(lock->key->subkeys, str);

        printk(KERN_CONT "%s", name);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            52      96.30%   1        33.33%
Dmitriy Vyukov         1       1.85%    1        33.33%
Jarek Poplawski        1       1.85%    1        33.33%
Total                  54      100.00%  3        100.00%


static void print_lock(struct held_lock *hlock)
{
        /*
         * We can be called locklessly through debug_show_all_locks() so be
         * extra careful, the hlock might have been released and cleared.
         */
        unsigned int class_idx = hlock->class_idx;

        /* Don't re-read hlock->class_idx, can't use READ_ONCE() on bitfields: */
        barrier();

        if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
                printk(KERN_CONT "<RELEASED>\n");
                return;
        }

        print_lock_name(lock_classes + class_idx - 1);
        printk(KERN_CONT ", at: [<%p>] %pS\n",
                (void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         39      50.65%   1        25.00%
Ingo Molnar            21      27.27%   1        25.00%
Dmitriy Vyukov         16      20.78%   1        25.00%
Dave Jones             1       1.30%    1        25.00%
Total                  77      100.00%  4        100.00%


static void lockdep_print_held_locks(struct task_struct *curr)
{
        int i, depth = curr->lockdep_depth;

        if (!depth) {
                printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
                return;
        }
        printk("%d lock%s held by %s/%d:\n",
                depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

        for (i = 0; i < depth; i++) {
                printk(" #%d: ", i);
                print_lock(curr->held_locks + i);
        }
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Ingo Molnar            91      93.81%   1        50.00%
Pavel Emelyanov        6       6.19%    1        50.00%
Total                  97      100.00%  2        100.00%


static void print_kernel_ident(void)
{
        printk("%s %.*s %s\n", init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version,
                print_tainted());
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         16      41.03%   1        25.00%
Ingo Molnar            16      41.03%   1        25.00%
Ben Hutchings          5       12.82%   1        25.00%
Andi Kleen             2       5.13%    1        25.00%
Total                  39      100.00%  4        100.00%


static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
        return class_filter(class);
#endif
        return 0;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         21      84.00%   1        50.00%
Ingo Molnar            4       16.00%   1        50.00%
Total                  25      100.00%  2        100.00%

/*
 * Is this the address of a static object:
 */
#ifdef __KERNEL__
static int static_obj(void *obj)
{
        unsigned long start = (unsigned long) &_stext,
                      end   = (unsigned long) &_end,
                      addr  = (unsigned long) obj;

        /*
         * static variable?
         */
        if ((addr >= start) && (addr < end))
                return 1;

        if (arch_is_kernel_data(addr))
                return 1;

        /*
         * in-kernel percpu var?
         */
        if (is_kernel_percpu_address(addr))
                return 1;

        /*
         * module static or percpu var?
         */
        return is_module_address(addr) || is_module_percpu_address(addr);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         53      59.55%   1        25.00%
Lei Ming               16      17.98%   1        25.00%
Tejun Heo              10      11.24%   1        25.00%
Mike Frysinger         10      11.24%   1        25.00%
Total                  89      100.00%  4        100.00%

#endif

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
        struct lock_class *class;
        int count = 0;

        if (!new_class->name)
                return 0;

        list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
                if (new_class->key - new_class->subclass == class->key)
                        return class->name_version;
                if (class->name && !strcmp(class->name, new_class->name))
                        count = max(count, class->name_version);
        }

        return count + 1;
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         61      64.89%   2        33.33%
Ingo Molnar            26      27.66%   1        16.67%
David S. Miller        3       3.19%    1        16.67%
Jarek Poplawski        3       3.19%    1        16.67%
Andi Kleen             1       1.06%    1        16.67%
Total                  94      100.00%  6        100.00%

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
        struct lockdep_subclass_key *key;
        struct hlist_head *hash_head;
        struct lock_class *class;
        bool is_static = false;

        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
                debug_locks_off();
                printk(KERN_ERR
                        "BUG: looking up invalid subclass: %u\n", subclass);
                printk(KERN_ERR
                        "turning off the locking correctness validator.\n");
                dump_stack();
                return NULL;
        }

        /*
         * Static locks do not have their class-keys yet - for them the key
         * is the lock object itself. If the lock is in the per cpu area,
         * the canonical address of the lock (per cpu offset removed) is
         * used.
         */
        if (unlikely(!lock->key)) {
                unsigned long can_addr, addr = (unsigned long)lock;

                if (__is_kernel_percpu_address(addr, &can_addr))
                        lock->key = (void *)can_addr;
                else if (__is_module_percpu_address(addr, &can_addr))
                        lock->key = (void *)can_addr;
                else if (static_obj(lock))
                        lock->key = (void *)lock;
                else
                        return ERR_PTR(-EINVAL);
                is_static = true;
        }

        /*
         * NOTE: the class-key must be unique. For dynamic locks, a static
         * lock_class_key variable is passed in through the mutex_init()
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
        BUILD_BUG_ON(sizeof(struct lock_class_key) >
                        sizeof(struct lockdep_map));

        key = lock->key->subkeys + subclass;

        hash_head = classhashentry(key);

        /*
         * We do an RCU walk of the hash, see lockdep_free_key_range().
         */
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return NULL;

        hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key) {
                        /*
                         * Huh! same key, different name? Did someone trample
                         * on some memory? We're most confused.
                         */
                        WARN_ON_ONCE(class->name != lock->name);
                        return class;
                }
        }

        return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
}

Contributors

Person                 Tokens  Prop     Commits  CommitProp
Peter Zijlstra         127     47.57%   4        44.44%
Thomas Gleixner        96      35.96%   1        11.11%
Hitoshi Mitake         34      12.73%   1        11.11%
Ingo Molnar            7       2.62%    1        11.11%
Andrew Morton          2       0.75%    1        11.11%
Jarek Poplawski        1       0.37%    1        11.11%
Total                  267     100.00%  9        100.00%
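The ERR_PTR(-EINVAL) return above is what produces the "trying to register
non-static key" report in register_lock_class() below. A hypothetical sketch
of the classic way to hit it: a lock in dynamically allocated memory whose
spin_lock_init() call was forgotten, leaving it with neither a key nor a
static address:

struct foo {                            /* hypothetical, for illustration */
        spinlock_t lock;
};

struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
/* BUG: missing spin_lock_init(&f->lock), so the lockdep key is unset */
spin_lock(&f->lock);    /* look_up_lock_class() returns ERR_PTR(-EINVAL) */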

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
        struct lockdep_subclass_key *key;
        struct hlist_head *hash_head;
        struct lock_class *class;

        DEBUG_LOCKS_WARN_ON(!irqs_disabled());

        class = look_up_lock_class(lock, subclass);
        if (likely(!IS_ERR_OR_NULL(class)))
                goto out_set_class_cache;

        /*
         * Debug-check: all keys must be persistent!
         */
        if (IS_ERR(class)) {
                debug_locks_off();
                printk("INFO: trying to register non-static key.\n");
                printk("the code is fine but needs lockdep annotation.\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();
                return NULL;
        }

        key = lock->key->subkeys + subclass;
        hash_head = classhashentry(key);

        if (!graph_lock()) {
                return NULL;
        }
        /*
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
        hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key)
                        goto out_unlock_set;
        }

        /*
         * Allocate a new key from the static array, and add it to
         * the hash:
         */
        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                if (!debug_locks_off_graph_unlock()) {
                        return NULL;
                }

                print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
                dump_stack();
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
        debug_atomic_inc(nr_unused_locks);
        class->key = key;
        class->name = lock->name;
        class->subclass = subclass;
        INIT_LIST_HEAD(&class->lock_entry);
        INIT_LIST_HEAD(&class->locks_before);
        INIT_LIST_HEAD(&class->locks_after);
        class->name_version = count_matching_names(class);
        /*
         * We use RCU's safe list-add method to make
         * parallel walking of the hash-list safe:
         */
        hlist_add_head_rcu(&class->hash_entry, hash_head);
        /*
         * Add it to the global list of classes:
         */
        list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

        if (verbose(class)) {
                graph_unlock();

                printk("\nnew class %p: %s", class->key, class->name);
                if (class->name_version > 1)
                        printk(KERN_CONT "#%d", class->name_version);
                printk(KERN_CONT "\n"