Linux 4.8: mm/kmemleak.c

/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   serializes the multiple users of the "kmemleak" debugfs file and the
 *   modifications to the memory scanning parameters, including the
 *   scan_thread pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
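
/*
 * Illustrative sketch of the nesting order above (not part of the original
 * file; the function is hypothetical and relies on symbols defined further
 * down). On the scanning path, scan_mutex is the outermost lock, an object's
 * lock may be taken next, kmemleak_lock nests inside it, and a second
 * object's lock is then taken with SINGLE_DEPTH_NESTING.
 */
#if 0
static void lock_nesting_sketch(struct kmemleak_object *object,
				struct kmemleak_object *other_object)
{
	unsigned long flags;

	mutex_lock(&scan_mutex);			/* outermost */
	spin_lock_irqsave(&object->lock, flags);	/* optional middle level */
	read_lock(&kmemleak_lock);			/* global rwlock nests inside */
	spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
	/* ... scan the memory block of "object" ... */
	spin_unlock(&other_object->lock);
	read_unlock(&kmemleak_lock);
	spin_unlock_irqrestore(&object->lock, flags);
	mutex_unlock(&scan_mutex);
}
#endif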


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */

#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
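
/*
 * Worked example (not in the original file): an internal allocation made
 * from a GFP_KERNEL context ends up as
 *
 *   gfp_kmemleak_mask(GFP_KERNEL)
 *     == GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * i.e. any flags outside GFP_KERNEL | GFP_ATOMIC are dropped and the
 * allocation is made non-aggressive, so kmemleak's own metadata allocations
 * neither deadlock nor spam the log under memory pressure.
 */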

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */

struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */

struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */

#define kmemleak_warn(x...)	do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */

#define kmemleak_stop(x...)	do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */

static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}
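
/*
 * Worked example (not in the original file): a block allocated with
 * min_count == 1 starts with count == 0, so color_white() is true and, once
 * old enough, it is reported as unreferenced. If a scan then finds one
 * pointer to it, count reaches min_count and color_gray() becomes true, so
 * it is no longer a candidate. An object ignored via kmemleak_ignore() has
 * min_count == KMEMLEAK_BLACK, which satisfies neither predicate.
 */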

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
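
/*
 * Example (not in the original file): for a tracked block covering
 * [0x2000, 0x2040), lookup_object(0x2010, 1) returns its metadata because
 * aliases are accepted, while lookup_object(0x2010, 0) warns about a lookup
 * by alias and returns NULL; only lookup_object(0x2000, ...) matches the
 * block start exactly.
 */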

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}
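
/*
 * Usage sketch (not part of the original file; the function is hypothetical):
 * the lock-less traversal pattern enabled by the RCU freeing above. An object
 * is pinned with get_object() under rcu_read_lock() and released with
 * put_object(), as find_and_get_object() below also does.
 */
#if 0
static void object_list_walk_sketch(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (!get_object(object))	/* use_count already 0 */
			continue;
		/* ... inspect the object ... */
		put_object(object);
	}
	rcu_read_unlock();
}
#endif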

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}
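
/*
 * Worked example (not in the original file): for an object covering
 * [0x1000, 0x1100), delete_object_part(0x1040, 0x20) removes the original
 * metadata and re-creates two objects for [0x1000, 0x1040) and
 * [0x1060, 0x1100), so the surviving parts of the block stay tracked.
 */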

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}
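
/*
 * Usage sketch (not part of the original file; a simplified version of the
 * public kmemleak_alloc() callback defined later in the file): each callback
 * picks between the real implementation and the early log depending on how
 * far initialization has progressed.
 */
#if 0
void __ref kmemleak_alloc_sketch(const void *ptr, size_t size, int min_count,
				 gfp_t gfp)
{
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
#endif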

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}