cregit-Linux how code gets into the kernel

Release 4.8 mm/slub.c

/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *      A. page->freelist       -> List of object free in a page
 *      B. page->counters       -> Counters of objects
 *      C. page->frozen         -> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added to or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive           The slab is frozen and exempt from list processing.
 *                      This means that the slab is dedicated to a purpose
 *                      such as satisfying allocations for a specific
 *                      processor. Objects may be freed in the slab while
 *                      it is frozen but slab_free will then skip the usual
 *                      list operations. It is up to the processor holding
 *                      the slab to integrate the slab into the slab lists
 *                      when the slab is no longer needed.
 *
 *                      One use of this flag is to mark slabs that are
 *                      used for allocations. Then such a slab becomes a cpu
 *                      slab. The cpu slab may be equipped with an additional
 *                      freelist that allows lockless access to
 *                      free objects in addition to the regular freelist
 *                      that requires the slab lock.
 *
 * PageError            Slab requires special handling due to debug
 *                      options set. This moves slab handling out of
 *                      the fast path and disables lockless freelists.
 */
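As a rough illustration of the nesting described above, here is a hedged sketch only (not a real slub.c code path; `s`, `n`, `page` and `flags` are placeholders, and real call sites rarely need all three levels at once):

/* Illustrative lock nesting only -- follows the documented lock order. */
mutex_lock(&slab_mutex);                        /* 1. global slab_mutex        */
spin_lock_irqsave(&n->list_lock, flags);        /* 2. per-node list_lock       */
slab_lock(page);                                /* 3. per-page bit spinlock    */

/* ... inspect or modify page->freelist / page->counters / page->frozen ... */

slab_unlock(page);
spin_unlock_irqrestore(&n->list_lock, flags);
mutex_unlock(&slab_mutex);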


static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
        return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
        return 0;
#endif
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             32      100.00%  2        100.00%
Total                         32      100.00%  2        100.00%


void *fixup_red_left(struct kmem_cache *s, void *p)
{
        if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
                p += s->red_left_pad;

        return p;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
joonsoo kim                   37      100.00%  1        100.00%
Total                         37      100.00%  1        100.00%


static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
        return !kmem_cache_debug(s);
#else
        return false;
#endif
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
joonsoo kim                   29      100.00%  1        100.00%
Total                         29      100.00%  1        100.00%

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
                                SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
                                SLAB_TRACE)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT        16
#define OO_MASK         ((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE       32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON         0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE        0x40000000UL /* Use cmpxchg_double */

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
        unsigned long addr;     /* Called from address */
#ifdef CONFIG_STACKTRACE
        unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
#endif
        int cpu;                /* Was running on cpu */
        int pid;                /* Pid context */
        unsigned long when;     /* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             15      100.00%  2        100.00%
Total                         15      100.00%  2        100.00%


static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             20      100.00%  2        100.00%
Total                         20      100.00%  2        100.00%


static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
glauber costa                 11      100.00%  1        100.00%
Total                         11      100.00%  1        100.00%

#endif
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        /*
         * The rmw is racy on a preemptible kernel but this is acceptable, so
         * avoid this_cpu_add()'s irq-disable overhead.
         */
        raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             35      100.00%  4        100.00%
Total                         35      100.00%  4        100.00%

/********************************************************************
 *                      Core slab cache functions
 *******************************************************************/

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
        return *(void **)(object + s->offset);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             32      100.00%  1        100.00%
Total                         32      100.00%  1        100.00%


static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
        prefetch(object + s->offset);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
eric dumazet                  25      100.00%  1        100.00%
Total                         25      100.00%  1        100.00%


static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
        void *p;

        if (!debug_pagealloc_enabled())
                return get_freepointer(s, object);

        probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
        return p;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             48      77.42%   1        50.00%
joonsoo kim                   14      22.58%   1        50.00%
Total                         62      100.00%  2        100.00%


static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
        *(void **)(object + s->offset) = fp;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             36      100.00%  1        100.00%
Total                         36      100.00%  1        100.00%
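The free pointer lives inside the object at offset s->offset, so walking a page's freelist is a matter of chasing these pointers, exactly as get_map() does further below. A minimal hedged sketch (the helper name is hypothetical, not a function that exists in slub.c):

/* Hedged sketch: count the free objects on one slab page by chasing the
 * in-object free pointers, mirroring the loop used by get_map(). */
static inline int count_free_objects(struct kmem_cache *s, struct page *page)
{
        void *p;
        int nr = 0;

        for (p = page->freelist; p; p = get_freepointer(s, p))
                nr++;

        return nr;
}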

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
        for (__p = fixup_red_left(__s, __addr); \
                __p < (__addr) + (__objects) * (__s)->size; \
                __p += (__s)->size)

#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
        for (__p = fixup_red_left(__s, __addr), __idx = 1; \
                __idx <= __objects; \
                __p += (__s)->size, __idx++)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
        return (p - addr) / s->size;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             31      100.00%  1        100.00%
Total                         31      100.00%  1        100.00%


static inline int order_objects(int order, unsigned long size, int reserved)
{
        return ((PAGE_SIZE << order) - reserved) / size;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
lai jiangshan                 30      100.00%  1        100.00%
Total                         30      100.00%  1        100.00%


static inline struct kmem_cache_order_objects oo_make(int order,
                unsigned long size, int reserved)
{
        struct kmem_cache_order_objects x = {
                (order << OO_SHIFT) + order_objects(order, size, reserved)
        };

        return x;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             30      71.43%   1        33.33%
lai jiangshan                 11      26.19%   1        33.33%
cyrill gorcunov               1       2.38%    1        33.33%
Total                         42      100.00%  3        100.00%


static inline int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> OO_SHIFT;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             17      94.44%   1        50.00%
cyrill gorcunov               1       5.56%    1        50.00%
Total                         18      100.00%  2        100.00%


static inline int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & OO_MASK;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             17      94.44%   1        50.00%
cyrill gorcunov               1       5.56%    1        50.00%
Total                         18      100.00%  2        100.00%
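Taken together, oo_make() packs the page order into the bits above OO_SHIFT and the object count into the low OO_MASK bits, and oo_order()/oo_objects() unpack them again. A minimal sketch of the round trip, as a code fragment with hypothetical numbers and assuming 4K pages:

/* Hypothetical round trip, assuming PAGE_SIZE == 4096. */
struct kmem_cache_order_objects x = oo_make(2, 128, 0);

/* order_objects(2, 128, 0) == (4096 << 2) / 128 == 128 objects */
BUG_ON(oo_order(x) != 2);       /* x.x >> OO_SHIFT */
BUG_ON(oo_objects(x) != 128);   /* x.x & OO_MASK   */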

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        bit_spin_lock(PG_locked, &page->flags);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             22      68.75%   2        66.67%
kirill a. shutemov            10      31.25%   1        33.33%
Total                         32      100.00%  3        100.00%


static __always_inline void slab_unlock(struct page *page)
{
        VM_BUG_ON_PAGE(PageTail(page), page);
        __bit_spin_unlock(PG_locked, &page->flags);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             22      68.75%   2        66.67%
kirill a. shutemov            10      31.25%   1        33.33%
Total                         32      100.00%  3        100.00%


static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
        struct page tmp;

        tmp.counters = counters_new;
        /*
         * page->counters can cover frozen/inuse/objects as well
         * as page->_refcount. If we assign to ->counters directly
         * we run the risk of losing updates to page->_refcount, so
         * be careful and only assign to the fields we need.
         */
        page->frozen  = tmp.frozen;
        page->inuse   = tmp.inuse;
        page->objects = tmp.objects;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
dave hansen                   50      98.04%   1        50.00%
joonsoo kim                   1       1.96%    1        50.00%
Total                         51      100.00%  2        100.00%

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
{
        VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist, &page->counters,
                                   freelist_old, counters_old,
                                   freelist_new, counters_new))
                        return true;
        } else
#endif
        {
                slab_lock(page);
                if (page->freelist == freelist_old &&
                                        page->counters == counters_old) {
                        page->freelist = freelist_new;
                        set_page_slub_counters(page, counters_new);
                        slab_unlock(page);
                        return true;
                }
                slab_unlock(page);
        }

        cpu_relax();
        stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
        pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

        return false;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             149     85.63%   2        25.00%
heiko carstens                12      6.90%    2        25.00%
jan beulich                   5       2.87%    1        12.50%
dave hansen                   4       2.30%    1        12.50%
joe perches                   3       1.72%    1        12.50%
fabian frederick              1       0.57%    1        12.50%
Total                         174     100.00%  8        100.00%


static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
                void *freelist_new, unsigned long counters_new,
                const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        if (s->flags & __CMPXCHG_DOUBLE) {
                if (cmpxchg_double(&page->freelist, &page->counters,
                                   freelist_old, counters_old,
                                   freelist_new, counters_new))
                        return true;
        } else
#endif
        {
                unsigned long flags;

                local_irq_save(flags);
                slab_lock(page);
                if (page->freelist == freelist_old &&
                                        page->counters == counters_old) {
                        page->freelist = freelist_new;
                        set_page_slub_counters(page, counters_new);
                        slab_unlock(page);
                        local_irq_restore(flags);
                        return true;
                }
                slab_unlock(page);
                local_irq_restore(flags);
        }

        cpu_relax();
        stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
        pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

        return false;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             151     81.18%   4        30.77%
heiko carstens                12      6.45%    2        15.38%
ben greear                    7       3.76%    1        7.69%
jan beulich                   5       2.69%    1        7.69%
dave hansen                   4       2.15%    1        7.69%
joe perches                   3       1.61%    1        7.69%
eduard gabriel munteanu       2       1.08%    1        7.69%
fabian frederick              1       0.54%    1        7.69%
akinobu mita                  1       0.54%    1        7.69%
Total                         186     100.00%  13       100.00%
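Callers typically recompute the new freelist head and counters from a snapshot of the old values and retry until the update sticks. A hedged sketch of that pattern (variable names are placeholders, not a verbatim slub.c excerpt):

/* Hedged sketch of the usual retry pattern around cmpxchg_double_slab(). */
do {
        old_head     = page->freelist;
        old_counters = page->counters;
        /* ... derive new_head / new_counters from the old values ... */
} while (!cmpxchg_double_slab(s, page,
                              old_head, old_counters,
                              new_head, new_counters,
                              "example update"));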

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
        void *p;
        void *addr = page_address(page);

        for (p = page->freelist; p; p = get_freepointer(s, p))
                set_bit(slab_index(p, s, addr), map);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             67      100.00%  2        100.00%
Total                         67      100.00%  2        100.00%


static inline int size_from_object(struct kmem_cache *s)
{
        if (s->flags & SLAB_RED_ZONE)
                return s->size - s->red_left_pad;

        return s->size;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
joonsoo kim                   34      100.00%  1        100.00%
Total                         34      100.00%  1        100.00%


static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
        if (s->flags & SLAB_RED_ZONE)
                p -= s->red_left_pad;

        return p;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
joonsoo kim                   34      100.00%  1        100.00%
Total                         34      100.00%  1        100.00%

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
        kasan_disable_current();
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
andrey ryabinin               12      100.00%  1        100.00%
Total                         12      100.00%  1        100.00%


static inline void metadata_access_disable(void)
{
        kasan_enable_current();
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
andrey ryabinin               12      100.00%  1        100.00%
Total                         12      100.00%  1        100.00%

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
                                struct page *page, void *object)
{
        void *base;

        if (!object)
                return 1;

        base = page_address(page);
        object = restore_red_left(s, object);
        if (object < base || object >= base + page->objects * s->size ||
                (object - base) % s->size) {
                return 0;
        }

        return 1;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
joonsoo kim                   85      100.00%  1        100.00%
Total                         85      100.00%  1        100.00%


static void print_section(char *text, u8 *addr, unsigned int length)
{
        metadata_access_enable();
        print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
        metadata_access_disable();
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             25      58.14%   1        25.00%
sebastian andrzej siewior     7       16.28%   1        25.00%
andrey ryabinin               6       13.95%   1        25.00%
ben greear                    5       11.63%   1        25.00%
Total                         43      100.00%  4        100.00%


static struct track *get_track(struct kmem_cache *s, void *object,
        enum track_item alloc)
{
        struct track *p;

        if (s->offset)
                p = object + s->offset + sizeof(void *);
        else
                p = object + s->inuse;

        return p + alloc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             60      100.00%  2        100.00%
Total                         60      100.00%  2        100.00%


static void set_track(struct kmem_cache *s, void *object,
                        enum track_item alloc, unsigned long addr)
{
        struct track *p = get_track(s, object, alloc);

        if (addr) {
#ifdef CONFIG_STACKTRACE
                struct stack_trace trace;
                int i;

                trace.nr_entries = 0;
                trace.max_entries = TRACK_ADDRS_COUNT;
                trace.entries = p->addrs;
                trace.skip = 3;
                metadata_access_enable();
                save_stack_trace(&trace);
                metadata_access_disable();

                /* See rant in lockdep.c */
                if (trace.nr_entries != 0 &&
                    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
                        trace.nr_entries--;

                for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
                        p->addrs[i] = 0;
#endif
                p->addr = addr;
                p->cpu = smp_processor_id();
                p->pid = current->pid;
                p->when = jiffies;
        } else
                memset(p, 0, sizeof(struct track));
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
linus torvalds                93      50.27%   1        16.67%
christoph lameter             79      42.70%   2        33.33%
andrey ryabinin               6       3.24%    1        16.67%
akinobu mita                  5       2.70%    1        16.67%
eduard gabriel munteanu       2       1.08%    1        16.67%
Total                         185     100.00%  6        100.00%


static void init_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        set_track(s, object, TRACK_FREE, 0UL);
        set_track(s, object, TRACK_ALLOC, 0UL);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             47      95.92%   2        66.67%
eduard gabriel munteanu       2       4.08%    1        33.33%
Total                         49      100.00%  3        100.00%


static void print_track(const char *s, struct track *t)
{
        if (!t->addr)
                return;

        pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
               s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
        {
                int i;
                for (i = 0; i < TRACK_ADDRS_COUNT; i++)
                        if (t->addrs[i])
                                pr_err("\t%pS\n", (void *)t->addrs[i]);
                        else
                                break;
        }
#endif
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             49      47.57%   2        33.33%
linus torvalds                48      46.60%   2        33.33%
eduard gabriel munteanu       4       3.88%    1        16.67%
fabian frederick              2       1.94%    1        16.67%
Total                         103     100.00%  6        100.00%


static void print_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        print_track("Allocated", get_track(s, object, TRACK_ALLOC));
        print_track("Freed", get_track(s, object, TRACK_FREE));
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             55      100.00%  1        100.00%
Total                         55      100.00%  1        100.00%


static void print_page_info(struct page *page)
{
        pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
               page, page->objects, page->inuse, page->freelist, page->flags);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             33      97.06%   2        66.67%
fabian frederick              1       2.94%    1        33.33%
Total                         34      100.00%  3        100.00%


static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        pr_err("=============================================================================\n");
        pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
        pr_err("-----------------------------------------------------------------------------\n\n");

        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
        va_end(args);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             45      55.56%   1        16.67%
fabian frederick              26      32.10%   2        33.33%
dave jones                    8       9.88%    2        33.33%
rusty russell                 2       2.47%    1        16.67%
Total                         81      100.00%  6        100.00%


static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        pr_err("FIX %s: %pV\n", s->name, &vaf);
        va_end(args);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             39      63.93%   1        33.33%
fabian frederick              22      36.07%   2        66.67%
Total                         61      100.00%  3        100.00%


static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned int off;       /* Offset of last byte */
        u8 *addr = page_address(page);

        print_tracking(s, p);

        print_page_info(page);

        pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
               p, p - addr, get_freepointer(s, p));

        if (s->flags & SLAB_RED_ZONE)
                print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
        else if (p > addr + 16)
                print_section("Bytes b4 ", p - 16, 16);

        print_section("Object ", p, min_t(unsigned long, s->object_size,
                                PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
                print_section("Redzone ", p + s->object_size,
                        s->inuse - s->object_size);

        if (s->offset)
                off = s->offset + sizeof(void *);
        else
                off = s->inuse;

        if (s->flags & SLAB_STORE_USER)
                off += 2 * sizeof(struct track);

        off += kasan_metadata_size(s);

        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
                print_section("Padding ", p + off, size_from_object(s) - off);

        dump_stack();
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             183     79.91%   4        44.44%
joonsoo kim                   30      13.10%   1        11.11%
alexander potapenko           7       3.06%    1        11.11%
sebastian andrzej siewior     4       1.75%    1        11.11%
pekka j enberg                4       1.75%    1        11.11%
fabian frederick              1       0.44%    1        11.11%
Total                         229     100.00%  9        100.00%


void object_err(struct kmem_cache *s, struct page *page,
                        u8 *object, char *reason)
{
        slab_bug(s, "%s", reason);
        print_trailer(s, page, object);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             41      100.00%  3        100.00%
Total                         41      100.00%  3        100.00%


static void slab_err(struct kmem_cache *s, struct page *page,
                        const char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        slab_bug(s, "%s", buf);
        print_page_info(page);
        dump_stack();
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             75      100.00%  4        100.00%
Total                         75      100.00%  4        100.00%


static void init_object(struct kmem_cache *s, void *object, u8 val)
{
        u8 *p = object;

        if (s->flags & SLAB_RED_ZONE)
                memset(p - s->red_left_pad, val, s->red_left_pad);

        if (s->flags & __OBJECT_POISON) {
                memset(p, POISON_FREE, s->object_size - 1);
                p[s->object_size - 1] = POISON_END;
        }

        if (s->flags & SLAB_RED_ZONE)
                memset(p + s->object_size, val, s->inuse - s->object_size);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             85      78.70%   3        75.00%
joonsoo kim                   23      21.30%   1        25.00%
Total                         108     100.00%  4        100.00%


static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
{
        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
        memset(from, data, to - from);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             52      100.00%  2        100.00%
Total                         52      100.00%  2        100.00%


static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
                        u8 *object, char *what,
                        u8 *start, unsigned int value, unsigned int bytes)
{
        u8 *fault;
        u8 *end;

        metadata_access_enable();
        fault = memchr_inv(start, value, bytes);
        metadata_access_disable();
        if (!fault)
                return 1;

        end = start + bytes;
        while (end > fault && end[-1] == value)
                end--;

        slab_bug(s, "%s overwritten", what);
        pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
                                        fault, end - 1, fault[0], value);
        print_trailer(s, page, object);

        restore_bytes(s, what, value, fault, end);
        return 0;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
christoph lameter             136     94.44%   1        25.00%
andrey ryabinin               6       4.17%    1        25.00%
fabian frederick              1       0.69%    1        25.00%
akinobu mita                  1       0.69%    1        25.00%
Total                         144     100.00%  4        100.00%

/*
 * Object layout:
 *
 * object address
 *      Bytes of the object to be managed.
 *      If the freepointer may overlay the object then the free
 *      pointer is the first word of the object.
 *
 *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *      0xa5 (POISON_END)
 *
 * object + s->object_size
 *      Padding to reach word boundary. This is also used for Redzoning.
 *      Padding is extended by another word if Redzoning is enabled and
 *      object_size == inuse.
 *
 *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *      0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *      Meta data starts here.
 *
 *      A. Free pointer (if we cannot overwrite object on free)
 *      B. Tracking data for SLAB_STORE_USER
 *      C. Padding to reach required alignment boundary or at minimum
 *              one word if debugging is on to be able to detect writes
 *              before the word boundary.
 *
 *      Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *      Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
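To make the layout above concrete, here is a hedged worked example with hypothetical numbers (not taken from a real cache), assuming a 64-bit kernel, SLAB_STORE_USER set, and a free pointer that cannot overlay the object:

/*
 * Hypothetical layout for object_size = 20 on a 64-bit kernel:
 *
 *   object + 0  .. 19   payload                  (s->object_size = 20)
 *   object + 20 .. 23   padding to the word boundary, reused for Redzoning
 *   object + 24         metadata starts          (s->inuse = 24): free pointer
 *   object + 32 ..      two struct track records (TRACK_ALLOC, TRACK_FREE)
 *   ...                 POISON_INUSE padding up to s->size
 */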
static int check_pad_bytes(struct kmem_cache *