cregit-Linux: how code gets into the kernel
Release 4.8, mm/slab.c
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *      (c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *      (c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in:
 *      UNIX Internals: The New Frontiers by Uresh Vahalia
 *      Pub: Prentice Hall      ISBN 0-13-101908-2
 * or with a little more detail in:
 *      The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *      Jeff Bonwick (Sun Microsystems).
 *      Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted into 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs;
 * otherwise objects come from empty slabs, or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array; most allocs
 * and frees go through that array, and if that array overflows, then half
 * of its entries are given back to the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with local interrupts enabled -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change; they
 *      are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, so no locking is
 *      needed, and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *      The global cache-chain is protected by the mutex 'slab_mutex'.
 *      The mutex is only needed when accessing/extending the cache-chain, which
 *      can never happen inside an interrupt (kmem_cache_create(),
 *      kmem_cache_shrink() and kmem_cache_reap()).
 *
 *      At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *      Shai Fultheim <shai@scalex86.org>.
 *      Shobhit Dayal <shobhit@calsoftinc.com>
 *      Alok N Kataria <alokk@calsoftinc.com>
 *      Christoph Lameter <christoph@lameter.com>
 *
 *      Modified the slab allocator to be node aware on NUMA systems.
 *      Each node has its own list of partial, free and full slabs.
 *      All object allocations for a node occur from node specific slab lists.
 */
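
The rules above are easiest to see from a caller's point of view. The sketch below is illustrative only and is not part of slab.c: the "foo" cache, its constructor and the init/exit helpers are made-up names, but kmem_cache_create(), kmem_cache_alloc(), kmem_cache_free() and kmem_cache_destroy() are the API this file implements. Note how the constructor only establishes the initialized state of a newly populated slab, and how objects must be handed back to kmem_cache_free() in that same state.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical object type, used only for this illustration. */
struct foo {
	int refcount;
	struct list_head link;
};

static struct kmem_cache *foo_cachep;

/* Runs only when a fresh slab page is populated, not on every allocation. */
static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	f->refcount = 0;
	INIT_LIST_HEAD(&f->link);
}

static int __init foo_cache_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, foo_ctor);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_cache_exit(void)
{
	/* No allocation from foo_cachep may race with the destroy below. */
	kmem_cache_destroy(foo_cachep);
}

static struct foo *foo_get(void)
{
	/* Hands out a cache-warm, already-constructed object. */
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_put(struct foo *f)
{
	/* The object must be back in its constructed state when freed. */
	kmem_cache_free(foo_cachep, f);
}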

#include	<linux/slab.h>
#include	<linux/mm.h>
#include	<linux/poison.h>
#include	<linux/swap.h>
#include	<linux/cache.h>
#include	<linux/interrupt.h>
#include	<linux/init.h>
#include	<linux/compiler.h>
#include	<linux/cpuset.h>
#include	<linux/proc_fs.h>
#include	<linux/seq_file.h>
#include	<linux/notifier.h>
#include	<linux/kallsyms.h>
#include	<linux/cpu.h>
#include	<linux/sysctl.h>
#include	<linux/module.h>
#include	<linux/rcupdate.h>
#include	<linux/string.h>
#include	<linux/uaccess.h>
#include	<linux/nodemask.h>
#include	<linux/kmemleak.h>
#include	<linux/mempolicy.h>
#include	<linux/mutex.h>
#include	<linux/fault-inject.h>
#include	<linux/rtmutex.h>
#include	<linux/reciprocal_div.h>
#include	<linux/debugobjects.h>
#include	<linux/kmemcheck.h>
#include	<linux/memory.h>
#include	<linux/prefetch.h>

#include	<net/sock.h>

#include	<asm/cacheflush.h>
#include	<asm/tlbflush.h>
#include	<asm/page.h>

#include <trace/events/kmem.h>

#include	"internal.h"

#include	"slab.h"

/*
 * DEBUG        - 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
 *                0 for faster, smaller code (especially in the critical paths).
 *
 * STATS        - 1 to collect stats for /proc/slabinfo.
 *                0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

#define FREELIST_BYTE_INDEX (((PAGE_SIZE >> BITS_PER_BYTE) \
				<= SLAB_OBJ_MIN_SIZE) ? 1 : 0)

#if FREELIST_BYTE_INDEX
typedef unsigned char freelist_idx_t;
#else
typedef unsigned short freelist_idx_t;
#endif

#define SLAB_OBJ_MAX_NUM ((1 << sizeof(freelist_idx_t) * BITS_PER_BYTE) - 1)

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */

struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 */
};
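
To make the LIFO behaviour described above concrete, here is a rough sketch of how avail, limit and batchcount drive the per-cpu fast paths. It is condensed from the ____cache_alloc()/__cache_free()/cache_flusharray() logic later in this file and is illustrative only, not a verbatim copy:

/*
 *	// allocation: pop the most recently freed (cache-warm) object
 *	ac = cpu_cache_get(cachep);
 *	if (ac->avail) {
 *		ac->touched = 1;
 *		objp = ac->entry[--ac->avail];
 *	} else {
 *		objp = cache_alloc_refill(cachep, flags); // pulls in up to batchcount objects
 *	}
 *
 *	// free: push onto the array; flush a batch back to the node lists when full
 *	if (ac->avail >= ac->limit)
 *		cache_flusharray(cachep, ac);             // returns batchcount entries to the node
 *	ac->entry[ac->avail++] = objp;
 */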


struct alien_cache {
	spinlock_t lock;
	struct array_cache ac;
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_NODE (MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node, struct list_head *list);
static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
						void **list);
static inline void fixup_slab_list(struct kmem_cache *cachep,
				struct kmem_cache_node *n, struct page *page,
				void **list);

static int slab_early_init = 1;


#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))


static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);
	INIT_LIST_HEAD(&parent->slabs_partial);
	INIT_LIST_HEAD(&parent->slabs_free);
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 67, 91.78%, 3, 75.00%
  ravikiran g thirumalai: 6, 8.22%, 1, 25.00%
  Total: 73, 100.00%, 4, 100.00%

#define MAKE_LIST(cachep, listp, slab, nodeid)				\
	do {								\
		INIT_LIST_HEAD(listp);					\
		list_splice(&get_node(cachep, nodeid)->slab, listp);	\
	} while (0)

#define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
	do {								\
	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
	} while (0)

#define CFLGS_OBJFREELIST_SLAB	(0x40000000UL)
#define CFLGS_OFF_SLAB		(0x80000000UL)
#define	OBJFREELIST_SLAB(x)	((x)->flags & CFLGS_OBJFREELIST_SLAB)
#define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)

#define BATCHREFILL_LIMIT	16
/*
 * Optimization question: fewer reaps means less probability for unnecessary
 * cpucache drain/refill cycles.
 *
 * OTOH the cpuarrays can contain lots of objects,
 * which could lock up otherwise freeable slabs.
 */
#define REAPTIMEOUT_AC		(2*HZ)
#define REAPTIMEOUT_NODE	(4*HZ)

#if STATS
#define	STATS_INC_ACTIVE(x)	((x)->num_active++)
#define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
#define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
#define	STATS_INC_GROWN(x)	((x)->grown++)
#define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
#define	STATS_SET_HIGH(x)						\
	do {								\
		if ((x)->num_active > (x)->high_mark)			\
			(x)->high_mark = (x)->num_active;		\
	} while (0)
#define	STATS_INC_ERR(x)	((x)->errors++)
#define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
#define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x)	((x)->node_overflow++)
#define	STATS_SET_FREEABLE(x, i)					\
	do {								\
		if ((x)->max_freeable < i)				\
			(x)->max_freeable = i;				\
	} while (0)
#define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
#else
#define	STATS_INC_ACTIVE(x)	do { } while (0)
#define	STATS_DEC_ACTIVE(x)	do { } while (0)
#define	STATS_INC_ALLOCED(x)	do { } while (0)
#define	STATS_INC_GROWN(x)	do { } while (0)
#define	STATS_ADD_REAPED(x,y)	do { (void)(y); } while (0)
#define	STATS_SET_HIGH(x)	do { } while (0)
#define	STATS_INC_ERR(x)	do { } while (0)
#define	STATS_INC_NODEALLOCS(x)	do { } while (0)
#define	STATS_INC_NODEFREES(x)	do { } while (0)
#define STATS_INC_ACOVERFLOW(x)	do { } while (0)
#define	STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x)	do { } while (0)
#define STATS_INC_ALLOCMISS(x)	do { } while (0)
#define STATS_INC_FREEHIT(x)	do { } while (0)
#define STATS_INC_FREEMISS(x)	do { } while (0)
#endif

#if DEBUG

/*
 * memory layout of objects:
 * 0		: objp
 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
 * 		the end of an object is aligned with the end of the real
 * 		allocation. Catches writes behind the end of the allocation.
 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
 * 		redzone word.
 * cachep->obj_offset: The real object.
 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
 * cachep->size - 1* BYTES_PER_WORD: last caller address
 *					[BYTES_PER_WORD long]
 */
static int obj_offset(struct kmem_cache *cachep)
{
	return cachep->obj_offset;
}

Contributors (person: tokens, token share, commits, commit share):
  andrew morton: 12, 75.00%, 2, 50.00%
  pekka j enberg: 2, 12.50%, 1, 25.00%
  manfred spraul: 2, 12.50%, 1, 25.00%
  Total: 16, 100.00%, 4, 100.00%


static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	return (unsigned long long *) (objp + obj_offset(cachep) -
				       sizeof(unsigned long long));
}

Contributors (person: tokens, token share, commits, commit share):
  andrew morton: 42, 79.25%, 2, 40.00%
  david woodhouse: 8, 15.09%, 1, 20.00%
  pekka j enberg: 2, 3.77%, 1, 20.00%
  manfred spraul: 1, 1.89%, 1, 20.00%
  Total: 53, 100.00%, 5, 100.00%


static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
	if (cachep->flags & SLAB_STORE_USER)
		return (unsigned long long *)(objp + cachep->size -
					      sizeof(unsigned long long) -
					      REDZONE_ALIGN);
	return (unsigned long long *) (objp + cachep->size -
				       sizeof(unsigned long long));
}

Contributors (person: tokens, token share, commits, commit share):
  andrew morton: 63, 75.00%, 2, 33.33%
  david woodhouse: 17, 20.24%, 2, 33.33%
  pekka j enberg: 2, 2.38%, 1, 16.67%
  christoph lameter: 2, 2.38%, 1, 16.67%
  Total: 84, 100.00%, 6, 100.00%


static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
	return (void **)(objp + cachep->size - BYTES_PER_WORD);
}

Contributors (person: tokens, token share, commits, commit share):
  andrew morton: 42, 93.33%, 2, 50.00%
  pekka j enberg: 2, 4.44%, 1, 25.00%
  christoph lameter: 1, 2.22%, 1, 25.00%
  Total: 45, 100.00%, 4, 100.00%

#else

#define obj_offset(x)			0
#define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})

#endif

#ifdef CONFIG_DEBUG_SLAB_LEAK

static inline bool is_store_user_clean(struct kmem_cache *cachep)
{
	return atomic_read(&cachep->store_user_clean) == 1;
}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 23, 100.00%, 2, 100.00%
  Total: 23, 100.00%, 2, 100.00%


static inline void set_store_user_clean(struct kmem_cache *cachep)
{
	atomic_set(&cachep->store_user_clean, 1);
}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 22, 100.00%, 2, 100.00%
  Total: 22, 100.00%, 2, 100.00%


static inline void set_store_user_dirty(struct kmem_cache *cachep)
{
	if (is_store_user_clean(cachep))
		atomic_set(&cachep->store_user_clean, 0);
}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 29, 100.00%, 2, 100.00%
  Total: 29, 100.00%, 2, 100.00%

#else
static inline void set_store_user_dirty(struct kmem_cache *cachep) {}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 11, 100.00%, 1, 100.00%
  Total: 11, 100.00%, 1, 100.00%

#endif

/*
 * Do not go above this order unless 0 objects fit into the slab or
 * overridden on the command line.
 */
#define	SLAB_MAX_ORDER_HI	1
#define	SLAB_MAX_ORDER_LO	0
static int slab_max_order = SLAB_MAX_ORDER_LO;
static bool slab_max_order_set __initdata;

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page = virt_to_head_page(obj);
	return page->slab_cache;
}

Contributors (person: tokens, token share, commits, commit share):
  pekka j enberg: 26, 89.66%, 1, 33.33%
  christoph lameter: 3, 10.34%, 2, 66.67%
  Total: 29, 100.00%, 3, 100.00%


static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
				 unsigned int idx)
{
	return page->s_mem + cache->size * idx;
}

Contributors (person: tokens, token share, commits, commit share):
  pekka j enberg: 29, 87.88%, 1, 33.33%
  joonsoo kim: 3, 9.09%, 1, 33.33%
  christoph lameter: 1, 3.03%, 1, 33.33%
  Total: 33, 100.00%, 3, 100.00%

/*
 * We want to avoid an expensive divide : (offset / cache->size)
 * Using the fact that size is a constant for a particular cache,
 * we can replace (offset / cache->size) by
 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

Contributors (person: tokens, token share, commits, commit share):
  pekka j enberg: 27, 60.00%, 1, 33.33%
  eric dumazet: 15, 33.33%, 1, 33.33%
  joonsoo kim: 3, 6.67%, 1, 33.33%
  Total: 45, 100.00%, 3, 100.00%
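
A worked example of the reciprocal trick above. The 256-byte object size and the 1280-byte offset are assumed purely for illustration; reciprocal_value() and reciprocal_divide() are the helpers from <linux/reciprocal_div.h> that this file relies on:

/*
 * For a cache of 256-byte objects, the cache setup code stores
 * reciprocal_value(256) in cache->reciprocal_buffer_size.  An object that
 * starts 1280 bytes into page->s_mem then yields
 *
 *	obj_to_index() = reciprocal_divide(1280, reciprocal_value(256)) = 5
 *
 * i.e. the same result as 1280 / 256, but computed with a multiply and
 * shifts instead of an integer division on the alloc/free hot paths.
 */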

#define BOOT_CPUCACHE_ENTRIES	1	/* internal cache of cache description objs */
static struct kmem_cache kmem_cache_boot = {
	.batchcount = 1,
	.limit = BOOT_CPUCACHE_ENTRIES,
	.shared = 1,
	.size = sizeof(struct kmem_cache),
	.name = "kmem_cache",
};

static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	return this_cpu_ptr(cachep->cpu_cache);
}

Contributors (person: tokens, token share, commits, commit share):
  rusty russell: 15, 68.18%, 1, 25.00%
  joonsoo kim: 4, 18.18%, 1, 25.00%
  pekka j enberg: 3, 13.64%, 2, 50.00%
  Total: 22, 100.00%, 4, 100.00%

/*
 * Calculate the number of objects and left-over bytes for a given buffer size.
 */
static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size,
				   unsigned long flags, size_t *left_over)
{
	unsigned int num;
	size_t slab_size = PAGE_SIZE << gfporder;

	/*
	 * The slab management structure can be either off the slab or
	 * on it. For the latter case, the memory allocated for a
	 * slab is used for:
	 *
	 * - @buffer_size bytes for each object
	 * - One freelist_idx_t for each object
	 *
	 * We don't need to consider alignment of freelist because
	 * freelist will be at the end of slab page. The objects will be
	 * at the correct alignment.
	 *
	 * If the slab management structure is off the slab, then the
	 * alignment will already be calculated into the size. Because
	 * the slabs are all pages aligned, the objects will be at the
	 * correct alignment when allocated.
	 */
	if (flags & (CFLGS_OBJFREELIST_SLAB | CFLGS_OFF_SLAB)) {
		num = slab_size / buffer_size;
		*left_over = slab_size % buffer_size;
	} else {
		num = slab_size / (buffer_size + sizeof(freelist_idx_t));
		*left_over = slab_size %
			(buffer_size + sizeof(freelist_idx_t));
	}

	return num;
}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 39, 42.39%, 4, 40.00%
  pre-git: 35, 38.04%, 4, 40.00%
  steven rostedt: 17, 18.48%, 1, 10.00%
  andrew morton: 1, 1.09%, 1, 10.00%
  Total: 92, 100.00%, 10, 100.00%
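
A worked example of the calculation in cache_estimate(). The numbers are assumptions chosen for illustration: PAGE_SIZE of 4096, an order-0 slab, 256-byte objects and sizeof(freelist_idx_t) == 1:

/*
 * On-slab freelist: each object costs 256 + 1 bytes, so
 *	num        = 4096 / 257 = 15
 *	*left_over = 4096 - 15 * 257 = 241
 * The 241 spare bytes are what the colouring code later spreads across slabs.
 *
 * Off-slab or object-embedded freelist (CFLGS_OFF_SLAB/CFLGS_OBJFREELIST_SLAB):
 *	num        = 4096 / 256 = 16
 *	*left_over = 0
 */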

#if DEBUG
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)

static void __slab_error(const char *function, struct kmem_cache *cachep,
			 char *msg)
{
	pr_err("slab error in %s(): cache `%s': %s\n",
	       function, cachep->name, msg);
	dump_stack();
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

Contributors (person: tokens, token share, commits, commit share):
  andrew morton: 32, 74.42%, 1, 16.67%
  dave jones: 5, 11.63%, 1, 16.67%
  pekka j enberg: 2, 4.65%, 1, 16.67%
  rusty russell: 2, 4.65%, 1, 16.67%
  zwane mwaikambo: 1, 2.33%, 1, 16.67%
  joe perches: 1, 2.33%, 1, 16.67%
  Total: 43, 100.00%, 6, 100.00%

#endif

/*
 * By default on NUMA we use alien caches to stage the freeing of
 * objects allocated from other nodes. This causes massive memory
 * inefficiencies when using fake NUMA setup to split memory into a
 * large number of small nodes, so it can be disabled on the command
 * line
 */
static int use_alien_caches __read_mostly = 1;

static int __init noaliencache_setup(char *s)
{
	use_alien_caches = 0;
	return 1;
}

Contributors (person: tokens, token share, commits, commit share):
  paul menage: 18, 100.00%, 1, 100.00%
  Total: 18, 100.00%, 1, 100.00%

__setup("noaliencache", noaliencache_setup);
static int __init slab_max_order_setup(char *str)
{
	get_option(&str, &slab_max_order);
	slab_max_order = slab_max_order < 0 ? 0 :
				min(slab_max_order, MAX_ORDER - 1);
	slab_max_order_set = true;

	return 1;
}

Contributors (person: tokens, token share, commits, commit share):
  david rientjes: 44, 100.00%, 1, 100.00%
  Total: 44, 100.00%, 1, 100.00%

__setup("slab_max_order=", slab_max_order_setup); #ifdef CONFIG_NUMA /* * Special reaping functions for NUMA systems called from cache_reap(). * These take care of doing round robin flushing of alien caches (containing * objects freed on different nodes from which they were allocated) and the * flushing of remote pcps by calling drain_node_pages. */ static DEFINE_PER_CPU(unsigned long, slab_reap_node);
static void init_reap_node(int cpu) { per_cpu(slab_reap_node, cpu) = next_node_in(cpu_to_mem(cpu), node_online_map); }

Contributors

PersonTokensPropCommitsCommitProp
christoph lameterchristoph lameter1869.23%133.33%
andrew mortonandrew morton726.92%133.33%
lee schermerhornlee schermerhorn13.85%133.33%
Total26100.00%3100.00%


static void next_reap_node(void)
{
	int node = __this_cpu_read(slab_reap_node);

	node = next_node_in(node, node_online_map);
	__this_cpu_write(slab_reap_node, node);
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 29, 90.62%, 2, 50.00%
  tejun heo: 2, 6.25%, 1, 25.00%
  andrew morton: 1, 3.12%, 1, 25.00%
  Total: 32, 100.00%, 4, 100.00%

#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif

/*
 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
 * via the workqueue/eventd.
 * Add the CPU number into the expiration time to minimize the possibility of
 * the CPUs getting into lockstep and contending for the global cache chain
 * lock.
 */
static void start_cpu_timer(int cpu)
{
	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);

	/*
	 * When this gets called from do_initcalls via cpucache_init(),
	 * init_workqueues() has already run, so keventd will be setup
	 * at that time.
	 */
	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					__round_jiffies_relative(HZ, cpu));
	}
}

Contributors (person: tokens, token share, commits, commit share):
  andrew morton: 29, 45.31%, 3, 27.27%
  dimitri sivanich: 16, 25.00%, 1, 9.09%
  christoph lameter: 5, 7.81%, 1, 9.09%
  rusty russell: 4, 6.25%, 1, 9.09%
  arjan van de ven: 4, 6.25%, 1, 9.09%
  david howells: 3, 4.69%, 1, 9.09%
  tejun heo: 2, 3.12%, 2, 18.18%
  pre-git: 1, 1.56%, 1, 9.09%
  Total: 64, 100.00%, 11, 100.00%


static void init_arraycache(struct array_cache *ac, int limit, int batch)
{
	/*
	 * The array_cache structures contain pointers to free objects.
	 * However, when such objects are allocated or transferred to another
	 * cache the pointers are not cleared and they could be counted as
	 * valid references during a kmemleak scan. Therefore, kmemleak must
	 * not scan such objects.
	 */
	kmemleak_no_scan(ac);
	if (ac) {
		ac->avail = 0;
		ac->limit = limit;
		ac->batchcount = batch;
		ac->touched = 0;
	}
}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 53, 100.00%, 1, 100.00%
  Total: 53, 100.00%, 1, 100.00%


static struct array_cache *alloc_arraycache(int node, int entries,
					    int batchcount, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct array_cache);
	struct array_cache *ac = NULL;

	ac = kmalloc_node(memsize, gfp, node);
	init_arraycache(ac, entries, batchcount);
	return ac;
}

Contributors (person: tokens, token share, commits, commit share):
  mel gorman: 28, 41.79%, 1, 16.67%
  andrew morton: 27, 40.30%, 1, 16.67%
  joonsoo kim: 8, 11.94%, 2, 33.33%
  pekka j enberg: 3, 4.48%, 1, 16.67%
  christoph lameter: 1, 1.49%, 1, 16.67%
  Total: 67, 100.00%, 6, 100.00%


static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep,
					struct page *page, void *objp)
{
	struct kmem_cache_node *n;
	int page_node;
	LIST_HEAD(list);

	page_node = page_to_nid(page);
	n = get_node(cachep, page_node);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);
}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 45, 50.56%, 1, 20.00%
  mel gorman: 36, 40.45%, 1, 20.00%
  christoph lameter: 8, 8.99%, 3, 60.00%
  Total: 89, 100.00%, 5, 100.00%

/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
static int transfer_objects(struct array_cache *to,
		struct array_cache *from, unsigned int max)
{
	/* Figure out how many entries to transfer */
	int nr = min3(from->avail, max, to->limit - to->avail);

	if (!nr)
		return 0;

	memcpy(to->entry + to->avail, from->entry + from->avail - nr,
			sizeof(void *) * nr);

	from->avail -= nr;
	to->avail += nr;
	return nr;
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 75, 80.65%, 1, 25.00%
  mel gorman: 9, 9.68%, 1, 25.00%
  joonsoo kim: 8, 8.60%, 1, 25.00%
  hagen paul pfeifer: 1, 1.08%, 1, 25.00%
  Total: 93, 100.00%, 4, 100.00%

#ifndef CONFIG_NUMA

#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)

static inline struct alien_cache **alloc_alien_cache(int node,
						int limit, gfp_t gfp)
{
	return NULL;
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 17, 77.27%, 1, 25.00%
  pekka j enberg: 3, 13.64%, 1, 25.00%
  joonsoo kim: 2, 9.09%, 2, 50.00%
  Total: 22, 100.00%, 4, 100.00%


static inline void free_alien_cache(struct alien_cache **ac_ptr)
{
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 11, 91.67%, 1, 50.00%
  joonsoo kim: 1, 8.33%, 1, 50.00%
  Total: 12, 100.00%, 2, 100.00%


static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
	return 0;
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 19, 100.00%, 1, 100.00%
  Total: 19, 100.00%, 1, 100.00%


static inline void *alternate_node_alloc(struct kmem_cache *cachep,
		gfp_t flags)
{
	return NULL;
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 19, 100.00%, 1, 100.00%
  Total: 19, 100.00%, 1, 100.00%


static inline void *____cache_alloc_node(struct kmem_cache *cachep,
		gfp_t flags, int nodeid)
{
	return NULL;
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 21, 95.45%, 1, 50.00%
  christoph hellwig: 1, 4.55%, 1, 50.00%
  Total: 22, 100.00%, 2, 100.00%


static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return flags & ~__GFP_NOFAIL;
}

Contributors (person: tokens, token share, commits, commit share):
  david rientjes: 13, 81.25%, 1, 50.00%
  mel gorman: 3, 18.75%, 1, 50.00%
  Total: 16, 100.00%, 2, 100.00%

#else	/* CONFIG_NUMA */

static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);

static struct alien_cache *__alloc_alien_cache(int node, int entries,
						int batch, gfp_t gfp)
{
	size_t memsize = sizeof(void *) * entries + sizeof(struct alien_cache);
	struct alien_cache *alc = NULL;

	alc = kmalloc_node(memsize, gfp, node);
	init_arraycache(&alc->ac, entries, batch);
	spin_lock_init(&alc->lock);
	return alc;
}

Contributors (person: tokens, token share, commits, commit share):
  joonsoo kim: 78, 100.00%, 3, 100.00%
  Total: 78, 100.00%, 3, 100.00%


static struct alien_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
	struct alien_cache **alc_ptr;
	size_t memsize = sizeof(void *) * nr_node_ids;
	int i;

	if (limit > 1)
		limit = 12;
	alc_ptr = kzalloc_node(memsize, gfp, node);
	if (!alc_ptr)
		return NULL;

	for_each_node(i) {
		if (i == node || !node_online(i))
			continue;
		alc_ptr[i] = __alloc_alien_cache(node, limit, 0xbaadf00d, gfp);
		if (!alc_ptr[i]) {
			for (i--; i >= 0; i--)
				kfree(alc_ptr[i]);
			kfree(alc_ptr);
			return NULL;
		}
	}
	return alc_ptr;
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 86, 60.14%, 2, 18.18%
  andrew morton: 32, 22.38%, 3, 27.27%
  joonsoo kim: 16, 11.19%, 2, 18.18%
  pekka j enberg: 6, 4.20%, 1, 9.09%
  haicheng li: 1, 0.70%, 1, 9.09%
  akinobu mita: 1, 0.70%, 1, 9.09%
  rusty russell: 1, 0.70%, 1, 9.09%
  Total: 143, 100.00%, 11, 100.00%


static void free_alien_cache(struct alien_cache **alc_ptr)
{
	int i;

	if (!alc_ptr)
		return;
	for_each_node(i)
		kfree(alc_ptr[i]);
	kfree(alc_ptr);
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 25, 65.79%, 1, 25.00%
  andrew morton: 6, 15.79%, 1, 25.00%
  joonsoo kim: 5, 13.16%, 1, 25.00%
  rusty russell: 2, 5.26%, 1, 25.00%
  Total: 38, 100.00%, 4, 100.00%


static void __drain_alien_cache(struct kmem_cache *cachep,
				struct array_cache *ac, int node,
				struct list_head *list)
{
	struct kmem_cache_node *n = get_node(cachep, node);

	if (ac->avail) {
		spin_lock(&n->list_lock);
		/*
		 * Stuff objects into the remote nodes shared array first.
		 * That way we could avoid the overhead of putting the objects
		 * into the free lists and getting them back later.
		 */
		if (n->shared)
			transfer_objects(n->shared, ac, ac->limit);

		free_block(cachep, ac->entry, ac->avail, node, list);
		ac->avail = 0;
		spin_unlock(&n->list_lock);
	}
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 68, 66.02%, 6, 46.15%
  rusty russell: 13, 12.62%, 1, 7.69%
  andrew morton: 8, 7.77%, 2, 15.38%
  joonsoo kim: 7, 6.80%, 2, 15.38%
  jacob shin: 5, 4.85%, 1, 7.69%
  pekka j enberg: 2, 1.94%, 1, 7.69%
  Total: 103, 100.00%, 13, 100.00%

/*
 * Called from cache_reap() to regularly drain alien caches round robin.
 */
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
	int node = __this_cpu_read(slab_reap_node);

	if (n->alien) {
		struct alien_cache *alc = n->alien[node];
		struct array_cache *ac;

		if (alc) {
			ac = &alc->ac;
			if (ac->avail && spin_trylock_irq(&alc->lock)) {
				LIST_HEAD(list);

				__drain_alien_cache(cachep, ac, node, &list);
				spin_unlock_irq(&alc->lock);
				slabs_destroy(cachep, &list);
			}
		}
	}
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 73, 65.77%, 5, 55.56%
  joonsoo kim: 37, 33.33%, 3, 33.33%
  tejun heo: 1, 0.90%, 1, 11.11%
  Total: 111, 100.00%, 9, 100.00%


static void drain_alien_cache(struct kmem_cache *cachep,
				struct alien_cache **alien)
{
	int i = 0;
	struct alien_cache *alc;
	struct array_cache *ac;
	unsigned long flags;

	for_each_online_node(i) {
		alc = alien[i];
		if (alc) {
			LIST_HEAD(list);

			ac = &alc->ac;
			spin_lock_irqsave(&alc->lock, flags);
			__drain_alien_cache(cachep, ac, i, &list);
			spin_unlock_irqrestore(&alc->lock, flags);
			slabs_destroy(cachep, &list);
		}
	}
}

Contributors (person: tokens, token share, commits, commit share):
  christoph lameter: 49, 45.79%, 1, 10.00%
  joonsoo kim: 33, 30.84%, 3, 30.00%
  andrew morton: 19, 17.76%, 3, 30.00%
  pre-git: 2, 1.87%, 1, 10.00%
  pekka j enberg: 2, 1.87%, 1, 10.00%
  ravikiran g thirumalai: 2, 1.87%, 1, 10.00%
  Total: 107, 100.00%, 10, 100.00%


static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
				int node, int page_node)
{
	struct kmem_cache_node *n;
	struct alien_cache *alien = NULL;
	struct array_cache *ac;
	LIST_HEAD(list);

	n = get_node(cachep, node);
	STATS_INC_NODEFREES(cachep);
	if (n->alien && n->alien[page_node]) {
		alien = n->alien[page_node];
		ac