cregit-Linux: how code gets into the kernel

Release 4.14: include/linux/slub_def.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};
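
/*
 * Illustrative sketch, not part of this header: with CONFIG_SLUB_STATS
 * enabled, mm/slub.c bumps these counters with a cheap per-cpu increment
 * along the lines below (modeled on the stat() helper in mm/slub.c for
 * 4.14; treat the exact form as an assumption).
 */
static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/* Racy on preemptible kernels, but avoids irq-disable overhead. */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}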


struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
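
/*
 * Illustrative sketch, not part of this header, of why freelist and tid
 * live side by side: the lockless allocation fastpath pops an object by
 * updating both words with a single this_cpu_cmpxchg_double(), loosely
 * modeled on slab_alloc_node() in mm/slub.c.  get_freepointer() and
 * next_tid() stand in for the corresponding mm/slub.c helpers.
 */
static inline void *example_alloc_fastpath(struct kmem_cache *s)
{
	struct kmem_cache_cpu *c;
	unsigned long tid;
	void *object;

	/* Re-read until tid and cpu slab form a consistent snapshot. */
	do {
		tid = this_cpu_read(s->cpu_slab->tid);
		c = raw_cpu_ptr(s->cpu_slab);
	} while (IS_ENABLED(CONFIG_PREEMPT) &&
		 unlikely(tid != READ_ONCE(c->tid)));

	object = c->freelist;
	if (unlikely(!object))
		return NULL;		/* real code takes the slowpath here */

	/* Succeeds only if neither freelist nor tid changed under us. */
	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
				     object, tid,
				     get_freepointer(s, object), next_tid(tid)))
		return NULL;		/* lost a race; real code retries */

	return object;
}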

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
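
/*
 * Illustrative sketch, not part of this header: the per-cpu partial list
 * is threaded through page->next, so a drain walks the head and unlinks
 * with slub_set_percpu_partial(), loosely modeled on unfreeze_partials()
 * in mm/slub.c.
 */
static inline void example_drain_percpu_partial(struct kmem_cache_cpu *c)
{
	struct page *page;

	while ((page = slub_percpu_partial(c))) {
		slub_set_percpu_partial(c, page);	/* c->partial = page->next */
		/* ...return "page" to its node's partial list here... */
	}
}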

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */

struct kmem_cache_order_objects {
	unsigned long x;
};
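
/*
 * Illustrative sketch, not part of this header: mm/slub.c packs the page
 * order into the high bits of x and the object count into the low 16 bits,
 * so both can be read or updated atomically as one word.  OO_SHIFT,
 * OO_MASK and the two accessors below follow their mm/slub.c namesakes.
 */
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;	/* page allocation order */
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;	/* objects per slab of that order */
}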

/*
 * Slab cache management.
 */

struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	int cpu_partial;	/* Number of per-cpu partial objects to keep around */
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;	/* XOR key that obfuscates stored free pointers */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;	/* Randomized freelist construction order */
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];	/* Per-node partial lists */
};
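
/*
 * Illustrative sketch, not part of this header: "offset" is the byte
 * offset of the stored free pointer inside a free object, so walking the
 * freelist reads the next link as below (modeled on get_freepointer() in
 * mm/slub.c, ignoring CONFIG_SLAB_FREELIST_HARDENED, which additionally
 * XORs the stored value with s->random).
 */
static inline void *example_get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}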

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)

#define slub_set_cpu_partial(s, n)	\
({					\
	slub_cpu_partial(s) = (n);	\
})
#else
#define slub_cpu_partial(s)		(0)

#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
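
/*
 * Illustrative sketch, not part of this header: cache creation sizes the
 * per-cpu partial pool by object size, roughly as kmem_cache_open() does
 * in mm/slub.c for 4.14; the thresholds below are quoted from memory, so
 * treat the exact numbers as an assumption.
 */
static inline void example_size_cpu_partial(struct kmem_cache *s)
{
	if (s->size >= PAGE_SIZE)
		slub_set_cpu_partial(s, 2);	/* large objects: cache few slabs */
	else if (s->size >= 1024)
		slub_set_cpu_partial(s, 6);
	else if (s->size >= 256)
		slub_set_cpu_partial(s, 13);
	else
		slub_set_cpu_partial(s, 30);	/* small objects: cache many */
}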

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
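
/*
 * Illustrative sketch, not part of this header: nearest_obj() maps an
 * arbitrary address inside a slab page back to the start of the object
 * containing it, which is how KASAN locates the victim object for its
 * reports.  The wrapper below is hypothetical.
 */
static inline void *example_containing_object(struct kmem_cache *cache, void *ptr)
{
	struct page *page = virt_to_head_page(ptr);	/* head page of the slab */

	return nearest_obj(cache, page, ptr);
}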

#endif /* _LINUX_SLUB_DEF_H */

Overall Contributors

Person                       Tokens    Prop      Commits   CommitProp
Christoph Lameter            276       47.26%    24        53.33%
Alexander Potapenko          114       19.52%    3         6.67%
Wei Yang                     108       18.49%    3         6.67%
Andrey Ryabinin              22        3.77%     1         2.22%
Glauber de Oliveira Costa    12        2.05%     2         4.44%
Thomas Garnier               10        1.71%     1         2.22%
Vladimir Davydov             10        1.71%     1         2.22%
Kees Cook                    9         1.54%     1         2.22%
Alex Shi                     9         1.54%     2         4.44%
Tejun Heo                    6         1.03%     2         4.44%
Lai Jiangshan                4         0.68%     1         2.22%
Namhyung Kim                 1         0.17%     1         2.22%
Johannes Weiner              1         0.17%     1         2.22%
Greg Kroah-Hartman           1         0.17%     1         2.22%
Zhi Yong Wu                  1         0.17%     1         2.22%
Total                        584       100.00%   45        100.00%