Contributors: 23
Author                     Tokens  Token Proportion  Commits  Commit Proportion
Linus Torvalds (pre-git)       77            20.37%        3              6.82%
Alexander Potapenko            77            20.37%        3              6.82%
Pekka J Enberg                 53            14.02%        3              6.82%
Christoph Lameter              34             8.99%        8             18.18%
Andrey Konovalov               29             7.67%        1              2.27%
Vlastimil Babka                18             4.76%        2              4.55%
Andrey Ryabinin                15             3.97%        1              2.27%
Roman Gushchin                 12             3.17%        1              2.27%
Thomas Garnier                 10             2.65%        2              4.55%
Eric Dumazet                    7             1.85%        2              4.55%
JoonSoo Kim                     7             1.85%        2              4.55%
Andrew Morton                   7             1.85%        2              4.55%
David Windsor                   6             1.59%        1              2.27%
Alexey Dobriyan                 6             1.59%        3              6.82%
Ravikiran G. Thirumalai         4             1.06%        1              2.27%
Manfred Spraul                  4             1.06%        1              2.27%
Hagen Paul Pfeifer              3             0.79%        1              2.27%
Glauber de Oliveira Costa       3             0.79%        2              4.55%
Hannes Frederic Sowa            2             0.53%        1              2.27%
Baoquan He                      1             0.26%        1              2.27%
Feng Tang                       1             0.26%        1              2.27%
Fam Zheng                       1             0.26%        1              2.27%
Greg Kroah-Hartman              1             0.26%        1              2.27%
Total                         378                         44


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

#include <linux/kfence.h>
#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
	struct array_cache __percpu *cpu_cache;

/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;	/* # of objs moved per refill/drain of the per-cpu cache */
	unsigned int limit;		/* max # of objs in the per-cpu array_cache */
	unsigned int shared;		/* factor sizing the per-node shared array_cache */

/* 2) touched by every alloc & free from the backend */
	unsigned int size;		/* object size incl. debug fields and padding */
	struct reciprocal_value reciprocal_buffer_size;	/* 1/size, see obj_to_index() */

	slab_flags_t flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* page order per slab: a slab spans 2^gfporder pages */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	unsigned int freelist_size;	/* bytes used by the per-slab freelist */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;		/* cache name, as shown in /proc/slabinfo */
	struct list_head list;		/* linkage on the global list of caches */
	int refcount;			/* # of users of this cache */
	int object_size;		/* original object size, without debug overhead */
	int align;			/* object alignment in bytes */

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. 'size' contains the total
	 * object size including these internal fields, while 'obj_offset'
	 * and 'object_size' contain the offset to the user object and its
	 * size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;	/* pre-computed shuffled freelist indices */
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];	/* per-NUMA-node slab lists and caches */
};
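
/*
 * Worked example (illustrative numbers, not taken from any particular
 * cache): with 4096-byte pages, gfporder == 0 and size == 192, a slab
 * spans PAGE_SIZE << gfporder == 4096 bytes, so num can be at most
 * 4096 / 192 == 21.  The real sizing in mm/slab.c also reserves room
 * for the freelist and applies the colour offset, so num may be lower.
 */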

/*
 * Round an arbitrary pointer into a slab down to the start of the
 * object containing it, clamping to the start of the last object.
 */
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab->s_mem) % cache->size;
	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
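
/*
 * Usage sketch (illustrative; cachep, slab and interior_ptr are names of
 * our choosing): code that only holds a pointer somewhere inside an
 * allocated object can recover the object's start, e.g.
 *
 *	void *start = nearest_obj(cachep, slab, interior_ptr);
 *
 * KASAN uses this when reporting which object a bad access fell into.
 */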

/*
 * We want to avoid an expensive divide: (offset / cache->size).
 * Because size is constant for a given cache, we can replace
 * (offset / cache->size) with
 * reciprocal_divide(offset, cache->reciprocal_buffer_size),
 * which needs only a multiply and shifts.
 */
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	u32 offset = (obj - slab->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
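
/*
 * Illustrative comparison (a sketch; obj_to_index_slow is a name of our
 * choosing, not a kernel function): the open-coded equivalent of
 * obj_to_index().  The compiler cannot strength-reduce this divide by
 * itself because cache->size is only fixed per cache at runtime, which
 * is why cache setup precomputes reciprocal_value(size).
 */
static inline unsigned int obj_to_index_slow(const struct kmem_cache *cache,
					     const struct slab *slab, void *obj)
{
	return (unsigned int)((obj - slab->s_mem) / cache->size);
}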

/*
 * KFENCE-managed allocations come from dedicated guarded pages that hold
 * a single object; ordinary slabs of this cache hold cache->num objects.
 */
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	if (is_kfence_address(slab_address(slab)))
		return 1;
	return cache->num;
}

#endif	/* _LINUX_SLAB_DEF_H */