cregit-Linux how code gets into the kernel

Release 4.8 mm/slab_common.c

/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>


#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"


enum slab_state slab_state;

LIST_HEAD(slab_caches);

DEFINE_MUTEX(slab_mutex);

struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */

#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB | SLAB_KASAN)


#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
                         SLAB_NOTRACK | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */

static int slab_nomerge;


static int __init setup_slab_nomerge(char *str) { slab_nomerge = 1; return 1; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
joonsoo kim | 18 | 100.00% | 1 | 100.00%
Total | 18 | 100.00% | 1 | 100.00%

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s) { return s->object_size; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
joonsoo kim | 16 | 100.00% | 1 | 100.00%
Total | 16 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size) { struct kmem_cache *s = NULL; if (!name || in_interrupt() || size < sizeof(void *) || size > KMALLOC_MAX_SIZE) { pr_err("kmem_cache_create(%s) integrity check failed\n", name); return -EINVAL; } list_for_each_entry(s, &slab_caches, list) { char tmp; int res; /* * This happens when the module gets unloaded and doesn't * destroy its slab cache and no-one else reuses the vmalloc * area of the module. Print a warning. */ res = probe_kernel_address(s->name, tmp); if (res) { pr_err("Slab cache with size %d has lost its name\n", s->object_size); continue; } } WARN_ON(strchr(name, ' ')); /* It confuses parsers */ return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 98 | 87.50% | 2 | 66.67%
shuah khan | 14 | 12.50% | 1 | 33.33%
Total | 112 | 100.00% | 3 | 100.00%

#else
static inline int kmem_cache_sanity_check(const char *name, size_t size) { return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
shuah khan | 18 | 100.00% | 1 | 100.00%
Total | 18 | 100.00% | 1 | 100.00%

#endif
void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p) { size_t i; for (i = 0; i < nr; i++) { if (s) kmem_cache_free(s, p[i]); else kfree(p[i]); } }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 44 | 74.58% | 1 | 50.00%
jesper dangaard brouer | 15 | 25.42% | 1 | 50.00%
Total | 59 | 100.00% | 2 | 100.00%


int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr, void **p) { size_t i; for (i = 0; i < nr; i++) { void *x = p[i] = kmem_cache_alloc(s, flags); if (!x) { __kmem_cache_free_bulk(s, i, p); return 0; } } return i; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 74 | 96.10% | 1 | 50.00%
jesper dangaard brouer | 3 | 3.90% | 1 | 50.00%
Total | 77 | 100.00% | 2 | 100.00%
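
The two helpers above are the generic fallbacks behind kmem_cache_alloc_bulk() and kmem_cache_free_bulk(): for this generic version allocation is all-or-nothing, and a partial batch is rolled back with __kmem_cache_free_bulk(). A minimal caller-side sketch follows; "my_fill_batch", "my_cache" and MY_BATCH are hypothetical and error handling is trimmed.

/*
 * Sketch only: batch-allocate sixteen objects from an existing cache and
 * free them again in one call.
 */
#include <linux/slab.h>

#define MY_BATCH 16

static int my_fill_batch(struct kmem_cache *my_cache)
{
	void *objs[MY_BATCH];
	int got;

	/* 0 on failure, MY_BATCH on success for the generic implementation. */
	got = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, MY_BATCH, objs);
	if (!got)
		return -ENOMEM;

	/* ... initialise and use the objects ... */

	kmem_cache_free_bulk(my_cache, got, objs);
	return 0;
}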

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void slab_init_memcg_params(struct kmem_cache *s) { s->memcg_params.is_root_cache = true; INIT_LIST_HEAD(&s->memcg_params.list); RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 39 | 100.00% | 2 | 100.00%
Total | 39 | 100.00% | 2 | 100.00%


static int init_memcg_params(struct kmem_cache *s, struct mem_cgroup *memcg, struct kmem_cache *root_cache) { struct memcg_cache_array *arr; if (memcg) { s->memcg_params.is_root_cache = false; s->memcg_params.memcg = memcg; s->memcg_params.root_cache = root_cache; return 0; } slab_init_memcg_params(s); if (!memcg_nr_cache_ids) return 0; arr = kzalloc(sizeof(struct memcg_cache_array) + memcg_nr_cache_ids * sizeof(void *), GFP_KERNEL); if (!arr) return -ENOMEM; RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr); return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 116 | 100.00% | 3 | 100.00%
Total | 116 | 100.00% | 3 | 100.00%


static void destroy_memcg_params(struct kmem_cache *s) { if (is_root_cache(s)) kfree(rcu_access_pointer(s->memcg_params.memcg_caches)); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 30 | 100.00% | 2 | 100.00%
Total | 30 | 100.00% | 2 | 100.00%


static int update_memcg_params(struct kmem_cache *s, int new_array_size) { struct memcg_cache_array *old, *new; if (!is_root_cache(s)) return 0; new = kzalloc(sizeof(struct memcg_cache_array) + new_array_size * sizeof(void *), GFP_KERNEL); if (!new) return -ENOMEM; old = rcu_dereference_protected(s->memcg_params.memcg_caches, lockdep_is_held(&slab_mutex)); if (old) memcpy(new->entries, old->entries, memcg_nr_cache_ids * sizeof(void *)); rcu_assign_pointer(s->memcg_params.memcg_caches, new); if (old) kfree_rcu(old, rcu); return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 128 | 100.00% | 3 | 100.00%
Total | 128 | 100.00% | 3 | 100.00%
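
update_memcg_params() is an instance of the usual RCU copy-and-publish idiom: build a larger array, copy the old entries while holding slab_mutex, publish the new array with rcu_assign_pointer(), and defer freeing the old copy past a grace period. Below is a stripped-down sketch of the same pattern under that assumption; struct my_array, my_ptr, my_lock and my_resize are hypothetical names.

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_array {
	struct rcu_head rcu;
	int entries[0];			/* flexible array, 4.8-era style */
};

static struct my_array __rcu *my_ptr;
static DEFINE_MUTEX(my_lock);

/* Grow the array from old_n to new_n entries; caller holds my_lock. */
static int my_resize(int old_n, int new_n)
{
	struct my_array *old, *new;

	new = kzalloc(sizeof(*new) + new_n * sizeof(int), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* Writers are serialized by my_lock, so a plain dereference is safe. */
	old = rcu_dereference_protected(my_ptr, lockdep_is_held(&my_lock));
	if (old)
		memcpy(new->entries, old->entries, old_n * sizeof(int));

	/* Publish: readers see either the old array or the fully-copied new one. */
	rcu_assign_pointer(my_ptr, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}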


int memcg_update_all_caches(int num_memcgs) { struct kmem_cache *s; int ret = 0; mutex_lock(&slab_mutex); list_for_each_entry(s, &slab_caches, list) { ret = update_memcg_params(s, num_memcgs); /* * Instead of freeing the memory, we'll just leave the caches * up to this point in an updated state. */ if (ret) break; } mutex_unlock(&slab_mutex); return ret; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
glauber costa | 55 | 94.83% | 1 | 25.00%
vladimir davydov | 3 | 5.17% | 3 | 75.00%
Total | 58 | 100.00% | 4 | 100.00%

#else
static inline int init_memcg_params(struct kmem_cache *s, struct mem_cgroup *memcg, struct kmem_cache *root_cache) { return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 25 | 100.00% | 2 | 100.00%
Total | 25 | 100.00% | 2 | 100.00%


static inline void destroy_memcg_params(struct kmem_cache *s) { }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 11 | 100.00% | 2 | 100.00%
Total | 11 | 100.00% | 2 | 100.00%

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s) { if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE)) return 1; if (!is_root_cache(s)) return 1; if (s->ctor) return 1; /* * We may have set a slab to be unmergeable during bootstrap. */ if (s->refcount < 0) return 1; return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
joonsoo kim | 60 | 100.00% | 1 | 100.00%
Total | 60 | 100.00% | 1 | 100.00%


struct kmem_cache *find_mergeable(size_t size, size_t align, unsigned long flags, const char *name, void (*ctor)(void *)) { struct kmem_cache *s; if (slab_nomerge || (flags & SLAB_NEVER_MERGE)) return NULL; if (ctor) return NULL; size = ALIGN(size, sizeof(void *)); align = calculate_alignment(flags, align, size); size = ALIGN(size, align); flags = kmem_cache_flags(size, flags, name, NULL); list_for_each_entry_reverse(s, &slab_caches, list) { if (slab_unmergeable(s)) continue; if (size > s->size) continue; if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME)) continue; /* * Check if alignment is compatible. * Courtesy of Adrian Drzewiecki */ if ((s->size & ~(align - 1)) != s->size) continue; if (s->size - size >= sizeof(void *)) continue; if (IS_ENABLED(CONFIG_SLAB) && align && (align > s->align || s->align % align)) continue; return s; } return NULL; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
joonsoo kim | 213 | 100.00% | 3 | 100.00%
Total | 213 | 100.00% | 3 | 100.00%
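
Two of the checks in find_mergeable() decide whether an existing cache can serve the new request: its object size must already satisfy the requested alignment, and it must not waste more than one pointer's worth of space per object. The following stand-alone illustration covers just those two tests; it is user-space C with a hypothetical helper mergeable_size(), not kernel code.

#include <stdio.h>

static int mergeable_size(unsigned long existing_size, unsigned long wanted_size,
			  unsigned long align)
{
	/* The existing object size must already be a multiple of the alignment. */
	if ((existing_size & ~(align - 1)) != existing_size)
		return 0;
	/* And it must not waste more than a pointer's worth of space per object. */
	if (existing_size - wanted_size >= sizeof(void *))
		return 0;
	return 1;
}

int main(void)
{
	/* 96-byte cache, 92-byte request, 32-byte alignment: merges (4 < 8). */
	printf("%d\n", mergeable_size(96, 92, 32));
	/* A 100-byte cache fails the alignment test for align = 32. */
	printf("%d\n", mergeable_size(100, 92, 32));
	return 0;
}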

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags, unsigned long align, unsigned long size) { /* * If the user wants hardware cache aligned objects then follow that * suggestion if the object is sufficiently large. * * The hardware cache alignment cannot override the specified * alignment though. If that is greater then use it. */ if (flags & SLAB_HWCACHE_ALIGN) { unsigned long ralign = cache_line_size(); while (size <= ralign / 2) ralign /= 2; align = max(align, ralign); } if (align < ARCH_SLAB_MINALIGN) align = ARCH_SLAB_MINALIGN; return ALIGN(align, sizeof(void *)); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 77 | 100.00% | 1 | 100.00%
Total | 77 | 100.00% | 1 | 100.00%
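
For SLAB_HWCACHE_ALIGN the alignment is relaxed for small objects: the cache-line size is halved while two objects still fit in one line. The user-space replica below works through one case with assumed constants (cache_line_size() = 64, ARCH_SLAB_MINALIGN = 8, and a made-up flag value); a 20-byte object ends up 32-byte aligned.

#include <stdio.h>

#define MY_CACHE_LINE	 64UL
#define MY_MINALIGN	 8UL
#define MY_HWCACHE_ALIGN 0x1UL		/* illustrative value only */

static unsigned long my_calculate_alignment(unsigned long flags,
					    unsigned long align,
					    unsigned long size)
{
	if (flags & MY_HWCACHE_ALIGN) {
		unsigned long ralign = MY_CACHE_LINE;

		/* Small objects do not need full cache-line alignment:
		 * halve the alignment while two objects fit in one line. */
		while (size <= ralign / 2)
			ralign /= 2;
		align = align > ralign ? align : ralign;
	}
	if (align < MY_MINALIGN)
		align = MY_MINALIGN;
	/* Round up to a multiple of the pointer size, like ALIGN() does. */
	return (align + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
}

int main(void)
{
	/* 64 -> 32 because 20 <= 32; the loop stops since 20 > 16. */
	printf("%lu\n", my_calculate_alignment(MY_HWCACHE_ALIGN, 0, 20));
	return 0;
}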


static struct kmem_cache *create_cache(const char *name, size_t object_size, size_t size, size_t align, unsigned long flags, void (*ctor)(void *), struct mem_cgroup *memcg, struct kmem_cache *root_cache) { struct kmem_cache *s; int err; err = -ENOMEM; s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (!s) goto out; s->name = name; s->object_size = object_size; s->size = size; s->align = align; s->ctor = ctor; err = init_memcg_params(s, memcg, root_cache); if (err) goto out_free_cache; err = __kmem_cache_create(s, flags); if (err) goto out_free_cache; s->refcount = 1; list_add(&s->list, &slab_caches); out: if (err) return ERR_PTR(err); return s; out_free_cache: destroy_memcg_params(s); kmem_cache_free(kmem_cache, s); goto out; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 87 | 46.03% | 7 | 41.18%
vladimir davydov | 58 | 30.69% | 5 | 29.41%
shuah khan | 34 | 17.99% | 1 | 5.88%
glauber costa | 4 | 2.12% | 1 | 5.88%
vaishali thakkar | 3 | 1.59% | 1 | 5.88%
dave jones | 2 | 1.06% | 1 | 5.88%
andrzej hajda | 1 | 0.53% | 1 | 5.88%
Total | 189 | 100.00% | 17 | 100.00%

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache * kmem_cache_create(const char *name, size_t size, size_t align, unsigned long flags, void (*ctor)(void *)) { struct kmem_cache *s = NULL; const char *cache_name; int err; get_online_cpus(); get_online_mems(); memcg_get_cache_ids(); mutex_lock(&slab_mutex); err = kmem_cache_sanity_check(name, size); if (err) { goto out_unlock; } /* * Some allocators will constraint the set of valid flags to a subset * of all flags. We expect them to define CACHE_CREATE_MASK in this * case, and we'll just provide them with a sanitized version of the * passed flags. */ flags &= CACHE_CREATE_MASK; s = __kmem_cache_alias(name, size, align, flags, ctor); if (s) goto out_unlock; cache_name = kstrdup_const(name, GFP_KERNEL); if (!cache_name) { err = -ENOMEM; goto out_unlock; } s = create_cache(cache_name, size, size, calculate_alignment(flags, align, size), flags, ctor, NULL, NULL); if (IS_ERR(s)) { err = PTR_ERR(s); kfree_const(cache_name); } out_unlock: mutex_unlock(&slab_mutex); memcg_put_cache_ids(); put_online_mems(); put_online_cpus(); if (err) { if (flags & SLAB_PANIC) panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", name, err); else { pr_warn("kmem_cache_create(%s) failed with error %d\n", name, err); dump_stack(); } return NULL; } return s; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 190 | 79.50% | 4 | 40.00%
glauber costa | 40 | 16.74% | 2 | 20.00%
andrzej hajda | 3 | 1.26% | 1 | 10.00%
joe perches | 2 | 0.84% | 1 | 10.00%
andrew morton | 2 | 0.84% | 1 | 10.00%
alexandru moise | 2 | 0.84% | 1 | 10.00%
Total | 239 | 100.00% | 10 | 100.00%

EXPORT_SYMBOL(kmem_cache_create);
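
A minimal, hypothetical use of this interface from a module: create a cache for a private structure, exercise it once, and destroy it on unload. struct my_record and the function names are illustrative only; error handling is kept to the essentials.

#include <linux/slab.h>

struct my_record {
	unsigned long id;
	char payload[48];
};

static struct kmem_cache *my_record_cache;

static int my_cache_init(void)
{
	struct my_record *r;

	my_record_cache = kmem_cache_create("my_record",
					    sizeof(struct my_record),
					    0,			/* default alignment */
					    SLAB_HWCACHE_ALIGN,
					    NULL);		/* no constructor */
	if (!my_record_cache)
		return -ENOMEM;

	r = kmem_cache_alloc(my_record_cache, GFP_KERNEL);
	if (r) {
		r->id = 1;
		kmem_cache_free(my_record_cache, r);
	}
	return 0;
}

static void my_cache_exit(void)
{
	/* All objects must have been freed before the cache is destroyed. */
	kmem_cache_destroy(my_record_cache);
}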
static int shutdown_cache(struct kmem_cache *s, struct list_head *release, bool *need_rcu_barrier) { if (__kmem_cache_shutdown(s) != 0) return -EBUSY; if (s->flags & SLAB_DESTROY_BY_RCU) *need_rcu_barrier = true; list_move(&s->list, release); return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 59 | 100.00% | 2 | 100.00%
Total | 59 | 100.00% | 2 | 100.00%


static void release_caches(struct list_head *release, bool need_rcu_barrier) { struct kmem_cache *s, *s2; if (need_rcu_barrier) rcu_barrier(); list_for_each_entry_safe(s, s2, release, list) { #ifdef SLAB_SUPPORTS_SYSFS sysfs_slab_remove(s); #else slab_kmem_cache_release(s); #endif } }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 58 | 100.00% | 2 | 100.00%
Total | 58 | 100.00% | 2 | 100.00%
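
The rcu_barrier() above exists for caches created with SLAB_DESTROY_BY_RCU: objects may be recycled immediately after kmem_cache_free(), but the backing pages are only returned to the page allocator after a grace period, so readers that dereference such objects under rcu_read_lock() must re-validate them. A hedged sketch of creating such a cache follows; the names are hypothetical.

#include <linux/init.h>
#include <linux/slab.h>

struct my_node {
	int key;
};

static struct kmem_cache *my_node_cache;

static int __init my_node_cache_init(void)
{
	/*
	 * SLAB_DESTROY_BY_RCU defers freeing of the slab pages, not of the
	 * individual objects; lookup code must still recheck ->key (or a
	 * reference count) after locating a node under rcu_read_lock().
	 */
	my_node_cache = kmem_cache_create("my_node", sizeof(struct my_node),
					  0, SLAB_DESTROY_BY_RCU, NULL);
	return my_node_cache ? 0 : -ENOMEM;
}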

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */ struct cgroup_subsys_state *css = &memcg->css; struct memcg_cache_array *arr; struct kmem_cache *s = NULL; char *cache_name; int idx; get_online_cpus(); get_online_mems(); mutex_lock(&slab_mutex); /* * The memory cgroup could have been offlined while the cache * creation work was pending. */ if (memcg->kmem_state != KMEM_ONLINE) goto out_unlock; idx = memcg_cache_id(memcg); arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches, lockdep_is_held(&slab_mutex)); /* * Since per-memcg caches are created asynchronously on first * allocation (see memcg_kmem_get_cache()), several threads can try to * create the same cache, but only one of them may succeed. */ if (arr->entries[idx]) goto out_unlock; cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf)); cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name, css->serial_nr, memcg_name_buf); if (!cache_name) goto out_unlock; s = create_cache(cache_name, root_cache->object_size, root_cache->size, root_cache->align, root_cache->flags, root_cache->ctor, memcg, root_cache); /* * If we could not create a memcg cache, do not complain, because * that's not critical at all as we can always proceed with the root * cache. */ if (IS_ERR(s)) { kfree(cache_name); goto out_unlock; } list_add(&s->memcg_params.list, &root_cache->memcg_params.list); /* * Since readers won't lock (see cache_from_memcg_idx()), we need a * barrier here to ensure nobody will see the kmem_cache partially * initialized. */ smp_wmb(); arr->entries[idx] = s; out_unlock: mutex_unlock(&slab_mutex); put_online_mems(); put_online_cpus(); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 243 | 97.59% | 13 | 81.25%
michal hocko | 3 | 1.20% | 1 | 6.25%
johannes weiner | 3 | 1.20% | 2 | 12.50%
Total | 249 | 100.00% | 16 | 100.00%


void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg) { int idx; struct memcg_cache_array *arr; struct kmem_cache *s, *c; idx = memcg_cache_id(memcg); get_online_cpus(); get_online_mems(); mutex_lock(&slab_mutex); list_for_each_entry(s, &slab_caches, list) { if (!is_root_cache(s)) continue; arr = rcu_dereference_protected(s->memcg_params.memcg_caches, lockdep_is_held(&slab_mutex)); c = arr->entries[idx]; if (!c) continue; __kmem_cache_shrink(c, true); arr->entries[idx] = NULL; } mutex_unlock(&slab_mutex); put_online_mems(); put_online_cpus(); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 124 | 100.00% | 2 | 100.00%
Total | 124 | 100.00% | 2 | 100.00%


static int __shutdown_memcg_cache(struct kmem_cache *s, struct list_head *release, bool *need_rcu_barrier) { BUG_ON(is_root_cache(s)); if (shutdown_cache(s, release, need_rcu_barrier)) return -EBUSY; list_del(&s->memcg_params.list); return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 56 | 100.00% | 1 | 100.00%
Total | 56 | 100.00% | 1 | 100.00%


void memcg_destroy_kmem_caches(struct mem_cgroup *memcg) { LIST_HEAD(release); bool need_rcu_barrier = false; struct kmem_cache *s, *s2; get_online_cpus(); get_online_mems(); mutex_lock(&slab_mutex); list_for_each_entry_safe(s, s2, &slab_caches, list) { if (is_root_cache(s) || s->memcg_params.memcg != memcg) continue; /* * The cgroup is about to be freed and therefore has no charges * left. Hence, all its caches must be empty by now. */ BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier)); } mutex_unlock(&slab_mutex); put_online_mems(); put_online_cpus(); release_caches(&release, need_rcu_barrier); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 103 | 100.00% | 5 | 100.00%
Total | 103 | 100.00% | 5 | 100.00%


static int shutdown_memcg_caches(struct kmem_cache *s, struct list_head *release, bool *need_rcu_barrier) { struct memcg_cache_array *arr; struct kmem_cache *c, *c2; LIST_HEAD(busy); int i; BUG_ON(!is_root_cache(s)); /* * First, shutdown active caches, i.e. caches that belong to online * memory cgroups. */ arr = rcu_dereference_protected(s->memcg_params.memcg_caches, lockdep_is_held(&slab_mutex)); for_each_memcg_cache_index(i) { c = arr->entries[i]; if (!c) continue; if (__shutdown_memcg_cache(c, release, need_rcu_barrier)) /* * The cache still has objects. Move it to a temporary * list so as not to try to destroy it for a second * time while iterating over inactive caches below. */ list_move(&c->memcg_params.list, &busy); else /* * The cache is empty and will be destroyed soon. Clear * the pointer to it in the memcg_caches array so that * it will never be accessed even if the root cache * stays alive. */ arr->entries[i] = NULL; } /* * Second, shutdown all caches left from memory cgroups that are now * offline. */ list_for_each_entry_safe(c, c2, &s->memcg_params.list, memcg_params.list) __shutdown_memcg_cache(c, release, need_rcu_barrier); list_splice(&busy, &s->memcg_params.list); /* * A cache being destroyed must be empty. In particular, this means * that all per memcg caches attached to it must be empty too. */ if (!list_empty(&s->memcg_params.list)) return -EBUSY; return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 179 | 100.00% | 1 | 100.00%
Total | 179 | 100.00% | 1 | 100.00%

#else
static inline int shutdown_memcg_caches(struct kmem_cache *s, struct list_head *release, bool *need_rcu_barrier) { return 0; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 24 | 100.00% | 1 | 100.00%
Total | 24 | 100.00% | 1 | 100.00%

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
void slab_kmem_cache_release(struct kmem_cache *s) { __kmem_cache_release(s); destroy_memcg_params(s); kfree_const(s->name); kmem_cache_free(kmem_cache, s); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 23 | 67.65% | 1 | 20.00%
vladimir davydov | 5 | 14.71% | 2 | 40.00%
dmitry safonov | 5 | 14.71% | 1 | 20.00%
andrzej hajda | 1 | 2.94% | 1 | 20.00%
Total | 34 | 100.00% | 5 | 100.00%


void kmem_cache_destroy(struct kmem_cache *s) { LIST_HEAD(release); bool need_rcu_barrier = false; int err; if (unlikely(!s)) return; get_online_cpus(); get_online_mems(); kasan_cache_destroy(s); mutex_lock(&slab_mutex); s->refcount--; if (s->refcount) goto out_unlock; err = shutdown_memcg_caches(s, &release, &need_rcu_barrier); if (!err) err = shutdown_cache(s, &release, &need_rcu_barrier); if (err) { pr_err("kmem_cache_destroy %s: Slab cache still has objects\n", s->name); dump_stack(); } out_unlock: mutex_unlock(&slab_mutex); put_online_mems(); put_online_cpus(); release_caches(&release, need_rcu_barrier); }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 75 | 55.97% | 7 | 63.64%
christoph lameter | 44 | 32.84% | 1 | 9.09%
sergey senozhatsky | 9 | 6.72% | 1 | 9.09%
alexander potapenko | 5 | 3.73% | 1 | 9.09%
joe perches | 1 | 0.75% | 1 | 9.09%
Total | 134 | 100.00% | 11 | 100.00%

EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep) { int ret; get_online_cpus(); get_online_mems(); kasan_cache_shrink(cachep); ret = __kmem_cache_shrink(cachep, false); put_online_mems(); put_online_cpus(); return ret; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
vladimir davydov | 37 | 88.10% | 2 | 66.67%
alexander potapenko | 5 | 11.90% | 1 | 33.33%
Total | 42 | 100.00% | 3 | 100.00%

EXPORT_SYMBOL(kmem_cache_shrink);
bool slab_is_available(void) { return slab_state >= UP; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
christoph lameter | 11 | 91.67% | 1 | 50.00%
denis kirjanov | 1 | 8.33% | 1 | 50.00%
Total | 12 | 100.00% | 2 | 100.00%
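
Early-boot code typically branches on slab_is_available() to decide between the slab and the boot-time allocator. A sketch under the assumption that memblock_virt_alloc() is the chosen fallback; the helper name is hypothetical.

#include <linux/bootmem.h>
#include <linux/slab.h>

/* Allocate zeroed memory from whichever allocator is usable at this point. */
static void * __init my_early_alloc(size_t size)
{
	if (slab_is_available())
		return kzalloc(size, GFP_KERNEL);
	return memblock_virt_alloc(size, 0);
}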

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size, unsigned long flags) { int err; s->name = name; s->size = s->object_size = size; s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size); slab_init_memcg_params(s); err = __kmem_cache_create(s, flags); if