cregit-Linux how code gets into the kernel

Release 4.8 mm/zsmalloc.c

Directory: mm
/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
 * Copyright (C) 2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
 *      page->private: points to zspage
 *      page->freelist(index): links together all component pages of a zspage
 *              For the huge page, this is always 0, so we use this field
 *              to store handle.
 *      page->units: first object offset in a subpage of zspage
 *
 * Usage of struct page flags:
 *      PG_private: identifies the first component page
 *      PG_private2: identifies the last component page
 *      PG_owner_priv_1: identifies the huge component page
 *
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
#include <linux/mount.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>


#define ZSPAGE_MAGIC	0x58

/*
 * This must be power of 2 and greater than or equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */

#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */

#define ZS_MAX_ZSPAGE_ORDER 2

#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)


#define ZS_HANDLE_SIZE (sizeof(unsigned long))

/*
 * Object location (<PFN>, <obj_idx>) is encoded as
 * a single (unsigned long) handle value.
 *
 * Note that object index <obj_idx> starts from 0.
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G

#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */

#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif

#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)

/*
 * The memory allocated for a handle keeps the object position by
 * encoding <page, obj_idx>, and the encoded value has spare room
 * in its least significant bit (see obj_to_location()).
 * We use that bit to synchronize object access between the
 * user and migration.
 */

#define HANDLE_PIN_BIT	0
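
A minimal sketch (not code from this file) of how such a pin bit can be taken and released on the word a handle points to, using the kernel's generic bit spinlock helpers from <linux/bit_spinlock.h>; the helper names below are hypothetical.

static inline void example_pin_handle(unsigned long handle)
{
	/* spins until HANDLE_PIN_BIT of the handle word is acquired */
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}

static inline void example_unpin_handle(unsigned long handle)
{
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}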

/*
 * The head of an allocated object carries OBJ_ALLOCATED_TAG so we
 * can tell whether the object is allocated or not.
 * It is okay to keep this status bit in the least significant bit
 * because the header stores a handle, which is a 4-byte-aligned
 * address, so we have room for at least two bits.
 */

#define OBJ_ALLOCATED_TAG 1

#define OBJ_TAG_BITS 1

#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)

#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
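
As an illustration of the layout these macros describe (the file's own decode helper, obj_to_location(), appears later), a <PFN, obj_idx> pair packs into one unsigned long roughly as sketched below; example_encode_obj() is a hypothetical name, not code from this file.

static inline unsigned long example_encode_obj(unsigned long pfn,
					       unsigned int obj_idx)
{
	unsigned long obj;

	obj = pfn << OBJ_INDEX_BITS;		/* top _PFN_BITS hold the page frame number */
	obj |= obj_idx & OBJ_INDEX_MASK;	/* next OBJ_INDEX_BITS hold the object index */
	obj <<= OBJ_TAG_BITS;			/* lowest bit is left free for OBJ_ALLOCATED_TAG */

	return obj;
}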


#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */

#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
/* each chunk includes extra space to keep handle */

#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE

/*
 * On systems with 4K page size, this gives 255 size classes! There is a
 * trade-off here:
 *  - A large number of size classes is potentially wasteful as free pages are
 *    spread across these classes
 *  - A small number of size classes causes large internal fragmentation
 *  - It is probably better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as a multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 *  (reason above)
 */

#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
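
A worked example of the arithmetic above, assuming a 64-bit system with 4K pages and the CLASS_BITS value defined further down in this file:

/*
 * Illustrative numbers (BITS_PER_LONG = 64, PAGE_SHIFT = 12, CLASS_BITS = 8):
 *   _PFN_BITS           = 64 - 12                  = 52
 *   OBJ_INDEX_BITS      = 64 - 52 - 1              = 11
 *   ZS_MIN_ALLOC_SIZE   = MAX(32, (4 << 12) >> 11) = MAX(32, 8) = 32
 *   ZS_SIZE_CLASS_DELTA = 4096 >> 8                = 16
 *   number of classes   = (4096 - 32) / 16 + 1     = 255
 */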


enum fullness_group {
	ZS_EMPTY,
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL,
	NR_ZS_FULLNESS,
};


enum zs_stat_type {
	CLASS_EMPTY,
	CLASS_ALMOST_EMPTY,
	CLASS_ALMOST_FULL,
	CLASS_FULL,
	OBJ_ALLOCATED,
	OBJ_USED,
	NR_ZS_STAT_TYPE,
};


struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

#ifdef CONFIG_ZSMALLOC_STAT

static struct dentry *zs_stat_root;
#endif

#ifdef CONFIG_COMPACTION

static struct vfsmount *zsmalloc_mnt;
#endif

/*
 * number of size_classes
 */

static int zs_size_classes;

/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *      n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
 * f = fullness_threshold_frac
 *
 * Similarly, we assign zspage to:
 *      ZS_ALMOST_FULL  when n > N / f
 *      ZS_EMPTY        when n == 0
 *      ZS_FULL         when n == N
 *
 * (see: fix_fullness_group())
 */

static const int fullness_threshold_frac = 4;


struct size_class {
	spinlock_t lock;
	struct list_head fullness_list[NR_ZS_FULLNESS];
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
	int objs_per_zspage;
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;

	unsigned int index;
	struct zs_size_stat stats;
};

/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */

static void SetPageHugeObject(struct page *page) { SetPageOwnerPriv1(page); }

Contributors: minchan kim (16 tokens, 1 commit, 100.00%).


static void ClearPageHugeObject(struct page *page) { ClearPageOwnerPriv1(page); }

Contributors: minchan kim (16 tokens, 1 commit, 100.00%).


static int PageHugeObject(struct page *page) { return PageOwnerPriv1(page); }

Contributors: minchan kim (17 tokens, 1 commit, 100.00%).

/*
 * Placed within free objects to form a singly linked list.
 * For every zspage, zspage->freeobj gives head of this list.
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
	union {
		/*
		 * Free object index;
		 * It's valid for non-allocated object
		 */
		unsigned long next;
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
};

struct zs_pool {
	const char *name;

	struct size_class **size_class;
	struct kmem_cache *handle_cachep;
	struct kmem_cache *zspage_cachep;

	atomic_long_t pages_allocated;

	struct zs_pool_stats stats;

	/* Compact classes */
	struct shrinker shrinker;
	/*
	 * To signify that register_shrinker() was successful
	 * and unregister_shrinker() will not Oops.
	 */
	bool shrinker_enabled;
#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
#ifdef CONFIG_COMPACTION
	struct inode *inode;
	struct work_struct free_work;
#endif
};

/*
 * A zspage's class index and fullness group
 * are encoded in its (first)page->mapping
 */
#define FULLNESS_BITS	2
#define CLASS_BITS	8
#define ISOLATED_BITS	3
#define MAGIC_VAL_BITS	8

struct zspage {
	struct {
		unsigned int fullness:FULLNESS_BITS;
		unsigned int class:CLASS_BITS;
		unsigned int isolated:ISOLATED_BITS;
		unsigned int magic:MAGIC_VAL_BITS;
	};
	unsigned int inuse;
	unsigned int freeobj;
	struct page *first_page;
	struct list_head list; /* fullness list */
#ifdef CONFIG_COMPACTION
	rwlock_t lock;
#endif
};

struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

#ifdef CONFIG_COMPACTION
static int zs_register_migration(struct zs_pool *pool);
static void zs_unregister_migration(struct zs_pool *pool);
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static int zsmalloc_mount(void) { return 0; }

Contributors: minchan kim (11 tokens, 1 commit, 100.00%).


static void zsmalloc_unmount(void) {}

Contributors: minchan kim (7 tokens, 1 commit, 100.00%).


static int zs_register_migration(struct zs_pool *pool) { return 0; }

Contributors: minchan kim (14 tokens, 1 commit, 100.00%).


static void zs_unregister_migration(struct zs_pool *pool) {}

Contributors: minchan kim (10 tokens, 1 commit, 100.00%).


static void migrate_lock_init(struct zspage *zspage) {}

Contributors: minchan kim (10 tokens, 1 commit, 100.00%).


static void migrate_read_lock(struct zspage *zspage) {}

Contributors: minchan kim (10 tokens, 1 commit, 100.00%).


static void migrate_read_unlock(struct zspage *zspage) {}

Contributors: minchan kim (10 tokens, 1 commit, 100.00%).


static void kick_deferred_free(struct zs_pool *pool) {}

Contributors: minchan kim (10 tokens, 1 commit, 100.00%).


static void init_deferred_free(struct zs_pool *pool) {}

Contributors: minchan kim (10 tokens, 1 commit, 100.00%).


static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}

Contributors: minchan kim (15 tokens, 1 commit, 100.00%).

#endif
static int create_cache(struct zs_pool *pool) { pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, 0, 0, NULL); if (!pool->handle_cachep) return 1; pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage), 0, 0, NULL); if (!pool->zspage_cachep) { kmem_cache_destroy(pool->handle_cachep); pool->handle_cachep = NULL; return 1; } return 0; }

Contributors: minchan kim (87 tokens, 3 commits, 100.00%).


static void destroy_cache(struct zs_pool *pool) { kmem_cache_destroy(pool->handle_cachep); kmem_cache_destroy(pool->zspage_cachep); }

Contributors: minchan kim (25 tokens, 2 commits, 100.00%).


static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) { return (unsigned long)kmem_cache_alloc(pool->handle_cachep, gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); }

Contributors: minchan kim 32 tokens (88.89%), 3 commits (75.00%); sergey senozhatsky 4 tokens (11.11%), 1 commit (25.00%). Total: 36 tokens, 4 commits.


static void cache_free_handle(struct zs_pool *pool, unsigned long handle) { kmem_cache_free(pool->handle_cachep, (void *)handle); }

Contributors: minchan kim (28 tokens, 2 commits, 100.00%).


static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags) { return kmem_cache_alloc(pool->zspage_cachep, flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); }

Contributors: minchan kim (33 tokens, 2 commits, 100.00%).

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) { kmem_cache_free(pool->zspage_cachep, zspage); }

Contributors: minchan kim (25 tokens, 1 commit, 100.00%).


static void record_obj(unsigned long handle, unsigned long obj) { /* * lsb of @obj represents handle lock while other bits * represent object value the handle is pointing so * updating shouldn't do store tearing. */ WRITE_ONCE(*(unsigned long *)handle, obj); }

Contributors: minchan kim 23 tokens (82.14%), 1 commit (50.00%); junil lee 5 tokens (17.86%), 1 commit (50.00%). Total: 28 tokens, 2 commits.

/* zpool driver */

#ifdef CONFIG_ZPOOL
static void *zs_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool) { /* * Ignore global gfp flags: zs_malloc() may be invoked from * different contexts and its caller must provide a valid * gfp mask. */ return zs_create_pool(name); }

Contributors: dan streetman 25 tokens (75.76%), 2 commits (33.33%); ganesh mahendran 5 tokens (15.15%), 1 commit (16.67%); sergey senozhatsky 2 tokens (6.06%), 2 commits (33.33%); krzysztof kozlowski 1 token (3.03%), 1 commit (16.67%). Total: 33 tokens, 6 commits.


static void zs_zpool_destroy(void *pool) { zs_destroy_pool(pool); }

Contributors: dan streetman (15 tokens, 1 commit, 100.00%).


static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, unsigned long *handle) { *handle = zs_malloc(pool, size, gfp); return *handle ? 0 : -1; }

Contributors: dan streetman 40 tokens (95.24%), 1 commit (50.00%); sergey senozhatsky 2 tokens (4.76%), 1 commit (50.00%). Total: 42 tokens, 2 commits.


static void zs_zpool_free(void *pool, unsigned long handle) { zs_free(pool, handle); }

Contributors: dan streetman (21 tokens, 1 commit, 100.00%).


static int zs_zpool_shrink(void *pool, unsigned int pages, unsigned int *reclaimed) { return -EINVAL; }

Contributors: dan streetman (23 tokens, 1 commit, 100.00%).


static void *zs_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { enum zs_mapmode zs_mm; switch (mm) { case ZPOOL_MM_RO: zs_mm = ZS_MM_RO; break; case ZPOOL_MM_WO: zs_mm = ZS_MM_WO; break; case ZPOOL_MM_RW: /* fallthru */ default: zs_mm = ZS_MM_RW; break; } return zs_map_object(pool, handle, zs_mm); }

Contributors: dan streetman (65 tokens, 1 commit, 100.00%).


static void zs_zpool_unmap(void *pool, unsigned long handle) { zs_unmap_object(pool, handle); }

Contributors: dan streetman (21 tokens, 1 commit, 100.00%).


static u64 zs_zpool_total_size(void *pool) { return zs_get_total_pages(pool) << PAGE_SHIFT; }

Contributors: dan streetman 15 tokens (83.33%), 1 commit (50.00%); minchan kim 3 tokens (16.67%), 1 commit (50.00%). Total: 18 tokens, 2 commits.

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

MODULE_ALIAS("zpool-zsmalloc");
#endif /* CONFIG_ZPOOL */

/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
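
For context, a hedged sketch of how a client might drive the zsmalloc API that the zpool wrappers above call into; this is illustrative usage only (error handling trimmed), not code from this file.

static int example_zsmalloc_usage(void)
{
	struct zs_pool *pool;
	unsigned long handle;
	void *vaddr;

	pool = zs_create_pool("example");		/* named pool, as in zs_zpool_create() */
	if (!pool)
		return -ENOMEM;

	handle = zs_malloc(pool, 128, GFP_KERNEL);	/* gfp is supplied per allocation */
	if (!handle) {
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	vaddr = zs_map_object(pool, handle, ZS_MM_WO);	/* object may span two pages */
	memset(vaddr, 0, 128);
	zs_unmap_object(pool, handle);

	zs_free(pool, handle);
	zs_destroy_pool(pool);
	return 0;
}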
static bool is_zspage_isolated(struct zspage *zspage) { return zspage->isolated; }

Contributors: minchan kim 15 tokens (93.75%), 1 commit (50.00%); nitin gupta 1 token (6.25%), 1 commit (50.00%). Total: 16 tokens, 2 commits.


static int is_first_page(struct page *page) { return PagePrivate(page); }

Contributors: nitin gupta 13 tokens (76.47%), 1 commit (33.33%); minchan kim 4 tokens (23.53%), 2 commits (66.67%). Total: 17 tokens, 3 commits.

/* Protected by class->lock */
static inline int get_zspage_inuse(struct zspage *zspage) { return zspage->inuse; }

Contributors: minchan kim (17 tokens, 2 commits, 100.00%).


static inline void set_zspage_inuse(struct zspage *zspage, int val) { zspage->inuse = val; }

Contributors: minchan kim (21 tokens, 2 commits, 100.00%).


static inline void mod_zspage_inuse(struct zspage *zspage, int val) { zspage->inuse += val; }

Contributors: minchan kim (21 tokens, 2 commits, 100.00%).


static inline struct page *get_first_page(struct zspage *zspage) { struct page *first_page = zspage->first_page; VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); return first_page; }

Contributors: minchan kim (37 tokens, 3 commits, 100.00%).


static inline int get_first_obj_offset(struct page *page) { return page->units; }

Contributors: minchan kim (17 tokens, 2 commits, 100.00%).


static inline void set_first_obj_offset(struct page *page, int offset) { page->units = offset; }

Contributors: minchan kim (21 tokens, 2 commits, 100.00%).


static inline unsigned int get_freeobj(struct zspage *zspage) { return zspage->freeobj; }

Contributors: minchan kim (18 tokens, 3 commits, 100.00%).


static inline void set_freeobj(struct zspage *zspage, unsigned int obj) { zspage->freeobj = obj; }

Contributors: minchan kim (22 tokens, 3 commits, 100.00%).


static void get_zspage_mapping(struct zspage *zspage, unsigned int *class_idx, enum fullness_group *fullness) { BUG_ON(zspage->magic != ZSPAGE_MAGIC); *fullness = zspage->fullness; *class_idx = zspage->class; }

Contributors: nitin gupta 27 tokens (61.36%), 1 commit (33.33%); minchan kim 17 tokens (38.64%), 2 commits (66.67%). Total: 44 tokens, 3 commits.


static void set_zspage_mapping(struct zspage *zspage, unsigned int class_idx, enum fullness_group fullness) { zspage->class = class_idx; zspage->fullness = fullness; }

Contributors: nitin gupta 23 tokens (74.19%), 1 commit (50.00%); minchan kim 8 tokens (25.81%), 1 commit (50.00%). Total: 31 tokens, 2 commits.

/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the given size.
 */
static int get_size_class_index(int size) { int idx = 0; if (likely(size > ZS_MIN_ALLOC_SIZE)) idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE, ZS_SIZE_CLASS_DELTA); return min(zs_size_classes - 1, idx); }

Contributors: nitin gupta 37 tokens (84.09%), 1 commit (50.00%); minchan kim 7 tokens (15.91%), 1 commit (50.00%). Total: 44 tokens, 2 commits.
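
Worked example, reusing the illustrative 4K-page values from earlier (ZS_MIN_ALLOC_SIZE = 32, ZS_SIZE_CLASS_DELTA = 16):

/*
 * get_size_class_index(100):
 *   idx = DIV_ROUND_UP(100 - 32, 16) = 5
 * i.e. the index whose nominal chunk size, 32 + 5 * 16 = 112 bytes, is the
 * smallest one large enough to hold a 100-byte object.
 */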


static inline void zs_stat_inc(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { class->stats.objs[type] += cnt; }

Contributors: minchan kim 22 tokens (70.97%), 1 commit (50.00%); nitin gupta 9 tokens (29.03%), 1 commit (50.00%). Total: 31 tokens, 2 commits.


static inline void zs_stat_dec(struct size_class *class, enum zs_stat_type type, unsigned long cnt) { class->stats.objs[type] -= cnt; }

Contributors: nitin gupta 17 tokens (54.84%), 1 commit (50.00%); minchan kim 14 tokens (45.16%), 1 commit (50.00%). Total: 31 tokens, 2 commits.


static inline unsigned long zs_stat_get(struct size_class *class, enum zs_stat_type type) { return class->stats.objs[type]; }

Contributors: nitin gupta 14 tokens (51.85%), 1 commit (50.00%); minchan kim 13 tokens (48.15%), 1 commit (50.00%). Total: 27 tokens, 2 commits.

#ifdef CONFIG_ZSMALLOC_STAT
static void __init zs_stat_init(void) { if (!debugfs_initialized()) { pr_warn("debugfs not available, stat dir not created\n"); return; } zs_stat_root = debugfs_create_dir("zsmalloc", NULL); if (!zs_stat_root) pr_warn("debugfs 'zsmalloc' stat dir creation failed\n"); }

Contributors: minchan kim 19 tokens (45.24%), 1 commit (33.33%); dan streetman 14 tokens (33.33%), 1 commit (33.33%); nitin gupta 9 tokens (21.43%), 1 commit (33.33%). Total: 42 tokens, 3 commits.


static void __exit zs_stat_exit(void) { debugfs_remove_recursive(zs_stat_root); }

Contributors: minchan kim 10 tokens (71.43%), 1 commit (50.00%); nitin gupta 4 tokens (28.57%), 1 commit (50.00%). Total: 14 tokens, 2 commits.

static unsigned long zs_can_compact(struct size_class *class);
static int zs_stats_size_show(struct seq_file *s, void *v) { int i; struct zs_pool *pool = s->private; struct size_class *class; int objs_per_zspage; unsigned long class_almost_full, class_almost_empty; unsigned long obj_allocated, obj_used, pages_used, freeable; unsigned long total_class_almost_full = 0, total_class_almost_empty = 0; unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0; unsigned long total_freeable = 0; seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n", "class", "size", "almost_full", "almost_empty", "obj_allocated", "obj_used", "pages_used", "pages_per_zspage", "freeable"); for (i = 0; i < zs_size_classes; i++) { class = pool->size_class[i]; if (class->index != i) continue; spin_lock(&class->lock); class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); obj_used = zs_stat_get(class, OBJ_USED); freeable = zs_can_compact(class); spin_unlock(&class->lock); objs_per_zspage = class->objs_per_zspage; pages_used = obj_allocated / objs_per_zspage * class->pages_per_zspage; seq_printf(s, " %5u %5u %11lu %12lu %13lu" " %10lu %10lu %16d %8lu\n", i, class->size, class_almost_full, class_almost_empty, obj_allocated, obj_used, pages_used, class->pages_per_zspage, freeable); total_class_almost_full += class_almost_full; total_class_almost_empty += class_almost_empty; total_objs += obj_allocated; total_used_objs += obj_used; total_pages += pages_used; total_freeable += freeable; } seq_puts(s, "\n"); seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n", "Total", "", total_class_almost_full, total_class_almost_empty, total_objs, total_used_objs, total_pages, "", total_freeable); return 0; }

Contributors: minchan kim 221 tokens (72.94%), 1 commit (25.00%); nitin gupta 50 tokens (16.50%), 1 commit (25.00%); sergey senozhatsky 31 tokens (10.23%), 1 commit (25.00%); ganesh mahendran 1 token (0.33%), 1 commit (25.00%). Total: 303 tokens, 4 commits.


static int zs_stats_size_open(struct inode *inode, struct file *file) { return single_open(file, zs_stats_size_show, inode->i_private); }

Contributors: minchan kim 17 tokens (60.71%), 1 commit (50.00%); nitin gupta 11 tokens (39.29%), 1 commit (50.00%). Total: 28 tokens, 2 commits.

static const struct file_operations zs_stat_size_ops = { .open = zs_stats_size_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, };
static void zs_pool_stat_create(struct zs_pool *pool, const char *name) { struct dentry *entry; if (!zs_stat_root) { pr_warn("no root stat dir, not creating <%s> stat dir\n", name); return; } entry = debugfs_create_dir(name, zs_stat_root); if (!entry) { pr_warn("debugfs dir <%s> creation failed\n", name); return; } pool->stat_dentry = entry; entry = debugfs_create_file("classes", S_IFREG | S_IRUGO, pool->stat_dentry, pool, &zs_stat_size_ops); if (!entry) { pr_warn("%s: debugfs file entry <%s> creation failed\n", name, "classes"); debugfs_remove_recursive(pool->stat_dentry); pool->stat_dentry = NULL; } }

Contributors: minchan kim 54 tokens (46.96%), 2 commits (33.33%); nitin gupta 34 tokens (29.57%), 1 commit (16.67%); dan streetman 25 tokens (21.74%), 2 commits (33.33%); sunghan suh 2 tokens (1.74%), 1 commit (16.67%). Total: 115 tokens, 6 commits.


static void zs_pool_stat_destroy(struct zs_pool *pool) { debugfs_remove_recursive(pool->stat_dentry); }

Contributors: nitin gupta 11 tokens (61.11%), 1 commit (50.00%); minchan kim 7 tokens (38.89%), 1 commit (50.00%). Total: 18 tokens, 2 commits.

#else /* CONFIG_ZSMALLOC_STAT */
static void __init zs_stat_init(void) { }

Contributors: minchan kim 6 tokens (75.00%), 2 commits (66.67%); dan streetman 2 tokens (25.00%), 1 commit (33.33%). Total: 8 tokens, 3 commits.


static void __exit zs_stat_exit(void) { }

Contributors: minchan kim (8 tokens, 2 commits, 100.00%).


static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name) { }

Contributors: minchan kim 14 tokens (87.50%), 3 commits (75.00%); dan streetman 2 tokens (12.50%), 1 commit (25.00%). Total: 16 tokens, 4 commits.


static inline void zs_pool_stat_destroy(struct zs_pool *pool) { }

Contributors: minchan kim 9 tokens (81.82%), 1 commit (50.00%); nitin gupta 2 tokens (18.18%), 1 commit (50.00%). Total: 11 tokens, 2 commits.

#endif

/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
static enum fullness_group get_fullness_group(struct size_class *class, struct zspage *zspage) { int inuse, objs_per_zspage; enum fullness_group fg; inuse = get_zspage_inuse(zspage); objs_per_zspage = class->objs_per_zspage; if (inuse == 0) fg = ZS_EMPTY; else if (inuse == objs_per_zspage) fg = ZS_FULL; else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac) fg = ZS_ALMOST_EMPTY; else fg = ZS_ALMOST_FULL; return fg; }

Contributors: minchan kim 72 tokens (86.75%), 4 commits (80.00%); nitin gupta 11 tokens (13.25%), 1 commit (20.00%). Total: 83 tokens, 5 commits.
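
A worked pass through the thresholds in get_fullness_group() above, with illustrative numbers:

/*
 * objs_per_zspage = 8, fullness_threshold_frac = 4:
 *   inuse = 0 -> ZS_EMPTY
 *   inuse = 5 -> 5 <= 3 * 8 / 4 = 6 -> ZS_ALMOST_EMPTY
 *   inuse = 7 -> 7 >  6             -> ZS_ALMOST_FULL
 *   inuse = 8 -> inuse == objs_per_zspage -> ZS_FULL
 */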

/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This function inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
static void insert_zspage(struct size_class *class, struct zspage *zspage, enum fullness_group fullness) { struct zspage *head; zs_stat_inc(class, fullness, 1); head = list_first_entry_or_null(&class->fullness_list[fullness], struct zspage, list); /* * We want to see more ZS_FULL pages and less almost empty/full. * Put pages with higher ->inuse first. */ if (head) { if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) { list_add(&zspage->list, &head->list); return; } } list_add(&zspage->list, &class->fullness_list[fullness]); }

Contributors: minchan kim 84 tokens (81.55%), 4 commits (57.14%); nitin gupta 17 tokens (16.50%), 2 commits (28.57%); sergey senozhatsky 2 tokens (1.94%), 1 commit (14.29%). Total: 103 tokens, 7 commits.

/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
static void remove_zspage(struct size_class *class, struct zspage *zspage, enum fullness_group fullness) { VM_BUG_ON(list_empty(&class->fullness_list[fullness])); VM_BUG_ON(is_zspage_isolated(zspage)); list_del_init(&zspage->list); zs_stat_dec(class, fullness, 1); }

Contributors: minchan kim 40 tokens (67.80%), 4 commits (80.00%); nitin gupta 19 tokens (32.20%), 1 commit (20.00%). Total: 59 tokens, 5 commits.

/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
static enum fullness_group fix_fullness_group(struct size_class *class, struct zspage *zspage) { int class_idx; enum fullness_group currfg, newfg; get_zspage_mapping(zspage, &class_idx, &currfg); newfg = get_fullness_group(class, zspage); if (newfg == currfg) goto out; if (!is_zspage_isolated(zspage)) { remove_zspage(class, zspage, currfg); insert_zspage(class, zspage, newfg); } set_zspage_mapping(zspage, class_idx, newfg); out: return newfg; }

Contributors: minchan kim 65 tokens (67.01%), 5 commits (71.43%); nitin gupta 31 tokens (31.96%), 1 commit (14.29%); seth jennings 1 token (1.03%), 1 commit (14.29%). Total: 97 tokens, 7 commits.

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
 *     wastage = Zp % class_size
 *     usage = Zp - wastage
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
static int get_pages_per_zspage(int class_size) { int i, max_usedpc = 0; /* zspage order which gives maximum used size per KB */ int max_usedpc_order = 1; for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) { int zspage_size; int waste, usedpc; zspage_size = i * PAGE_SIZE; waste = zspage_size % class_size; usedpc = (zspage_size - waste) * 100 / zspage_size; if (usedpc > max_usedpc) { max_usedpc = usedpc; max_usedpc_order = i; } } return max_usedpc_order; }

Contributors: minchan kim 63 tokens (71.59%), 1 commit (50.00%); nitin gupta 25 tokens (28.41%), 1 commit (50.00%). Total: 88 tokens, 2 commits.
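
Plugging numbers into the 3/8 * PAGE_SIZE example from the comment above get_pages_per_zspage() (4K pages, so class_size = 1536) confirms the choice of three pages; this is an illustrative check only:

/*
 * class_size = 1536, PAGE_SIZE = 4096, ZS_MAX_PAGES_PER_ZSPAGE = 4:
 *   i = 1: waste = 4096 % 1536  = 1024 -> usedpc = 75
 *   i = 2: waste = 8192 % 1536  = 512  -> usedpc = 93
 *   i = 3: waste = 12288 % 1536 = 0    -> usedpc = 100 (exactly 8 objects)
 *   i = 4: waste = 16384 % 1536 = 1024 -> usedpc = 93
 * so get_pages_per_zspage(1536) returns 3.
 */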


static struct zspage *get_zspage(struct page *page) { struct zspage *zspage = (struct zspage *)page->private; BUG_ON(zspage->magic != ZSPAGE_MAGIC); return zspage; }

Contributors: minchan kim 24 tokens (61.54%), 3 commits (60.00%); nitin gupta 10 tokens (25.64%), 1 commit (20.00%); kirill a. shutemov 5 tokens (12.82%), 1 commit (20.00%). Total: 39 tokens, 5 commits.


static struct page *get_next_page(struct page *page) { if (unlikely(PageHugeObject(page)