cregit-Linux: how code gets into the kernel

Release 4.16: mm/swap_cgroup.c (directory: mm)

// SPDX-License-Identifier: GPL-2.0
#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#include <linux/swapops.h> /* depends on mm.h include */

static DEFINE_MUTEX(swap_cgroup_mutex);

struct swap_cgroup_ctrl {
	struct page **map;
	unsigned long length;
	spinlock_t	lock;
};


static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES];


struct swap_cgroup {
	unsigned short		id;
};

#define SC_PER_PAGE	(PAGE_SIZE/sizeof(struct swap_cgroup))

/*
 * SwapCgroup implements "lookup" and "exchange" operations.
 * In typical usage, swap_cgroup is accessed via memcg's charge/uncharge
 * against SwapCache. At swap_free(), it is accessed directly from swap.
 *
 * This means:
 *  - there is no race in "exchange" when we are accessed via SwapCache,
 *    because SwapCache (and its swp_entry) is under lock.
 *  - when called via swap_free(), there is no user of the entry and hence
 *    no race.
 * Therefore, no lock is needed around "exchange".
 *
 * TODO: these buffers could be pushed out to HIGHMEM.
 */
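
The "typical usage" mentioned in the comment above is the memcg swapout path. The following is a simplified caller-side sketch, modelled on the code in mm/memcontrol.c; the helper name is illustrative only and the real code (mem_cgroup_swapout()) differs in detail per kernel version and would live outside this file, with <linux/memcontrol.h> included.

/*
 * Sketch only: record the owning memcg for every swap slot covered by a
 * page that is being swapped out.
 */
static void memcg_record_swapout_sketch(struct page *page, swp_entry_t entry,
					unsigned int nr_entries)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned short oldid;

	/* Record the owner for all nr_entries contiguous swap slots. */
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_entries);
	/* A freshly allocated slot must not already have an owner. */
	VM_BUG_ON_PAGE(oldid, page);
}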

/*
 * allocate buffer for swap_cgroup.
 */

static int swap_cgroup_prepare(int type)
{
	struct page *page;
	struct swap_cgroup_ctrl *ctrl;
	unsigned long idx, max;

	ctrl = &swap_cgroup_ctrl[type];

	for (idx = 0; idx < ctrl->length; idx++) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			goto not_enough_page;
		ctrl->map[idx] = page;

		if (!(idx % SWAP_CLUSTER_MAX))
			cond_resched();
	}
	return 0;
not_enough_page:
	max = idx;
	for (idx = 0; idx < max; idx++)
		__free_page(ctrl->map[idx]);

	return -ENOMEM;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Kamezawa Hiroyuki   112     90.32%   1        50.00%
Yu Zhao             12      9.68%    1        50.00%
Total               124     100.00%  2        100.00%


static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl,
						pgoff_t offset)
{
	struct page *mappage;
	struct swap_cgroup *sc;

	mappage = ctrl->map[offset / SC_PER_PAGE];
	sc = page_address(mappage);
	return sc + offset % SC_PER_PAGE;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Huang Ying          51      100.00%  1        100.00%
Total               51      100.00%  1        100.00%


static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
					      struct swap_cgroup_ctrl **ctrlp)
{
	pgoff_t offset = swp_offset(ent);
	struct swap_cgroup_ctrl *ctrl;

	ctrl = &swap_cgroup_ctrl[swp_type(ent)];
	if (ctrlp)
		*ctrlp = ctrl;
	return __lookup_swap_cgroup(ctrl, offset);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Daisuke Nishimura   27      46.55%   1        33.33%
Bob Liu             27      46.55%   1        33.33%
Huang Ying          4       6.90%    1        33.33%
Total               58      100.00%  3        100.00%

/**
 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
 * @ent: swap entry to be cmpxchged
 * @old: old id
 * @new: new id
 *
 * Returns old id at success, 0 at failure.
 * (There is no mem_cgroup using 0 as its id)
 */
unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
				   unsigned short old, unsigned short new)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned long flags;
	unsigned short retval;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	retval = sc->id;
	if (retval == old)
		sc->id = new;
	else
		retval = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	return retval;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Bob Liu             42      46.15%   1        33.33%
Kamezawa Hiroyuki   35      38.46%   1        33.33%
Daisuke Nishimura   14      15.38%   1        33.33%
Total               91      100.00%  3        100.00%
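
As a usage illustration, the compare-and-exchange form is what a charge-moving path relies on: ownership of a swap entry moves only if it still belongs to the expected source group. Below is a hedged, abridged sketch modelled on the memcg move-charge code in mm/memcontrol.c; the function name and the accounting details are placeholders.

/* Sketch: move swap ownership from one memcg to another, if still owned by "from". */
static int move_swap_owner_sketch(swp_entry_t entry,
				  struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id = mem_cgroup_id(from);
	unsigned short new_id = mem_cgroup_id(to);

	/* Serialized by the per-ctrl spinlock: succeeds only if the id is still old_id. */
	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		/* ... transfer the swap charge accounting from "from" to "to" ... */
		return 0;
	}
	return -EINVAL;
}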

/**
 * swap_cgroup_record - record mem_cgroup for a set of swap entries
 * @ent: the first swap entry to be recorded into
 * @id: mem_cgroup to be recorded
 * @nr_ents: number of swap entries to be recorded
 *
 * Returns old value at success, 0 at failure.
 * (Of course, old value can be 0.)
 */
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id,
				  unsigned int nr_ents)
{
	struct swap_cgroup_ctrl *ctrl;
	struct swap_cgroup *sc;
	unsigned short old;
	unsigned long flags;
	pgoff_t offset = swp_offset(ent);
	pgoff_t end = offset + nr_ents;

	sc = lookup_swap_cgroup(ent, &ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	old = sc->id;
	for (;;) {
		VM_BUG_ON(sc->id != old);
		sc->id = id;
		offset++;
		if (offset == end)
			break;
		if (offset % SC_PER_PAGE)
			sc++;
		else
			sc = __lookup_swap_cgroup(ctrl, offset);
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return old;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Kamezawa Hiroyuki   68      48.57%   3        50.00%
Huang Ying          64      45.71%   1        16.67%
Bob Liu             5       3.57%    1        16.67%
Daisuke Nishimura   3       2.14%    1        16.67%
Total               140     100.00%  6        100.00%
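
The nr_ents parameter lets a single locked walk cover a contiguous range of entries, for example all slots backing a THP. A hedged sketch of the inverse operation, clearing ownership when the swap slots are freed, modelled on the memcg uncharge-swap path in mm/memcontrol.c (helper name illustrative):

/* Sketch: clear ownership of nr_pages contiguous entries and find the old owner. */
static void memcg_uncharge_swap_sketch(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	/* Writing id 0 releases the slots; the return value is the previous owner. */
	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		/* ... uncharge nr_pages of swap from "memcg" here ... */
	}
	rcu_read_unlock();
}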

/**
 * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry
 * @ent: swap entry to be looked up.
 *
 * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID)
 */
unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent, NULL)->id;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Kamezawa Hiroyuki   14      73.68%   2        66.67%
Bob Liu             5       26.32%   1        33.33%
Total               19      100.00%  3        100.00%
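
lookup_swap_cgroup_id() returns only the raw id; a caller that needs the mem_cgroup itself must translate and pin it under RCU, roughly as in the sketch below (assumes <linux/memcontrol.h>; the helper name is hypothetical and mirrors what the memcg charge path does for swapcache pages):

/* Sketch: resolve and pin the memcg recorded for a swap entry, or return NULL. */
static struct mem_cgroup *memcg_from_swap_entry_sketch(swp_entry_t ent)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	id = lookup_swap_cgroup_id(ent);	/* 0 means no owner recorded */
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;			/* owner is being torn down */
	rcu_read_unlock();
	return memcg;
}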


int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	void *array;
	unsigned long array_size;
	unsigned long length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return 0;

	length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
	array_size = length * sizeof(void *);

	array = vzalloc(array_size);
	if (!array)
		goto nomem;

	ctrl = &swap_cgroup_ctrl[type];
	mutex_lock(&swap_cgroup_mutex);
	ctrl->length = length;
	ctrl->map = array;
	spin_lock_init(&ctrl->lock);
	if (swap_cgroup_prepare(type)) {
		/* memory shortage */
		ctrl->map = NULL;
		ctrl->length = 0;
		mutex_unlock(&swap_cgroup_mutex);
		vfree(array);
		goto nomem;
	}
	mutex_unlock(&swap_cgroup_mutex);

	return 0;
nomem:
	pr_info("couldn't allocate enough memory for swap_cgroup\n");
	pr_info("swap_cgroup can be disabled by swapaccount=0 boot option\n");
	return -ENOMEM;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Kamezawa Hiroyuki   152     91.57%   2        28.57%
Namhyung Kim        9       5.42%    2        28.57%
Joe Perches         4       2.41%    2        28.57%
Américo Wang        1       0.60%    1        14.29%
Total               166     100.00%  7        100.00%
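
To put the allocation in perspective, a back-of-the-envelope sizing, assuming 4 KiB pages, the 2-byte struct swap_cgroup above, and a 64-bit kernel (so sizeof(void *) == 8):

/*
 * SC_PER_PAGE = PAGE_SIZE / sizeof(struct swap_cgroup) = 4096 / 2 = 2048
 *
 * For a 1 GiB swap device (262144 swappable pages):
 *   length     = DIV_ROUND_UP(262144, 2048) = 128 map pages  -> 512 KiB of id data
 *   array_size = 128 * sizeof(void *)       = 1 KiB for the vzalloc'd pointer array
 *
 * i.e. roughly 2 bytes of bookkeeping per page of swap space.
 */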


void swap_cgroup_swapoff(int type)
{
	struct page **map;
	unsigned long i, length;
	struct swap_cgroup_ctrl *ctrl;

	if (!do_swap_account)
		return;

	mutex_lock(&swap_cgroup_mutex);
	ctrl = &swap_cgroup_ctrl[type];
	map = ctrl->map;
	length = ctrl->length;
	ctrl->map = NULL;
	ctrl->length = 0;
	mutex_unlock(&swap_cgroup_mutex);

	if (map) {
		for (i = 0; i < length; i++) {
			struct page *page = map[i];
			if (page)
				__free_page(page);
			if (!(i % SWAP_CLUSTER_MAX))
				cond_resched();
		}
		vfree(map);
	}
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Kamezawa Hiroyuki   80      60.61%   1        33.33%
Namhyung Kim        40      30.30%   1        33.33%
David Rientjes      12      9.09%    1        33.33%
Total               132     100.00%  3        100.00%


Overall Contributors

Person              Tokens  Prop     Commits  CommitProp
Kamezawa Hiroyuki   516     61.07%   5        25.00%
Huang Ying          120     14.20%   1        5.00%
Bob Liu             79      9.35%    1        5.00%
Namhyung Kim        49      5.80%    2        10.00%
Daisuke Nishimura   44      5.21%    1        5.00%
David Rientjes      12      1.42%    1        5.00%
Yu Zhao             12      1.42%    1        5.00%
Joe Perches         4       0.47%    2        10.00%
Johannes Weiner     4       0.47%    1        5.00%
Wanpeng Li          1       0.12%    1        5.00%
Hugh Dickins        1       0.12%    1        5.00%
Greg Kroah-Hartman  1       0.12%    1        5.00%
H Hartley Sweeten   1       0.12%    1        5.00%
Américo Wang        1       0.12%    1        5.00%
Total               845     100.00%  20       100.00%
Created with cregit.