Linux release 4.11: drivers/xen/tmem.c
/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 */
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>
#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;
static int __init enable_tmem(char *s)
{
        tmem_enabled = true;
        return 1;
}
__setup("tmem", enable_tmem);
#endif
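/*
 * Usage note: when this driver is built in, booting with "tmem" on the
 * kernel command line runs enable_tmem() via the __setup hook above and
 * sets tmem_enabled; returning 1 tells early parameter parsing that the
 * option was consumed.
 */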
#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */
#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */
#ifdef CONFIG_XEN_SELFBALLOONING
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */
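/*
 * The knobs above are boolean module parameters registered with S_IRUGO,
 * so (assuming the usual "tmem" module name) they are readable at runtime
 * under /sys/module/tmem/parameters/ but can only be changed at module
 * load time, or on the kernel command line (e.g. "tmem.frontswap=0")
 * when built in.
 */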
#define TMEM_CONTROL 0
#define TMEM_NEW_POOL 1
#define TMEM_DESTROY_POOL 2
#define TMEM_NEW_PAGE 3
#define TMEM_PUT_PAGE 4
#define TMEM_GET_PAGE 5
#define TMEM_FLUSH_PAGE 6
#define TMEM_FLUSH_OBJECT 7
#define TMEM_READ 8
#define TMEM_WRITE 9
#define TMEM_XCHG 10
/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24
#define TMEM_SPEC_VERSION          1
struct tmem_pool_uuid {
        u64 uuid_lo;
        u64 uuid_hi;
};

struct tmem_oid {
        u64 oid[3];
};
#define TMEM_POOL_PRIVATE_UUID { 0, 0 }
/* xen tmem foundation ops/hypercalls */
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
                              u32 index, unsigned long gmfn, u32 tmem_offset,
                              u32 pfn_offset, u32 len)
{
        struct tmem_op op;
        int rc = 0;

        op.cmd = tmem_cmd;
        op.pool_id = tmem_pool;
        op.u.gen.oid[0] = oid.oid[0];
        op.u.gen.oid[1] = oid.oid[1];
        op.u.gen.oid[2] = oid.oid[2];
        op.u.gen.index = index;
        op.u.gen.tmem_offset = tmem_offset;
        op.u.gen.pfn_offset = pfn_offset;
        op.u.gen.len = len;
        /* the guest frame number travels in a guest handle, hence the cast */
        set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
                             u32 flags, unsigned long pagesize)
{
        struct tmem_op op;
        int rc = 0, pageshift;

        /* compute log2(pagesize); the ABI encodes it relative to 4K (2^12) */
        for (pageshift = 0; pagesize != 1; pageshift++)
                pagesize >>= 1;
        flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
        flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
        op.cmd = TMEM_NEW_POOL;
        op.u.new.uuid[0] = uuid.uuid_lo;
        op.u.new.uuid[1] = uuid.uuid_hi;
        op.u.new.flags = flags;
        rc = HYPERVISOR_tmem_op(&op);
        return rc;
}
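/*
 * Worked example (illustrative): for a 4 KiB pool the loop leaves
 * pageshift == 12, so the pagesize field is (12 - 12) << 4 == 0, and
 * with TMEM_SPEC_VERSION == 1 the version field contributes 1 << 24;
 * a plain private pool therefore passes flags == 0x01000000.
 */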
/* xen generic tmem ops */
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, struct page *page)
{
        return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
                           xen_page_to_gfn(page), 0, 0, 0);
}
static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
                             u32 index, struct page *page)
{
        return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
                           xen_page_to_gfn(page), 0, 0, 0);
}
static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
        return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
                           0, 0, 0, 0);
}
static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
        return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
        struct tmem_oid oid = { { 0 } };

        return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}
/* cleancache ops */
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
                                     pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        if (ind != index)
                return;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        (void)xen_tmem_put_page((u32)pool, oid, ind, page);
}
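/*
 * Note: the (ind != index) guard above rejects file offsets that do not
 * fit in the 32-bit index the tmem ABI carries; such pages are simply
 * not cached rather than being stored under a truncated key.
 */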
static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
                                    pgoff_t index, struct page *page)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
        int ret;

        /* translate return values to linux semantics */
        if (pool < 0)
                return -1;
        if (ind != index)
                return -1;
        ret = xen_tmem_get_page((u32)pool, oid, ind, page);
        if (ret == 1)
                return 0;
        else
                return -1;
}
static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
                                       pgoff_t index)
{
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        if (ind != index)
                return;
        (void)xen_tmem_flush_page((u32)pool, oid, ind);
}
static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
        struct tmem_oid oid = *(struct tmem_oid *)&key;

        if (pool < 0)
                return;
        (void)xen_tmem_flush_object((u32)pool, oid);
}
static void tmem_cleancache_flush_fs(int pool)
{
        if (pool < 0)
                return;
        (void)xen_tmem_destroy_pool((u32)pool);
}
static int tmem_cleancache_init_fs(size_t pagesize)
{
        struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

        return xen_tmem_new_pool(uuid_private, 0, pagesize);
}
static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
        struct tmem_pool_uuid shared_uuid;

        shared_uuid.uuid_lo = *(u64 *)uuid;
        shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
static const struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
        .invalidate_page = tmem_cleancache_flush_page,
        .invalidate_inode = tmem_cleancache_flush_inode,
        .invalidate_fs = tmem_cleancache_flush_fs,
        .init_shared_fs = tmem_cleancache_init_shared_fs,
        .init_fs = tmem_cleancache_init_fs
};
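/*
 * Naming note: cleancache later renamed its "flush" entry points to
 * "invalidate_*", while the local helpers kept the older flush names;
 * hence the .invalidate_* members point at tmem_cleancache_flush_*.
 */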
#endif
#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */
/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;
/*
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
*/
#define SWIZ_BITS 4
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind) (_ind >> SWIZ_BITS)
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
        struct tmem_oid oid = { .oid = { 0 } };

        oid.oid[0] = _oswiz(type, ind);
        return oid;
}
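/*
 * Worked example (illustrative), with SWIZ_BITS == 4: for swap type 2
 * and page offset 0x37, _oswiz(2, 0x37) == (2 << 4) | 0x7 == 0x27 and
 * iswiz(0x37) == 3, so that page lives at index 3 of object 0x27.
 * Each swap type thus fans out across sixteen tmem objects, which is
 * what buys the extra concurrency the comment above describes.
 */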
/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
                                struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
        ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}
/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                               struct page *page)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;
        int ret;

        if (pool < 0)
                return -1;
        if (ind64 != ind)
                return -1;
        ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
        else
                return -1;
}
/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
        int pool = tmem_frontswap_poolid;

        if (pool < 0)
                return;
        if (ind64 != ind)
                return;
        (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}
/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
        int pool = tmem_frontswap_poolid;
        int ind;

        if (pool < 0)
                return;
        for (ind = SWIZ_MASK; ind >= 0; ind--)
                (void)xen_tmem_flush_object(pool, oswiz(type, ind));
}
static void tmem_frontswap_init(unsigned ignored)
{
        struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

        /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
        if (tmem_frontswap_poolid < 0)
                tmem_frontswap_poolid =
                        xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
static struct frontswap_ops tmem_frontswap_ops = {
        .store = tmem_frontswap_store,
        .load = tmem_frontswap_load,
        .invalidate_page = tmem_frontswap_flush_page,
        .invalidate_area = tmem_frontswap_flush_area,
        .init = tmem_frontswap_init
};
#endif
static int __init xen_tmem_init(void)
{
        if (!xen_domain())
                return 0;
#ifdef CONFIG_FRONTSWAP
        if (tmem_enabled && frontswap) {
                char *s = "";

                tmem_frontswap_poolid = -1;
                frontswap_register_ops(&tmem_frontswap_ops);
                pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
                        s);
        }
#endif
#ifdef CONFIG_CLEANCACHE
        BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
        if (tmem_enabled && cleancache) {
                int err;

                err = cleancache_register_ops(&tmem_cleancache_ops);
                if (err)
                        pr_warn("xen-tmem: failed to enable cleancache: %d\n",
                                err);
                else
                        pr_info("cleancache enabled, RAM provided by Xen Transcendent Memory\n");
        }
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
        /*
         * There is no point in driving pages to the swap system if they
         * aren't going anywhere in the tmem universe.
         */
        if (!frontswap) {
                selfshrinking = false;
                selfballooning = false;
        }
        xen_selfballoon_init(selfballooning, selfshrinking);
#endif
        return 0;
}
module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");
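/*
 * Usage sketch, assuming CONFIG_XEN_TMEM_MODULE builds this file as
 * tmem.ko: "modprobe tmem cleancache=1 frontswap=1" loads the shim with
 * both frontends requested; actual registration still requires
 * tmem_enabled to be set and the kernel to be running as a Xen guest
 * (see the xen_domain() check in xen_tmem_init()).
 */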