Release 4.11 drivers/xen/balloon.c
/******************************************************************************
* Xen balloon driver - enables returning/claiming memory to/from Xen.
*
* Copyright (c) 2003, B Dragovic
* Copyright (c) 2003-2004, M Williamson, K Fraser
* Copyright (c) 2005 Dan M. Smith, IBM Corporation
* Copyright (c) 2010 Daniel Kiper
*
* Memory hotplug support was written by Daniel Kiper. Work on
* it was sponsored by Google under Google Summer of Code 2010
* program. Jeremy Fitzhardinge from Citrix was the mentor for
* this project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
static int xen_hotplug_unpopulated;
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int zero;
static int one = 1;
static struct ctl_table balloon_table[] = {
{
.procname = "hotplug_unpopulated",
.data = &xen_hotplug_unpopulated,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one,
},
{ }
};
static struct ctl_table balloon_root[] = {
{
.procname = "balloon",
.mode = 0555,
.child = balloon_table,
},
{ }
};
static struct ctl_table xen_root[] = {
{
.procname = "xen",
.mode = 0555,
.child = balloon_root,
},
{ }
};
#endif
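The tables above (built only with CONFIG_XEN_BALLOON_MEMORY_HOTPLUG) register the knob /proc/sys/xen/balloon/hotplug_unpopulated, an integer clamped to the range 0..1 by proc_dointvec_minmax. Illustrative only, not part of the driver: a minimal userspace sketch that enables the knob could look like this.
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/xen/balloon/hotplug_unpopulated", "w");

	if (!f) {
		perror("hotplug_unpopulated");
		return 1;
	}
	/* Only 0 or 1 are accepted; other values are rejected by the handler. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}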
/*
* Use one extent per PAGE_SIZE to avoid breaking the page down into
* multiple frames.
*/
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
/*
* balloon_process() state:
*
* BP_DONE: done or nothing to do,
* BP_WAIT: wait to be rescheduled,
* BP_EAGAIN: error, go to sleep,
* BP_ECANCELED: error, balloon operation canceled.
*/
enum bp_state {
BP_DONE,
BP_WAIT,
BP_EAGAIN,
BP_ECANCELED
};
static DEFINE_MUTEX(balloon_mutex);
struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];
/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
/* Main work function, always executed in process context. */
static void balloon_process(struct work_struct *work);
static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
/*
 * When ballooning out (allocating memory to return to Xen) we don't really
 * want the kernel to try too hard since that can trigger the OOM killer.
 */
#define GFP_BALLOON \
(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
static void scrub_page(struct page *page)
{
#ifdef CONFIG_XEN_SCRUB_PAGES
clear_highpage(page);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 21 | 100.00% | 2 | 100.00% |
Total | 21 | 100.00% | 2 | 100.00% |
/* balloon_append: add the given page to the balloon. */
static void __balloon_append(struct page *page)
{
/* Lowmem is re-populated first, so highmem pages go at list tail. */
if (PageHighMem(page)) {
list_add_tail(&page->lru, &ballooned_pages);
balloon_stats.balloon_high++;
} else {
list_add(&page->lru, &ballooned_pages);
balloon_stats.balloon_low++;
}
wake_up(&balloon_wq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 56 | 90.32% | 2 | 66.67% |
David Vrabel | 6 | 9.68% | 1 | 33.33% |
Total | 62 | 100.00% | 3 | 100.00% |
static void balloon_append(struct page *page)
{
__balloon_append(page);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 16 | 100.00% | 2 | 100.00% |
Total | 16 | 100.00% | 2 | 100.00% |
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
struct page *page;
if (list_empty(&ballooned_pages))
return NULL;
page = list_entry(ballooned_pages.next, struct page, lru);
if (require_lowmem && PageHighMem(page))
return NULL;
list_del(&page->lru);
if (PageHighMem(page))
balloon_stats.balloon_high--;
else
balloon_stats.balloon_low--;
return page;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 57 | 69.51% | 1 | 33.33% |
Konrad Rzeszutek Wilk | 14 | 17.07% | 1 | 33.33% |
David Vrabel | 11 | 13.41% | 1 | 33.33% |
Total | 82 | 100.00% | 3 | 100.00% |
static struct page *balloon_next_page(struct page *page)
{
struct list_head *next = page->lru.next;
if (next == &ballooned_pages)
return NULL;
return list_entry(next, struct page, lru);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 45 | 100.00% | 1 | 100.00% |
Total | 45 | 100.00% | 1 | 100.00% |
static enum bp_state update_schedule(enum bp_state state)
{
if (state == BP_WAIT)
return BP_WAIT;
if (state == BP_ECANCELED)
return BP_ECANCELED;
if (state == BP_DONE) {
balloon_stats.schedule_delay = 1;
balloon_stats.retry_count = 1;
return BP_DONE;
}
++balloon_stats.retry_count;
if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
balloon_stats.retry_count > balloon_stats.max_retry_count) {
balloon_stats.schedule_delay = 1;
balloon_stats.retry_count = 1;
return BP_ECANCELED;
}
balloon_stats.schedule_delay <<= 1;
if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;
return BP_EAGAIN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Kiper | 93 | 79.49% | 1 | 25.00% |
Boris Ostrovsky | 9 | 7.69% | 1 | 25.00% |
David Vrabel | 9 | 7.69% | 1 | 25.00% |
Jeremy Fitzhardinge | 6 | 5.13% | 1 | 25.00% |
Total | 117 | 100.00% | 4 | 100.00% |
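For intuition on the backoff implemented by update_schedule(): on BP_EAGAIN the delay doubles on each failed pass and is capped at max_schedule_delay, and the operation is cancelled once retry_count exceeds max_retry_count (unless that is RETRY_UNLIMITED). A standalone sketch of the same arithmetic, using the defaults installed by balloon_init() below (schedule_delay = 1, max_schedule_delay = 32):
#include <stdio.h>

int main(void)
{
	unsigned long schedule_delay = 1;	/* default from balloon_init() */
	unsigned long max_schedule_delay = 32;	/* default from balloon_init() */
	int retry;

	for (retry = 1; retry <= 8; retry++) {
		schedule_delay <<= 1;
		if (schedule_delay > max_schedule_delay)
			schedule_delay = max_schedule_delay;
		/* balloon_process() sleeps schedule_delay * HZ after each failed pass */
		printf("retry %d: sleep %lus\n", retry, schedule_delay);
	}
	return 0;	/* prints 2, 4, 8, 16, 32, 32, 32, 32 seconds */
}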
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
if (!resource)
return;
/*
* No need to reset region to identity mapped since we now
* know that no I/O can be in this region
*/
release_resource(resource);
kfree(resource);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ross Lagerwall | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
static struct resource *additional_memory_resource(phys_addr_t size)
{
struct resource *res;
int ret;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
return NULL;
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
ret = allocate_resource(&iomem_resource, res,
size, 0, -1,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new System RAM resource\n");
kfree(res);
return NULL;
}
#ifdef CONFIG_SPARSEMEM
{
unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
unsigned long pfn = res->start >> PAGE_SHIFT;
if (pfn > limit) {
pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
pfn, limit);
release_memory_resource(res);
return NULL;
}
}
#endif
return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Vrabel | 102 | 64.97% | 1 | 33.33% |
Ross Lagerwall | 54 | 34.39% | 1 | 33.33% |
Toshi Kani | 1 | 0.64% | 1 | 33.33% |
Total | 157 | 100.00% | 3 | 100.00% |
static enum bp_state reserve_additional_memory(void)
{
long credit;
struct resource *resource;
int nid, rc;
unsigned long balloon_hotplug;
credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
- balloon_stats.total_pages;
/*
* Already hotplugged enough pages? Wait for them to be
* onlined.
*/
if (credit <= 0)
return BP_WAIT;
balloon_hotplug = round_up(credit, PAGES_PER_SECTION);
resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
if (!resource)
goto err;
nid = memory_add_physaddr_to_nid(resource->start);
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* We don't support PV MMU when Linux and Xen are using
* different page granularities.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
/*
* add_memory() will build page tables for the new memory so
* the p2m must contain invalid entries so the correct
* non-present PTEs will be written.
*
* If a failure occurs, the original (identity) p2m entries
* are not restored since this region is now known not to
* conflict with any devices.
*/
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long pfn, i;
pfn = PFN_DOWN(resource->start);
for (i = 0; i < balloon_hotplug; i++) {
if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
pr_warn("set_phys_to_machine() failed, no memory added\n");
goto err;
}
}
}
#endif
/*
* add_memory_resource() will call online_pages() which in its turn
* will call xen_online_page() callback causing deadlock if we don't
* release balloon_mutex here. Unlocking here is safe because the
* callers drop the mutex before trying again.
*/
mutex_unlock(&balloon_mutex);
rc = add_memory_resource(nid, resource, memhp_auto_online);
mutex_lock(&balloon_mutex);
if (rc) {
pr_warn("Cannot add additional memory (%i)\n", rc);
goto err;
}
balloon_stats.total_pages += balloon_hotplug;
return BP_WAIT;
err:
release_memory_resource(resource);
return BP_ECANCELED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Vrabel | 78 | 35.45% | 5 | 50.00% |
Juergen Gross | 62 | 28.18% | 1 | 10.00% |
Daniel Kiper | 57 | 25.91% | 1 | 10.00% |
Vitaly Kuznetsov | 15 | 6.82% | 2 | 20.00% |
Julien Grall | 8 | 3.64% | 1 | 10.00% |
Total | 220 | 100.00% | 10 | 100.00% |
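reserve_additional_memory() always hotplugs whole memory sections: the outstanding credit is rounded up with round_up(credit, PAGES_PER_SECTION) before a matching System RAM resource is allocated and handed to add_memory_resource(). A minimal arithmetic sketch of that rounding, assuming PAGES_PER_SECTION is 32768 (128 MiB sections with 4 KiB pages, typical for x86-64):
#include <stdio.h>

#define PAGES_PER_SECTION	32768UL		/* assumption: x86-64, 4 KiB pages */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* y must be a power of two */

int main(void)
{
	unsigned long credit = 1000;	/* pages still owed to the guest */
	unsigned long hotplug = round_up(credit, PAGES_PER_SECTION);

	printf("credit %lu pages -> hotplug %lu pages (%lu MiB)\n",
	       credit, hotplug, hotplug * 4096UL >> 20);
	return 0;	/* credit 1000 pages -> hotplug 32768 pages (128 MiB) */
}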
static void xen_online_page(struct page *page)
{
__online_page_set_limits(page);
mutex_lock(&balloon_mutex);
__balloon_append(page);
mutex_unlock(&balloon_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Kiper | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
if (val == MEM_ONLINE)
schedule_delayed_work(&balloon_worker, 0);
return NOTIFY_OK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Kiper | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
static struct notifier_block xen_memory_nb = {
.notifier_call = xen_memory_notifier,
.priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
balloon_stats.target_pages = balloon_stats.current_pages;
return BP_ECANCELED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 15 | 75.00% | 1 | 25.00% |
David Vrabel | 5 | 25.00% | 3 | 75.00% |
Total | 20 | 100.00% | 4 | 100.00% |
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
static long current_credit(void)
{
return balloon_stats.target_pages - balloon_stats.current_pages;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Kiper | 11 | 64.71% | 1 | 50.00% |
David Vrabel | 6 | 35.29% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
static bool balloon_is_inflated(void)
{
return balloon_stats.balloon_low || balloon_stats.balloon_high;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Kiper | 10 | 58.82% | 1 | 50.00% |
David Vrabel | 7 | 41.18% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
static enum bp_state increase_reservation(unsigned long nr_pages)
{
int rc;
unsigned long i;
struct page *page;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
for (i = 0; i < nr_pages; i++) {
if (!page) {
nr_pages = i;
break;
}
/* XENMEM_populate_physmap requires a PFN based on Xen
* granularity.
*/
frame_list[i] = page_to_xen_pfn(page);
page = balloon_next_page(page);
}
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
if (rc <= 0)
return BP_EAGAIN;
for (i = 0; i < rc; i++) {
page = balloon_retrieve(false);
BUG_ON(page == NULL);
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* We don't support PV MMU when Linux and Xen are using
* different page granularities.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long pfn = page_to_pfn(page);
set_phys_to_machine(pfn, frame_list[i]);
/* Link back into the page tables if not highmem. */
if (!PageHighMem(page)) {
int ret;
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
mfn_pte(frame_list[i], PAGE_KERNEL),
0);
BUG_ON(ret);
}
}
#endif
/* Relinquish the page back to the allocator. */
free_reserved_page(page);
}
balloon_stats.current_pages += rc;
return BP_DONE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 205 | 71.93% | 1 | 9.09% |
Daniel Kiper | 25 | 8.77% | 2 | 18.18% |
Julien Grall | 20 | 7.02% | 1 | 9.09% |
Stefano Stabellini | 13 | 4.56% | 1 | 9.09% |
Jie Liu | 10 | 3.51% | 1 | 9.09% |
Isaku Yamahata | 4 | 1.40% | 1 | 9.09% |
Ian Campbell | 4 | 1.40% | 2 | 18.18% |
Konrad Rzeszutek Wilk | 3 | 1.05% | 1 | 9.09% |
Ross Lagerwall | 1 | 0.35% | 1 | 9.09% |
Total | 285 | 100.00% | 11 | 100.00% |
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
enum bp_state state = BP_DONE;
unsigned long i;
struct page *page, *tmp;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
.extent_order = EXTENT_ORDER,
.domid = DOMID_SELF
};
LIST_HEAD(pages);
if (nr_pages > ARRAY_SIZE(frame_list))
nr_pages = ARRAY_SIZE(frame_list);
for (i = 0; i < nr_pages; i++) {
page = alloc_page(gfp);
if (page == NULL) {
nr_pages = i;
state = BP_EAGAIN;
break;
}
adjust_managed_page_count(page, -1);
scrub_page(page);
list_add(&page->lru, &pages);
}
/*
* Ensure that ballooned highmem pages don't have kmaps.
*
* Do this before changing the p2m as kmap_flush_unused()
* reads PTEs to obtain pages (and hence needs the original
* p2m entry).
*/
kmap_flush_unused();
/*
* Setup the frame, update direct mapping, invalidate P2M,
* and add to balloon.
*/
i = 0;
list_for_each_entry_safe(page, tmp, &pages, lru) {
/* XENMEM_decrease_reservation requires a GFN */
frame_list[i++] = xen_page_to_gfn(page);
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* We don't support PV MMU when Linux and Xen are using
* different page granularities.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
__pte_ma(0), 0);
BUG_ON(ret);
}
__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}
#endif
list_del(&page->lru);
balloon_append(page);
}
flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);
reservation.nr_extents = nr_pages;
ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
BUG_ON(ret != nr_pages);
balloon_stats.current_pages -= nr_pages;
return state;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 142 | 46.25% | 1 | 5.26% |
Julien Grall | 56 | 18.24% | 1 | 5.26% |
Dan Magenheimer | 25 | 8.14% | 1 | 5.26% |
Wei Liu | 19 | 6.19% | 2 | 10.53% |
Stefano Stabellini | 15 | 4.89% | 3 | 15.79% |
Daniel Kiper | 11 | 3.58% | 1 | 5.26% |
Ian Campbell | 11 | 3.58% | 2 | 10.53% |
Ross Lagerwall | 8 | 2.61% | 1 | 5.26% |
David Vrabel | 8 | 2.61% | 4 | 21.05% |
Lisa Nguyen | 4 | 1.30% | 1 | 5.26% |
Isaku Yamahata | 4 | 1.30% | 1 | 5.26% |
Konrad Rzeszutek Wilk | 4 | 1.30% | 1 | 5.26% |
Total | 307 | 100.00% | 19 | 100.00% |
/*
* As this is a work item it is guaranteed to run as a single instance only.
* We may of course race updates of the target counts (which are protected
* by the balloon lock), or with changes to the Xen hard limit, but we will
* recover from these in time.
*/
static void balloon_process(struct work_struct *work)
{
enum bp_state state = BP_DONE;
long credit;
do {
mutex_lock(&balloon_mutex);
credit = current_credit();
if (credit > 0) {
if (balloon_is_inflated())
state = increase_reservation(credit);
else
state = reserve_additional_memory();
}
if (credit < 0)
state = decrease_reservation(-credit, GFP_BALLOON);
state = update_schedule(state);
mutex_unlock(&balloon_mutex);
cond_resched();
} while (credit && state == BP_DONE);
/* Schedule more work if there is some still to be done. */
if (state == BP_EAGAIN)
schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 71 | 59.17% | 1 | 12.50% |
Daniel Kiper | 37 | 30.83% | 4 | 50.00% |
Juergen Gross | 9 | 7.50% | 1 | 12.50% |
Konrad Rzeszutek Wilk | 2 | 1.67% | 1 | 12.50% |
David Vrabel | 1 | 0.83% | 1 | 12.50% |
Total | 120 | 100.00% | 8 | 100.00% |
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
balloon_stats.target_pages = target;
schedule_delayed_work(&balloon_worker, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 21 | 87.50% | 1 | 50.00% |
Daniel Kiper | 3 | 12.50% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(balloon_set_new_target);
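balloon_set_new_target() is the hook used by the toolstack-facing code in drivers/xen/xen-balloon.c, which watches the xenstore "memory/target" node; that target is expressed in KiB and has to be converted to pages first. A hedged sketch of such a caller (the function name here is invented for illustration):
/* Illustrative only: convert a KiB target into pages and kick the worker. */
#include <linux/mm.h>
#include <xen/balloon.h>

static void example_set_target_kib(unsigned long long target_kib)
{
	/* PAGE_SHIFT - 10 converts KiB to pages (with 4 KiB pages: >> 2) */
	balloon_set_new_target(target_kib >> (PAGE_SHIFT - 10));
}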
static int add_ballooned_pages(int nr_pages)
{
enum bp_state st;
if (xen_hotplug_unpopulated) {
st = reserve_additional_memory();
if (st != BP_ECANCELED) {
mutex_unlock(&balloon_mutex);
wait_event(balloon_wq,
!list_empty(&ballooned_pages));
mutex_lock(&balloon_mutex);
return 0;
}
}
st = decrease_reservation(nr_pages, GFP_USER);
if (st != BP_DONE)
return -ENOMEM;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Vrabel | 81 | 100.00% | 1 | 100.00% |
Total | 81 | 100.00% | 1 | 100.00% |
/**
* alloc_xenballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
* @return 0 on success, error otherwise
*/
int alloc_xenballooned_pages(int nr_pages, struct page **pages)
{
int pgno = 0;
struct page *page;
int ret;
mutex_lock(&balloon_mutex);
balloon_stats.target_unpopulated += nr_pages;
while (pgno < nr_pages) {
page = balloon_retrieve(true);
if (page) {
pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* We don't support PV MMU when Linux and Xen are using
* different page granularities.
*/
BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
ret = xen_alloc_p2m_entry(page_to_pfn(page));
if (ret < 0)
goto out_undo;
#endif
} else {
ret = add_ballooned_pages(nr_pages - pgno);
if (ret < 0)
goto out_undo;
}
}
mutex_unlock(&balloon_mutex);
return 0;
out_undo:
mutex_unlock(&balloon_mutex);
free_xenballooned_pages(pgno, pages);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 72 | 48.65% | 1 | 16.67% |
David Vrabel | 46 | 31.08% | 3 | 50.00% |
Jeremy Fitzhardinge | 22 | 14.86% | 1 | 16.67% |
Julien Grall | 8 | 5.41% | 1 | 16.67% |
Total | 148 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(alloc_xenballooned_pages);
/**
* free_xenballooned_pages - return pages retrieved with alloc_xenballooned_pages
* @nr_pages: Number of pages
* @pages: pages to return
*/
void free_xenballooned_pages(int nr_pages, struct page **pages)
{
int i;
mutex_lock(&balloon_mutex);
for (i = 0; i < nr_pages; i++) {
if (pages[i])
balloon_append(pages[i]);
}
balloon_stats.target_unpopulated -= nr_pages;
/* The balloon may be too large now. Shrink it if needed. */
if (current_credit())
schedule_delayed_work(&balloon_worker, 0);
mutex_unlock(&balloon_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Konrad Rzeszutek Wilk | 52 | 65.82% | 1 | 25.00% |
Jeremy Fitzhardinge | 20 | 25.32% | 1 | 25.00% |
David Vrabel | 6 | 7.59% | 1 | 25.00% |
Daniel Kiper | 1 | 1.27% | 1 | 25.00% |
Total | 79 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(free_xenballooned_pages);
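Typical users of this pair are grant-mapping drivers (e.g. gntdev and the xenbus client code) that need guest-physical "holes" into which foreign frames can be mapped. A minimal usage sketch from a hypothetical module (the function and array names are invented for illustration):
#include <linux/mm.h>
#include <xen/balloon.h>

#define EXAMPLE_NR_PAGES 4

static struct page *example_pages[EXAMPLE_NR_PAGES];

static int example_use_ballooned_pages(void)
{
	int ret;

	/* The returned pages are ballooned out: no backing frames yet. */
	ret = alloc_xenballooned_pages(EXAMPLE_NR_PAGES, example_pages);
	if (ret < 0)
		return ret;

	/* ... map grant references into these pages, use them, unmap ... */

	/* Give the pages back to the balloon when done. */
	free_xenballooned_pages(EXAMPLE_NR_PAGES, example_pages);
	return 0;
}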
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
unsigned long pfn, extra_pfn_end;
struct page *page;
/*
* If the amount of usable memory has been limited (e.g., with
* the 'mem' command line parameter), don't add pages beyond
* this limit.
*/
extra_pfn_end = min(max_pfn, start_pfn + pages);
for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
page = pfn_to_page(pfn);
/*
 * totalram_pages and totalhigh_pages do not include the boot-time
 * balloon extension, so don't subtract from it.
 */
__balloon_append(page);
}
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Vrabel | 58 | 78.38% | 2 | 50.00% |
Jeremy Fitzhardinge | 16 | 21.62% | 2 | 50.00% |
Total | 74 | 100.00% | 4 | 100.00% |
static int __init balloon_init(void)
{
int i;
if (!xen_domain())
return -ENODEV;
pr_info("Initialising balloon driver\n");
balloon_stats.current_pages = xen_pv_domain()
? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
: get_num_physpages();
balloon_stats.target_pages = balloon_stats.current_pages;
balloon_stats.balloon_low = 0;
balloon_stats.balloon_high = 0;
balloon_stats.total_pages = balloon_stats.current_pages;
balloon_stats.schedule_delay = 1;
balloon_stats.max_schedule_delay = 32;
balloon_stats.retry_count = 1;
balloon_stats.max_retry_count = RETRY_UNLIMITED;
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
set_online_page_callback(&xen_online_page);
register_memory_notifier(&xen_memory_nb);
register_sysctl_table(xen_root);
#endif
/*
* Initialize the balloon with pages from the extra memory
* regions (see arch/x86/xen/setup.c).
*/
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
if (xen_extra_mem[i].n_pfns)
balloon_add_region(xen_extra_mem[i].start_pfn,
xen_extra_mem[i].n_pfns);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 70 | 42.42% | 1 | 7.69% |
David Vrabel | 52 | 31.52% | 4 | 30.77% |
Daniel Kiper | 35 | 21.21% | 3 | 23.08% |
Juergen Gross | 3 | 1.82% | 1 | 7.69% |
Boris Ostrovsky | 2 | 1.21% | 1 | 7.69% |
Stefano Stabellini | 1 | 0.61% | 1 | 7.69% |
Joe Perches | 1 | 0.61% | 1 | 7.69% |
Konrad Rzeszutek Wilk | 1 | 0.61% | 1 | 7.69% |
Total | 165 | 100.00% | 13 | 100.00% |
subsys_initcall(balloon_init);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeremy Fitzhardinge | 906 | 36.05% | 6 | 10.53% |
David Vrabel | 621 | 24.71% | 13 | 22.81% |
Daniel Kiper | 393 | 15.64% | 6 | 10.53% |
Konrad Rzeszutek Wilk | 159 | 6.33% | 2 | 3.51% |
Julien Grall | 103 | 4.10% | 2 | 3.51% |
Ross Lagerwall | 91 | 3.62% | 3 | 5.26% |
Juergen Gross | 75 | 2.98% | 3 | 5.26% |
Stefano Stabellini | 35 | 1.39% | 3 | 5.26% |
Dan Magenheimer | 25 | 0.99% | 1 | 1.75% |
Wei Liu | 19 | 0.76% | 2 | 3.51% |
Ian Campbell | 16 | 0.64% | 4 | 7.02% |
Vitaly Kuznetsov | 15 | 0.60% | 2 | 3.51% |
Boris Ostrovsky | 11 | 0.44% | 2 | 3.51% |
Jie Liu | 10 | 0.40% | 1 | 1.75% |
Isaku Yamahata | 8 | 0.32% | 1 | 1.75% |
Joe Perches | 8 | 0.32% | 1 | 1.75% |
Daniel De Graaf | 7 | 0.28% | 1 | 1.75% |
Lisa Nguyen | 4 | 0.16% | 1 | 1.75% |
Tejun Heo | 3 | 0.12% | 1 | 1.75% |
Ingo Molnar | 3 | 0.12% | 1 | 1.75% |
Toshi Kani | 1 | 0.04% | 1 | 1.75% |
Total | 2513 | 100.00% | 57 | 100.00% |