cregit-Linux: how code gets into the kernel

Release 4.11 drivers/hv/hv_balloon.c

Directory: drivers/hv
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */



/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */


#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
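
As an illustration of the encoding (a hypothetical self-check, not part of the driver): version 1.0 packs into the 32-bit value 0x00010000, and the accessor macros recover both fields.

static inline void dynmem_version_selfcheck(void)
{
	__u32 ver = DYNMEM_MAKE_VERSION(1, 0);	/* 0x00010000 */

	/* Compile-time check of the packing. */
	BUILD_BUG_ON(DYNMEM_MAKE_VERSION(1, 0) != 0x00010000);
	/* Run-time check of the accessors. */
	WARN_ON(DYNMEM_MAJOR_VERSION(ver) != 1);
	WARN_ON(DYNMEM_MINOR_VERSION(ver) != 0);
}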

enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
};



/*
 * Message Types
 */


enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR			= 0,
	DM_VERSION_REQUEST		= 1,
	DM_VERSION_RESPONSE		= 2,
	DM_CAPABILITIES_REPORT		= 3,
	DM_CAPABILITIES_RESPONSE	= 4,
	DM_STATUS_REPORT		= 5,
	DM_BALLOON_REQUEST		= 6,
	DM_BALLOON_RESPONSE		= 7,
	DM_UNBALLOON_REQUEST		= 8,
	DM_UNBALLOON_RESPONSE		= 9,
	DM_MEM_HOT_ADD_REQUEST		= 10,
	DM_MEM_HOT_ADD_RESPONSE		= 11,
	DM_VERSION_03_MAX		= 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE			= 12,
	DM_VERSION_1_MAX		= 12
};


/*
 * Structures defining the dynamic memory management
 * protocol.
 */


union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;



union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n in megabytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;
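
For example, a capability report might advertise ballooning and hot add with a 128 MiB hot-add alignment; since a value of n means 2^n MiB, that is hot_add_alignment = 7. A minimal sketch (illustrative values, not the driver's actual report):

static inline __u64 dm_caps_example(void)
{
	union dm_caps caps;

	caps.caps = 0;				/* clears reservedz too */
	caps.cap_bits.balloon = 1;
	caps.cap_bits.hot_add = 1;
	caps.cap_bits.hot_add_alignment = 7;	/* 2^7 MiB == 128 MiB */
	return caps.caps;
}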


union dm_mem_page_range {
	struct  {
		/*
		 * The PFN of the first page in the range.
		 * 40 bits is the architectural limit of a PFN
		 * for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64  page_range;
} __packed;
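
A sketch of how a range packs into the single 64-bit on-wire value (hypothetical helper; assumes the usual x86 little-endian, LSB-first bitfield layout):

static inline __u64 dm_page_range_example(void)
{
	union dm_mem_page_range r;

	r.finfo.start_page = 0x100000;	/* PFN of 4 GiB with 4 KiB pages; bits 0..39 */
	r.finfo.page_cnt = 512;		/* 2 MiB worth of pages; bits 40..63 */
	return r.page_range;		/* 0x0002000000100000 */
}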



/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */


struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;
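
A minimal sketch of building such a request (hypothetical helper; the driver's own negotiation path does the equivalent):

static void dm_build_version_request(struct dm_version_request *req,
				     __u32 trans)
{
	memset(req, 0, sizeof(*req));	/* zeroes reservedz */
	req->hdr.type = DM_VERSION_REQUEST;
	req->hdr.size = sizeof(struct dm_version_request);
	req->hdr.trans_id = trans;
	req->version.version = DYNMEM_PROTOCOL_VERSION_CURRENT;
	req->is_last_attempt = 0;	/* older versions remain to try */
}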

/*
 * Version response message; sent from the host to the guest,
 * indicating whether the host has accepted the version sent by
 * the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */


struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest, and notifies the guest whether the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.
 *          This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */


struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;
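
A minimal sketch of filling in a report (hypothetical helper, not the driver's post_status(); as noted above, only num_committed matters to the host):

static void dm_build_status_example(struct dm_status *status, __u32 trans,
				    unsigned long committed_pages)
{
	memset(status, 0, sizeof(*status));
	status->hdr.type = DM_STATUS_REPORT;
	status->hdr.size = sizeof(struct dm_status);
	status->hdr.trans_id = trans;
	status->num_committed = committed_pages;
}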


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */


struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */


struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
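
Because range_array[] is a flexible array member, the on-wire size of a response must be computed explicitly; a sketch (hypothetical helper):

static inline size_t dm_balloon_response_size(__u32 range_count)
{
	return sizeof(struct dm_balloon_response) +
	       range_count * sizeof(union dm_mem_page_range);
}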

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */


struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */


struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;


/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */


struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: Number of pages that were successfully hot added.
 *
 * result: Result of the operation; 1: success, 0: failure.
 *
 */


struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;

/*
 * Types of information sent from host to the guest.
 */


enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */


struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */


struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn:covered_end_pfn defines the pages that can
 * be brought online.
 */


struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
	/*
	 * A list of gaps.
	 */
	struct list_head gap_list;
};
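
As a reading aid, a pfn is backed exactly when it falls inside the covered range and outside every gap; a simplified check that ignores gap_list (hypothetical helper, not driver code):

static inline bool hv_pfn_is_covered(const struct hv_hotadd_state *has,
				     unsigned long pfn)
{
	return pfn >= has->covered_start_pfn && pfn < has->covered_end_pfn;
}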


struct hv_hotadd_gap {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};


struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};


struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};


static bool hot_add = true;

static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");

static atomic_t trans_id = ATOMIC_INIT(0);


static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */


enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};



static __u8 recv_buffer[PAGE_SIZE];

static __u8 *send_buffer;

#define PAGES_IN_2M	512

#define HA_CHUNK (32 * 1024)
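/* HA_CHUNK is a page count: 32768 x 4 KiB pages = 128 MiB per hot-add unit (assuming 4 KiB pages). */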


struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks whether the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion  ol_waitevent;
	bool ha_waiting;
	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host with regard to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	/*
	 * Protects ha_region_list, the num_pages_onlined counter, and
	 * individual regions from ha_region_list.
	 */
	spinlock_t ha_lock;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;

	/*
	 * The negotiated version agreed by the host.
	 */
	__u32 version;
};


static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;
	unsigned long flags;

	switch (val) {
	case MEM_ONLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined += mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		/* Fall through */
	case MEM_CANCEL_ONLINE:
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;

	case MEM_OFFLINE:
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined -= mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		break;
	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
Vitaly Kuznetsov        78   54.17%        5      83.33%
K. Y. Srinivasan        66   45.83%        1      16.67%
Total                  144  100.00%        6     100.00%

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};

/* Check if the particular page is backed and can be onlined and online it. */
static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
{
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	struct hv_hotadd_gap *gap;

	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

	/* The page is not backed. */
	if (((unsigned long)pg < cur_start_pgp) ||
	    ((unsigned long)pg >= cur_end_pgp))
		return;

	/* Check for gaps. */
	list_for_each_entry(gap, &has->gap_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(gap->start_pfn);
		cur_end_pgp = (unsigned long)
			pfn_to_page(gap->end_pfn);
		if (((unsigned long)pg >= cur_start_pgp) &&
		    ((unsigned long)pg < cur_end_pgp)) {
			return;
		}
	}

	/* This frame is currently backed; online the page. */
	__online_page_set_limits(pg);
	__online_page_increment_counters(pg);
	__online_page_free(pg);
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
Vitaly Kuznetsov       157  100.00%        1     100.00%
Total                  157  100.00%        1     100.00%


static void hv_bring_pgs_online(struct hv_hotadd_state *has,
				unsigned long start_pfn, unsigned long size)
{
	int i;

	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
	for (i = 0; i < size; i++)
		hv_page_online_one(has, pfn_to_page(start_pfn + i));
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
K. Y. Srinivasan        37   66.07%        1      25.00%
Alex Ng                  9   16.07%        1      25.00%
Vitaly Kuznetsov         9   16.07%        1      25.00%
Wei Yongjun              1    1.79%        1      25.00%
Total                   56  100.00%        4     100.00%


static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;
	unsigned long flags;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = !memhp_auto_online;

		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				(HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_warn("hot_add memory failed error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error indicates that the error
				 * is not a transient failure. This is the
				 * case where the guest's physical address map
				 * precludes hot adding memory. Stop all further
				 * memory hot-add.
				 */
				do_hot_add = false;
			}
			spin_lock_irqsave(&dm_device.ha_lock, flags);
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			break;
		}

		/*
		 * Wait for the memory block to be onlined when memory onlining
		 * is done outside of kernel (memhp_auto_online). Since the hot
		 * add has succeeded, it is ok to proceed even if the pages in
		 * the hot added region have not been "onlined" within the
		 * allowed time.
		 */
		if (dm_device.ha_waiting)
			wait_for_completion_timeout(&dm_device.ol_waitevent,
						    5*HZ);
		post_status(&dm_device);
	}

	return;
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
K. Y. Srinivasan       216   80.00%        4      50.00%
Vitaly Kuznetsov        53   19.63%        3      37.50%
Alex Ng                  1    0.37%        1      12.50%
Total                  270  100.00%        8     100.00%


static void hv_online_page(struct page *pg)
{
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		cur_start_pgp = (unsigned long)
			pfn_to_page(has->start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);

		/* The page belongs to a different HAS. */
		if (((unsigned long)pg < cur_start_pgp) ||
		    ((unsigned long)pg >= cur_end_pgp))
			continue;

		hv_page_online_one(has, pg);
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
K. Y. Srinivasan        79   68.10%        1      33.33%
Vitaly Kuznetsov        37   31.90%        2      66.67%
Total                  116  100.00%        3     100.00%


static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct hv_hotadd_state *has;
	struct hv_hotadd_gap *gap;
	unsigned long residual, new_inc;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		/*
		 * If the current start pfn is not where the covered_end
		 * is, create a gap and update covered_end_pfn.
		 */
		if (has->covered_end_pfn != start_pfn) {
			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
			if (!gap) {
				ret = -ENOMEM;
				break;
			}

			INIT_LIST_HEAD(&gap->list);
			gap->start_pfn = has->covered_end_pfn;
			gap->end_pfn = start_pfn;
			list_add_tail(&gap->list, &has->gap_list);

			has->covered_end_pfn = start_pfn;
		}

		/*
		 * If the current hot add-request extends beyond
		 * our current limit; extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}

		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return ret;
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
Vitaly Kuznetsov       124   55.11%        3      75.00%
K. Y. Srinivasan       101   44.89%        1      25.00%
Total                  225  100.00%        4     100.00%


static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;
	unsigned long res = 0, flags;

	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
		pg_start);

	spin_lock_irqsave(&dm_device.ha_lock, flags);
	list_for_each_entry(has, &dm_device.ha_region_list, list) {
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;

			has->covered_end_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
			/*
			 * Check if the corresponding memory block is already
			 * online by checking its last previously backed page.
			 * In case it is, we need to bring the rest (which was
			 * not backed previously) online too.
			 */
			if (start_pfn > has->start_pfn &&
			    !PageReserved(pfn_to_page(start_pfn - 1)))
				hv_bring_pgs_online(has, start_pfn, pgs_ol);
		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
			spin_lock_irqsave(&dm_device.ha_lock, flags);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		res = has->covered_end_pfn - old_covered_state;
		break;
	}
	spin_unlock_irqrestore(&dm_device.ha_lock, flags);

	return res;
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
K. Y. Srinivasan       200   66.89%        1      16.67%
Vitaly Kuznetsov        90   30.10%        4      66.67%
Alex Ng                  9    3.01%        1      16.67%
Total                  299  100.00%        6     100.00%


static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;
	int covered;
	unsigned long flags;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);
		if (covered < 0)
			return 0;

		if (covered)
			goto do_pg_range;
	}

	/*
	 * If the host has specified a hot-add range; deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);
		INIT_LIST_HEAD(&ha_region->gap_list);

		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;

		spin_lock_irqsave(&dm_device.ha_lock, flags);
		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
	}

do_pg_range:
	/*
	 * Process the page range specified; bringing them
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
K. Y. Srinivasan       131   64.85%        1      25.00%
Vitaly Kuznetsov        71   35.15%        3      75.00%
Total                  202  100.00%        4     100.00%

#endif
static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_start = pg_start;
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;

	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			sizeof(struct dm_hot_add_response),
			(unsigned long)NULL,
			VM_PKT_DATA_INBAND, 0);
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
K. Y. Srinivasan       312   97.50%        5      83.33%
Vitaly Kuznetsov         8    2.50%        1      16.67%
Total                  320  100.00%        6     100.00%


static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		if (info_hdr->data_size == sizeof(__u64)) {
			__u64 *max_page_count = (__u64 *)&info_hdr[1];

			pr_info("INFO_TYPE_MAX_PAGE_CNT = %llu\n",
				*max_page_count);
		}
		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}

Contributors

Person              Tokens     Prop  Commits  CommitProp
K. Y. Srinivasan        61   68.54%        2      66.67%
Alex Ng                 28   31.46%        1      33.33%
Total                   89  100.00%        3     100.00%


static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/* Simple continuous piecewise linear function:
	 *  max MiB -> min MiB  gradient
	 *       0         0
	 *      16        16
	 *      32        24
	 *     128        72    (1/2)
	 *     512       168    (1/4)
	 *    2048       360    (1/8)
	 *    8192       744    (1/16)
	 *   32768      1512    (1/32)
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else if (totalram_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
	else
		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
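
Worked example (illustrative): a guest with 1024 MiB of RAM falls in the 512-2048 MiB branch, so the floor is 104 MiB + 1024/8 MiB = 232 MiB; the branch endpoints agree with the table above (168 MiB at 512 MiB, 360 MiB at 2048 MiB).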