Release 4.11: drivers/hv/hv.c
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
/* The one and only */
struct hv_context hv_context = {
        .synic_initialized = false,
};
#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1
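A note on the units: the synthetic timer counts in 100ns ticks, so HV_TIMER_FREQUENCY is 10^7 ticks per second and the 32-bit maximum delta corresponds to roughly 429.5 seconds. A minimal sketch of the conversion (the helper name below is illustrative, not part of this file):

/* Illustrative helper: nanoseconds to Hyper-V 100ns timer ticks. */
static inline u64 example_ns_to_hv_ticks(u64 ns)
{
        return ns / 100;        /* one tick == 100ns */
}
/* 0xffffffff ticks * 100ns per tick ~= 429.5 seconds of maximum delta. */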
/*
* hv_init - Main initialization routine.
*
* This routine must be called before any other routines in here are called
*/
int hv_init(void)
{
        if (!hv_is_hypercall_page_setup())
                return -ENOTSUPP;

        hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
        if (!hv_context.cpu_context)
                return -ENOMEM;

        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Stephen Hemminger | 19 | 46.34% | 1 | 12.50% |
Hank Janssen | 11 | 26.83% | 1 | 12.50% |
K. Y. Srinivasan | 9 | 21.95% | 4 | 50.00% |
Haiyang Zhang | 1 | 2.44% | 1 | 12.50% |
Greg Kroah-Hartman | 1 | 2.44% | 1 | 12.50% |
Total | 41 | 100.00% | 8 | 100.00% |
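A hedged sketch of the expected calling pattern, assuming the caller sits in vmbus_drv.c as in this release (the function name below is hypothetical):

/* Hypothetical caller: hv_init() gates every other routine in this file. */
static int example_hv_bring_up(void)
{
        int ret = hv_init();    /* -ENOTSUPP without a hypercall page,
                                 * -ENOMEM if the per-cpu alloc fails */
        if (ret)
                pr_err("hv_init() failed: %d\n", ret);
        return ret;
}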
/*
* hv_post_message - Post a message using the hypervisor message IPC.
*
* This involves a hypercall.
*/
int hv_post_message(union hv_connection_id connection_id,
                    enum hv_message_type message_type,
                    void *payload, size_t payload_size)
{
        struct hv_input_post_message *aligned_msg;
        struct hv_per_cpu_context *hv_cpu;
        u64 status;

        if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
                return -EMSGSIZE;

        hv_cpu = get_cpu_ptr(hv_context.cpu_context);
        aligned_msg = hv_cpu->post_msg_page;
        aligned_msg->connectionid = connection_id;
        aligned_msg->reserved = 0;
        aligned_msg->message_type = message_type;
        aligned_msg->payload_size = payload_size;
        memcpy((void *)aligned_msg->payload, payload, payload_size);

        status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

        /*
         * Preemption must stay disabled until the hypercall completes,
         * otherwise another thread could be scheduled onto this CPU and
         * corrupt the per-cpu post_msg_page while it is still in use.
         * (As listed, put_cpu_ptr() preceded the hypercall; upstream later
         * moved it after, as shown here.)
         */
        put_cpu_ptr(hv_cpu);

        return status & 0xFFFF;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Hank Janssen | 52 | 44.07% | 1 | 7.14% |
Haiyang Zhang | 20 | 16.95% | 3 | 21.43% |
Stephen Hemminger | 20 | 16.95% | 1 | 7.14% |
Greg Kroah-Hartman | 11 | 9.32% | 5 | 35.71% |
K. Y. Srinivasan | 10 | 8.47% | 2 | 14.29% |
Jake Oshins | 4 | 3.39% | 1 | 7.14% |
Dan Carpenter | 1 | 0.85% | 1 | 7.14% |
Total | 118 | 100.00% | 14 | 100.00% |
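Since the low 16 bits of the hypercall result are a Hyper-V status code rather than an errno, callers compare against HV_STATUS_* values and usually retry transient resource failures. A hedged sketch, loosely modeled on vmbus_post_msg() in connection.c (the retry count and delay here are illustrative):

/* Illustrative retry loop around hv_post_message(). */
static int example_post_with_retry(union hv_connection_id id,
                                   void *payload, size_t len)
{
        int ret, retries = 0;

        while (retries++ < 10) {
                ret = hv_post_message(id, 1, payload, len);     /* type 1, as vmbus_post_msg() passes */
                if (ret == HV_STATUS_SUCCESS)
                        return 0;
                if (ret != HV_STATUS_INSUFFICIENT_BUFFERS &&
                    ret != HV_STATUS_INSUFFICIENT_MEMORY)
                        break;          /* only resource exhaustion is transient */
                udelay(100);
        }
        return -EAGAIN;
}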
static int hv_ce_set_next_event(unsigned long delta,
                                struct clock_event_device *evt)
{
        u64 current_tick;

        WARN_ON(!clockevent_state_oneshot(evt));

        hv_get_current_tick(current_tick);
        current_tick += delta;
        hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
K. Y. Srinivasan | 41 | 89.13% | 2 | 50.00% |
Viresh Kumar | 4 | 8.70% | 1 | 25.00% |
Thomas Gleixner | 1 | 2.17% | 1 | 25.00% |
Total | 46 | 100.00% | 4 | 100.00% |
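The delta passed in is already in 100ns ticks, so arming the timer is just "current reference tick plus delta". For instance, a 1ms one-shot is 10,000 ticks (the wrapper name below is hypothetical):

/* Hypothetical: arm STIMER0 to fire one millisecond from now. */
static void example_arm_one_ms(struct clock_event_device *evt)
{
        hv_ce_set_next_event(1000000 / 100, evt);       /* 1ms / 100ns = 10000 ticks */
}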
static int hv_ce_shutdown(struct clock_event_device *evt)
{
        hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
        hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);

        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Viresh Kumar | 25 | 89.29% | 1 | 33.33% |
K. Y. Srinivasan | 3 | 10.71% | 2 | 66.67% |
Total | 28 | 100.00% | 3 | 100.00% |
static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
        union hv_timer_config timer_cfg;

        timer_cfg.as_uint64 = 0;        /* clear reserved bits; the union is
                                         * uninitialized stack memory */
        timer_cfg.enable = 1;
        timer_cfg.auto_enable = 1;
        timer_cfg.sintx = VMBUS_MESSAGE_SINT;
        hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);

        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
K. Y. Srinivasan | 40 | 88.89% | 2 | 66.67% |
Viresh Kumar | 5 | 11.11% | 1 | 33.33% |
Total | 45 | 100.00% | 3 | 100.00% |
static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
{
        dev->name = "Hyper-V clockevent";
        dev->features = CLOCK_EVT_FEAT_ONESHOT;
        dev->cpumask = cpumask_of(cpu);
        dev->rating = 1000;
        /*
         * Avoid setting dev->owner = THIS_MODULE deliberately, as doing so
         * will result in clockevents_config_and_register() taking additional
         * references to the hv_vmbus module, making it impossible to unload.
         */

        dev->set_state_shutdown = hv_ce_shutdown;
        dev->set_state_oneshot = hv_ce_set_oneshot;
        dev->set_next_event = hv_ce_set_next_event;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
K. Y. Srinivasan | 51 | 85.00% | 1 | 33.33% |
Viresh Kumar | 8 | 13.33% | 1 | 33.33% |
Vitaly Kuznetsov | 1 | 1.67% | 1 | 33.33% |
Total | 60 | 100.00% | 3 | 100.00% |
int hv_synic_alloc(void)
{
        int cpu;

        hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
                                         GFP_ATOMIC);
        if (hv_context.hv_numa_map == NULL) {
                pr_err("Unable to allocate NUMA map\n");
                goto err;
        }

        for_each_present_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                memset(hv_cpu, 0, sizeof(*hv_cpu));
                tasklet_init(&hv_cpu->msg_dpc,
                             vmbus_on_msg_dpc, (unsigned long) hv_cpu);

                hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
                                          GFP_KERNEL);
                if (hv_cpu->clk_evt == NULL) {
                        pr_err("Unable to allocate clock event device\n");
                        goto err;
                }
                hv_init_clockevent_device(hv_cpu->clk_evt, cpu);

                hv_cpu->synic_message_page =
                        (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->synic_message_page == NULL) {
                        pr_err("Unable to allocate SYNIC message page\n");
                        goto err;
                }

                hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->synic_event_page == NULL) {
                        pr_err("Unable to allocate SYNIC event page\n");
                        goto err;
                }

                hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
                if (hv_cpu->post_msg_page == NULL) {
                        pr_err("Unable to allocate post msg page\n");
                        goto err;
                }

                INIT_LIST_HEAD(&hv_cpu->chan_list);
        }

        return 0;
err:
        return -ENOMEM;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
K. Y. Srinivasan | 106 | 43.09% | 5 | 38.46% |
Stephen Hemminger | 54 | 21.95% | 1 | 7.69% |
Hank Janssen | 44 | 17.89% | 2 | 15.38% |
Jason (Hui) Wang | 17 | 6.91% | 1 | 7.69% |
Greg Kroah-Hartman | 14 | 5.69% | 1 | 7.69% |
Vitaly Kuznetsov | 6 | 2.44% | 2 | 15.38% |
Haiyang Zhang | 5 | 2.03% | 1 | 7.69% |
Total | 246 | 100.00% | 13 | 100.00% |
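Note that the error path does not unwind: a failed allocation returns -ENOMEM with any earlier per-cpu pages still in place. That is safe because hv_synic_free() below NULL-checks every pointer, so the caller is expected to pair the two unconditionally; a sketch (the caller name is hypothetical):

/* Hypothetical caller: alloc/free are paired even on failure. */
static int example_synic_setup(void)
{
        int ret = hv_synic_alloc();

        if (ret)
                hv_synic_free();        /* reclaims whatever was allocated */
        return ret;
}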
void hv_synic_free(void)
{
        int cpu;

        for_each_present_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                if (hv_cpu->synic_event_page)
                        free_page((unsigned long)hv_cpu->synic_event_page);
                if (hv_cpu->synic_message_page)
                        free_page((unsigned long)hv_cpu->synic_message_page);
                if (hv_cpu->post_msg_page)
                        free_page((unsigned long)hv_cpu->post_msg_page);
        }

        kfree(hv_context.hv_numa_map);
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Jason (Hui) Wang | 32 | 36.36% | 1 | 14.29% |
Stephen Hemminger | 28 | 31.82% | 1 | 14.29% |
K. Y. Srinivasan | 27 | 30.68% | 4 | 57.14% |
Felipe Pena | 1 | 1.14% | 1 | 14.29% |
Total | 88 | 100.00% | 7 | 100.00% |
/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim), we
 * need to retrieve the initialized message and event pages. Otherwise, we
 * create and initialize the message and event pages.
 */
int hv_synic_init(unsigned int cpu)
{
        struct hv_per_cpu_context *hv_cpu
                = per_cpu_ptr(hv_context.cpu_context, cpu);
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_sint shared_sint;
        union hv_synic_scontrol sctrl;
        u64 vp_index;

        /* Setup the Synic's message page */
        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 1;
        simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
                >> PAGE_SHIFT;

        hv_set_simp(simp.as_uint64);

        /* Setup the Synic's event page */
        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 1;
        siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
                >> PAGE_SHIFT;

        hv_set_siefp(siefp.as_uint64);

        /* Setup the shared SINT. */
        hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        shared_sint.as_uint64 = 0;
        shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
        shared_sint.masked = false;
        shared_sint.auto_eoi = true;

        hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        /* Enable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 1;

        hv_set_synic_state(sctrl.as_uint64);

        hv_context.synic_initialized = true;

        /*
         * Setup the mapping between Hyper-V's notion
         * of cpuid and Linux' notion of cpuid.
         * This array will be indexed using Linux cpuid.
         */
        hv_get_vp_index(vp_index);
        hv_context.vp_index[cpu] = (u32)vp_index;

        /*
         * Register the per-cpu clockevent source.
         */
        if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
                clockevents_config_and_register(hv_cpu->clk_evt,
                                                HV_TIMER_FREQUENCY,
                                                HV_MIN_DELTA_TICKS,
                                                HV_MAX_MAX_DELTA_TICKS);
        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Hank Janssen | 89 | 39.21% | 1 | 5.00% |
K. Y. Srinivasan | 48 | 21.15% | 9 | 45.00% |
Haiyang Zhang | 27 | 11.89% | 3 | 15.00% |
Jason (Hui) Wang | 23 | 10.13% | 1 | 5.00% |
Stephen Hemminger | 20 | 8.81% | 1 | 5.00% |
Greg Kroah-Hartman | 9 | 3.96% | 3 | 15.00% |
Vitaly Kuznetsov | 7 | 3.08% | 1 | 5.00% |
Bill Pemberton | 4 | 1.76% | 1 | 5.00% |
Total | 227 | 100.00% | 20 | 100.00% |
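In this release, hv_synic_init() and hv_synic_cleanup() are registered from vmbus_drv.c as CPU hotplug callbacks, so they run on each CPU as it comes online or is taken down. A hedged sketch of that registration (the state name string is from memory and should be verified against vmbus_drv.c):

/* Sketch: per-cpu SynIC init/cleanup wired up as hotplug callbacks. */
static int example_register_synic_hotplug(void)
{
        return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
                                 hv_synic_init, hv_synic_cleanup);
}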
/*
* hv_synic_clockevents_cleanup - Cleanup clockevent devices
*/
void hv_synic_clockevents_cleanup(void)
{
        int cpu;

        if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
                return;

        for_each_present_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                clockevents_unbind_device(hv_cpu->clk_evt, cpu);
        }
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Vitaly Kuznetsov | 34 | 66.67% | 2 | 66.67% |
Stephen Hemminger | 17 | 33.33% | 1 | 33.33% |
Total | 51 | 100.00% | 3 | 100.00% |
/*
* hv_synic_cleanup - Cleanup routine for hv_synic_init().
*/
int hv_synic_cleanup(unsigned int cpu)
{
        union hv_synic_sint shared_sint;
        union hv_synic_simp simp;
        union hv_synic_siefp siefp;
        union hv_synic_scontrol sctrl;
        struct vmbus_channel *channel, *sc;
        bool channel_found = false;
        unsigned long flags;

        if (!hv_context.synic_initialized)
                return -EFAULT;

        /*
         * Search for channels which are bound to the CPU we're about to
         * cleanup. In case we find one and vmbus is still connected, we
         * need to fail; this effectively prevents CPU offlining. There is
         * no way we can re-bind channels to different CPUs for now.
         */
        mutex_lock(&vmbus_connection.channel_mutex);
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (channel->target_cpu == cpu) {
                        channel_found = true;
                        break;
                }
                spin_lock_irqsave(&channel->lock, flags);
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        if (sc->target_cpu == cpu) {
                                channel_found = true;
                                break;
                        }
                }
                spin_unlock_irqrestore(&channel->lock, flags);
                if (channel_found)
                        break;
        }
        mutex_unlock(&vmbus_connection.channel_mutex);

        if (channel_found && vmbus_connection.conn_state == CONNECTED)
                return -EBUSY;

        /* Turn off clockevent device */
        if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
                struct hv_per_cpu_context *hv_cpu
                        = this_cpu_ptr(hv_context.cpu_context);

                /*
                 * this_cpu_ptr() takes no reference, so the stray
                 * put_cpu_ptr() in the original listing was unbalanced
                 * and has been dropped here.
                 */
                clockevents_unbind_device(hv_cpu->clk_evt, cpu);
                hv_ce_shutdown(hv_cpu->clk_evt);
        }

        hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        shared_sint.masked = 1;

        /* Need to correctly cleanup in the case of SMP!!! */
        /* Disable the interrupt */
        hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
                            shared_sint.as_uint64);

        hv_get_simp(simp.as_uint64);
        simp.simp_enabled = 0;
        simp.base_simp_gpa = 0;

        hv_set_simp(simp.as_uint64);

        hv_get_siefp(siefp.as_uint64);
        siefp.siefp_enabled = 0;
        siefp.base_siefp_gpa = 0;

        hv_set_siefp(siefp.as_uint64);

        /* Disable the global synic bit */
        hv_get_synic_state(sctrl.as_uint64);
        sctrl.enable = 0;
        hv_set_synic_state(sctrl.as_uint64);

        return 0;
}
Contributors
Person | Tokens | Token Share | Commits | Commit Share |
Vitaly Kuznetsov | 174 | 56.31% | 5 | 25.00% |
Hank Janssen | 76 | 24.60% | 1 | 5.00% |
Stephen Hemminger | 20 | 6.47% | 1 | 5.00% |
Haiyang Zhang | 18 | 5.83% | 4 | 20.00% |
Greg Kroah-Hartman | 11 | 3.56% | 3 | 15.00% |
K. Y. Srinivasan | 8 | 2.59% | 4 | 20.00% |
Bill Pemberton | 1 | 0.32% | 1 | 5.00% |
Viresh Kumar | 1 | 0.32% | 1 | 5.00% |
Total | 309 | 100.00% | 20 | 100.00% |
Overall Contributors
Person | Tokens | Token Share | Commits | Commit Share |
K. Y. Srinivasan | 371 | 27.89% | 20 | 34.48% |
Hank Janssen | 287 | 21.58% | 2 | 3.45% |
Vitaly Kuznetsov | 223 | 16.77% | 7 | 12.07% |
Stephen Hemminger | 178 | 13.38% | 1 | 1.72% |
Haiyang Zhang | 76 | 5.71% | 4 | 6.90% |
Jason (Hui) Wang | 73 | 5.49% | 1 | 1.72% |
Greg Kroah-Hartman | 61 | 4.59% | 15 | 25.86% |
Viresh Kumar | 43 | 3.23% | 1 | 1.72% |
Bill Pemberton | 8 | 0.60% | 2 | 3.45% |
Jake Oshins | 4 | 0.30% | 1 | 1.72% |
Tejun Heo | 3 | 0.23% | 1 | 1.72% |
Thomas Gleixner | 1 | 0.08% | 1 | 1.72% |
Felipe Pena | 1 | 0.08% | 1 | 1.72% |
Dan Carpenter | 1 | 0.08% | 1 | 1.72% |
Total | 1330 | 100.00% | 58 | 100.00% |