/* Release 4.16 drivers/visorbus/visorchipset.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010 - 2015 UNISYS CORPORATION
* All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/visorbus.h>
#include "visorbus_private.h"
/* {72120008-4AAB-11DC-8530-444553544200} */
#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
0x44, 0x45, 0x53, 0x54, 0x42, 0x00)
static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;
#define POLLJIFFIES_CONTROLVM_FAST 1
#define POLLJIFFIES_CONTROLVM_SLOW 100
#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)
#define UNISYS_VISOR_LEAF_ID 0x40000000
/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_VISOR_ID_EBX 0x73696e55
#define UNISYS_VISOR_ID_ECX 0x70537379
#define UNISYS_VISOR_ID_EDX 0x34367261
/*
* When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
* to slow polling mode. As soon as we get a controlvm message, we switch back
* to fast polling mode.
*/
#define MIN_IDLE_SECONDS 10
struct parser_context {
unsigned long allocbytes;
unsigned long param_bytes;
u8 *curr;
unsigned long bytes_remaining;
bool byte_stream;
struct visor_controlvm_parameters_header data;
};
/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
#define VMCALL_CONTROLVM_ADDR 0x0501
enum vmcall_result {
VMCALL_RESULT_SUCCESS = 0,
VMCALL_RESULT_INVALID_PARAM = 1,
VMCALL_RESULT_DATA_UNAVAILABLE = 2,
VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
VMCALL_RESULT_DEVICE_ERROR = 4,
VMCALL_RESULT_DEVICE_NOT_READY = 5
};
/*
* struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
* parameters to VMCALL_CONTROLVM_ADDR
* interface.
* @address: The Guest-relative physical address of the ControlVm channel.
* This VMCall fills this in with the appropriate address.
* Contents provided by this VMCALL (OUT).
* @channel_bytes: The size of the ControlVm channel in bytes This VMCall fills
* this in with the appropriate address. Contents provided by
* this VMCALL (OUT).
* @unused: Unused Bytes in the 64-Bit Aligned Struct.
*/
struct vmcall_io_controlvm_addr_params {
u64 address;
u32 channel_bytes;
u8 unused[4];
} __packed;
struct visorchipset_device {
struct acpi_device *acpi_device;
unsigned long poll_jiffies;
/* when we got our last controlvm message */
unsigned long most_recent_message_jiffies;
struct delayed_work periodic_controlvm_work;
struct visorchannel *controlvm_channel;
unsigned long controlvm_payload_bytes_buffered;
/*
* The following variables are used to handle the scenario where we are
* unable to offload the payload from a controlvm message due to memory
* requirements. In this scenario, we simply stash the controlvm
* message, then attempt to process it again the next time
* controlvm_periodic_work() runs.
*/
struct controlvm_message controlvm_pending_msg;
bool controlvm_pending_msg_valid;
struct vmcall_io_controlvm_addr_params controlvm_params;
};
static struct visorchipset_device *chipset_dev;
struct parahotplug_request {
struct list_head list;
int id;
unsigned long expiration;
struct controlvm_message msg;
};
/* prototypes for attributes */
/* Report the tool action currently recorded in the controlvm channel. */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	int res;
	u8 action = 0;

	res = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 tool_action),
				&action, sizeof(action));
	if (res)
		return res;
	return sprintf(buf, "%u\n", action);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 48 | 68.57% | 2 | 33.33% |
Benjamin Romer | 18 | 25.71% | 1 | 16.67% |
Sameer Wadgaonkar | 3 | 4.29% | 2 | 33.33% |
David Binder | 1 | 1.43% | 1 | 16.67% |
Total | 70 | 100.00% | 6 | 100.00% |
/* Record a new tool action by writing it into the controlvm channel. */
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int res;
	u8 action;

	if (kstrtou8(buf, 10, &action))
		return -EINVAL;
	res = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  tool_action),
				 &action, sizeof(action));
	return res ? res : count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 56 | 69.14% | 2 | 40.00% |
Benjamin Romer | 22 | 27.16% | 1 | 20.00% |
Sameer Wadgaonkar | 3 | 3.70% | 2 | 40.00% |
Total | 81 | 100.00% | 5 | 100.00% |
static DEVICE_ATTR_RW(toolaction);
static ssize_t boottotool_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct efi_visor_indication efi_visor_indication;
int err;
err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
efi_visor_ind),
&efi_visor_indication,
sizeof(struct efi_visor_indication));
if (err)
return err;
return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 44 | 61.11% | 2 | 33.33% |
Benjamin Romer | 18 | 25.00% | 1 | 16.67% |
Sameer Wadgaonkar | 9 | 12.50% | 2 | 33.33% |
David Binder | 1 | 1.39% | 1 | 16.67% |
Total | 72 | 100.00% | 6 | 100.00% |
/*
 * boottotool_store() - set the boot-to-tool bit of the EFI visor indication
 * @dev:   sysfs interface variable, unused here
 * @attr:  sysfs interface variable, unused here
 * @buf:   buffer containing the new integer value
 * @count: size of @buf
 *
 * The whole struct efi_visor_indication is written back to the channel, so
 * it must be zero-initialized; the original left the other bits as
 * uninitialized stack data, leaking garbage into the channel.
 *
 * Return: @count on success, negative errno on failure.
 */
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, err;
	/* Zero-init: every field except boot_to_tool must be written as 0. */
	struct efi_visor_indication efi_visor_indication = {};

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;
	efi_visor_indication.boot_to_tool = val;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  efi_visor_ind),
				 &(efi_visor_indication),
				 sizeof(struct efi_visor_indication));
	if (err)
		return err;
	return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 62 | 66.67% | 2 | 40.00% |
Benjamin Romer | 22 | 23.66% | 1 | 20.00% |
Sameer Wadgaonkar | 9 | 9.68% | 2 | 40.00% |
Total | 93 | 100.00% | 5 | 100.00% |
static DEVICE_ATTR_RW(boottotool);
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
u32 error = 0;
int err;
err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_error),
&error, sizeof(u32));
if (err)
return err;
return sprintf(buf, "%u\n", error);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 47 | 67.14% | 2 | 28.57% |
Benjamin Romer | 18 | 25.71% | 1 | 14.29% |
Sameer Wadgaonkar | 3 | 4.29% | 2 | 28.57% |
David Binder | 2 | 2.86% | 2 | 28.57% |
Total | 70 | 100.00% | 7 | 100.00% |
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u32 error;
int err;
if (kstrtou32(buf, 10, &error))
return -EINVAL;
err = visorchannel_write(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
installation_error),
&error, sizeof(u32));
if (err)
return err;
return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 56 | 69.14% | 2 | 40.00% |
Benjamin Romer | 22 | 27.16% | 1 | 20.00% |
Sameer Wadgaonkar | 3 | 3.70% | 2 | 40.00% |
Total | 81 | 100.00% | 5 | 100.00% |
static DEVICE_ATTR_RW(error);
/* Report the installation text id stored in the controlvm channel. */
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	int res;
	u32 id = 0;

	res = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_text_id),
				&id, sizeof(id));
	if (res)
		return res;
	return sprintf(buf, "%u\n", id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 47 | 67.14% | 2 | 28.57% |
Benjamin Romer | 18 | 25.71% | 1 | 14.29% |
Sameer Wadgaonkar | 3 | 4.29% | 2 | 28.57% |
David Binder | 2 | 2.86% | 2 | 28.57% |
Total | 70 | 100.00% | 7 | 100.00% |
/* Store a new installation text id into the controlvm channel. */
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int res;
	u32 id;

	if (kstrtou32(buf, 10, &id))
		return -EINVAL;
	res = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_text_id),
				 &id, sizeof(id));
	return res ? res : count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 56 | 69.14% | 2 | 40.00% |
Benjamin Romer | 22 | 27.16% | 1 | 20.00% |
Sameer Wadgaonkar | 3 | 3.70% | 2 | 40.00% |
Total | 81 | 100.00% | 5 | 100.00% |
static DEVICE_ATTR_RW(textid);
/* Report how many installation steps remain, per the controlvm channel. */
static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	int res;
	u16 steps = 0;

	res = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_remaining_steps),
				&steps, sizeof(steps));
	if (res)
		return res;
	return sprintf(buf, "%hu\n", steps);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 48 | 68.57% | 2 | 33.33% |
Benjamin Romer | 18 | 25.71% | 1 | 16.67% |
Sameer Wadgaonkar | 3 | 4.29% | 2 | 33.33% |
David Binder | 1 | 1.43% | 1 | 16.67% |
Total | 70 | 100.00% | 6 | 100.00% |
/* Store the number of remaining installation steps into the channel. */
static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int res;
	u16 steps;

	if (kstrtou16(buf, 10, &steps))
		return -EINVAL;
	res = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_remaining_steps),
				 &steps, sizeof(steps));
	return res ? res : count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 56 | 69.14% | 2 | 40.00% |
Benjamin Romer | 22 | 27.16% | 1 | 20.00% |
Sameer Wadgaonkar | 3 | 3.70% | 2 | 40.00% |
Total | 81 | 100.00% | 5 | 100.00% |
static DEVICE_ATTR_RW(remaining_steps);
/*
 * Build a response message from the request header @msg_hdr. The payload
 * fields are cleared; a negative @response marks the message failed and
 * records the positive completion status.
 */
static void controlvm_init_response(struct controlvm_message *msg,
				    struct controlvm_message_header *msg_hdr,
				    int response)
{
	memset(msg, 0, sizeof(*msg));
	memcpy(&msg->hdr, msg_hdr, sizeof(msg->hdr));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response >= 0)
		return;
	msg->hdr.flags.failed = 1;
	msg->hdr.completion_status = (u32)(-response);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Benjamin Romer | 73 | 70.19% | 5 | 83.33% |
Ken Cox | 31 | 29.81% | 1 | 16.67% |
Total | 104 | 100.00% | 6 | 100.00% |
static int controlvm_respond_chipset_init(
struct controlvm_message_header *msg_hdr,
int response,
enum visor_chipset_feature features)
{
struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msg_hdr, response);
outmsg.cmd.init_chipset.features = features;
return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Benjamin Romer | 30 | 54.55% | 5 | 50.00% |
David Kershner | 13 | 23.64% | 2 | 20.00% |
Ken Cox | 9 | 16.36% | 1 | 10.00% |
Sameer Wadgaonkar | 3 | 5.45% | 2 | 20.00% |
Total | 55 | 100.00% | 10 | 100.00% |
/*
 * chipset_init() - handle the CONTROLVM_CHIPSET_INIT message
 * @inmsg: the incoming CHIPSET_INIT message
 *
 * May only run once per boot; a second CHIPSET_INIT is answered with
 * CONTROLVM_RESP_ALREADY_DONE. A response is sent only when the sender
 * asked for one.
 *
 * Return: 0 on success (or when no response was expected), otherwise the
 *         result of sending the response / -EIO on repeat initialization.
 */
static int chipset_init(struct controlvm_message *inmsg)
{
	/* Persists across calls: guards against double initialization. */
	static int chipset_inited;
	enum visor_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;
	int res = 0;

	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;
	/*
	 * Set features to indicate we support parahotplug (if Command also
	 * supports it). Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features = inmsg->cmd.init_chipset.features &
		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
	features |= VISOR_CHIPSET_FEATURE_REPLY;

out_respond:
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
	return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 94 | 94.00% | 3 | 42.86% |
Sameer Wadgaonkar | 4 | 4.00% | 2 | 28.57% |
Ken Cox | 1 | 1.00% | 1 | 14.29% |
Benjamin Romer | 1 | 1.00% | 1 | 14.29% |
Total | 100 | 100.00% | 7 | 100.00% |
static int controlvm_respond(struct controlvm_message_header *msg_hdr,
int response, struct visor_segment_state *state)
{
struct controlvm_message outmsg;
controlvm_init_response(&outmsg, msg_hdr, response);
if (outmsg.hdr.flags.test_message == 1)
return -EINVAL;
if (state) {
outmsg.cmd.device_change_state.state = *state;
outmsg.cmd.device_change_state.flags.phys_device = 1;
}
return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Benjamin Romer | 44 | 48.35% | 6 | 46.15% |
David Kershner | 26 | 28.57% | 4 | 30.77% |
Ken Cox | 18 | 19.78% | 1 | 7.69% |
Sameer Wadgaonkar | 3 | 3.30% | 2 | 15.38% |
Total | 91 | 100.00% | 13 | 100.00% |
/* Which saved-crash-message slot a controlvm message is written into. */
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
static int save_crash_message(struct controlvm_message *msg,
enum crash_obj_type cr_type)
{
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
int err;
err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to read message count\n");
return err;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
dev_err(&chipset_dev->acpi_device->dev,
"invalid number of messages\n");
return -EIO;
}
err = visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to read offset\n");
return err;
}
switch (cr_type) {
case CRASH_DEV:
local_crash_msg_offset += sizeof(struct controlvm_message);
err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset, msg,
sizeof(struct controlvm_message));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to write dev msg\n");
return err;
}
break;
case CRASH_BUS:
err = visorchannel_write(chipset_dev->controlvm_channel,
local_crash_msg_offset, msg,
sizeof(struct controlvm_message));
if (err) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to write bus msg\n");
return err;
}
break;
default:
dev_err(&chipset_dev->acpi_device->dev,
"Invalid crash_obj_type\n");
break;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tim Sell | 124 | 47.88% | 1 | 11.11% |
David Kershner | 95 | 36.68% | 2 | 22.22% |
David Binder | 23 | 8.88% | 1 | 11.11% |
Sameer Wadgaonkar | 10 | 3.86% | 2 | 22.22% |
Bryan Thompson | 4 | 1.54% | 1 | 11.11% |
Jon Frisch | 2 | 0.77% | 1 | 11.11% |
Ken Cox | 1 | 0.39% | 1 | 11.11% |
Total | 259 | 100.00% | 9 | 100.00% |
/*
 * Respond to a pending message, but only when the stashed header really
 * belongs to command @cmd_id.
 */
static int controlvm_responder(enum controlvm_id cmd_id,
			       struct controlvm_message_header *pending_msg_hdr,
			       int response)
{
	if ((u32)cmd_id != pending_msg_hdr->id)
		return -EINVAL;
	return controlvm_respond(pending_msg_hdr, response, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ken Cox | 19 | 44.19% | 1 | 7.69% |
David Kershner | 10 | 23.26% | 3 | 23.08% |
Benjamin Romer | 7 | 16.28% | 5 | 38.46% |
Don Zickus | 5 | 11.63% | 2 | 15.38% |
Veronika Kabatova | 1 | 2.33% | 1 | 7.69% |
Tim Sell | 1 | 2.33% | 1 | 7.69% |
Total | 43 | 100.00% | 13 | 100.00% |
static int device_changestate_responder(enum controlvm_id cmd_id,
struct visor_device *p, int response,
struct visor_segment_state state)
{
struct controlvm_message outmsg;
if (p->pending_msg_hdr->id != cmd_id)
return -EINVAL;
controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
outmsg.cmd.device_change_state.state = state;
return visorchannel_signalinsert(chipset_dev->controlvm_channel,
CONTROLVM_QUEUE_REQUEST, &outmsg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Benjamin Romer | 46 | 46.46% | 8 | 42.11% |
Ken Cox | 28 | 28.28% | 1 | 5.26% |
David Kershner | 15 | 15.15% | 3 | 15.79% |
Don Zickus | 5 | 5.05% | 3 | 15.79% |
Sameer Wadgaonkar | 3 | 3.03% | 2 | 10.53% |
Veronika Kabatova | 2 | 2.02% | 2 | 10.53% |
Total | 99 | 100.00% | 19 | 100.00% |
/*
 * visorbus_create() - handle a CONTROLVM bus-create message
 * @inmsg: message describing the bus to create
 *
 * Allocates a visor_device for the new bus, saves the message in the crash
 * area when the bus belongs to the SIOVM partition, stashes the header for
 * a deferred response when one is expected, creates the bus's visorchannel,
 * and registers the bus instance.
 *
 * Return: 0 on success (the success response is sent later by
 *         visorbus_create_instance()); negative errno on failure, in which
 *         case a failure response is sent here if one was requested.
 */
static int visorbus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* Refuse to create a bus that already exists. */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && bus_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed %s: already exists\n", __func__);
		err = -EEXIST;
		goto err_respond;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
	/* SIOVM buses get their create message saved for post-crash replay. */
	if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		/* Stash the header so the response can be sent later. */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_free_bus_info;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_bus.bus_data_type_guid,
					   false);
	if (!visorchannel) {
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;
	/* Response will be handled by visorbus_create_instance on success */
	err = visorbus_create_instance(bus_info);
	if (err)
		goto err_destroy_channel;
	return 0;

err_destroy_channel:
	visorchannel_destroy(visorchannel);

err_free_pending_msg:
	/* NULL (and thus a no-op) when no response header was stashed. */
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 197 | 55.18% | 9 | 34.62% |
Benjamin Romer | 61 | 17.09% | 7 | 26.92% |
Ken Cox | 52 | 14.57% | 2 | 7.69% |
Tim Sell | 17 | 4.76% | 1 | 3.85% |
Don Zickus | 16 | 4.48% | 2 | 7.69% |
Andy Shevchenko | 7 | 1.96% | 1 | 3.85% |
Zachary Dremann | 3 | 0.84% | 1 | 3.85% |
Sameer Wadgaonkar | 3 | 0.84% | 2 | 7.69% |
Jes Sorensen | 1 | 0.28% | 1 | 3.85% |
Total | 357 | 100.00% | 26 | 100.00% |
/*
 * visorbus_destroy() - handle a CONTROLVM bus-destroy message
 * @inmsg: message naming the bus to destroy
 *
 * Return: 0 on success (the response is sent later by
 *         visorbus_remove_instance()); negative errno on failure, in which
 *         case a failure response is sent here if one was requested.
 */
static int visorbus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
	struct visor_device *bus_info;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -ENODEV;
		goto err_respond;
	}
	/* Only a fully created bus can be destroyed. */
	if (bus_info->state.created == 0) {
		err = -ENOENT;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EEXIST;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		/* Stash the header so the response can be sent later. */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	/* Response will be handled by visorbus_remove_instance */
	visorbus_remove_instance(bus_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 150 | 73.53% | 4 | 30.77% |
Ken Cox | 34 | 16.67% | 2 | 15.38% |
Benjamin Romer | 12 | 5.88% | 4 | 30.77% |
Don Zickus | 6 | 2.94% | 1 | 7.69% |
Jes Sorensen | 1 | 0.49% | 1 | 7.69% |
Sameer Wadgaonkar | 1 | 0.49% | 1 | 7.69% |
Total | 204 | 100.00% | 13 | 100.00% |
static const guid_t *parser_id_get(struct parser_context *ctx)
{
return &ctx->data.id;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
/*
 * Copy at most @nscan bytes of @pscan into a freshly allocated,
 * NUL-terminated buffer. Returns NULL when @nscan is zero or allocation
 * fails; caller owns (and must kfree) the result.
 */
static void *parser_string_get(u8 *pscan, int nscan)
{
	void *str;
	int len;

	if (!nscan)
		return NULL;
	len = strnlen(pscan, nscan);
	/* +1 and kzalloc guarantee NUL termination of the copy. */
	str = kzalloc(len + 1, GFP_KERNEL);
	if (!str)
		return NULL;
	if (len)
		memcpy(str, pscan, len);
	return str;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 73 | 96.05% | 4 | 66.67% |
Benjamin Romer | 2 | 2.63% | 1 | 16.67% |
Ken Cox | 1 | 1.32% | 1 | 16.67% |
Total | 76 | 100.00% | 6 | 100.00% |
static void *parser_name_get(struct parser_context *ctx)
{
struct visor_controlvm_parameters_header *phdr;
phdr = &ctx->data;
if ((unsigned long)phdr->name_offset +
(unsigned long)phdr->name_length > ctx->param_bytes)
return NULL;
ctx->curr = (char *)&phdr + phdr->name_offset;
ctx->bytes_remaining = phdr->name_length;
return parser_string_get(ctx->curr, phdr->name_length);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 73 | 86.90% | 4 | 66.67% |
Tim Sell | 8 | 9.52% | 1 | 16.67% |
Benjamin Romer | 3 | 3.57% | 1 | 16.67% |
Total | 84 | 100.00% | 6 | 100.00% |
/*
 * visorbus_configure() - handle a CONTROLVM bus-configure message
 * @inmsg:      message naming the bus and its new guest handle
 * @parser_ctx: parsed message payload, or NULL; supplies the partition
 *              GUID and bus name when present
 *
 * Return: 0 on success, negative errno on failure. In both cases a
 *         response is sent here if one was expected.
 */
static int visorbus_configure(struct controlvm_message *inmsg,
			      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int err = 0;

	bus_no = cmd->configure_bus.bus_no;
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -EINVAL;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	/* A still-pending response means the bus is mid-operation. */
	if (bus_info->pending_msg_hdr) {
		err = -EIO;
		goto err_respond;
	}
	err = visorchannel_set_clientpartition(bus_info->visorchannel,
					       cmd->configure_bus.guest_handle);
	if (err)
		goto err_respond;
	if (parser_ctx) {
		const guid_t *partition_guid = parser_id_get(parser_ctx);

		guid_copy(&bus_info->partition_guid, partition_guid);
		/* parser_name_get() allocates; freed on device destroy. */
		bus_info->name = parser_name_get(parser_ctx);
	}
	/* Success response (err == 0). */
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev,
		"%s exited with err: %d\n", __func__, err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 103 | 41.87% | 6 | 30.00% |
Benjamin Romer | 84 | 34.15% | 7 | 35.00% |
Ken Cox | 31 | 12.60% | 2 | 10.00% |
Andy Shevchenko | 15 | 6.10% | 1 | 5.00% |
Don Zickus | 11 | 4.47% | 2 | 10.00% |
Jes Sorensen | 1 | 0.41% | 1 | 5.00% |
Sameer Wadgaonkar | 1 | 0.41% | 1 | 5.00% |
Total | 246 | 100.00% | 20 | 100.00% |
/*
 * visorbus_device_create() - handle a CONTROLVM device-create message
 * @inmsg: message describing the device to create
 *
 * Validates the owning bus, allocates a visor_device, creates the device's
 * visorchannel, saves vHBA create messages in the crash area, stashes the
 * header for a deferred response when one is expected, and registers the
 * device.
 *
 * Return: 0 on success (response sent later by create_visor_device());
 *         negative errno on failure, in which case a failure response is
 *         sent here if one was requested.
 */
static int visorbus_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	/* The owning bus must exist and be fully created. */
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d\n", bus_no);
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"bus not created, id: %d\n", bus_no);
		err = -EINVAL;
		goto err_respond;
	}
	/* Refuse to create the same device twice. */
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && dev_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d/%d\n", bus_no, dev_no);
		err = -EEXIST;
		goto err_respond;
	}
	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
	dev_info->device.parent = &bus_info->device;
	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_device.data_type_guid,
					   true);
	if (!visorchannel) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to create visorchannel: %d/%d\n",
			bus_no, dev_no);
		err = -ENOMEM;
		goto err_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	guid_copy(&dev_info->channel_type_guid,
		  &cmd->create_device.data_type_guid);
	/* vHBA create messages are saved for post-crash replay. */
	if (guid_equal(&cmd->create_device.data_type_guid,
		       &visor_vhba_channel_guid)) {
		err = save_crash_message(inmsg, CRASH_DEV);
		if (err)
			goto err_destroy_visorchannel;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		/* Stash the header so the response can be sent later. */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_destroy_visorchannel;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* create_visor_device will send response */
	err = create_visor_device(dev_info);
	if (err)
		goto err_destroy_visorchannel;
	return 0;

err_destroy_visorchannel:
	visorchannel_destroy(visorchannel);

err_free_dev_info:
	kfree(dev_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 179 | 36.76% | 5 | 19.23% |
Ken Cox | 143 | 29.36% | 2 | 7.69% |
Don Zickus | 83 | 17.04% | 4 | 15.38% |
Benjamin Romer | 35 | 7.19% | 8 | 30.77% |
Andy Shevchenko | 21 | 4.31% | 1 | 3.85% |
Tim Sell | 17 | 3.49% | 1 | 3.85% |
Sameer Wadgaonkar | 6 | 1.23% | 3 | 11.54% |
Jes Sorensen | 2 | 0.41% | 1 | 3.85% |
Andreea-Cristina Bernat | 1 | 0.21% | 1 | 3.85% |
Total | 487 | 100.00% | 26 | 100.00% |
/*
 * visorbus_device_changestate() - handle a CONTROLVM device-change-state
 *                                 message by pausing or resuming the device
 * @inmsg: message naming the device and its requested segment state
 *
 * Return: 0 on success (the response is sent later by the pause/resume
 *         path); negative errno on failure, in which case a failure
 *         response is sent here if one was requested.
 */
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct visor_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int err = 0;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		/* Stash the header so the response can be sent later. */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/*
	 * NOTE(review): if @state matches neither the running nor the
	 * standby pattern, err stays 0 and the stashed pending_msg_hdr is
	 * never answered here -- confirm that is intended.
	 */
	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)
		/* Response will be sent from visorchipset_device_resume */
		err = visorchipset_device_resume(dev_info);
	/* ServerNotReady / ServerLost / SegmentStateStandby */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)
		/*
		 * technically this is standby case where server is lost.
		 * Response will be sent from visorchipset_device_pause.
		 */
		err = visorchipset_device_pause(dev_info);
	if (err)
		goto err_respond;
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 193 | 63.91% | 6 | 30.00% |
Ken Cox | 75 | 24.83% | 2 | 10.00% |
Benjamin Romer | 20 | 6.62% | 5 | 25.00% |
Sameer Wadgaonkar | 6 | 1.99% | 3 | 15.00% |
Don Zickus | 5 | 1.66% | 2 | 10.00% |
Jes Sorensen | 2 | 0.66% | 1 | 5.00% |
Veronika Kabatova | 1 | 0.33% | 1 | 5.00% |
Total | 302 | 100.00% | 20 | 100.00% |
/*
 * visorbus_device_destroy() - handle a CONTROLVM device-destroy message
 * @inmsg: message naming the device to destroy
 *
 * Return: 0 on success (the response is sent from the device-removal
 *         path); negative errno on failure, in which case a failure
 *         response is sent here if one was requested.
 */
static int visorbus_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int err;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		/* Stash the header so the response can be sent later. */
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* Name was allocated by parser_name_get(); release it here. */
	kfree(dev_info->name);
	remove_visor_device(dev_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 118 | 51.98% | 3 | 20.00% |
Ken Cox | 74 | 32.60% | 2 | 13.33% |
Benjamin Romer | 20 | 8.81% | 5 | 33.33% |
Zachary Dremann | 7 | 3.08% | 1 | 6.67% |
Don Zickus | 4 | 1.76% | 1 | 6.67% |
Sameer Wadgaonkar | 2 | 0.88% | 2 | 13.33% |
Jes Sorensen | 2 | 0.88% | 1 | 6.67% |
Total | 227 | 100.00% | 15 | 100.00% |
/*
* The general parahotplug flow works as follows. The visorchipset receives
* a DEVICE_CHANGESTATE message from Command specifying a physical device
* to enable or disable. The CONTROLVM message handler calls
* parahotplug_process_message, which then adds the message to a global list
* and kicks off a udev event which causes a user level script to enable or
* disable the specified device. The udev script then writes to
* /sys/devices/platform/visorchipset/parahotplug, which causes the
* parahotplug store functions to get called, at which point the
* appropriate CONTROLVM message is retrieved from the list and responded to.
*/
#define PARAHOTPLUG_TIMEOUT_MS 2000
/*
* parahotplug_next_id() - generate unique int to match an outstanding
* CONTROLVM message with a udev script /sys
* response
*
* Return: a unique integer value
*/
static int parahotplug_next_id(void)
{
	/* Monotonic counter shared by all callers; atomic, so lock-free. */
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ken Cox | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
/*
* parahotplug_next_expiration() - returns the time (in jiffies) when a
* CONTROLVM message on the list should expire
* -- PARAHOTPLUG_TIMEOUT_MS in the future
*
* Return: expected expiration time (in jiffies)
*/
static unsigned long parahotplug_next_expiration(void)
{
	/* Deadline is PARAHOTPLUG_TIMEOUT_MS from now, in jiffies. */
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ken Cox | 14 | 82.35% | 1 | 50.00% |
Nicholas Mc Guire | 3 | 17.65% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
* parahotplug_request_create() - create a parahotplug_request, which is
* basically a wrapper for a CONTROLVM_MESSAGE
* that we can stick on a list
* @msg: the message to insert in the request
*
* Return: the request containing the provided message
*/
/*
 * Allocate a parahotplug_request wrapping a copy of @msg, stamped with a
 * unique id and an expiration time. Returns NULL on allocation failure;
 * the caller owns the result.
 */
static struct parahotplug_request *parahotplug_request_create(
	struct controlvm_message *msg)
{
	struct parahotplug_request *req = kmalloc(sizeof(*req), GFP_KERNEL);

	if (req) {
		req->id = parahotplug_next_id();
		req->expiration = parahotplug_next_expiration();
		req->msg = *msg;
	}
	return req;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ken Cox | 54 | 85.71% | 1 | 25.00% |
Quentin Lambert | 6 | 9.52% | 1 | 25.00% |
Benjamin Romer | 3 | 4.76% | 2 | 50.00% |
Total | 63 | 100.00% | 4 | 100.00% |
/*
* parahotplug_request_destroy() - free a parahotplug_request
* @req: the request to deallocate
*/
/*
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ken Cox | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
static LIST_HEAD(parahotplug_request_list);
/* lock for above */
static DEFINE_SPINLOCK(parahotplug_request_list_lock);
/*
* parahotplug_request_complete() - mark request as complete
* @id: the id of the request
* @active: indicates whether the request is assigned to active partition
*
* Called from the /sys handler, which means the user script has
* finished the enable/disable. Find the matching identifier, and
* respond to the CONTROLVM message with success.
*
* Return: 0 on success or -EINVAL on failure
*/
static int parahotplug_request_complete(int id, u16 active)
{
	struct parahotplug_request *req;
	struct parahotplug_request *tmp;

	spin_lock(&parahotplug_request_list_lock);
	/* Look for a request matching "id". */
	list_for_each_entry_safe(req, tmp, &parahotplug_request_list, list) {
		if (req->id != id)
			continue;
		/*
		 * Found a match. Remove it from the list and
		 * respond.
		 */
		list_del(&req->list);
		spin_unlock(&parahotplug_request_list_lock);
		req->msg.cmd.device_change_state.state.active = active;
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond(
				&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
				&req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
		return 0;
	}
	spin_unlock(&parahotplug_request_list_lock);
	/* No request with that id is outstanding. */
	return -EINVAL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ken Cox | 120 | 82.76% | 1 | 10.00% |
David Kershner | 14 | 9.66% | 3 | 30.00% |
Benjamin Romer | 10 | 6.90% | 5 | 50.00% |
David Binder | 1 | 0.69% | 1 | 10.00% |
Total | 145 | 100.00% | 10 | 100.00% |
/*
* devicedisabled_store() - disables the hotplug device
* @dev: sysfs interface variable not utilized in this function
* @attr: sysfs interface variable not utilized in this function
* @buf: buffer containing the device id
* @count: the size of the buffer
*
* The parahotplug/devicedisabled interface gets called by our support script
* when an SR-IOV device has been shut down. The ID is passed to the script
* and then passed back when the device has been removed.
*
* Return: the size of the buffer for success or negative for error
*/
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;
	int err;

	/* buf carries the decimal id of the request being completed */
	if (kstrtouint(buf, 10, &id))
		return -EINVAL;
	err = parahotplug_request_complete(id, 0);
	return err < 0 ? err : count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 68 | 100.00% | 1 | 100.00% |
Total | 68 | 100.00% | 1 | 100.00% |
static DEVICE_ATTR_WO(devicedisabled);
/*
* deviceenabled_store() - enables the hotplug device
* @dev: sysfs interface variable not utilized in this function
* @attr: sysfs interface variable not utilized in this function
* @buf: buffer containing the device id
* @count: the size of the buffer
*
* The parahotplug/deviceenabled interface gets called by our support script
* when an SR-IOV device has been recovered. The ID is passed to the script
* and then passed back when the device has been brought back up.
*
* Return: the size of the buffer for success or negative for error
*/
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;
	int err;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;
	/*
	 * Propagate lookup failures (unknown/stale id) to the caller,
	 * matching devicedisabled_store(); previously the error was
	 * silently dropped and success was reported.
	 */
	err = parahotplug_request_complete(id, 1);
	if (err < 0)
		return err;
	return count;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
static DEVICE_ATTR_WO(deviceenabled);
/* Attributes exported under the "install" sysfs group */
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};
static const struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};
/* Attributes exported under the "parahotplug" sysfs group */
static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};
static const struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};
/* All sysfs groups created on the visorchipset ACPI device */
static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};
/*
* parahotplug_request_kickoff() - initiate parahotplug request
* @req: the request to initiate
*
* Cause uevent to run the user level script to do the disable/enable specified
* in the parahotplug_request.
*/
static int parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	     env_func[40];
	char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev,
			 env_func, NULL
	};

	/*
	 * Use bounded formatting: the buffers are only 40 bytes, and
	 * VISOR_PARAHOTPLUG_FUNCTION=<int> alone can reach 39 characters,
	 * so unbounded sprintf() leaves no safety margin.
	 */
	snprintf(env_cmd, sizeof(env_cmd), "VISOR_PARAHOTPLUG=1");
	snprintf(env_id, sizeof(env_id), "VISOR_PARAHOTPLUG_ID=%d", req->id);
	snprintf(env_state, sizeof(env_state), "VISOR_PARAHOTPLUG_STATE=%d",
		 cmd->device_change_state.state.active);
	snprintf(env_bus, sizeof(env_bus), "VISOR_PARAHOTPLUG_BUS=%d",
		 cmd->device_change_state.bus_no);
	/* dev_no packs PCI device (upper bits) and function (low 3 bits) */
	snprintf(env_dev, sizeof(env_dev), "VISOR_PARAHOTPLUG_DEVICE=%d",
		 cmd->device_change_state.dev_no >> 3);
	snprintf(env_func, sizeof(env_func), "VISOR_PARAHOTPLUG_FUNCTION=%d",
		 cmd->device_change_state.dev_no & 0x7);
	return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
				  KOBJ_CHANGE, envp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 158 | 94.05% | 2 | 50.00% |
Sameer Wadgaonkar | 10 | 5.95% | 2 | 50.00% |
Total | 168 | 100.00% | 4 | 100.00% |
/*
* parahotplug_process_message() - enables or disables a PCI device by kicking
* off a udev script
* @inmsg: the message indicating whether to enable or disable
*/
static int parahotplug_process_message(struct controlvm_message *inmsg)
{
struct parahotplug_request *req;
int err;
req = parahotplug_request_create(inmsg);
if (!req)
return -ENOMEM;
/*
* For enable messages, just respond with success right away, we don't
* need to wait to see if the enable was successful.
*/
if (inmsg->cmd.device_change_state.state.active) {
err = parahotplug_request_kickoff(req);
if (err)
goto err_respond;
controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS,
&inmsg->cmd.device_change_state.state);
parahotplug_request_destroy(req);
return 0;
}
/*
* For disable messages, add the request to the request list before
* kicking off the udev script. It won't get responded to until the
* script has indicated it's done.
*/
spin_lock(¶hotplug_request_list_lock);
list_add_tail(&req->list, ¶hotplug_request_list);
spin_unlock(¶hotplug_request_list_lock);
err = parahotplug_request_kickoff(req);
if (err)
goto err_respond;
return 0;
err_respond:
controlvm_respond(&inmsg->hdr, err,
&inmsg->cmd.device_change_state.state);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 156 | 100.00% | 6 | 100.00% |
Total | 156 | 100.00% | 6 | 100.00% |
/*
* chipset_ready_uevent() - sends chipset_ready action
*
* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
*
* Return: 0 on success, negative on failure
*/
static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
	/* Announce ACTION=online on the visorchipset platform device */
	int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
				 KOBJ_ONLINE);

	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, res, NULL);
	return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 27 | 54.00% | 3 | 60.00% |
David Binder | 19 | 38.00% | 1 | 20.00% |
Sameer Wadgaonkar | 4 | 8.00% | 1 | 20.00% |
Total | 50 | 100.00% | 5 | 100.00% |
/*
* chipset_selftest_uevent() - sends chipset_selftest action
*
* Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
*
* Return: 0 on success, negative on failure
*/
static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };
	int ret;

	/* Tell the user-level listener a selftest has been requested */
	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	ret = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
				 KOBJ_CHANGE, envp);
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, ret, NULL);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 67 | 85.90% | 3 | 60.00% |
David Binder | 7 | 8.97% | 1 | 20.00% |
Sameer Wadgaonkar | 4 | 5.13% | 1 | 20.00% |
Total | 78 | 100.00% | 5 | 100.00% |
/*
* chipset_notready_uevent() - sends chipset_notready action
*
* Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
*
* Return: 0 on success, negative on failure
*/
static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
	int ret;

	/* Announce ACTION=offline on the visorchipset platform device */
	ret = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
			     KOBJ_OFFLINE);
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, ret, NULL);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 23 | 47.92% | 3 | 42.86% |
David Binder | 12 | 25.00% | 2 | 28.57% |
Ken Cox | 9 | 18.75% | 1 | 14.29% |
Sameer Wadgaonkar | 4 | 8.33% | 1 | 14.29% |
Total | 48 | 100.00% | 7 | 100.00% |
static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
int result = 0;
unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
unsigned long reg_ebx;
unsigned long reg_ecx;
reg_ebx = param & 0xFFFFFFFF;
reg_ecx = param >> 32;
cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
if (!(cpuid_ecx & 0x80000000))
return -EPERM;
__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
"a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
if (result)
goto error;
return 0;
/* Need to convert from VMCALL error codes to Linux */
error:
switch (result) {
case VMCALL_RESULT_INVALID_PARAM:
return -EINVAL;
case VMCALL_RESULT_DATA_UNAVAILABLE:
return -ENODEV;
default:
return -EFAULT;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 119 | 99.17% | 3 | 75.00% |
Sameer Wadgaonkar | 1 | 0.83% | 1 | 25.00% |
Total | 120 | 100.00% | 4 | 100.00% |
static int controlvm_channel_create(struct visorchipset_device *dev)
{
	struct visorchannel *channel;
	int err;

	/* Ask the hypervisor for the guest-physical controlvm address */
	err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
			    virt_to_phys(&dev->controlvm_params));
	if (err)
		return err;
	/* Map the channel at the address the VMCALL filled in */
	channel = visorchannel_create(dev->controlvm_params.address,
				      GFP_KERNEL,
				      &visor_controlvm_channel_guid, true);
	if (!channel)
		return -ENOMEM;
	dev->controlvm_channel = channel;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 64 | 76.19% | 4 | 50.00% |
Ken Cox | 14 | 16.67% | 1 | 12.50% |
Sameer Wadgaonkar | 5 | 5.95% | 2 | 25.00% |
Benjamin Romer | 1 | 1.19% | 1 | 12.50% |
Total | 84 | 100.00% | 8 | 100.00% |
static void setup_crash_devices_work_queue(struct work_struct *work)
{
struct controlvm_message local_crash_bus_msg;
struct controlvm_message local_crash_dev_msg;
struct controlvm_message msg;
u32 local_crash_msg_offset;
u16 local_crash_msg_count;
/* send init chipset msg */
msg.hdr.id = CONTROLVM_CHIPSET_INIT;
msg.cmd.init_chipset.bus_count = 23;
msg.cmd.init_chipset.switch_count = 0;
chipset_init(&msg);
/* get saved message count */
if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
saved_crash_message_count),
&local_crash_msg_count, sizeof(u16)) < 0) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to read channel\n");
return;
}
if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
dev_err(&chipset_dev->acpi_device->dev, "invalid count\n");
return;
}
/* get saved crash message offset */
if (visorchannel_read(chipset_dev->controlvm_channel,
offsetof(struct visor_controlvm_channel,
saved_crash_message_offset),
&local_crash_msg_offset, sizeof(u32)) < 0) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to read channel\n");
return;
}
/* read create device message for storage bus offset */
if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset,
&local_crash_bus_msg,
sizeof(struct controlvm_message)) < 0) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to read channel\n");
return;
}
/* read create device message for storage device */
if (visorchannel_read(chipset_dev->controlvm_channel,
local_crash_msg_offset +
sizeof(struct controlvm_message),
&local_crash_dev_msg,
sizeof(struct controlvm_message)) < 0) {
dev_err(&chipset_dev->acpi_device->dev,
"failed to read channel\n");
return;
}
/* reuse IOVM create bus message */
if (!local_crash_bus_msg.cmd.create_bus.channel_addr) {
dev_err(&chipset_dev->acpi_device->dev,
"no valid create_bus message\n");
return;
}
visorbus_create(&local_crash_bus_msg);
/* reuse create device message for storage device */
if (!local_crash_dev_msg.cmd.create_device.channel_addr) {
dev_err(&chipset_dev->acpi_device->dev,
"no valid create_device message\n");
return;
}
visorbus_device_create(&local_crash_dev_msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 238 | 74.61% | 3 | 27.27% |
Ken Cox | 49 | 15.36% | 1 | 9.09% |
Benjamin Romer | 14 | 4.39% | 2 | 18.18% |
Sameer Wadgaonkar | 12 | 3.76% | 4 | 36.36% |
Bryan Thompson | 6 | 1.88% | 1 | 9.09% |
Total | 319 | 100.00% | 11 | 100.00% |
/*
 * visorbus_response() - complete a pending bus-level controlvm message
 * @bus_info:     the bus whose pending message (if any) is being answered
 * @response:     result code to report back via controlvm_responder()
 * @controlvm_id: the controlvm message id being responded to
 *
 * No-op if no response is outstanding; otherwise sends the response and
 * releases the saved message header.
 */
void visorbus_response(struct visor_device *bus_info, int response,
		       int controlvm_id)
{
	if (!bus_info->pending_msg_hdr)
		return;
	controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
	/* header was saved when the request arrived; release it now */
	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 37 | 77.08% | 4 | 50.00% |
Benjamin Romer | 5 | 10.42% | 1 | 12.50% |
Sameer Wadgaonkar | 3 | 6.25% | 1 | 12.50% |
Ken Cox | 2 | 4.17% | 1 | 12.50% |
Charles Daniels | 1 | 2.08% | 1 | 12.50% |
Total | 48 | 100.00% | 8 | 100.00% |
/*
 * visorbus_device_changestate_response() - complete a pending
 *                                          DEVICE_CHANGESTATE message
 * @dev_info:  the device whose state-change request is being answered
 * @response:  result code to report
 * @state:     the segment state to include in the response
 *
 * No-op if no response is outstanding; otherwise responds and releases
 * the saved message header.
 */
void visorbus_device_changestate_response(struct visor_device *dev_info,
					  int response,
					  struct visor_segment_state state)
{
	if (!dev_info->pending_msg_hdr)
		return;
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
				     response, state);
	/* header was saved when the request arrived; release it now */
	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 36 | 73.47% | 2 | 33.33% |
Ken Cox | 6 | 12.24% | 1 | 16.67% |
Sameer Wadgaonkar | 5 | 10.20% | 1 | 16.67% |
Benjamin Romer | 1 | 2.04% | 1 | 16.67% |
Charles Daniels | 1 | 2.04% | 1 | 16.67% |
Total | 49 | 100.00% | 6 | 100.00% |
/* Release a parser context and return its bytes to the payload budget. */
static void parser_done(struct parser_context *ctx)
{
	/* undo the accounting done in parser_init_stream() */
	chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static struct parser_context *parser_init_stream(u64 addr, u32 bytes,
						 bool *retry)
{
	unsigned long allocbytes;
	struct parser_context *ctx;
	void *mapping;

	*retry = false;
	/* Throttle: too much payload already buffered, ask caller to retry */
	if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) >
	    MAX_CONTROLVM_PAYLOAD_BYTES) {
		*retry = true;
		return NULL;
	}
	/* alloc an extra byte to ensure payload is \0 terminated */
	allocbytes = (unsigned long)bytes + 1 +
		     (sizeof(struct parser_context) -
		      sizeof(struct visor_controlvm_parameters_header));
	ctx = kzalloc(allocbytes, GFP_KERNEL);
	if (!ctx) {
		*retry = true;
		return NULL;
	}
	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	/* Copy the payload out of guest-physical memory, then unmap it */
	mapping = memremap(addr, bytes, MEMREMAP_WB);
	if (!mapping) {
		kfree(ctx);
		return NULL;
	}
	memcpy(&ctx->data, mapping, bytes);
	memunmap(mapping);
	ctx->byte_stream = true;
	chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes;
	return ctx;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 143 | 78.57% | 3 | 33.33% |
Sameer Wadgaonkar | 19 | 10.44% | 3 | 33.33% |
Benjamin Romer | 8 | 4.40% | 1 | 11.11% |
Erik Arfvidson | 6 | 3.30% | 1 | 11.11% |
Tim Sell | 6 | 3.30% | 1 | 11.11% |
Total | 182 | 100.00% | 9 | 100.00% |
/*
* handle_command() - process a controlvm message
* @inmsg: the message to process
* @channel_addr: address of the controlvm channel
*
* Return:
* 0 - Successfully processed the message
* -EAGAIN - ControlVM message was not processed and should be retried
* reading the next controlvm message; a scenario where this can
* occur is when we need to throttle the allocation of memory in
* which to copy out controlvm payload data.
* < 0 - error: ControlVM message was processed but an error occurred.
*/
static int handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	struct controlvm_message ackmsg;
	int err = 0;

	/* create parsing context if necessary */
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;
	/*
	 * Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_bytes) {
		bool retry;

		parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry);
		if (!parser_ctx && retry)
			return -EAGAIN;
	}
	controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS);
	err = visorchannel_signalinsert(chipset_dev->controlvm_channel,
					CONTROLVM_QUEUE_ACK, &ackmsg);
	if (err)
		/* previously leaked parser_ctx (and its payload-byte
		 * accounting) by returning here directly
		 */
		goto out;
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		err = chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		err = visorbus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		err = visorbus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		err = visorbus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		err = visorbus_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			err = parahotplug_process_message(&inmsg);
		} else {
			/*
			 * save the hdr and cmd structures for later use when
			 * sending back the response to Command
			 */
			err = visorbus_device_changestate(&inmsg);
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		err = visorbus_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS,
					  NULL);
		break;
	case CONTROLVM_CHIPSET_READY:
		err = chipset_ready_uevent(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		err = chipset_selftest_uevent(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		err = chipset_notready_uevent(&inmsg.hdr);
		break;
	default:
		err = -ENOMSG;
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr,
					  -CONTROLVM_RESP_ID_UNKNOWN, NULL);
		break;
	}
out:
	if (parser_ctx)
		parser_done(parser_ctx);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 289 | 78.32% | 7 | 53.85% |
Erik Arfvidson | 68 | 18.43% | 1 | 7.69% |
Sameer Wadgaonkar | 9 | 2.44% | 4 | 30.77% |
David Binder | 3 | 0.81% | 1 | 7.69% |
Total | 369 | 100.00% | 13 | 100.00% |
/*
* read_controlvm_event() - retrieves the next message from the
* CONTROLVM_QUEUE_EVENT queue in the controlvm
* channel
* @msg: pointer to the retrieved message
*
* Return: 0 if valid message was retrieved or -error
*/
static int read_controlvm_event(struct controlvm_message *msg)
{
	int err;

	err = visorchannel_signalremove(chipset_dev->controlvm_channel,
					CONTROLVM_QUEUE_EVENT, msg);
	if (err)
		return err;
	/* got a message; test messages are not valid on the event queue */
	if (msg->hdr.flags.test_message == 1)
		return -EINVAL;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 48 | 92.31% | 2 | 50.00% |
David Binder | 2 | 3.85% | 1 | 25.00% |
Sameer Wadgaonkar | 2 | 3.85% | 1 | 25.00% |
Total | 52 | 100.00% | 4 | 100.00% |
/*
* parahotplug_process_list() - remove any request from the list that's been on
* there too long and respond with an error
*/
static void parahotplug_process_list(void)
{
struct list_head *pos;
struct list_head *tmp;
spin_lock(¶hotplug_request_list_lock);
list_for_each_safe(pos, tmp, ¶hotplug_request_list) {
struct parahotplug_request *req =
list_entry(pos, struct parahotplug_request, list);
if (!time_after_eq(jiffies, req->expiration))
continue;
list_del(pos);
if (req->msg.hdr.flags.response_expected)
controlvm_respond(
&req->msg.hdr,
CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT,
&req->msg.cmd.device_change_state.state);
parahotplug_request_destroy(req);
}
spin_unlock(¶hotplug_request_list_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 112 | 99.12% | 2 | 66.67% |
Sameer Wadgaonkar | 1 | 0.88% | 1 | 33.33% |
Total | 113 | 100.00% | 3 | 100.00% |
static void controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	int drained = 0;
	int err;

	/* Drain the RESPONSE queue make it empty */
	do {
		err = visorchannel_signalremove(chipset_dev->controlvm_channel,
						CONTROLVM_QUEUE_RESPONSE,
						&inmsg);
	} while (!err && ++drained < CONTROLVM_MESSAGE_MAX);
	/* -EAGAIN means the queue is simply empty; anything else is fatal */
	if (err != -EAGAIN)
		goto schedule_out;
	if (chipset_dev->controlvm_pending_msg_valid) {
		/*
		 * we throttled processing of a prior msg, so try to process
		 * it again rather than reading a new one
		 */
		inmsg = chipset_dev->controlvm_pending_msg;
		chipset_dev->controlvm_pending_msg_valid = false;
		err = 0;
	} else {
		err = read_controlvm_event(&inmsg);
	}
	while (!err) {
		chipset_dev->most_recent_message_jiffies = jiffies;
		err = handle_command(inmsg,
				     visorchannel_get_physaddr
				     (chipset_dev->controlvm_channel));
		if (err == -EAGAIN) {
			/* stash the message and retry on the next pass */
			chipset_dev->controlvm_pending_msg = inmsg;
			chipset_dev->controlvm_pending_msg_valid = true;
			break;
		}
		err = read_controlvm_event(&inmsg);
	}
	/* parahotplug_worker */
	parahotplug_process_list();
	/*
	 * The controlvm messages are sent in a bulk. If we start receiving messages, we
	 * want the polling to be fast. If we do not receive any message for
	 * MIN_IDLE_SECONDS, we can slow down the polling.
	 */
schedule_out:
	if (time_after(jiffies, chipset_dev->most_recent_message_jiffies +
		       (HZ * MIN_IDLE_SECONDS)))
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we processed
		 * our last controlvm message; slow down the polling
		 */
		chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW;
	else
		chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 198 | 86.46% | 7 | 77.78% |
Sameer Wadgaonkar | 31 | 13.54% | 2 | 22.22% |
Total | 229 | 100.00% | 9 | 100.00% |
static int visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENODEV;
	struct visorchannel *controlvm_channel;

	chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL);
	if (!chipset_dev)
		goto error;
	err = controlvm_channel_create(chipset_dev);
	if (err)
		goto error_free_chipset_dev;
	acpi_device->driver_data = chipset_dev;
	chipset_dev->acpi_device = acpi_device;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj,
				  visorchipset_dev_groups);
	if (err < 0)
		goto error_destroy_channel;
	controlvm_channel = chipset_dev->controlvm_channel;
	if (!visor_check_channel(visorchannel_get_header(controlvm_channel),
				 &chipset_dev->acpi_device->dev,
				 &visor_controlvm_channel_guid,
				 "controlvm",
				 sizeof(struct visor_controlvm_channel),
				 VISOR_CONTROLVM_CHANNEL_VERSIONID,
				 VISOR_CHANNEL_SIGNATURE)) {
		/*
		 * BUG FIX: err still held 0 (from sysfs_create_groups)
		 * here, so this failure path used to tear everything down
		 * yet report success to the ACPI core.
		 */
		err = -ENODEV;
		goto error_delete_groups;
	}
	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work,
				  controlvm_periodic_work);
	chipset_dev->most_recent_message_jiffies = jiffies;
	chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST;
	schedule_delayed_work(&chipset_dev->periodic_controlvm_work,
			      chipset_dev->poll_jiffies);
	err = visorbus_init();
	if (err < 0)
		goto error_cancel_work;
	return 0;

error_cancel_work:
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
error_delete_groups:
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);
error_destroy_channel:
	visorchannel_destroy(chipset_dev->controlvm_channel);
error_free_chipset_dev:
	kfree(chipset_dev);
error:
	dev_err(&acpi_device->dev, "failed with error %d\n", err);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sameer Wadgaonkar | 135 | 50.19% | 5 | 17.86% |
David Kershner | 56 | 20.82% | 6 | 21.43% |
Ken Cox | 43 | 15.99% | 3 | 10.71% |
Benjamin Romer | 16 | 5.95% | 7 | 25.00% |
Erik Arfvidson | 7 | 2.60% | 1 | 3.57% |
Prarit Bhargava | 6 | 2.23% | 2 | 7.14% |
Andy Shevchenko | 2 | 0.74% | 1 | 3.57% |
Tim Sell | 2 | 0.74% | 1 | 3.57% |
Bryan Thompson | 1 | 0.37% | 1 | 3.57% |
Amitoj Kaur Chawla | 1 | 0.37% | 1 | 3.57% |
Total | 269 | 100.00% | 28 | 100.00% |
/* Tear down everything visorchipset_init() set up, in reverse order. */
static int visorchipset_exit(struct acpi_device *acpi_device)
{
	visorbus_exit();
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);
	visorchannel_destroy(chipset_dev->controlvm_channel);
	kfree(chipset_dev);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Sameer Wadgaonkar | 22 | 43.14% | 2 | 25.00% |
Prarit Bhargava | 12 | 23.53% | 2 | 25.00% |
Ken Cox | 12 | 23.53% | 1 | 12.50% |
Don Zickus | 3 | 5.88% | 1 | 12.50% |
Benjamin Romer | 1 | 1.96% | 1 | 12.50% |
Amitoj Kaur Chawla | 1 | 1.96% | 1 | 12.50% |
Total | 51 | 100.00% | 8 | 100.00% |
/* ACPI IDs this driver binds to (PNP0A07: generic container device) */
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};
static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};
MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
static __init int visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Not running under any hypervisor: definitely not s-Par */
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return 0;
	/* check the ID: leaf returns "UnisysSpar64" across ebx/ecx/edx */
	cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
	return ebx == UNISYS_VISOR_ID_EBX && ecx == UNISYS_VISOR_ID_ECX &&
	       edx == UNISYS_VISOR_ID_EDX;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Erik Arfvidson | 59 | 86.76% | 1 | 25.00% |
Sameer Wadgaonkar | 4 | 5.88% | 1 | 25.00% |
Borislav Petkov | 4 | 5.88% | 1 | 25.00% |
David Kershner | 1 | 1.47% | 1 | 25.00% |
Total | 68 | 100.00% | 4 | 100.00% |
static int __init init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;
	/*
	 * Propagate the real error from the ACPI core instead of
	 * collapsing every failure to -ENODEV.
	 */
	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return result;
	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}
Person | Tokens | Prop | Commits | CommitProp |
Prarit Bhargava | 43 | 93.48% | 1 | 33.33% |
Erik Arfvidson | 2 | 4.35% | 1 | 33.33% |
Arnd Bergmann | 1 | 2.17% | 1 | 33.33% |
Total | 46 | 100.00% | 3 | 100.00% |
;
/* Module unload: unregister the ACPI driver (triggers visorchipset_exit). */
static void __exit exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Prarit Bhargava | 13 | 86.67% | 1 | 33.33% |
Arnd Bergmann | 1 | 6.67% | 1 | 33.33% |
Ken Cox | 1 | 6.67% | 1 | 33.33% |
Total | 15 | 100.00% | 3 | 100.00% |
module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Kershner | 3969 | 56.99% | 96 | 47.06% |
Ken Cox | 947 | 13.60% | 3 | 1.47% |
Benjamin Romer | 737 | 10.58% | 33 | 16.18% |
Sameer Wadgaonkar | 389 | 5.59% | 22 | 10.78% |
Erik Arfvidson | 186 | 2.67% | 4 | 1.96% |
Tim Sell | 175 | 2.51% | 3 | 1.47% |
Prarit Bhargava | 149 | 2.14% | 3 | 1.47% |
David Binder | 140 | 2.01% | 9 | 4.41% |
Don Zickus | 138 | 1.98% | 7 | 3.43% |
Andy Shevchenko | 66 | 0.95% | 1 | 0.49% |
Bryan Thompson | 13 | 0.19% | 3 | 1.47% |
Zachary Dremann | 10 | 0.14% | 2 | 0.98% |
Jes Sorensen | 9 | 0.13% | 1 | 0.49% |
Jon Frisch | 7 | 0.10% | 3 | 1.47% |
Quentin Lambert | 6 | 0.09% | 1 | 0.49% |
Borislav Petkov | 4 | 0.06% | 1 | 0.49% |
Veronika Kabatova | 4 | 0.06% | 2 | 0.98% |
Nicholas Mc Guire | 3 | 0.04% | 1 | 0.49% |
Charles Daniels | 2 | 0.03% | 1 | 0.49% |
Amitoj Kaur Chawla | 2 | 0.03% | 1 | 0.49% |
Greg Kroah-Hartman | 2 | 0.03% | 2 | 0.98% |
Arnd Bergmann | 2 | 0.03% | 1 | 0.49% |
Andreea-Cristina Bernat | 1 | 0.01% | 1 | 0.49% |
Arvind Yadav | 1 | 0.01% | 1 | 0.49% |
Mihaela Muraru | 1 | 0.01% | 1 | 0.49% |
Bhaktipriya Shridhar | 1 | 0.01% | 1 | 0.49% |
Total | 6964 | 100.00% | 204 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.