Release 4.16 drivers/visorbus/visorchipset.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/visorbus.h>

#include "visorbus_private.h"

/* {72120008-4AAB-11DC-8530-444553544200} */

#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
                                   0x44, 0x45, 0x53, 0x54, 0x42, 0x00)


static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;

static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;

static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;


#define POLLJIFFIES_CONTROLVM_FAST 1

#define POLLJIFFIES_CONTROLVM_SLOW 100


#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)


#define UNISYS_VISOR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */

#define UNISYS_VISOR_ID_EBX 0x73696e55

#define UNISYS_VISOR_ID_ECX 0x70537379

#define UNISYS_VISOR_ID_EDX 0x34367261
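/*
 * Note: the three ID words above are the ASCII string "UnisysSpar64" held as
 * little-endian 32-bit words: 0x73696e55 = "Unis", 0x70537379 = "ysSp",
 * 0x34367261 = "ar64". visorutil_spar_detect() below compares the CPUID
 * output for UNISYS_VISOR_LEAF_ID against these values.
 */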

/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
 * to slow polling mode. As soon as we get a controlvm message, we switch back
 * to fast polling mode.
 */

#define MIN_IDLE_SECONDS 10


struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	struct visor_controlvm_parameters_header data;
};

/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */

#define VMCALL_CONTROLVM_ADDR 0x0501


enum vmcall_result {
	VMCALL_RESULT_SUCCESS = 0,
	VMCALL_RESULT_INVALID_PARAM = 1,
	VMCALL_RESULT_DATA_UNAVAILABLE = 2,
	VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
	VMCALL_RESULT_DEVICE_ERROR = 4,
	VMCALL_RESULT_DEVICE_NOT_READY = 5
};

/*
 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLS. Has
 *                                          parameters to VMCALL_CONTROLVM_ADDR
 *                                          interface.
 * @address:       The Guest-relative physical address of the ControlVm channel.
 *                 This VMCall fills this in with the appropriate address.
 *                 Contents provided by this VMCALL (OUT).
 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall fills
 *                 this in with the appropriate size. Contents provided by this
 *                 VMCALL (OUT).
 * @unused:        Unused bytes in the 64-bit aligned struct.
 */

struct vmcall_io_controlvm_addr_params {
	u64 address;
	u32 channel_bytes;
	u8 unused[4];
} __packed;


struct visorchipset_device {
	struct acpi_device *acpi_device;
	unsigned long poll_jiffies;
	/* when we got our last controlvm message */
	unsigned long most_recent_message_jiffies;
	struct delayed_work periodic_controlvm_work;
	struct visorchannel *controlvm_channel;
	unsigned long controlvm_payload_bytes_buffered;
	/*
	 * The following variables are used to handle the scenario where we are
	 * unable to offload the payload from a controlvm message due to memory
	 * requirements. In this scenario, we simply stash the controlvm
	 * message, then attempt to process it again the next time
	 * controlvm_periodic_work() runs.
	 */
	struct controlvm_message controlvm_pending_msg;
	bool controlvm_pending_msg_valid;
	struct vmcall_io_controlvm_addr_params controlvm_params;
};


static struct visorchipset_device *chipset_dev;


struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

/* prototypes for attributes */

static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	u8 tool_action = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 tool_action),
				&tool_action, sizeof(u8));
	if (err)
		return err;
	return sprintf(buf, "%u\n", tool_action);
}



static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int err;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  tool_action),
				 &tool_action, sizeof(u8));
	if (err)
		return err;
	return count;
}


static DEVICE_ATTR_RW(toolaction);
static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct efi_visor_indication efi_visor_indication;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 efi_visor_ind),
				&efi_visor_indication,
				sizeof(struct efi_visor_indication));
	if (err)
		return err;
	return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
}



static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, err;
	struct efi_visor_indication efi_visor_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;
	efi_visor_indication.boot_to_tool = val;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  efi_visor_ind),
				 &(efi_visor_indication),
				 sizeof(struct efi_visor_indication));
	if (err)
		return err;
	return count;
}


static DEVICE_ATTR_RW(boottotool);
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_error),
				&error, sizeof(u32));
	if (err)
		return err;
	return sprintf(buf, "%u\n", error);
}



static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int err;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_error),
				 &error, sizeof(u32));
	if (err)
		return err;
	return count;
}


static DEVICE_ATTR_RW(error);
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_text_id),
				&text_id, sizeof(u32));
	if (err)
		return err;
	return sprintf(buf, "%u\n", text_id);
}



static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int err;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (err)
		return err;
	return count;
}


static DEVICE_ATTR_RW(textid);
static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_remaining_steps),
				&remaining_steps, sizeof(u16));
	if (err)
		return err;
	return sprintf(buf, "%hu\n", remaining_steps);
}



static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int err;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (err)
		return err;
	return count;
}


static DEVICE_ATTR_RW(remaining_steps);
static void controlvm_init_response(struct controlvm_message *msg,
				    struct controlvm_message_header *msg_hdr,
				    int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}



static int controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
					  int response,
					  enum visor_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}



static int chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum visor_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;
	int res = 0;

	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;
	/*
	 * Set features to indicate we support parahotplug (if Command also
	 * supports it). Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features = inmsg->cmd.init_chipset.features &
		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
	features |= VISOR_CHIPSET_FEATURE_REPLY;

out_respond:
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
	return res;
}



static int controlvm_respond(struct controlvm_message_header *msg_hdr,
			     int response, struct visor_segment_state *state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return -EINVAL;
	if (state) {
		outmsg.cmd.device_change_state.state = *state;
		outmsg.cmd.device_change_state.flags.phys_device = 1;
	}
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}


enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};
static int save_crash_message(struct controlvm_message *msg, enum crash_obj_type cr_type) { u32 local_crash_msg_offset; u16 local_crash_msg_count; int err; err = visorchannel_read(chipset_dev->controlvm_channel, offsetof(struct visor_controlvm_channel, saved_crash_message_count), &local_crash_msg_count, sizeof(u16)); if (err) { dev_err(&chipset_dev->acpi_device->dev, "failed to read message count\n"); return err; } if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { dev_err(&chipset_dev->acpi_device->dev, "invalid number of messages\n"); return -EIO; } err = visorchannel_read(chipset_dev->controlvm_channel, offsetof(struct visor_controlvm_channel, saved_crash_message_offset), &local_crash_msg_offset, sizeof(u32)); if (err) { dev_err(&chipset_dev->acpi_device->dev, "failed to read offset\n"); return err; } switch (cr_type) { case CRASH_DEV: local_crash_msg_offset += sizeof(struct controlvm_message); err = visorchannel_write(chipset_dev->controlvm_channel, local_crash_msg_offset, msg, sizeof(struct controlvm_message)); if (err) { dev_err(&chipset_dev->acpi_device->dev, "failed to write dev msg\n"); return err; } break; case CRASH_BUS: err = visorchannel_write(chipset_dev->controlvm_channel, local_crash_msg_offset, msg, sizeof(struct controlvm_message)); if (err) { dev_err(&chipset_dev->acpi_device->dev, "failed to write bus msg\n"); return err; } break; default: dev_err(&chipset_dev->acpi_device->dev, "Invalid crash_obj_type\n"); break; } return 0; }



static int controlvm_responder(enum controlvm_id cmd_id,
			       struct controlvm_message_header *pending_msg_hdr,
			       int response)
{
	if (pending_msg_hdr->id != (u32)cmd_id)
		return -EINVAL;
	return controlvm_respond(pending_msg_hdr, response, NULL);
}



static int device_changestate_responder(enum controlvm_id cmd_id,
					struct visor_device *p, int response,
					struct visor_segment_state state)
{
	struct controlvm_message outmsg;

	if (p->pending_msg_hdr->id != cmd_id)
		return -EINVAL;
	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
	outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
	outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
	outmsg.cmd.device_change_state.state = state;
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}



static int visorbus_create(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; struct controlvm_message_header *pmsg_hdr; u32 bus_no = cmd->create_bus.bus_no; struct visor_device *bus_info; struct visorchannel *visorchannel; int err; bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (bus_info && bus_info->state.created == 1) { dev_err(&chipset_dev->acpi_device->dev, "failed %s: already exists\n", __func__); err = -EEXIST; goto err_respond; } bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL); if (!bus_info) { err = -ENOMEM; goto err_respond; } INIT_LIST_HEAD(&bus_info->list_all); bus_info->chipset_bus_no = bus_no; bus_info->chipset_dev_no = BUS_ROOT_DEVICE; if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) { err = save_crash_message(inmsg, CRASH_BUS); if (err) goto err_free_bus_info; } if (inmsg->hdr.flags.response_expected == 1) { pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); if (!pmsg_hdr) { err = -ENOMEM; goto err_free_bus_info; } memcpy(pmsg_hdr, &inmsg->hdr, sizeof(struct controlvm_message_header)); bus_info->pending_msg_hdr = pmsg_hdr; } visorchannel = visorchannel_create(cmd->create_bus.channel_addr, GFP_KERNEL, &cmd->create_bus.bus_data_type_guid, false); if (!visorchannel) { err = -ENOMEM; goto err_free_pending_msg; } bus_info->visorchannel = visorchannel; /* Response will be handled by visorbus_create_instance on success */ err = visorbus_create_instance(bus_info); if (err) goto err_destroy_channel; return 0; err_destroy_channel: visorchannel_destroy(visorchannel); err_free_pending_msg: kfree(bus_info->pending_msg_hdr); err_free_bus_info: kfree(bus_info); err_respond: if (inmsg->hdr.flags.response_expected == 1) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); return err; }



static int visorbus_destroy(struct controlvm_message *inmsg) { struct controlvm_message_header *pmsg_hdr; u32 bus_no = inmsg->cmd.destroy_bus.bus_no; struct visor_device *bus_info; int err; bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (!bus_info) { err = -ENODEV; goto err_respond; } if (bus_info->state.created == 0) { err = -ENOENT; goto err_respond; } if (bus_info->pending_msg_hdr) { /* only non-NULL if dev is still waiting on a response */ err = -EEXIST; goto err_respond; } if (inmsg->hdr.flags.response_expected == 1) { pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); if (!pmsg_hdr) { err = -ENOMEM; goto err_respond; } memcpy(pmsg_hdr, &inmsg->hdr, sizeof(struct controlvm_message_header)); bus_info->pending_msg_hdr = pmsg_hdr; } /* Response will be handled by visorbus_remove_instance */ visorbus_remove_instance(bus_info); return 0; err_respond: if (inmsg->hdr.flags.response_expected == 1) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); return err; }



static const guid_t *parser_id_get(struct parser_context *ctx)
{
	return &ctx->data.id;
}



static void *parser_string_get(u8 *pscan, int nscan)
{
	int value_length;
	void *value;

	if (nscan == 0)
		return NULL;
	value_length = strnlen(pscan, nscan);
	value = kzalloc(value_length + 1, GFP_KERNEL);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	return value;
}



static void *parser_name_get(struct parser_context *ctx)
{
	struct visor_controlvm_parameters_header *phdr;

	phdr = &ctx->data;
	if ((unsigned long)phdr->name_offset +
	    (unsigned long)phdr->name_length > ctx->param_bytes)
		return NULL;
	ctx->curr = (char *)&phdr + phdr->name_offset;
	ctx->bytes_remaining = phdr->name_length;
	return parser_string_get(ctx->curr, phdr->name_length);
}



static int visorbus_configure(struct controlvm_message *inmsg, struct parser_context *parser_ctx) { struct controlvm_message_packet *cmd = &inmsg->cmd; u32 bus_no; struct visor_device *bus_info; int err = 0; bus_no = cmd->configure_bus.bus_no; bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (!bus_info) { err = -EINVAL; goto err_respond; } if (bus_info->state.created == 0) { err = -EINVAL; goto err_respond; } if (bus_info->pending_msg_hdr) { err = -EIO; goto err_respond; } err = visorchannel_set_clientpartition(bus_info->visorchannel, cmd->configure_bus.guest_handle); if (err) goto err_respond; if (parser_ctx) { const guid_t *partition_guid = parser_id_get(parser_ctx); guid_copy(&bus_info->partition_guid, partition_guid); bus_info->name = parser_name_get(parser_ctx); } if (inmsg->hdr.flags.response_expected == 1) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); return 0; err_respond: dev_err(&chipset_dev->acpi_device->dev, "%s exited with err: %d\n", __func__, err); if (inmsg->hdr.flags.response_expected == 1) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); return err; }



static int visorbus_device_create(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; struct controlvm_message_header *pmsg_hdr; u32 bus_no = cmd->create_device.bus_no; u32 dev_no = cmd->create_device.dev_no; struct visor_device *dev_info; struct visor_device *bus_info; struct visorchannel *visorchannel; int err; bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL); if (!bus_info) { dev_err(&chipset_dev->acpi_device->dev, "failed to get bus by id: %d\n", bus_no); err = -ENODEV; goto err_respond; } if (bus_info->state.created == 0) { dev_err(&chipset_dev->acpi_device->dev, "bus not created, id: %d\n", bus_no); err = -EINVAL; goto err_respond; } dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); if (dev_info && dev_info->state.created == 1) { dev_err(&chipset_dev->acpi_device->dev, "failed to get bus by id: %d/%d\n", bus_no, dev_no); err = -EEXIST; goto err_respond; } dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); if (!dev_info) { err = -ENOMEM; goto err_respond; } dev_info->chipset_bus_no = bus_no; dev_info->chipset_dev_no = dev_no; guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid); dev_info->device.parent = &bus_info->device; visorchannel = visorchannel_create(cmd->create_device.channel_addr, GFP_KERNEL, &cmd->create_device.data_type_guid, true); if (!visorchannel) { dev_err(&chipset_dev->acpi_device->dev, "failed to create visorchannel: %d/%d\n", bus_no, dev_no); err = -ENOMEM; goto err_free_dev_info; } dev_info->visorchannel = visorchannel; guid_copy(&dev_info->channel_type_guid, &cmd->create_device.data_type_guid); if (guid_equal(&cmd->create_device.data_type_guid, &visor_vhba_channel_guid)) { err = save_crash_message(inmsg, CRASH_DEV); if (err) goto err_destroy_visorchannel; } if (inmsg->hdr.flags.response_expected == 1) { pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); if (!pmsg_hdr) { err = -ENOMEM; goto err_destroy_visorchannel; } memcpy(pmsg_hdr, &inmsg->hdr, sizeof(struct controlvm_message_header)); dev_info->pending_msg_hdr = pmsg_hdr; } /* create_visor_device will send response */ err = create_visor_device(dev_info); if (err) goto err_destroy_visorchannel; return 0; err_destroy_visorchannel: visorchannel_destroy(visorchannel); err_free_dev_info: kfree(dev_info); err_respond: if (inmsg->hdr.flags.response_expected == 1) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); return err; }



static int visorbus_device_changestate(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; struct controlvm_message_header *pmsg_hdr; u32 bus_no = cmd->device_change_state.bus_no; u32 dev_no = cmd->device_change_state.dev_no; struct visor_segment_state state = cmd->device_change_state.state; struct visor_device *dev_info; int err = 0; dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); if (!dev_info) { err = -ENODEV; goto err_respond; } if (dev_info->state.created == 0) { err = -EINVAL; goto err_respond; } if (dev_info->pending_msg_hdr) { /* only non-NULL if dev is still waiting on a response */ err = -EIO; goto err_respond; } if (inmsg->hdr.flags.response_expected == 1) { pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); if (!pmsg_hdr) { err = -ENOMEM; goto err_respond; } memcpy(pmsg_hdr, &inmsg->hdr, sizeof(struct controlvm_message_header)); dev_info->pending_msg_hdr = pmsg_hdr; } if (state.alive == segment_state_running.alive && state.operating == segment_state_running.operating) /* Response will be sent from visorchipset_device_resume */ err = visorchipset_device_resume(dev_info); /* ServerNotReady / ServerLost / SegmentStateStandby */ else if (state.alive == segment_state_standby.alive && state.operating == segment_state_standby.operating) /* * technically this is standby case where server is lost. * Response will be sent from visorchipset_device_pause. */ err = visorchipset_device_pause(dev_info); if (err) goto err_respond; return 0; err_respond: dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err); if (inmsg->hdr.flags.response_expected == 1) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); return err; }



static int visorbus_device_destroy(struct controlvm_message *inmsg) { struct controlvm_message_packet *cmd = &inmsg->cmd; struct controlvm_message_header *pmsg_hdr; u32 bus_no = cmd->destroy_device.bus_no; u32 dev_no = cmd->destroy_device.dev_no; struct visor_device *dev_info; int err; dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); if (!dev_info) { err = -ENODEV; goto err_respond; } if (dev_info->state.created == 0) { err = -EINVAL; goto err_respond; } if (dev_info->pending_msg_hdr) { /* only non-NULL if dev is still waiting on a response */ err = -EIO; goto err_respond; } if (inmsg->hdr.flags.response_expected == 1) { pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); if (!pmsg_hdr) { err = -ENOMEM; goto err_respond; } memcpy(pmsg_hdr, &inmsg->hdr, sizeof(struct controlvm_message_header)); dev_info->pending_msg_hdr = pmsg_hdr; } kfree(dev_info->name); remove_visor_device(dev_info); return 0; err_respond: if (inmsg->hdr.flags.response_expected == 1) controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); return err; }


/*
 * The general parahotplug flow works as follows. The visorchipset receives
 * a DEVICE_CHANGESTATE message from Command specifying a physical device
 * to enable or disable. The CONTROLVM message handler calls
 * parahotplug_process_message, which then adds the message to a global list
 * and kicks off a udev event which causes a user level script to enable or
 * disable the specified device. The udev script then writes to
 * /sys/devices/platform/visorchipset/parahotplug, which causes the
 * parahotplug store functions to get called, at which point the
 * appropriate CONTROLVM message is retrieved from the list and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000

/*
 * parahotplug_next_id() - generate unique int to match an outstanding
 *                         CONTROLVM message with a udev script /sys
 *                         response
 *
 * Return: a unique integer value
 */
static int parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}


/*
 * parahotplug_next_expiration() - returns the time (in jiffies) when a
 *                                 CONTROLVM message on the list should expire
 *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
 *
 * Return: expected expiration time (in jiffies)
 */
static unsigned long parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}


/*
 * parahotplug_request_create() - create a parahotplug_request, which is
 *                                basically a wrapper for a CONTROLVM_MESSAGE
 *                                that we can stick on a list
 * @msg: the message to insert in the request
 *
 * Return: the request containing the provided message
 */
static struct parahotplug_request *parahotplug_request_create(
						struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;
	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;
	return req;
}


/*
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}


static LIST_HEAD(parahotplug_request_list);
/* lock for above */
static DEFINE_SPINLOCK(parahotplug_request_list_lock);

/*
 * parahotplug_request_complete() - mark request as complete
 * @id: the id of the request
 * @active: indicates whether the request is assigned to active partition
 *
 * Called from the /sys handler, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 *
 * Return: 0 on success or -EINVAL on failure
 */
static int parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;
	struct parahotplug_request *req;

	spin_lock(&parahotplug_request_list_lock);
	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		req = list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/*
			 * Found a match. Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond(
				       &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
				       &req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}
	spin_unlock(&parahotplug_request_list_lock);
	return -EINVAL;
}


/*
 * devicedisabled_store() - disables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;
	int err;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;
	err = parahotplug_request_complete(id, 0);
	if (err < 0)
		return err;
	return count;
}


static DEVICE_ATTR_WO(devicedisabled);

/*
 * deviceenabled_store() - enables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;
	parahotplug_request_complete(id, 1);
	return count;
}


static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static const struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static const struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};

/*
 * parahotplug_request_kickoff() - initiate parahotplug request
 * @req: the request to initiate
 *
 * Cause uevent to run the user level script to do the disable/enable specified
 * in the parahotplug_request.
 */
static int parahotplug_request_kickoff(struct parahotplug_request *req) { struct controlvm_message_packet *cmd = &req->msg.cmd; char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40], env_func[40]; char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL }; sprintf(env_cmd, "VISOR_PARAHOTPLUG=1"); sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id); sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d", cmd->device_change_state.state.active); sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d", cmd->device_change_state.bus_no); sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d", cmd->device_change_state.dev_no >> 3); sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d", cmd->device_change_state.dev_no & 0x7); return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj, KOBJ_CHANGE, envp); }
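The user-space half of this flow lives outside the kernel and is not part of this file. As a rough illustration only, a udev-invoked helper could consume the environment variables set by parahotplug_request_kickoff() and then report completion through the parahotplug sysfs attributes described in the comments above; the sysfs path and the helper program are assumptions for this sketch, and the actual enable/disable of the PCI function is elided.

/* Illustrative user-space sketch; not part of the driver. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *id = getenv("VISOR_PARAHOTPLUG_ID");
	const char *state = getenv("VISOR_PARAHOTPLUG_STATE");
	char path[128];
	FILE *f;

	if (!id || !state)
		return 1;

	/*
	 * ... enable or disable the PCI function identified by
	 * VISOR_PARAHOTPLUG_BUS/DEVICE/FUNCTION here (elided) ...
	 */

	/* Tell visorchipset the request is done so it can respond to Command. */
	snprintf(path, sizeof(path),
		 "/sys/devices/platform/visorchipset/parahotplug/%s",
		 strcmp(state, "1") == 0 ? "deviceenabled" : "devicedisabled");
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "%s\n", id);
	fclose(f);
	return 0;
}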


/*
 * parahotplug_process_message() - enables or disables a PCI device by kicking
 *                                 off a udev script
 * @inmsg: the message indicating whether to enable or disable
 */
static int parahotplug_process_message(struct controlvm_message *inmsg) { struct parahotplug_request *req; int err; req = parahotplug_request_create(inmsg); if (!req) return -ENOMEM; /* * For enable messages, just respond with success right away, we don't * need to wait to see if the enable was successful. */ if (inmsg->cmd.device_change_state.state.active) { err = parahotplug_request_kickoff(req); if (err) goto err_respond; controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS, &inmsg->cmd.device_change_state.state); parahotplug_request_destroy(req); return 0; } /* * For disable messages, add the request to the request list before * kicking off the udev script. It won't get responded to until the * script has indicated it's done. */ spin_lock(&parahotplug_request_list_lock); list_add_tail(&req->list, &parahotplug_request_list); spin_unlock(&parahotplug_request_list_lock); err = parahotplug_request_kickoff(req); if (err) goto err_respond; return 0; err_respond: controlvm_respond(&inmsg->hdr, err, &inmsg->cmd.device_change_state.state); return err; }


/*
 * chipset_ready_uevent() - sends chipset_ready action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr)
{
	int res;

	res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE);
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, res, NULL);
	return res;
}


/*
 * chipset_selftest_uevent() - sends chipset_selftest action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };
	int res;

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj,
				 KOBJ_CHANGE, envp);
	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, res, NULL);
	return res;
}


/*
 * chipset_notready_uevent() - sends chipset_notready action
 *
 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: 0 on success, negative on failure
 */
static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr)
{
	int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj,
				 KOBJ_OFFLINE);

	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, res, NULL);
	return res;
}



static int unisys_vmcall(unsigned long tuple, unsigned long param)
{
	int result = 0;
	unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx;
	unsigned long reg_ebx;
	unsigned long reg_ecx;

	reg_ebx = param & 0xFFFFFFFF;
	reg_ecx = param >> 32;
	/* Bit 31 of ECX from CPUID leaf 1 is the "running under a hypervisor" bit. */
	cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
	if (!(cpuid_ecx & 0x80000000))
		return -EPERM;
	/* 0F 01 C1 is the VMCALL instruction; the tuple is passed in EAX. */
	__asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
			     "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
	if (result)
		goto error;
	return 0;

/* Need to convert from VMCALL error codes to Linux */
error:
	switch (result) {
	case VMCALL_RESULT_INVALID_PARAM:
		return -EINVAL;
	case VMCALL_RESULT_DATA_UNAVAILABLE:
		return -ENODEV;
	default:
		return -EFAULT;
	}
}



static int controlvm_channel_create(struct visorchipset_device *dev)
{
	struct visorchannel *chan;
	u64 addr;
	int err;

	err = unisys_vmcall(VMCALL_CONTROLVM_ADDR,
			    virt_to_phys(&dev->controlvm_params));
	if (err)
		return err;
	addr = dev->controlvm_params.address;
	chan = visorchannel_create(addr, GFP_KERNEL,
				   &visor_controlvm_channel_guid, true);
	if (!chan)
		return -ENOMEM;
	dev->controlvm_channel = chan;
	return 0;
}



static void setup_crash_devices_work_queue(struct work_struct *work) { struct controlvm_message local_crash_bus_msg; struct controlvm_message local_crash_dev_msg; struct controlvm_message msg; u32 local_crash_msg_offset; u16 local_crash_msg_count; /* send init chipset msg */ msg.hdr.id = CONTROLVM_CHIPSET_INIT; msg.cmd.init_chipset.bus_count = 23; msg.cmd.init_chipset.switch_count = 0; chipset_init(&msg); /* get saved message count */ if (visorchannel_read(chipset_dev->controlvm_channel, offsetof(struct visor_controlvm_channel, saved_crash_message_count), &local_crash_msg_count, sizeof(u16)) < 0) { dev_err(&chipset_dev->acpi_device->dev, "failed to read channel\n"); return; } if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { dev_err(&chipset_dev->acpi_device->dev, "invalid count\n"); return; } /* get saved crash message offset */ if (visorchannel_read(chipset_dev->controlvm_channel, offsetof(struct visor_controlvm_channel, saved_crash_message_offset), &local_crash_msg_offset, sizeof(u32)) < 0) { dev_err(&chipset_dev->acpi_device->dev, "failed to read channel\n"); return; } /* read create device message for storage bus offset */ if (visorchannel_read(chipset_dev->controlvm_channel, local_crash_msg_offset, &local_crash_bus_msg, sizeof(struct controlvm_message)) < 0) { dev_err(&chipset_dev->acpi_device->dev, "failed to read channel\n"); return; } /* read create device message for storage device */ if (visorchannel_read(chipset_dev->controlvm_channel, local_crash_msg_offset + sizeof(struct controlvm_message), &local_crash_dev_msg, sizeof(struct controlvm_message)) < 0) { dev_err(&chipset_dev->acpi_device->dev, "failed to read channel\n"); return; } /* reuse IOVM create bus message */ if (!local_crash_bus_msg.cmd.create_bus.channel_addr) { dev_err(&chipset_dev->acpi_device->dev, "no valid create_bus message\n"); return; } visorbus_create(&local_crash_bus_msg); /* reuse create device message for storage device */ if (!local_crash_dev_msg.cmd.create_device.channel_addr) { dev_err(&chipset_dev->acpi_device->dev, "no valid create_device message\n"); return; } visorbus_device_create(&local_crash_dev_msg); }



void visorbus_response(struct visor_device *bus_info, int response,
		       int controlvm_id)
{
	if (!bus_info->pending_msg_hdr)
		return;

	controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response);
	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}



void visorbus_device_changestate_response(struct visor_device *dev_info,
					  int response,
					  struct visor_segment_state state)
{
	if (!dev_info->pending_msg_hdr)
		return;

	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info,
				     response, state);
	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}



static void parser_done(struct parser_context *ctx)
{
	chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}



static struct parser_context *parser_init_stream(u64 addr, u32 bytes, bool *retry) { unsigned long allocbytes; struct parser_context *ctx; void *mapping; *retry = false; /* alloc an extra byte to ensure payload is \0 terminated */ allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) - sizeof(struct visor_controlvm_parameters_header)); if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) > MAX_CONTROLVM_PAYLOAD_BYTES) { *retry = true; return NULL; } ctx = kzalloc(allocbytes, GFP_KERNEL); if (!ctx) { *retry = true; return NULL; } ctx->allocbytes = allocbytes; ctx->param_bytes = bytes; mapping = memremap(addr, bytes, MEMREMAP_WB); if (!mapping) goto err_finish_ctx; memcpy(&ctx->data, mapping, bytes); memunmap(mapping); ctx->byte_stream = true; chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes; return ctx; err_finish_ctx: kfree(ctx); return NULL; }


/*
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *	0	- Successfully processed the message
 *	-EAGAIN	- ControlVM message was not processed and should be retried
 *		  reading the next controlvm message; a scenario where this can
 *		  occur is when we need to throttle the allocation of memory in
 *		  which to copy out controlvm payload data.
 *	< 0	- error: ControlVM message was processed but an error occurred.
 */
static int handle_command(struct controlvm_message inmsg, u64 channel_addr) { struct controlvm_message_packet *cmd = &inmsg.cmd; u64 parm_addr; u32 parm_bytes; struct parser_context *parser_ctx = NULL; struct controlvm_message ackmsg; int err = 0; /* create parsing context if necessary */ parm_addr = channel_addr + inmsg.hdr.payload_vm_offset; parm_bytes = inmsg.hdr.payload_bytes; /* * Parameter and channel addresses within test messages actually lie * within our OS-controlled memory. We need to know that, because it * makes a difference in how we compute the virtual address. */ if (parm_bytes) { bool retry; parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry); if (!parser_ctx && retry) return -EAGAIN; } controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS); err = visorchannel_signalinsert(chipset_dev->controlvm_channel, CONTROLVM_QUEUE_ACK, &ackmsg); if (err) return err; switch (inmsg.hdr.id) { case CONTROLVM_CHIPSET_INIT: err = chipset_init(&inmsg); break; case CONTROLVM_BUS_CREATE: err = visorbus_create(&inmsg); break; case CONTROLVM_BUS_DESTROY: err = visorbus_destroy(&inmsg); break; case CONTROLVM_BUS_CONFIGURE: err = visorbus_configure(&inmsg, parser_ctx); break; case CONTROLVM_DEVICE_CREATE: err = visorbus_device_create(&inmsg); break; case CONTROLVM_DEVICE_CHANGESTATE: if (cmd->device_change_state.flags.phys_device) { err = parahotplug_process_message(&inmsg); } else { /* * save the hdr and cmd structures for later use when * sending back the response to Command */ err = visorbus_device_changestate(&inmsg); break; } break; case CONTROLVM_DEVICE_DESTROY: err = visorbus_device_destroy(&inmsg); break; case CONTROLVM_DEVICE_CONFIGURE: /* no op just send a respond that we passed */ if (inmsg.hdr.flags.response_expected) controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS, NULL); break; case CONTROLVM_CHIPSET_READY: err = chipset_ready_uevent(&inmsg.hdr); break; case CONTROLVM_CHIPSET_SELFTEST: err = chipset_selftest_uevent(&inmsg.hdr); break; case CONTROLVM_CHIPSET_STOP: err = chipset_notready_uevent(&inmsg.hdr); break; default: err = -ENOMSG; if (inmsg.hdr.flags.response_expected) controlvm_respond(&inmsg.hdr, -CONTROLVM_RESP_ID_UNKNOWN, NULL); break; } if (parser_ctx) { parser_done(parser_ctx); parser_ctx = NULL; } return err; }


/*
 * read_controlvm_event() - retrieves the next message from the
 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
 *                          channel
 * @msg: pointer to the retrieved message
 *
 * Return: 0 if valid message was retrieved or -error
 */
static int read_controlvm_event(struct controlvm_message *msg)
{
	int err = visorchannel_signalremove(chipset_dev->controlvm_channel,
					    CONTROLVM_QUEUE_EVENT, msg);

	if (err)
		return err;
	/* got a message */
	if (msg->hdr.flags.test_message == 1)
		return -EINVAL;
	return 0;
}


/*
 * parahotplug_process_list() - remove any request from the list that's been on
 *                              there too long and respond with an error
 */
static void parahotplug_process_list(void) { struct list_head *pos; struct list_head *tmp; spin_lock(&parahotplug_request_list_lock); list_for_each_safe(pos, tmp, &parahotplug_request_list) { struct parahotplug_request *req = list_entry(pos, struct parahotplug_request, list); if (!time_after_eq(jiffies, req->expiration)) continue; list_del(pos); if (req->msg.hdr.flags.response_expected) controlvm_respond( &req->msg.hdr, CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT, &req->msg.cmd.device_change_state.state); parahotplug_request_destroy(req); } spin_unlock(&parahotplug_request_list_lock); }



static void controlvm_periodic_work(struct work_struct *work) { struct controlvm_message inmsg; int count = 0; int err; /* Drain the RESPONSE queue make it empty */ do { err = visorchannel_signalremove(chipset_dev->controlvm_channel, CONTROLVM_QUEUE_RESPONSE, &inmsg); } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX)); if (err != -EAGAIN) goto schedule_out; if (chipset_dev->controlvm_pending_msg_valid) { /* * we throttled processing of a prior msg, so try to process * it again rather than reading a new one */ inmsg = chipset_dev->controlvm_pending_msg; chipset_dev->controlvm_pending_msg_valid = false; err = 0; } else { err = read_controlvm_event(&inmsg); } while (!err) { chipset_dev->most_recent_message_jiffies = jiffies; err = handle_command(inmsg, visorchannel_get_physaddr (chipset_dev->controlvm_channel)); if (err == -EAGAIN) { chipset_dev->controlvm_pending_msg = inmsg; chipset_dev->controlvm_pending_msg_valid = true; break; } err = read_controlvm_event(&inmsg); } /* parahotplug_worker */ parahotplug_process_list(); /* * The controlvm messages are sent in a bulk. If we start receiving messages, we * want the polling to be fast. If we do not receive any message for * MIN_IDLE_SECONDS, we can slow down the polling. */ schedule_out: if (time_after(jiffies, chipset_dev->most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) { /* * it's been longer than MIN_IDLE_SECONDS since we processed * our last controlvm message; slow down the polling */ if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW) chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW; } else { if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST) chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST; } schedule_delayed_work(&chipset_dev->periodic_controlvm_work, chipset_dev->poll_jiffies); }



static int visorchipset_init(struct acpi_device *acpi_device) { int err = -ENODEV; struct visorchannel *controlvm_channel; chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL); if (!chipset_dev) goto error; err = controlvm_channel_create(chipset_dev); if (err) goto error_free_chipset_dev; acpi_device->driver_data = chipset_dev; chipset_dev->acpi_device = acpi_device; chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST; err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj, visorchipset_dev_groups); if (err < 0) goto error_destroy_channel; controlvm_channel = chipset_dev->controlvm_channel; if (!visor_check_channel(visorchannel_get_header(controlvm_channel), &chipset_dev->acpi_device->dev, &visor_controlvm_channel_guid, "controlvm", sizeof(struct visor_controlvm_channel), VISOR_CONTROLVM_CHANNEL_VERSIONID, VISOR_CHANNEL_SIGNATURE)) goto error_delete_groups; /* if booting in a crash kernel */ if (is_kdump_kernel()) INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work, setup_crash_devices_work_queue); else INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work, controlvm_periodic_work); chipset_dev->most_recent_message_jiffies = jiffies; chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST; schedule_delayed_work(&chipset_dev->periodic_controlvm_work, chipset_dev->poll_jiffies); err = visorbus_init(); if (err < 0) goto error_cancel_work; return 0; error_cancel_work: cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work); error_delete_groups: sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj, visorchipset_dev_groups); error_destroy_channel: visorchannel_destroy(chipset_dev->controlvm_channel); error_free_chipset_dev: kfree(chipset_dev); error: dev_err(&acpi_device->dev, "failed with error %d\n", err); return err; }



static int visorchipset_exit(struct acpi_device *acpi_device)
{
	visorbus_exit();
	cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work);
	sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj,
			    visorchipset_dev_groups);
	visorchannel_destroy(chipset_dev->controlvm_channel);
	kfree(chipset_dev);
	return 0;
}


static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
static __init int visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		/* check the ID */
		cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return  (ebx == UNISYS_VISOR_ID_EBX) &&
			(ecx == UNISYS_VISOR_ID_ECX) &&
			(edx == UNISYS_VISOR_ID_EDX);
	}
	return 0;
}



static int __init init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;
	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;
	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}


static void __exit exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}


module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");

Overall Contributors

Person                   Tokens   Prop      Commits  CommitProp
David Kershner           3969     56.99%    96       47.06%
Ken Cox                  947      13.60%    3        1.47%
Benjamin Romer           737      10.58%    33       16.18%
Sameer Wadgaonkar        389      5.59%     22       10.78%
Erik Arfvidson           186      2.67%     4        1.96%
Tim Sell                 175      2.51%     3        1.47%
Prarit Bhargava          149      2.14%     3        1.47%
David Binder             140      2.01%     9        4.41%
Don Zickus               138      1.98%     7        3.43%
Andy Shevchenko          66       0.95%     1        0.49%
Bryan Thompson           13       0.19%     3        1.47%
Zachary Dremann          10       0.14%     2        0.98%
Jes Sorensen             9        0.13%     1        0.49%
Jon Frisch               7        0.10%     3        1.47%
Quentin Lambert          6        0.09%     1        0.49%
Borislav Petkov          4        0.06%     1        0.49%
Veronika Kabatova        4        0.06%     2        0.98%
Nicholas Mc Guire        3        0.04%     1        0.49%
Charles Daniels          2        0.03%     1        0.49%
Amitoj Kaur Chawla       2        0.03%     1        0.49%
Greg Kroah-Hartman       2        0.03%     2        0.98%
Arnd Bergmann            2        0.03%     1        0.49%
Andreea-Cristina Bernat  1        0.01%     1        0.49%
Arvind Yadav             1        0.01%     1        0.49%
Mihaela Muraru           1        0.01%     1        0.49%
Bhaktipriya Shridhar     1        0.01%     1        0.49%
Total                    6964     100.00%   204      100.00%