Release 4.11 drivers/misc/mei/init.c
/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
const char *mei_dev_state_str(int state)
{
#define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
        switch (state) {
        MEI_DEV_STATE(INITIALIZING);
        MEI_DEV_STATE(INIT_CLIENTS);
        MEI_DEV_STATE(ENABLED);
        MEI_DEV_STATE(RESETTING);
        MEI_DEV_STATE(DISABLED);
        MEI_DEV_STATE(POWER_DOWN);
        MEI_DEV_STATE(POWER_UP);
        default:
                return "unknown";
        }
#undef MEI_DEV_STATE
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 63 | 96.92% | 1 | 33.33% |
| Bill Nottingham | 1 | 1.54% | 1 | 33.33% |
| Masanari Iida | 1 | 1.54% | 1 | 33.33% |
| Total | 65 | 100.00% | 3 | 100.00% |
const char *mei_pg_state_str(enum mei_pg_state state)
{
#define MEI_PG_STATE(state) case MEI_PG_##state: return #state
        switch (state) {
        MEI_PG_STATE(OFF);
        MEI_PG_STATE(ON);
        default:
                return "unknown";
        }
#undef MEI_PG_STATE
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Alexander Usyskin | 41 | 100.00% | 1 | 100.00% |
| Total | 41 | 100.00% | 1 | 100.00% |
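For orientation, a minimal sketch of how these stringifiers are typically consumed: they turn enum values into readable tokens for dev_dbg()-style log lines. The helper below is hypothetical and not part of init.c; it assumes only the declarations already included by this file.

/* Hypothetical debug helper (not part of init.c): print the current
 * device state and a given power-gating state in readable form.
 */
static void my_mei_log_states(struct mei_device *dev, enum mei_pg_state pg)
{
        dev_dbg(dev->dev, "dev_state = %s pg_state = %s\n",
                mei_dev_state_str(dev->dev_state),
                mei_pg_state_str(pg));
}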
/**
 * mei_fw_status2str - convert fw status registers to printable string
 *
 * @fw_status: firmware status
 * @buf: string buffer of at least MEI_FW_STATUS_STR_SZ bytes
 * @len: buffer length, must be >= MEI_FW_STATUS_STR_SZ
 *
 * Return: number of bytes written or -EINVAL if the buffer is too small
 */
ssize_t mei_fw_status2str(struct mei_fw_status *fw_status,
                          char *buf, size_t len)
{
        ssize_t cnt = 0;
        int i;

        buf[0] = '\0';

        if (len < MEI_FW_STATUS_STR_SZ)
                return -EINVAL;

        for (i = 0; i < fw_status->count; i++)
                cnt += scnprintf(buf + cnt, len - cnt, "%08X ",
                                 fw_status->status[i]);

        /* drop last space */
        buf[cnt] = '\0';
        return cnt;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Alexander Usyskin | 90 | 100.00% | 1 | 100.00% |
| Total | 90 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(mei_fw_status2str);
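A usage sketch for the exported helper, assuming the caller already has a populated struct mei_fw_status (for example obtained from the hw-specific status read); the function name below is hypothetical.

/* Hypothetical caller sketch (not part of init.c): format firmware
 * status registers into a single log line.  The buffer must be at
 * least MEI_FW_STATUS_STR_SZ bytes, otherwise -EINVAL is returned.
 */
static void my_mei_log_fw_status(struct mei_device *dev,
                                 struct mei_fw_status *fw_status)
{
        char buf[MEI_FW_STATUS_STR_SZ];
        ssize_t cnt;

        cnt = mei_fw_status2str(fw_status, buf, sizeof(buf));
        if (cnt < 0)
                return;

        dev_dbg(dev->dev, "fw status: %s\n", buf);
}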
/**
 * mei_cancel_work - Cancel mei background jobs
 *
 * @dev: the device structure
 */
void mei_cancel_work(struct mei_device *dev)
{
        cancel_work_sync(&dev->reset_work);
        cancel_work_sync(&dev->bus_rescan_work);

        cancel_delayed_work_sync(&dev->timer_work);
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Oren Weil | 17 | 50.00% | 1 | 16.67% |
| Alexander Usyskin | 9 | 26.47% | 2 | 33.33% |
| Tomas Winkler | 8 | 23.53% | 3 | 50.00% |
| Total | 34 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(mei_cancel_work);
/**
 * mei_reset - resets host and fw.
 *
 * @dev: the device structure
 *
 * Return: 0 on success or < 0 if the reset hasn't succeeded
 */
int mei_reset(struct mei_device *dev)
{
        enum mei_dev_state state = dev->dev_state;
        bool interrupts_enabled;
        int ret;

        if (state != MEI_DEV_INITIALIZING &&
            state != MEI_DEV_DISABLED &&
            state != MEI_DEV_POWER_DOWN &&
            state != MEI_DEV_POWER_UP) {
                char fw_sts_str[MEI_FW_STATUS_STR_SZ];

                mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
                dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
                         mei_dev_state_str(state), fw_sts_str);
        }

        mei_clear_interrupts(dev);

        /* we're already in reset, cancel the init timer;
         * if the reset was called due to an hbm protocol error
         * we need to call it before hw start
         * so the hbm watchdog won't kick in
         */
        mei_hbm_idle(dev);

        /* enter reset flow */
        interrupts_enabled = state != MEI_DEV_POWER_DOWN;
        dev->dev_state = MEI_DEV_RESETTING;

        dev->reset_count++;
        if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
                dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
                dev->dev_state = MEI_DEV_DISABLED;
                return -ENODEV;
        }

        ret = mei_hw_reset(dev, interrupts_enabled);
        /* fall through and remove the sw state even if hw reset has failed */

        /* no need to clean up software state in case of power up */
        if (state != MEI_DEV_INITIALIZING &&
            state != MEI_DEV_POWER_UP) {

                /* remove all waiting requests */
                mei_cl_all_disconnect(dev);

                /* remove entry if already in list */
                dev_dbg(dev->dev, "remove iamthif from the file list.\n");
                mei_cl_unlink(&dev->iamthif_cl);
                mei_amthif_reset_params(dev);
        }

        mei_hbm_reset(dev);

        dev->rd_msg_hdr = 0;

        if (ret) {
                dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
                return ret;
        }

        if (state == MEI_DEV_POWER_DOWN) {
                dev_dbg(dev->dev, "powering down: end of reset\n");
                dev->dev_state = MEI_DEV_DISABLED;
                return 0;
        }

        ret = mei_hw_start(dev);
        if (ret) {
                dev_err(dev->dev, "hw_start failed ret = %d\n", ret);
                return ret;
        }

        dev_dbg(dev->dev, "link is established start sending messages.\n");

        dev->dev_state = MEI_DEV_INIT_CLIENTS;
        ret = mei_hbm_start_req(dev);
        if (ret) {
                dev_err(dev->dev, "hbm_start failed ret = %d\n", ret);
                dev->dev_state = MEI_DEV_RESETTING;
                return ret;
        }

        return 0;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 239 | 74.69% | 13 | 72.22% |
| Oren Weil | 51 | 15.94% | 1 | 5.56% |
| Alexander Usyskin | 30 | 9.38% | 4 | 22.22% |
| Total | 320 | 100.00% | 18 | 100.00% |
EXPORT_SYMBOL_GPL(mei_reset);
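Note that every caller of mei_reset() shown in this file (mei_start(), mei_restart(), mei_reset_work(), mei_stop()) holds dev->device_lock across the call. A minimal sketch of that calling convention, with a hypothetical wrapper name:

/* Hypothetical wrapper (not part of init.c): call mei_reset() under
 * dev->device_lock and, on failure, fall back to rescheduling the
 * driver's reset work instead of retrying inline.
 */
static int my_mei_locked_reset(struct mei_device *dev)
{
        int ret;

        mutex_lock(&dev->device_lock);
        ret = mei_reset(dev);
        mutex_unlock(&dev->device_lock);

        if (ret && dev->dev_state != MEI_DEV_DISABLED)
                schedule_work(&dev->reset_work);

        return ret;
}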
/**
 * mei_start - initializes host and fw to start work.
 *
 * @dev: the device structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_start(struct mei_device *dev)
{
        int ret;

        mutex_lock(&dev->device_lock);

        /* acknowledge interrupt and stop interrupts */
        mei_clear_interrupts(dev);

        mei_hw_config(dev);

        dev_dbg(dev->dev, "reset in start the mei device.\n");

        dev->reset_count = 0;
        do {
                dev->dev_state = MEI_DEV_INITIALIZING;
                ret = mei_reset(dev);

                if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
                        dev_err(dev->dev, "reset failed ret = %d", ret);
                        goto err;
                }
        } while (ret);

        /* we cannot start the device w/o hbm start message completed */
        if (dev->dev_state == MEI_DEV_DISABLED) {
                dev_err(dev->dev, "reset failed");
                goto err;
        }

        if (mei_hbm_start_wait(dev)) {
                dev_err(dev->dev, "HBM has not started");
                goto err;
        }

        if (!mei_host_is_ready(dev)) {
                dev_err(dev->dev, "host is not ready.\n");
                goto err;
        }

        if (!mei_hw_is_ready(dev)) {
                dev_err(dev->dev, "ME is not ready.\n");
                goto err;
        }

        if (!mei_hbm_version_is_supported(dev)) {
                dev_dbg(dev->dev, "MEI start failed.\n");
                goto err;
        }

        dev_dbg(dev->dev, "link layer has been established.\n");

        mutex_unlock(&dev->device_lock);
        return 0;
err:
        dev_err(dev->dev, "link layer initialization failed.\n");
        dev->dev_state = MEI_DEV_DISABLED;
        mutex_unlock(&dev->device_lock);
        return -ENODEV;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 223 | 87.11% | 9 | 90.00% |
| Oren Weil | 33 | 12.89% | 1 | 10.00% |
| Total | 256 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL_GPL(mei_start);
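A sketch of where mei_start() sits in a hw-glue probe path. The names my_mei_probe_core and my_hw_ops are hypothetical, and the hardware-specific setup (interrupts, register mapping, device registration) is deliberately elided.

/* Hypothetical probe-path sketch (not part of init.c): initialize the
 * generic state with mei_device_init(), then bring the link up with
 * mei_start(), which loops over mei_reset() until HBM negotiation
 * succeeds or the device is disabled.
 */
static int my_mei_probe_core(struct mei_device *dev, struct device *parent,
                             const struct mei_hw_ops *my_hw_ops)
{
        mei_device_init(dev, parent, my_hw_ops);

        if (mei_start(dev))
                return -ENODEV; /* link layer could not be established */

        return 0;
}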
/**
 * mei_restart - restart device after suspend
 *
 * @dev: the device structure
 *
 * Return: 0 on success or -ENODEV if the restart hasn't succeeded
 */
int mei_restart(struct mei_device *dev)
{
        int err;

        mutex_lock(&dev->device_lock);

        dev->dev_state = MEI_DEV_POWER_UP;
        dev->reset_count = 0;

        err = mei_reset(dev);

        mutex_unlock(&dev->device_lock);

        if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
                dev_err(dev->dev, "device disabled = %d\n", err);
                return -ENODEV;
        }

        /* try to start again */
        if (err)
                schedule_work(&dev->reset_work);

        return 0;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 93 | 98.94% | 5 | 83.33% |
| Oren Weil | 1 | 1.06% | 1 | 16.67% |
| Total | 94 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(mei_restart);
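A sketch of the resume side, assuming a power-management callback in the hw glue; my_mei_resume is hypothetical.

/* Hypothetical resume sketch (not part of init.c): once the hardware
 * is powered again, mei_restart() re-runs the reset flow from the
 * MEI_DEV_POWER_UP state; transient failures are retried through
 * dev->reset_work inside mei_restart() itself.
 */
static int my_mei_resume(struct mei_device *dev)
{
        return mei_restart(dev); /* -ENODEV if the device stayed disabled */
}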
static void mei_reset_work(struct work_struct *work)
{
        struct mei_device *dev =
                container_of(work, struct mei_device, reset_work);
        int ret;

        mei_clear_interrupts(dev);
        mei_synchronize_irq(dev);

        mutex_lock(&dev->device_lock);

        ret = mei_reset(dev);

        mutex_unlock(&dev->device_lock);

        if (dev->dev_state == MEI_DEV_DISABLED) {
                dev_err(dev->dev, "device disabled = %d\n", ret);
                return;
        }

        /* retry reset in case of failure */
        if (ret)
                schedule_work(&dev->reset_work);
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 97 | 100.00% | 4 | 100.00% |
| Total | 97 | 100.00% | 4 | 100.00% |
void mei_stop(struct mei_device *dev)
{
        dev_dbg(dev->dev, "stopping the device.\n");

        mei_cl_bus_remove_devices(dev);

        mei_cancel_work(dev);

        mei_clear_interrupts(dev);
        mei_synchronize_irq(dev);

        mutex_lock(&dev->device_lock);

        dev->dev_state = MEI_DEV_POWER_DOWN;
        mei_reset(dev);
        /* move device to disabled state unconditionally */
        dev->dev_state = MEI_DEV_DISABLED;

        mutex_unlock(&dev->device_lock);
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 65 | 89.04% | 4 | 66.67% |
| Alexander Usyskin | 7 | 9.59% | 1 | 16.67% |
| Samuel Ortiz | 1 | 1.37% | 1 | 16.67% |
| Total | 73 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(mei_stop);
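A sketch of the teardown side, assuming a hw-glue remove or shutdown callback; my_mei_remove_core is hypothetical.

/* Hypothetical remove-path sketch (not part of init.c): mei_stop()
 * removes the bus devices, cancels background work via
 * mei_cancel_work(), resets the link from MEI_DEV_POWER_DOWN and
 * leaves the device in MEI_DEV_DISABLED, so only hw-specific
 * resources remain to be released.
 */
static void my_mei_remove_core(struct mei_device *dev)
{
        mei_stop(dev);
        /* hw-specific teardown (irq, mappings, ...) would follow here */
}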
/**
 * mei_write_is_idle - check if the write queues are idle
 *
 * @dev: the device structure
 *
 * Return: true if there is no pending write
 */
bool mei_write_is_idle(struct mei_device *dev)
{
        bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
                     list_empty(&dev->ctrl_wr_list) &&
                     list_empty(&dev->write_list) &&
                     list_empty(&dev->write_waiting_list));

        dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
                idle,
                mei_dev_state_str(dev->dev_state),
                list_empty(&dev->ctrl_wr_list),
                list_empty(&dev->write_list),
                list_empty(&dev->write_waiting_list));

        return idle;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 90 | 100.00% | 2 | 100.00% |
| Total | 90 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(mei_write_is_idle);
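A sketch of the kind of gate mei_write_is_idle() enables, e.g. before allowing a power-gating or suspend transition; the helper name is hypothetical and the pg_event check is just one plausible extra condition, not taken from this file.

/* Hypothetical idle-check sketch (not part of init.c): only consider
 * entering a low-power state when the device is enabled, all write
 * queues are empty and no power-gating event is in flight.  Expected
 * to run under dev->device_lock like the rest of the queue handling.
 */
static bool my_mei_may_enter_pg(struct mei_device *dev)
{
        return mei_write_is_idle(dev) && dev->pg_event == MEI_PG_EVENT_IDLE;
}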
/**
 * mei_device_init - initialize mei_device structure
 *
 * @dev: the mei device
 * @device: the device structure
 * @hw_ops: hw operations
 */
void mei_device_init(struct mei_device *dev,
                     struct device *device,
                     const struct mei_hw_ops *hw_ops)
{
        /* setup our list array */
        INIT_LIST_HEAD(&dev->file_list);
        INIT_LIST_HEAD(&dev->device_list);
        INIT_LIST_HEAD(&dev->me_clients);
        mutex_init(&dev->device_lock);
        init_rwsem(&dev->me_clients_rwsem);
        mutex_init(&dev->cl_bus_lock);
        init_waitqueue_head(&dev->wait_hw_ready);
        init_waitqueue_head(&dev->wait_pg);
        init_waitqueue_head(&dev->wait_hbm_start);
        dev->dev_state = MEI_DEV_INITIALIZING;
        dev->reset_count = 0;

        INIT_LIST_HEAD(&dev->write_list);
        INIT_LIST_HEAD(&dev->write_waiting_list);
        INIT_LIST_HEAD(&dev->ctrl_wr_list);
        INIT_LIST_HEAD(&dev->ctrl_rd_list);

        INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
        INIT_WORK(&dev->reset_work, mei_reset_work);
        INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);

        INIT_LIST_HEAD(&dev->iamthif_cl.link);
        INIT_LIST_HEAD(&dev->amthif_cmd_list);

        bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
        dev->open_handle_count = 0;

        /*
         * Reserving the first client ID
         * 0: Reserved for MEI Bus Message communications
         */
        bitmap_set(dev->host_clients_map, 0, 1);

        dev->pg_event = MEI_PG_EVENT_IDLE;
        dev->ops = hw_ops;
        dev->dev = device;
}
Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 207 | 89.61% | 8 | 66.67% |
| Alexander Usyskin | 24 | 10.39% | 4 | 33.33% |
| Total | 231 | 100.00% | 12 | 100.00% |
EXPORT_SYMBOL_GPL(mei_device_init);
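Finally, a sketch of how a backend might associate its private data with the generic struct mei_device and feed its callbacks into mei_device_init(). The struct my_hw_device, my_hw_ops and my_hw_device_setup names are hypothetical, and the embedding shown here is just one simple scheme; the in-tree hw glue organizes its private data its own way.

/* Hypothetical backend sketch (not part of init.c): embed the generic
 * device in a hw-specific structure and call mei_device_init() with
 * the backend's mei_hw_ops table before any reset/start is attempted.
 */
struct my_hw_device {
        struct mei_device mei;  /* generic MEI core state */
        void *priv;             /* hw-specific resources (illustrative) */
};

static const struct mei_hw_ops my_hw_ops;       /* backend callbacks, elided */

static void my_hw_device_setup(struct my_hw_device *hw, struct device *parent)
{
        mei_device_init(&hw->mei, parent, &my_hw_ops);
}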
Overall Contributors

| Person | Tokens | Proportion | Commits | Commit Proportion |
|---|---|---|---|---|
| Tomas Winkler | 1128 | 77.10% | 34 | 66.67% |
| Alexander Usyskin | 212 | 14.49% | 13 | 25.49% |
| Oren Weil | 120 | 8.20% | 1 | 1.96% |
| Bill Nottingham | 1 | 0.07% | 1 | 1.96% |
| Masanari Iida | 1 | 0.07% | 1 | 1.96% |
| Samuel Ortiz | 1 | 0.07% | 1 | 1.96% |
| Total | 1463 | 100.00% | 51 | 100.00% |