/* Release 4.11 drivers/scsi/scsi_error.c */
/*
* scsi_error.c Copyright (C) 1997 Eric Youngdale
*
* SCSI error/timeout handling
* Initial versions: Eric Youngdale. Based upon conversations with
* Leonard Zubkoff and David Miller at Linux Expo,
* ideas originating from all over the place.
*
* Restructured scsi_unjam_host and associated functions.
* September 04, 2002 Mike Anderson (andmike@us.ibm.com)
*
* Forward port of Russell King's (rmk@arm.linux.org.uk) changes and
* minor cleanups.
* September 30, 2002 Mike Anderson (andmike@us.ibm.com)
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_dh.h>
#include <scsi/sg.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
#include "scsi_transport_api.h"
#include <trace/events/scsi.h>
static void scsi_eh_done(struct scsi_cmnd *scmd);
/*
* These should *probably* be handled by the host itself.
* Since it is allowed to sleep, it probably should.
*/
#define BUS_RESET_SETTLE_TIME (10)
#define HOST_RESET_SETTLE_TIME (10)
static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
static int scsi_try_to_abort_cmd(struct scsi_host_template *,
struct scsi_cmnd *);
/* called with shost->host_lock held */
/*
 * scsi_eh_wakeup - wake the error handler thread if it has work to do
 * @shost: host whose error handler may need waking
 *
 * Caller holds shost->host_lock (see comment above).  The handler is
 * only woken once every outstanding command on the host has failed,
 * i.e. host_busy has dropped to host_failed.
 */
void scsi_eh_wakeup(struct Scsi_Host *shost)
{
	if (atomic_read(&shost->host_busy) != shost->host_failed)
		return;

	trace_scsi_eh_wakeup(shost);
	wake_up_process(shost->ehandler);
	SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
		"Waking error handler thread\n"));
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Christoph Hellwig | 27 | 51.92% | 2 | 33.33% |
 * Hannes Reinecke | 19 | 36.54% | 2 | 33.33% |
 * Keiichiro Tokunaga | 5 | 9.62% | 1 | 16.67% |
 * James Bottomley | 1 | 1.92% | 1 | 16.67% |
 * Total | 52 | 100.00% | 6 | 100.00% |
 */
/**
* scsi_schedule_eh - schedule EH for SCSI host
* @shost: SCSI host to invoke error handling on.
*
* Schedule SCSI EH without scmd.
*/
/**
 * scsi_schedule_eh - schedule EH for SCSI host
 * @shost: SCSI host to invoke error handling on.
 *
 * Schedule SCSI EH without scmd.
 */
void scsi_schedule_eh(struct Scsi_Host *shost)
{
	unsigned long flags;
	int in_recovery;

	spin_lock_irqsave(shost->host_lock, flags);

	/* Move to a recovery state; either transition may already hold. */
	in_recovery = scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
		      scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0;
	if (in_recovery) {
		shost->host_eh_scheduled++;
		scsi_eh_wakeup(shost);
	}

	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Hannes Reinecke | 64 | 100.00% | 1 | 100.00% |
 * Total | 64 | 100.00% | 1 | 100.00% |
 */
EXPORT_SYMBOL_GPL(scsi_schedule_eh);
/*
 * scsi_host_eh_past_deadline - check whether the EH deadline has expired
 * @shost: host to check
 *
 * Returns 1 when the host's error-handling deadline has passed and
 * recovery should be given up, 0 when EH may keep trying or deadline
 * tracking is disabled (eh_deadline == -1) / not started (no last_reset).
 */
static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
{
	/* Deadline clock not started, or feature switched off. */
	if (!shost->last_reset || shost->eh_deadline == -1)
		return 0;
	/*
	 * 32bit accesses are guaranteed to be atomic
	 * (on all supported architectures), so instead
	 * of using a spinlock we can as well double check
	 * if eh_deadline has been set to 'off' during the
	 * time_before call.
	 */
	if (time_before(jiffies, shost->last_reset + shost->eh_deadline) &&
	    shost->eh_deadline > -1)
		return 0;
	return 1;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Hannes Reinecke | 50 | 87.72% | 2 | 66.67% |
 * Ren Mingxin | 7 | 12.28% | 1 | 33.33% |
 * Total | 57 | 100.00% | 3 | 100.00% |
 */
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
*/
/**
 * scmd_eh_abort_handler - Handle command aborts
 * @work: command to be aborted.
 *
 * Runs from shost->tmf_work_q after scsi_abort_command() queued the
 * command's abort_work.  Attempts an asynchronous abort through the
 * LLD; on success the command is either retried or finished, on
 * failure (or past the EH deadline) it is escalated to the regular
 * error handler via scsi_eh_scmd_add().
 */
void
scmd_eh_abort_handler(struct work_struct *work)
{
	struct scsi_cmnd *scmd =
		container_of(work, struct scsi_cmnd, abort_work.work);
	struct scsi_device *sdev = scmd->device;
	int rtn;

	if (scsi_host_eh_past_deadline(sdev->host)) {
		/* Deadline passed: skip the abort, escalate below. */
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "eh timeout, not aborting\n"));
	} else {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "aborting command\n"));
		rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
		if (rtn == SUCCESS) {
			set_host_byte(scmd, DID_TIME_OUT);
			if (scsi_host_eh_past_deadline(sdev->host)) {
				/* Aborted, but too late to retry. */
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_INFO, scmd,
						    "eh timeout, not retrying "
						    "aborted command\n"));
			} else if (!scsi_noretry_cmd(scmd) &&
			    (++scmd->retries <= scmd->allowed)) {
				/* Retries left: requeue and we are done. */
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_WARNING, scmd,
						    "retry aborted command\n"));
				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
				return;
			} else {
				/* Out of retries: complete with DID_TIME_OUT. */
				SCSI_LOG_ERROR_RECOVERY(3,
					scmd_printk(KERN_WARNING, scmd,
						    "finish aborted command\n"));
				scsi_finish_command(scmd);
				return;
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
				scmd_printk(KERN_INFO, scmd,
					    "cmd abort %s\n",
					    (rtn == FAST_IO_FAIL) ?
					    "not send" : "failed"));
		}
	}

	/*
	 * Abort failed or was skipped: hand the command to the error
	 * handler.  If even that fails (no EH thread), terminate it.
	 */
	if (!scsi_eh_scmd_add(scmd, 0)) {
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_WARNING, scmd,
				    "terminate aborted command\n"));
		set_host_byte(scmd, DID_TIME_OUT);
		scsi_finish_command(scmd);
	}
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Hannes Reinecke | 229 | 85.77% | 3 | 60.00% |
 * Ren Mingxin | 30 | 11.24% | 1 | 20.00% |
 * Ulrich Obergfell | 8 | 3.00% | 1 | 20.00% |
 * Total | 267 | 100.00% | 5 | 100.00% |
 */
/**
* scsi_abort_command - schedule a command abort
* @scmd: scmd to abort.
*
* We only need to abort commands after a command timeout
*/
/**
 * scsi_abort_command - schedule a command abort
 * @scmd:	scmd to abort.
 *
 * We only need to abort commands after a command timeout
 *
 * Returns SUCCESS when the async abort was scheduled, FAILED when it
 * cannot be attempted (a previous abort already failed, or the host is
 * already in full error recovery) and the caller must escalate.
 */
static int
scsi_abort_command(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
		/*
		 * Retry after abort failed, escalate to next level.
		 */
		scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED;
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "previous abort failed\n"));
		BUG_ON(delayed_work_pending(&scmd->abort_work));
		return FAILED;
	}

	/*
	 * Do not try a command abort if
	 * SCSI EH has already started.
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_in_recovery(shost)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		SCSI_LOG_ERROR_RECOVERY(3,
			scmd_printk(KERN_INFO, scmd,
				    "not aborting, host in recovery\n"));
		return FAILED;
	}

	/* Start the EH deadline clock if it is not already running. */
	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;
	spin_unlock_irqrestore(shost->host_lock, flags);

	scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
	SCSI_LOG_ERROR_RECOVERY(3,
		scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
	/* Runs scmd_eh_abort_handler() after a short (HZ/100) delay. */
	queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
	return SUCCESS;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Hannes Reinecke | 149 | 77.60% | 4 | 44.44% |
 * Tejun Heo | 28 | 14.58% | 2 | 22.22% |
 * Christoph Hellwig | 8 | 4.17% | 1 | 11.11% |
 * Bart Van Assche | 4 | 2.08% | 1 | 11.11% |
 * Ren Mingxin | 3 | 1.56% | 1 | 11.11% |
 * Total | 192 | 100.00% | 9 | 100.00% |
 */
/**
* scsi_eh_scmd_add - add scsi cmd to error handling.
* @scmd: scmd to run eh on.
* @eh_flag: optional SCSI_EH flag.
*
* Return value:
* 0 on failure.
*/
/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd:	scmd to run eh on.
 * @eh_flag:	optional SCSI_EH flag.
 *
 * Return value:
 *	0 on failure.
 */
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;
	int ret = 0;

	/* No error handler thread, nothing we can do. */
	if (!shost->ehandler)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);
	/* Enter recovery; fall back to CANCEL_RECOVERY if cancelling. */
	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
			goto out_unlock;

	/* Start the EH deadline clock if it is not already running. */
	if (shost->eh_deadline != -1 && !shost->last_reset)
		shost->last_reset = jiffies;

	ret = 1;
	/* An async abort was already tried; don't cancel it again. */
	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
		eh_flag &= ~SCSI_EH_CANCEL_CMD;
	scmd->eh_eflags |= eh_flag;
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	shost->host_failed++;
	/* Wakes the EH thread once all outstanding commands have failed. */
	scsi_eh_wakeup(shost);
out_unlock:
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Mike Anderson | 81 | 52.94% | 2 | 22.22% |
 * James Bottomley | 35 | 22.88% | 2 | 22.22% |
 * Hannes Reinecke | 30 | 19.61% | 2 | 22.22% |
 * Christoph Hellwig | 4 | 2.61% | 2 | 22.22% |
 * Ren Mingxin | 3 | 1.96% | 1 | 11.11% |
 * Total | 153 | 100.00% | 9 | 100.00% |
 */
/**
* scsi_times_out - Timeout function for normal scsi commands.
* @req: request that is timing out.
*
* Notes:
* We do not need to lock this. There is the potential for a race
* only in that the normal completion handling might run, but if the
* normal completion function determines that the timer has already
* fired, then it mustn't do anything.
*/
/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @req:	request that is timing out.
 *
 * Notes:
 *     We do not need to lock this. There is the potential for a race
 *     only in that the normal completion handling might run, but if the
 *     normal completion function determines that the timer has already
 *     fired, then it mustn't do anything.
 */
enum blk_eh_timer_return scsi_times_out(struct request *req)
{
	struct scsi_cmnd *scmd = req->special;
	enum blk_eh_timer_return rtn = BLK_EH_NOT_HANDLED;
	struct Scsi_Host *host = scmd->device->host;

	trace_scsi_dispatch_cmd_timeout(scmd);
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	/* Start the EH deadline clock on the first timeout. */
	if (host->eh_deadline != -1 && !host->last_reset)
		host->last_reset = jiffies;

	/* Give the transport/LLD a first shot at handling the timeout. */
	if (host->hostt->eh_timed_out)
		rtn = host->hostt->eh_timed_out(scmd);

	if (rtn == BLK_EH_NOT_HANDLED) {
		/* Prefer an async abort over full host-based recovery. */
		if (!host->hostt->no_async_abort &&
		    scsi_abort_command(scmd) == SUCCESS)
			return BLK_EH_NOT_HANDLED;

		set_host_byte(scmd, DID_TIME_OUT);
		/*
		 * If the command cannot be handed to EH (no handler
		 * thread), report it handled so the block layer stops
		 * waiting for it.
		 */
		if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
			rtn = BLK_EH_HANDLED;
	}

	return rtn;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Hannes Reinecke | 44 | 31.65% | 3 | 16.67% |
 * James Bottomley | 18 | 12.95% | 2 | 11.11% |
 * Jens Axboe | 18 | 12.95% | 1 | 5.56% |
 * Jesper Juhl | 11 | 7.91% | 1 | 5.56% |
 * Linus Torvalds (pre-git) | 10 | 7.19% | 1 | 5.56% |
 * Christoph Hellwig | 8 | 5.76% | 2 | 11.11% |
 * Mike Anderson | 8 | 5.76% | 2 | 11.11% |
 * Patrick Mansfield | 7 | 5.04% | 2 | 11.11% |
 * Keiichiro Tokunaga | 5 | 3.60% | 1 | 5.56% |
 * Ulrich Obergfell | 4 | 2.88% | 1 | 5.56% |
 * Ren Mingxin | 3 | 2.16% | 1 | 5.56% |
 * Martin K. Petersen | 3 | 2.16% | 1 | 5.56% |
 * Total | 139 | 100.00% | 18 | 100.00% |
 */
/**
* scsi_block_when_processing_errors - Prevent cmds from being queued.
* @sdev: Device on which we are performing recovery.
*
* Description:
* We block until the host is out of error recovery, and then check to
* see whether the host or the device is offline.
*
* Return value:
* 0 when dev was taken offline by error recovery. 1 OK to proceed.
*/
/**
 * scsi_block_when_processing_errors - Prevent cmds from being queued.
 * @sdev: Device on which we are performing recovery.
 *
 * Sleeps until the host has left error recovery, then reports whether
 * the device survived.
 *
 * Return value:
 *	0 when dev was taken offline by error recovery. 1 OK to proceed.
 */
int scsi_block_when_processing_errors(struct scsi_device *sdev)
{
	int rtn;

	wait_event(sdev->host->host_wait,
		   !scsi_host_in_recovery(sdev->host));

	rtn = scsi_device_online(sdev);

	SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
		"%s: rtn: %d\n", __func__, rtn));
	return rtn;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Linus Torvalds (pre-git) | 30 | 51.72% | 2 | 20.00% |
 * James Bottomley | 14 | 24.14% | 2 | 20.00% |
 * Hannes Reinecke | 5 | 8.62% | 1 | 10.00% |
 * Mike Anderson | 3 | 5.17% | 1 | 10.00% |
 * Christoph Hellwig | 3 | 5.17% | 2 | 20.00% |
 * Alan Cox | 2 | 3.45% | 1 | 10.00% |
 * Harvey Harrison | 1 | 1.72% | 1 | 10.00% |
 * Total | 58 | 100.00% | 10 | 100.00% |
 */
EXPORT_SYMBOL(scsi_block_when_processing_errors);
#ifdef CONFIG_SCSI_LOGGING
/**
* scsi_eh_prt_fail_stats - Log info on failures.
* @shost: scsi host being recovered.
* @work_q: Queue of scsi cmds to process.
*/
/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost: scsi host being recovered.
 * @work_q: Queue of scsi cmds to process.
 *
 * Walks every device on the host, counting how many commands on the
 * EH work queue belong to it, and logs per-device and host totals.
 */
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total = 0;
	int nr_failed = 0;
	int nr_cancel = 0;
	int nr_devs = 0;

	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device != sdev)
				continue;
			++total;
			if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
				++nr_cancel;
			else
				++nr_failed;
		}

		if (!nr_cancel && !nr_failed)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3,
			shost_printk(KERN_INFO, shost,
				    "%s: cmds failed: %d, cancel: %d\n",
				    __func__, nr_failed,
				    nr_cancel));
		/* Reset the per-device counters for the next device. */
		nr_cancel = 0;
		nr_failed = 0;
		++nr_devs;
	}

	SCSI_LOG_ERROR_RECOVERY(2, shost_printk(KERN_INFO, shost,
				   "Total of %d commands on %d"
				   " devices require eh work\n",
				   total, nr_devs));
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Mike Anderson | 114 | 75.50% | 3 | 21.43% |
 * Linus Torvalds (pre-git) | 16 | 10.60% | 3 | 21.43% |
 * Hannes Reinecke | 7 | 4.64% | 2 | 14.29% |
 * Christoph Hellwig | 5 | 3.31% | 2 | 14.29% |
 * Doug Ledford | 4 | 2.65% | 1 | 7.14% |
 * James Bottomley | 2 | 1.32% | 1 | 7.14% |
 * Alan Cox | 2 | 1.32% | 1 | 7.14% |
 * Harvey Harrison | 1 | 0.66% | 1 | 7.14% |
 * Total | 151 | 100.00% | 14 | 100.00% |
 */
#endif
/**
* scsi_report_lun_change - Set flag on all *other* devices on the same target
* to indicate that a UNIT ATTENTION is expected.
* @sdev: Device reporting the UNIT ATTENTION
*/
static void scsi_report_lun_change(struct scsi_device *sdev)
{
	/* Siblings seeing UA 3F/0E will be retried (see scsi_check_sense). */
	sdev->sdev_target->expecting_lun_change = 1;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Ewan D. Milne | 19 | 100.00% | 1 | 100.00% |
 * Total | 19 | 100.00% | 1 | 100.00% |
 */
/**
* scsi_report_sense - Examine scsi sense information and log messages for
* certain conditions, also issue uevents for some of them.
* @sdev: Device reporting the sense code
* @sshdr: sshdr to be examined
*/
static void scsi_report_sense(struct scsi_device *sdev,
struct scsi_sense_hdr *sshdr)
{
enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */
if (sshdr->sense_key == UNIT_ATTENTION) {
if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
"Inquiry data has changed");
} else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
scsi_report_lun_change(sdev);
sdev_printk(KERN_WARNING, sdev,
"Warning! Received an indication that the "
"LUN assignments on this target have "
"changed. The Linux SCSI layer does not "
"automatically remap LUN assignments.\n");
} else if (sshdr->asc == 0x3f)
sdev_printk(KERN_WARNING, sdev,
"Warning! Received an indication that the "
"operating parameters on this target have "
"changed. The Linux SCSI layer does not "
"automatically adjust these parameters.\n");
if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
sdev_printk(KERN_WARNING, sdev,
"Warning! Received an indication that the "
"LUN reached a thin provisioning soft "
"threshold.\n");
}
if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
"Mode parameters changed");
} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
"Asymmetric access state changed");
} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
sdev_printk(KERN_WARNING, sdev,
"Capacity data has changed");
} else if (sshdr->asc == 0x2a)
sdev_printk(KERN_WARNING, sdev,
"Parameters changed");
}
if (evt_type != SDEV_EVT_MAXBITS) {
set_bit(evt_type, sdev->pending_events);
schedule_work(&sdev->event_work);
}
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Ewan D. Milne | 254 | 89.44% | 1 | 50.00% |
 * Hannes Reinecke | 30 | 10.56% | 1 | 50.00% |
 * Total | 284 | 100.00% | 2 | 100.00% |
 */
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
*
* Return value:
* SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
*
* Notes:
* When a deferred error is detected the current command has
* not been executed and needs retrying.
*/
int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_device *sdev = scmd->device;
	struct scsi_sense_hdr sshdr;

	if (! scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	/* Log / issue uevents for notable sense codes. */
	scsi_report_sense(sdev, &sshdr);

	/* A deferred error means the command was never executed. */
	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	/* Give an attached device handler (multipath etc.) first refusal. */
	if (sdev->handler && sdev->handler->check_sense) {
		int rc;

		rc = sdev->handler->check_sense(sdev, &sshdr);
		if (rc != SCSI_RETURN_NOT_HANDLED)
			return rc;
		/* handler does not care. Drop down to default handling */
	}

	if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
		/*
		 * nasty: for mid-layer issued TURs, we need to return the
		 * actual sense data without any recovery attempt. For eh
		 * issued ones, we need to try to recover and interpret
		 */
		return SUCCESS;

	/*
	 * Previous logic looked for FILEMARK, EOM or ILI which are
	 * mainly associated with tapes and returned SUCCESS.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: look for "stream commands sense data
		 * descriptor" (see SSC-3). Assume single sense data
		 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		return /* soft_error */ SUCCESS;

	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10) /* DIF */
			return SUCCESS;
		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * if we are expecting a cc/ua because of a bus reset that we
		 * performed, treat this just as a retry. otherwise this is
		 * information that we should pass up to the upper-level driver
		 * so that we can deal with it there.
		 */
		if (scmd->device->expecting_cc_ua) {
			/*
			 * Because some device does not queue unit
			 * attentions correctly, we carefully check
			 * additional sense code and qualifier so as
			 * not to squash media change unit attention.
			 */
			if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
				scmd->device->expecting_cc_ua = 0;
				return NEEDS_RETRY;
			}
		}
		/*
		 * we might also expect a cc/ua if another LUN on the target
		 * reported a UA with an ASC/ASCQ of 3F 0E -
		 * REPORTED LUNS DATA HAS CHANGED.
		 */
		if (scmd->device->sdev_target->expecting_lun_change &&
		    sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
			return NEEDS_RETRY;
		/*
		 * if the device is in the process of becoming ready, we
		 * should retry.
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * if the device is not started, we need to wake
		 * the error handler to start the motor
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		/*
		 * Pass the UA upwards for a determination in the completion
		 * functions.
		 */
		return SUCCESS;

		/* these are not supported */
	case DATA_PROTECT:
		if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
			/* Thin provisioning hard threshold reached */
			set_host_byte(scmd, DID_ALLOC_FAILURE);
			return SUCCESS;
		}
		/* fall through - treat as a target failure like the cases below */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
	case BLANK_CHECK:
		set_host_byte(scmd, DID_TARGET_FAILURE);
		return SUCCESS;

	case MEDIUM_ERROR:
		if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
		    sshdr.asc == 0x13 || /* AMNF DATA FIELD */
		    sshdr.asc == 0x14) { /* RECORD NOT FOUND */
			set_host_byte(scmd, DID_MEDIUM_ERROR);
			return SUCCESS;
		}
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		if (scmd->device->retry_hwerror)
			return ADD_TO_MLQUEUE;
		else
			set_host_byte(scmd, DID_TARGET_FAILURE);
		/* fall through - return SUCCESS with the host byte set above */
	case ILLEGAL_REQUEST:
		if (sshdr.asc == 0x20 || /* Invalid command operation code */
		    sshdr.asc == 0x21 || /* Logical block address out of range */
		    sshdr.asc == 0x24 || /* Invalid field in cdb */
		    sshdr.asc == 0x26) { /* Parameter value invalid */
			set_host_byte(scmd, DID_TARGET_FAILURE);
		}
		return SUCCESS;

	default:
		return SUCCESS;
	}
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Mike Anderson | 105 | 20.47% | 2 | 8.70% |
 * Douglas Gilbert | 79 | 15.40% | 1 | 4.35% |
 * Hannes Reinecke | 64 | 12.48% | 4 | 17.39% |
 * Chandra Seetharaman | 47 | 9.16% | 1 | 4.35% |
 * Mike Snitzer | 35 | 6.82% | 1 | 4.35% |
 * Ewan D. Milne | 34 | 6.63% | 1 | 4.35% |
 * Linus Torvalds (pre-git) | 27 | 5.26% | 1 | 4.35% |
 * Luben Tuikov | 27 | 5.26% | 1 | 4.35% |
 * Christoph Hellwig | 27 | 5.26% | 4 | 17.39% |
 * Brian King | 22 | 4.29% | 1 | 4.35% |
 * TARUISI Hiroaki | 17 | 3.31% | 1 | 4.35% |
 * Alan Stern | 14 | 2.73% | 2 | 8.70% |
 * Martin K. Petersen | 12 | 2.34% | 1 | 4.35% |
 * James Bottomley | 2 | 0.39% | 1 | 4.35% |
 * Jesper Juhl | 1 | 0.19% | 1 | 4.35% |
 * Total | 513 | 100.00% | 23 | 100.00% |
 */
EXPORT_SYMBOL_GPL(scsi_check_sense);
/*
 * scsi_handle_queue_ramp_up - slowly raise queue depth after a quiet period
 * @sdev: device that just completed a command with GOOD status
 *
 * No-op unless the host template tracks queue depth, the device is
 * below its maximum depth, and the ramp-up period has elapsed since
 * both the last ramp-up and the last QUEUE FULL.
 */
static void scsi_handle_queue_ramp_up(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *other;

	if (!sht->track_queue_depth ||
	    sdev->queue_depth >= sdev->max_queue_depth)
		return;

	/* Too soon after the last ramp-up? */
	if (time_before(jiffies,
			sdev->last_queue_ramp_up + sdev->queue_ramp_up_period))
		return;

	/* Too soon after the last QUEUE FULL? */
	if (time_before(jiffies,
			sdev->last_queue_full_time + sdev->queue_ramp_up_period))
		return;

	/*
	 * Walk all devices of a target and do
	 * ramp up on them.
	 */
	shost_for_each_device(other, sdev->host) {
		if (other->channel != sdev->channel)
			continue;
		if (other->id != sdev->id)
			continue;
		if (other->queue_depth == sdev->max_queue_depth)
			continue;
		scsi_change_queue_depth(other, other->queue_depth + 1);
		sdev->last_queue_ramp_up = jiffies;
	}
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Vasu Dev | 126 | 98.44% | 1 | 33.33% |
 * Christoph Hellwig | 2 | 1.56% | 2 | 66.67% |
 * Total | 128 | 100.00% | 3 | 100.00% |
 */
/*
 * scsi_handle_queue_full - react to a QUEUE FULL status
 * @sdev: device that reported QUEUE FULL
 *
 * Lowers the queue depth on every device of the same target, if the
 * host template tracks queue depth at all.
 */
static void scsi_handle_queue_full(struct scsi_device *sdev)
{
	struct scsi_host_template *sht = sdev->host->hostt;
	struct scsi_device *other;

	if (!sht->track_queue_depth)
		return;

	shost_for_each_device(other, sdev->host) {
		if (other->channel != sdev->channel)
			continue;
		if (other->id != sdev->id)
			continue;
		/*
		 * We do not know the number of commands that were at
		 * the device when we got the queue full so we start
		 * from the highest possible value and work our way down.
		 */
		scsi_track_queue_full(other, other->queue_depth - 1);
	}
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Mike Christie | 72 | 97.30% | 1 | 50.00% |
 * Christoph Hellwig | 2 | 2.70% | 1 | 50.00% |
 * Total | 74 | 100.00% | 2 | 100.00% |
 */
/**
* scsi_eh_completed_normally - Disposition a eh cmd on return from LLD.
* @scmd: SCSI cmd to examine.
*
* Notes:
* This is *only* called when we are examining the status of commands
* queued during error recovery. the main difference here is that we
* don't allow for the possibility of retries here, and we are a lot
* more restrictive about what we consider acceptable.
*/
static int scsi_eh_completed_normally(struct scsi_cmnd *scmd)
{
	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	if (host_byte(scmd->result) == DID_RESET) {
		/*
		 * rats. we are already in the error handler, so we now
		 * get to try and figure out what to do next. if the sense
		 * is valid, we have a pretty good idea of what to do.
		 * if not, we mark it as FAILED.
		 */
		return scsi_check_sense(scmd);
	}
	if (host_byte(scmd->result) != DID_OK)
		return FAILED;

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * now, check the status byte to see if this indicates
	 * anything special.
	 */
	switch (status_byte(scmd->result)) {
	case GOOD:
		scsi_handle_queue_ramp_up(scmd->device);
		/* fall through - GOOD and COMMAND_TERMINATED both succeed */
	case COMMAND_TERMINATED:
		return SUCCESS;
	case CHECK_CONDITION:
		return scsi_check_sense(scmd);
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
		/*
		 * who knows? FIXME(eric)
		 */
		return SUCCESS;
	case RESERVATION_CONFLICT:
		if (scmd->cmnd[0] == TEST_UNIT_READY)
			/* it is a success, we probed the device and
			 * found it */
			return SUCCESS;
		/* otherwise, we failed to send the command */
		return FAILED;
	case QUEUE_FULL:
		scsi_handle_queue_full(scmd->device);
		/* fall through */
	case BUSY:
		return NEEDS_RETRY;
	default:
		return FAILED;
	}
	/* not reached: every switch arm above returns */
	return FAILED;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Mike Anderson | 87 | 55.41% | 1 | 9.09% |
 * Linus Torvalds (pre-git) | 22 | 14.01% | 2 | 18.18% |
 * James Bottomley | 16 | 10.19% | 1 | 9.09% |
 * Mike Christie | 10 | 6.37% | 1 | 9.09% |
 * Vasu Dev | 7 | 4.46% | 1 | 9.09% |
 * Linus Torvalds | 4 | 2.55% | 1 | 9.09% |
 * Michael Reed | 4 | 2.55% | 1 | 9.09% |
 * Hannes Reinecke | 3 | 1.91% | 1 | 9.09% |
 * Christoph Hellwig | 2 | 1.27% | 1 | 9.09% |
 * Alan Cox | 2 | 1.27% | 1 | 9.09% |
 * Total | 157 | 100.00% | 11 | 100.00% |
 */
/**
* scsi_eh_done - Completion function for error handling.
* @scmd: Cmd that is done.
*/
/**
 * scsi_eh_done - Completion function for error handling.
 * @scmd: Cmd that is done.
 *
 * Signals the host's eh_action completion (if any) so the thread
 * waiting on the EH-issued command can proceed.
 */
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
	struct completion *eh_action = scmd->device->host->eh_action;

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
			"%s result: %x\n", __func__, scmd->result));

	if (eh_action)
		complete(eh_action);
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Mike Anderson | 18 | 32.73% | 1 | 11.11% |
 * Michael Reed | 15 | 27.27% | 1 | 11.11% |
 * Linus Torvalds (pre-git) | 10 | 18.18% | 1 | 11.11% |
 * Hannes Reinecke | 6 | 10.91% | 2 | 22.22% |
 * Luben Tuikov | 2 | 3.64% | 1 | 11.11% |
 * Christoph Hellwig | 2 | 3.64% | 1 | 11.11% |
 * Harvey Harrison | 1 | 1.82% | 1 | 11.11% |
 * Alan Cox | 1 | 1.82% | 1 | 11.11% |
 * Total | 55 | 100.00% | 9 | 100.00% |
 */
/**
* scsi_try_host_reset - ask host adapter to reset itself
* @scmd: SCSI cmd to send host reset.
*/
/**
 * scsi_try_host_reset - ask host adapter to reset itself
 * @scmd: SCSI cmd to send host reset.
 *
 * On success, waits for the settle delay (unless the template skips
 * it) and reports a bus reset on the command's channel.
 */
static int scsi_try_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;
	unsigned long flags;
	int rtn;

	SCSI_LOG_ERROR_RECOVERY(3,
		shost_printk(KERN_INFO, host, "Snd Host RST\n"));

	if (!hostt->eh_host_reset_handler)
		return FAILED;

	rtn = hostt->eh_host_reset_handler(scmd);
	if (rtn != SUCCESS)
		return rtn;

	if (!hostt->skip_settle_delay)
		ssleep(HOST_RESET_SETTLE_TIME);
	spin_lock_irqsave(host->host_lock, flags);
	scsi_report_bus_reset(host, scmd_channel(scmd));
	spin_unlock_irqrestore(host->host_lock, flags);

	return rtn;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Brian King | 54 | 44.26% | 1 | 11.11% |
 * Christoph Hellwig | 29 | 23.77% | 4 | 44.44% |
 * Jesper Juhl | 20 | 16.39% | 1 | 11.11% |
 * Mike Anderson | 10 | 8.20% | 1 | 11.11% |
 * Hannes Reinecke | 5 | 4.10% | 1 | 11.11% |
 * Mike Christie | 4 | 3.28% | 1 | 11.11% |
 * Total | 122 | 100.00% | 9 | 100.00% |
 */
/**
* scsi_try_bus_reset - ask host to perform a bus reset
* @scmd: SCSI cmd to send bus reset.
*/
/**
 * scsi_try_bus_reset - ask host to perform a bus reset
 * @scmd: SCSI cmd to send bus reset.
 *
 * On success, waits for the settle delay (unless the template skips
 * it) and reports a bus reset on the command's channel.
 */
static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;
	unsigned long flags;
	int rtn;

	SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
		"%s: Snd Bus RST\n", __func__));

	if (!hostt->eh_bus_reset_handler)
		return FAILED;

	rtn = hostt->eh_bus_reset_handler(scmd);
	if (rtn != SUCCESS)
		return rtn;

	if (!hostt->skip_settle_delay)
		ssleep(BUS_RESET_SETTLE_TIME);
	spin_lock_irqsave(host->host_lock, flags);
	scsi_report_bus_reset(host, scmd_channel(scmd));
	spin_unlock_irqrestore(host->host_lock, flags);

	return rtn;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Brian King | 62 | 50.00% | 1 | 10.00% |
 * Jesper Juhl | 20 | 16.13% | 1 | 10.00% |
 * Mike Anderson | 18 | 14.52% | 1 | 10.00% |
 * Christoph Hellwig | 12 | 9.68% | 1 | 10.00% |
 * Hannes Reinecke | 5 | 4.03% | 1 | 10.00% |
 * Linus Torvalds (pre-git) | 4 | 3.23% | 2 | 20.00% |
 * Patrick Mansfield | 2 | 1.61% | 2 | 20.00% |
 * Harvey Harrison | 1 | 0.81% | 1 | 10.00% |
 * Total | 124 | 100.00% | 10 | 100.00% |
 */
/*
 * __scsi_report_device_reset - per-device callback for target iteration
 * @sdev: device that was reset
 * @data: unused; required by the __starget_for_each_device() signature
 *
 * Marks the device as reset and expecting a CHECK CONDITION / UNIT
 * ATTENTION on the next command (consumed by scsi_check_sense()).
 */
static void __scsi_report_device_reset(struct scsi_device *sdev, void *data)
{
	sdev->was_reset = 1;
	sdev->expecting_cc_ua = 1;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Mike Christie | 27 | 100.00% | 1 | 100.00% |
 * Total | 27 | 100.00% | 1 | 100.00% |
 */
/**
* scsi_try_target_reset - Ask host to perform a target reset
* @scmd: SCSI cmd used to send a target reset
*
* Notes:
* There is no timeout for this operation. if this operation is
* unreliable for a given host, then the host itself needs to put a
* timer on it, and set the host back to a consistent state prior to
* returning.
*/
/**
 * scsi_try_target_reset - Ask host to perform a target reset
 * @scmd: SCSI cmd used to send a target reset
 *
 * Notes:
 *    There is no timeout for this operation. if this operation is
 *    unreliable for a given host, then the host itself needs to put a
 *    timer on it, and set the host back to a consistent state prior to
 *    returning.
 */
static int scsi_try_target_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *host = scmd->device->host;
	struct scsi_host_template *hostt = host->hostt;
	unsigned long flags;
	int rtn;

	if (!hostt->eh_target_reset_handler)
		return FAILED;

	rtn = hostt->eh_target_reset_handler(scmd);
	if (rtn != SUCCESS)
		return rtn;

	/* Flag every LUN behind the target as reset / expecting a UA. */
	spin_lock_irqsave(host->host_lock, flags);
	__starget_for_each_device(scsi_target(scmd->device), NULL,
				  __scsi_report_device_reset);
	spin_unlock_irqrestore(host->host_lock, flags);

	return rtn;
}
/*
 * Contributors
 * Person | Tokens | Prop | Commits | CommitProp |
 * Brian King | 35 | 35.00% | 1 | 20.00% |
 * Mike Christie | 34 | 34.00% | 1 | 20.00% |
 * Jesper Juhl | 17 | 17.00% | 1 | 20.00% |
 * Mike Anderson | 13 | 13.00% | 1 | 20.00% |
 * Linus Torvalds (pre-git) | 1 | 1.00% | 1 | 20.00% |
 * Total | 100 | 100.00% | 5 | 100.00% |
 */
/**
* scsi_try_bus_device_reset - Ask host to perform a BDR on a dev
* @scmd: SCSI cmd used to send BDR
*
* Notes:
* There is no timeout for this operation. if this operation is
* unreliable for a given host, then the host itself needs to put a
* timer on it, and set the host back to a consistent state prior to
* returning.
*/
static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
{
int rtn;
struct scsi_host_template *hostt = scmd->device->host->hostt;
if (!hostt->eh_device_reset_handler)
return FAILED;
rtn = hostt->eh_device_reset_handler(scmd);
if (rtn == SUCCESS)
__scsi_report_device_reset(scmd->device, NULL)