drivers/scsi/scsi_lib.c (Linux release 4.11)
/*
* Copyright (C) 1999 Eric Youngdale
* Copyright (C) 2014 Christoph Hellwig
*
* SCSI queueing library.
* Initial versions: Eric Youngdale (eric@andante.org).
* Based upon conversations with large numbers
* of people at Linux Expo.
*/
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>
#include <trace/events/scsi.h>
#include "scsi_priv.h"
#include "scsi_logging.h"
static struct kmem_cache *scsi_sdb_cache;
static struct kmem_cache *scsi_sense_cache;
static struct kmem_cache *scsi_sense_isadma_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);
static inline struct kmem_cache *
scsi_select_sense_cache(struct Scsi_Host *shost)
{
return shost->unchecked_isa_dma ?
scsi_sense_isadma_cache : scsi_sense_cache;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static void scsi_free_sense_buffer(struct Scsi_Host *shost,
unsigned char *sense_buffer)
{
kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 26 | 100.00% | 2 | 100.00% |
Total | 26 | 100.00% | 2 | 100.00% |
static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost,
gfp_t gfp_mask, int numa_node)
{
return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask,
numa_node);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 32 | 100.00% | 2 | 100.00% |
Total | 32 | 100.00% | 2 | 100.00% |
int scsi_init_sense_cache(struct Scsi_Host *shost)
{
struct kmem_cache *cache;
int ret = 0;
cache = scsi_select_sense_cache(shost);
if (cache)
return 0;
mutex_lock(&scsi_sense_cache_mutex);
if (shost->unchecked_isa_dma) {
scsi_sense_isadma_cache =
kmem_cache_create("scsi_sense_cache(DMA)",
SCSI_SENSE_BUFFERSIZE, 0,
SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL);
if (!scsi_sense_isadma_cache)
ret = -ENOMEM;
} else {
scsi_sense_cache =
kmem_cache_create("scsi_sense_cache",
SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!scsi_sense_cache)
ret = -ENOMEM;
}
mutex_unlock(&scsi_sense_cache_mutex);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 112 | 100.00% | 1 | 100.00% |
Total | 112 | 100.00% | 1 | 100.00% |
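As a hedged aside (not part of the file): the split into two caches matters because a host template that sets unchecked_isa_dma forces sense buffers to come from DMA-able memory via SLAB_CACHE_DMA. A minimal, illustrative host template fragment might look like the following; the name is hypothetical.

static struct scsi_host_template example_isa_template = {
	.name			= "example-isa-hba",
	/* Steers scsi_select_sense_cache() to scsi_sense_isadma_cache. */
	.unchecked_isa_dma	= 1,
};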
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs so as
* not to change behaviour from the previous unplug mechanism; experimentation
* may prove this needs changing.
*/
#define SCSI_QUEUE_DELAY 3
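A hedged note on usage (the call sites appear later in this file, beyond this excerpt): when a dispatch attempt hits a resource shortage, the queue is re-run after this many milliseconds, roughly as in the sketch below.

	/* Sketch only: delay re-running the queue by SCSI_QUEUE_DELAY msecs. */
	blk_delay_queue(q, SCSI_QUEUE_DELAY);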
static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
struct scsi_target *starget = scsi_target(device);
/*
* Set the appropriate busy bit for the device/host.
*
* If the host/device isn't busy, assume that something actually
* completed, and that we should be able to queue a command now.
*
* Note that the prior mid-layer assumption that any host could
* always queue at least one command is now broken. The mid-layer
* will implement a user specifiable stall (see
* scsi_host.max_host_blocked and scsi_device.max_device_blocked)
* if a command is requeued with no other commands outstanding
* either for the device or for the host.
*/
switch (reason) {
case SCSI_MLQUEUE_HOST_BUSY:
atomic_set(&host->host_blocked, host->max_host_blocked);
break;
case SCSI_MLQUEUE_DEVICE_BUSY:
case SCSI_MLQUEUE_EH_RETRY:
atomic_set(&device->device_blocked,
device->max_device_blocked);
break;
case SCSI_MLQUEUE_TARGET_BUSY:
atomic_set(&starget->target_blocked,
starget->max_target_blocked);
break;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 63 | 61.76% | 4 | 33.33% |
Mike Christie | 30 | 29.41% | 1 | 8.33% |
James Smart | 3 | 2.94% | 1 | 8.33% |
Tejun Heo | 2 | 1.96% | 2 | 16.67% |
Linus Torvalds | 1 | 0.98% | 1 | 8.33% |
Mike Anderson | 1 | 0.98% | 1 | 8.33% |
James Bottomley | 1 | 0.98% | 1 | 8.33% |
Bart Van Assche | 1 | 0.98% | 1 | 8.33% |
Total | 102 | 100.00% | 12 | 100.00% |
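For context, a minimal sketch of where the "reason" codes come from (this caller is illustrative and not part of scsi_lib.c; example_adapter_full() is a hypothetical helper): a low-level driver's ->queuecommand() returns a SCSI_MLQUEUE_* value when it cannot accept the command, and the midlayer eventually feeds that value into scsi_set_blocked() via scsi_queue_insert().

static int example_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	if (example_adapter_full(shost))	/* hypothetical capacity check */
		return SCSI_MLQUEUE_HOST_BUSY;	/* leads to host_blocked being set */

	cmd->result = DID_OK << 16;
	cmd->scsi_done(cmd);
	return 0;
}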
static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
blk_mq_requeue_request(cmd->request, true);
put_device(&sdev->sdev_gendev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 35 | 94.59% | 1 | 50.00% |
Bart Van Assche | 2 | 5.41% | 1 | 50.00% |
Total | 37 | 100.00% | 2 | 100.00% |
/**
* __scsi_queue_insert - private queue insertion
* @cmd: The SCSI command being requeued
* @reason: The reason for the requeue
* @unbusy: Whether the queue should be unbusied
*
* This is a private queue insertion. The public interface
* scsi_queue_insert() always assumes the queue should be unbusied
* because it's always called before the completion. This function is
* for a requeue after completion, which should only occur in this
* file.
*/
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
struct scsi_device *device = cmd->device;
struct request_queue *q = device->request_queue;
unsigned long flags;
SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
"Inserting command %p into mlqueue\n", cmd));
scsi_set_blocked(cmd, reason);
/*
* Decrement the counters, since these commands are no longer
* active on the host/device.
*/
if (unbusy)
scsi_device_unbusy(device);
/*
* Requeue this command. It will go before all other commands
* that are already in the queue. Schedule requeue work under
* lock such that the kblockd_schedule_work() call happens
* before blk_cleanup_queue() finishes.
*/
cmd->result = 0;
if (q->mq_ops) {
scsi_mq_requeue_cmd(cmd);
return;
}
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
kblockd_schedule_work(&device->requeue_work);
spin_unlock_irqrestore(q->queue_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 80 | 62.50% | 4 | 30.77% |
Tejun Heo | 13 | 10.16% | 1 | 7.69% |
Bart Van Assche | 10 | 7.81% | 1 | 7.69% |
James Bottomley | 7 | 5.47% | 2 | 15.38% |
Alan Stern | 6 | 4.69% | 1 | 7.69% |
Jens Axboe | 5 | 3.91% | 1 | 7.69% |
Linus Torvalds | 4 | 3.12% | 1 | 7.69% |
Patrick Mansfield | 2 | 1.56% | 1 | 7.69% |
Linus Torvalds (pre-git) | 1 | 0.78% | 1 | 7.69% |
Total | 128 | 100.00% | 13 | 100.00% |
/*
* Function: scsi_queue_insert()
*
* Purpose: Insert a command in the midlevel queue.
*
* Arguments: cmd - command that we are adding to queue.
* reason - why we are inserting command to queue.
*
* Lock status: Assumed that lock is not held upon entry.
*
* Returns: Nothing.
*
* Notes: We do this for one of two cases. Either the host is busy
* and it cannot accept any more commands for the time being,
* or the device returned QUEUE_FULL and can accept no more
* commands.
* Notes: This could be called either from an interrupt context or a
* normal process context.
*/
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
__scsi_queue_insert(cmd, reason, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Bottomley | 20 | 90.91% | 1 | 50.00% |
Bart Van Assche | 2 | 9.09% | 1 | 50.00% |
Total | 22 | 100.00% | 2 | 100.00% |
/**
* scsi_execute - insert request and wait for the result
* @sdev: scsi device
* @cmd: scsi command
* @data_direction: data direction
* @buffer: data buffer
* @bufflen: len of buffer
* @sense: optional sense buffer
* @sshdr: optional decoded sense header
* @timeout: request timeout in jiffies
* @retries: number of times to retry request
* @flags: flags for ->cmd_flags
* @rq_flags: flags for ->rq_flags
* @resid: optional residual length
*
* Returns the req->errors value, which is the scsi_cmnd result
* field.
*/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags, req_flags_t rq_flags,
int *resid)
{
struct request *req;
struct scsi_request *rq;
int ret = DRIVER_ERROR << 24;
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
scsi_req_init(req);
if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
buffer, bufflen, __GFP_RECLAIM))
goto out;
rq->cmd_len = COMMAND_SIZE(cmd[0]);
memcpy(rq->cmd, cmd, rq->cmd_len);
req->retries = retries;
req->timeout = timeout;
req->cmd_flags |= flags;
req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;
/*
* head injection *required* here otherwise quiesce won't work
*/
blk_execute_rq(req->q, NULL, req, 1);
/*
* Some devices (USB mass-storage in particular) may transfer
* garbage data together with a residue indicating that the data
* is invalid. Prevent the garbage from being misinterpreted
* and prevent security leaks by zeroing out the excess data.
*/
if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);
if (resid)
*resid = rq->resid_len;
if (sense && rq->sense_len)
memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
if (sshdr)
scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
ret = req->errors;
out:
blk_put_request(req);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
James Bottomley | 154 | 51.51% | 3 | 17.65% |
Christoph Hellwig | 80 | 26.76% | 5 | 29.41% |
Alan Stern | 29 | 9.70% | 1 | 5.88% |
FUJITA Tomonori | 13 | 4.35% | 1 | 5.88% |
Mike Christie | 6 | 2.01% | 1 | 5.88% |
Jens Axboe | 6 | 2.01% | 2 | 11.76% |
Tejun Heo | 5 | 1.67% | 1 | 5.88% |
Joe Lawrence | 3 | 1.00% | 1 | 5.88% |
Mel Gorman | 2 | 0.67% | 1 | 5.88% |
Martin K. Petersen | 1 | 0.33% | 1 | 5.88% |
Total | 299 | 100.00% | 17 | 100.00% |
EXPORT_SYMBOL(scsi_execute);
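A hedged usage sketch for scsi_execute() (illustrative caller, not part of this file): issue a TEST UNIT READY with no data transfer and decode any sense data returned. The timeout is passed straight through to req->timeout, so it is given in jiffies here.

static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	struct scsi_sense_hdr sshdr;
	int result;

	/* No data phase, 30 second timeout, up to 3 retries. */
	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			      30 * HZ, 3, 0, 0, NULL);
	if (result && scsi_sense_valid(&sshdr))
		sdev_printk(KERN_INFO, sdev, "TUR sense %x/%x/%x\n",
			    sshdr.sense_key, sshdr.asc, sshdr.ascq);
	return result;
}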
/*
* Function: scsi_init_cmd_errh()
*
* Purpose: Initialize cmd fields related to error handling.
*
* Arguments: cmd - command that is ready to be queued.
*
* Notes: This function has the job of initializing a number of
* fields related to error handling. Typically this will
* be called once for each command, as required.
*/
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
cmd->serial_number = 0;
scsi_set_resid(cmd, 0);
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
if (cmd->cmd_len == 0)
cmd->cmd_len = scsi_command_size(cmd->cmnd);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 34 | 62.96% | 1 | 11.11% |
Christoph Hellwig | 10 | 18.52% | 3 | 33.33% |
Boaz Harrosh | 5 | 9.26% | 2 | 22.22% |
Michael Reed | 3 | 5.56% | 1 | 11.11% |
Alan Cox | 1 | 1.85% | 1 | 11.11% |
FUJITA Tomonori | 1 | 1.85% | 1 | 11.11% |
Total | 54 | 100.00% | 9 | 100.00% |
void scsi_device_unbusy(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct scsi_target *starget = scsi_target(sdev);
unsigned long flags;
atomic_dec(&shost->host_busy);
if (starget->can_queue > 0)
atomic_dec(&starget->target_busy);
if (unlikely(scsi_host_in_recovery(shost) &&
(shost->host_failed || shost->host_eh_scheduled))) {
spin_lock_irqsave(shost->host_lock, flags);
scsi_eh_wakeup(shost);
spin_unlock_irqrestore(shost->host_lock, flags);
}
atomic_dec(&sdev->device_busy);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 87 | 79.09% | 5 | 62.50% |
Mike Christie | 14 | 12.73% | 1 | 12.50% |
Tejun Heo | 6 | 5.45% | 1 | 12.50% |
James Bottomley | 3 | 2.73% | 1 | 12.50% |
Total | 110 | 100.00% | 8 | 100.00% |
static void scsi_kick_queue(struct request_queue *q)
{
if (q->mq_ops)
blk_mq_start_hw_queues(q);
else
blk_run_queue(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 28 | 100.00% | 1 | 100.00% |
Total | 28 | 100.00% | 1 | 100.00% |
/*
* Called for single_lun devices on IO completion. Clear starget_sdev_user,
* and call blk_run_queue for all the scsi_devices on the target -
* including current_sdev first.
*
* Called with *no* scsi locks held.
*/
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
struct Scsi_Host *shost = current_sdev->host;
struct scsi_device *sdev, *tmp;
struct scsi_target *starget = scsi_target(current_sdev);
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
starget->starget_sdev_user = NULL;
spin_unlock_irqrestore(shost->host_lock, flags);
/*
* Call blk_run_queue for all LUNs on the target, starting with
* current_sdev. We race with others (to set starget_sdev_user),
* but in most cases, we will be first. Ideally, each LU on the
* target would get some limited time or requests on the target.
*/
scsi_kick_queue(current_sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
if (starget->starget_sdev_user)
goto out;
list_for_each_entry_safe(sdev, tmp, &starget->devices,
same_target_siblings) {
if (sdev == current_sdev)
continue;
if (scsi_device_get(sdev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(sdev->request_queue);
spin_lock_irqsave(shost->host_lock, flags);
scsi_device_put(sdev);
}
out:
spin_unlock_irqrestore(shost->host_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick Mansfield | 82 | 51.25% | 3 | 37.50% |
Christoph Hellwig | 57 | 35.62% | 3 | 37.50% |
James Bottomley | 20 | 12.50% | 1 | 12.50% |
Linus Torvalds | 1 | 0.62% | 1 | 12.50% |
Total | 160 | 100.00% | 8 | 100.00% |
static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
return true;
if (atomic_read(&sdev->device_blocked) > 0)
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kiyoshi Ueda | 28 | 59.57% | 1 | 33.33% |
Christoph Hellwig | 19 | 40.43% | 2 | 66.67% |
Total | 47 | 100.00% | 3 | 100.00% |
static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
if (starget->can_queue > 0) {
if (atomic_read(&starget->target_busy) >= starget->can_queue)
return true;
if (atomic_read(&starget->target_blocked) > 0)
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 30 | 52.63% | 3 | 75.00% |
Mike Christie | 27 | 47.37% | 1 | 25.00% |
Total | 57 | 100.00% | 4 | 100.00% |
static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
if (shost->can_queue > 0 &&
atomic_read(&shost->host_busy) >= shost->can_queue)
return true;
if (atomic_read(&shost->host_blocked) > 0)
return true;
if (shost->host_self_blocked)
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Kiyoshi Ueda | 37 | 59.68% | 1 | 33.33% |
Christoph Hellwig | 25 | 40.32% | 2 | 66.67% |
Total | 62 | 100.00% | 3 | 100.00% |
static void scsi_starved_list_run(struct Scsi_Host *shost)
{
LIST_HEAD(starved_list);
struct scsi_device *sdev;
unsigned long flags;
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->starved_list, &starved_list);
while (!list_empty(&starved_list)) {
struct request_queue *slq;
/*
* As long as shost is accepting commands and we have
* starved queues, call blk_run_queue. scsi_request_fn
* drops the queue_lock and can add us back to the
* starved_list.
*
* host_lock protects the starved_list and starved_entry.
* scsi_request_fn must get the host_lock before checking
* or modifying starved_list or starved_entry.
*/
if (scsi_host_is_busy(shost))
break;
sdev = list_entry(starved_list.next,
struct scsi_device, starved_entry);
list_del_init(&sdev->starved_entry);
if (scsi_target_is_busy(scsi_target(sdev))) {
list_move_tail(&sdev->starved_entry,
&shost->starved_list);
continue;
}
/*
* Once we drop the host lock, a racing scsi_remove_device()
* call may remove the sdev from the starved list and destroy
* it and the queue. Mitigate by taking a reference to the
* queue and never touching the sdev again after we drop the
* host lock. Note: if __scsi_remove_device() invokes
* blk_cleanup_queue() before the queue is run from this
* function then blk_run_queue() will return immediately since
* blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
*/
slq = sdev->request_queue;
if (!blk_get_queue(slq))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_kick_queue(slq);
blk_put_queue(slq);
spin_lock_irqsave(shost->host_lock, flags);
}
/* put any unprocessed entries back */
list_splice(&starved_list, &shost->starved_list);
spin_unlock_irqrestore(shost->host_lock, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mike Christie | 69 | 37.70% | 2 | 11.76% |
Patrick Mansfield | 31 | 16.94% | 3 | 17.65% |
James Bottomley | 30 | 16.39% | 1 | 5.88% |
Christoph Hellwig | 21 | 11.48% | 5 | 29.41% |
Jens Axboe | 13 | 7.10% | 1 | 5.88% |
Doug Ledford | 10 | 5.46% | 2 | 11.76% |
Linus Torvalds (pre-git) | 7 | 3.83% | 2 | 11.76% |
Kiyoshi Ueda | 2 | 1.09% | 1 | 5.88% |
Total | 183 | 100.00% | 17 | 100.00% |
/*
* Function: scsi_run_queue()
*
* Purpose: Select a proper request queue to serve next
*
* Arguments: q - last request's queue
*
* Returns: Nothing
*
* Notes: The previous command was completely finished, start
* a new one if possible.
*/
static void scsi_run_queue(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host);
if (q->mq_ops)
blk_mq_run_hw_queues(q, false);
else
blk_run_queue(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 67 | 91.78% | 3 | 50.00% |
Linus Torvalds (pre-git) | 4 | 5.48% | 1 | 16.67% |
Bart Van Assche | 1 | 1.37% | 1 | 16.67% |
Linus Torvalds | 1 | 1.37% | 1 | 16.67% |
Total | 73 | 100.00% | 6 | 100.00% |
void scsi_requeue_run_queue(struct work_struct *work)
{
struct scsi_device *sdev;
struct request_queue *q;
sdev = container_of(work, struct scsi_device, requeue_work);
q = sdev->request_queue;
scsi_run_queue(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
/*
* Function: scsi_requeue_command()
*
* Purpose: Handle post-processing of completed commands.
*
* Arguments: q - queue to operate on
* cmd - command that may need to be requeued.
*
* Returns: Nothing
*
* Notes: After command completion, there may be blocks left
* over which weren't finished by the previous command;
* this can be for a number of reasons - the main one is
* I/O errors in the middle of the request, in which case
* we need to request the blocks that come after the bad
* sector.
* Notes: Upon return, cmd is a stale pointer.
*/
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct request *req = cmd->request;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blk_unprep_request(req);
req->special = NULL;
scsi_put_command(cmd);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
put_device(&sdev->sdev_gendev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 44 | 47.83% | 2 | 28.57% |
Bart Van Assche | 17 | 18.48% | 1 | 14.29% |
Tejun Heo | 16 | 17.39% | 1 | 14.29% |
James Bottomley | 15 | 16.30% | 3 | 42.86% |
Total | 92 | 100.00% | 7 | 100.00% |
void scsi_run_host_queues(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
shost_for_each_device(sdev, shost)
scsi_run_queue(sdev->request_queue);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 28 | 100.00% | 2 | 100.00% |
Total | 28 | 100.00% | 2 | 100.00% |
static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
if (!blk_rq_is_passthrough(cmd->request)) {
struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
if (drv->uninit_command)
drv->uninit_command(cmd);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 33 | 71.74% | 5 | 62.50% |
Linus Torvalds (pre-git) | 8 | 17.39% | 1 | 12.50% |
Boaz Harrosh | 5 | 10.87% | 2 | 25.00% |
Total | 46 | 100.00% | 8 | 100.00% |
static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
struct scsi_data_buffer *sdb;
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table, true);
if (cmd->request->next_rq) {
sdb = cmd->request->next_rq->special;
if (sdb)
sg_free_table_chained(&sdb->table, true);
}
if (scsi_prot_sg_count(cmd))
sg_free_table_chained(&cmd->prot_sdb->table, true);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 63 | 69.23% | 2 | 40.00% |
Ming Lin | 25 | 27.47% | 2 | 40.00% |
Boaz Harrosh | 3 | 3.30% | 1 | 20.00% |
Total | 91 | 100.00% | 5 | 100.00% |
static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
unsigned long flags;
scsi_mq_free_sgtables(cmd);
scsi_uninit_cmd(cmd);
if (shost->use_cmd_list) {
BUG_ON(list_empty(&cmd->list));
spin_lock_irqsave(&sdev->list_lock, flags);
list_del_init(&cmd->list);
spin_unlock_irqrestore(&sdev->list_lock, flags);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 64 | 71.11% | 1 | 50.00% |
Kashyap Desai | 26 | 28.89% | 1 | 50.00% |
Total | 90 | 100.00% | 2 | 100.00% |
/*
* Function: scsi_release_buffers()
*
* Purpose: Free resources allocated for a scsi_command.
*
* Arguments: cmd - command that we are bailing.
*
* Lock status: Assumed that no lock is held upon entry.
*
* Returns: Nothing
*
* Notes: In the event that an upper level driver rejects a
* command, we must release resources allocated during
* the __init_io() function. Primarily this would involve
* the scatter-gather table.
*/
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
if (cmd->sdb.table.nents)
sg_free_table_chained(&cmd->sdb.table, false);
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
if (scsi_prot_sg_count(cmd))
sg_free_table_chained(&cmd->prot_sdb->table, false);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 62 | 89.86% | 1 | 33.33% |
Ming Lin | 7 | 10.14% | 2 | 66.67% |
Total | 69 | 100.00% | 3 | 100.00% |
static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
sg_free_table_chained(&bidi_sdb->table, false);
kmem_cache_free(scsi_sdb_cache, bidi_sdb);
cmd->request->next_rq->special = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 28 | 54.90% | 1 | 20.00% |
Boaz Harrosh | 18 | 35.29% | 1 | 20.00% |
Ming Lin | 4 | 7.84% | 2 | 40.00% |
Martin K. Petersen | 1 | 1.96% | 1 | 20.00% |
Total | 51 | 100.00% | 5 | 100.00% |
static bool scsi_end_request(struct request *req, int error,
unsigned int bytes, unsigned int bidi_bytes)
{
struct scsi_cmnd *cmd = req->special;
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;
if (blk_update_request(req, error, bytes))
return true;
/* Bidi request must be completed as a whole */
if (unlikely(bidi_bytes) &&
blk_update_request(req->next_rq, error, bidi_bytes))
return true;
if (blk_queue_add_random(q))
add_disk_randomness(req->rq_disk);
if (req->mq_ctx) {
/*
* In the MQ case the command gets freed by __blk_mq_end_request,
* so we have to do all cleanup that depends on it earlier.
*
* We also can't kick the queues from irq context, so we
* will have to defer it to a workqueue.
*/
scsi_mq_uninit_cmd(cmd);
__blk_mq_end_request(req, error);
if (scsi_target(sdev)->single_lun ||
!list_empty(&sdev->host->starved_list))
kblockd_schedule_work(&sdev->requeue_work);
else
blk_mq_run_hw_queues(q, true);
} else {
unsigned long flags;
if (bidi_bytes)
scsi_release_bidi_buffers(cmd);
scsi_release_buffers(cmd);
scsi_put_command(cmd);
spin_lock_irqsave(q->queue_lock, flags);
blk_finish_request(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
}
put_device(&sdev->sdev_gendev);
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 213 | 95.52% | 5 | 71.43% |
Daniel Gryniewicz | 9 | 4.04% | 1 | 14.29% |
Bart Van Assche | 1 | 0.45% | 1 | 14.29% |
Total | 223 | 100.00% | 7 | 100.00% |
/**
* __scsi_error_from_host_byte - translate SCSI error code into errno
* @cmd: SCSI command (unused)
* @result: scsi error code
*
* Translate SCSI error code into standard UNIX errno.
* Return values:
* -ENOLINK temporary transport failure
* -EREMOTEIO permanent target failure, do not retry
* -EBADE permanent nexus failure, retry on other path
* -ENOSPC No write space available
* -ENODATA Medium error
* -EIO unspecified I/O error
*/
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
int error = 0;
switch(host_byte(result)) {
case DID_TRANSPORT_FAILFAST:
error = -ENOLINK;
break;
case DID_TARGET_FAILURE:
set_host_byte(cmd, DID_OK);
error = -EREMOTEIO;
break;
case DID_NEXUS_FAILURE:
set_host_byte(cmd, DID_OK);
error = -EBADE;
break;
case DID_ALLOC_FAILURE:
set_host_byte(cmd, DID_OK);
error = -ENOSPC;
break;
case DID_MEDIUM_ERROR:
set_host_byte(cmd, DID_OK);
error = -ENODATA;
break;
default:
error = -EIO;
break;
}
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hannes Reinecke | 103 | 92.79% | 3 | 75.00% |
Babu Moger | 8 | 7.21% | 1 | 25.00% |
Total | 111 | 100.00% | 4 | 100.00% |
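A worked illustration of the mapping above (sketch only, not part of the file): a command whose host byte is DID_TRANSPORT_FAILFAST translates to -ENOLINK, letting upper layers such as multipath treat it as a temporary path failure rather than a media error.

	cmd->result = DID_TRANSPORT_FAILFAST << 16;
	error = __scsi_error_from_host_byte(cmd, cmd->result);
	/* error is now -ENOLINK; the host byte is only rewritten to DID_OK
	 * for the DID_TARGET/NEXUS/ALLOC_FAILURE and DID_MEDIUM_ERROR cases. */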
/*
* Function: scsi_io_completion()
*
* Purpose: Completion processing for block device I/O requests.
*
* Arguments: cmd - command that is finished.
*
* Lock status: Assumed that no lock is held upon entry.
*
* Returns: Nothing
*
* Notes: We will finish off the specified number of sectors. If we
* are done, the command block will be released and the queue
* function will be goosed. If we are not done then we have to
* figure out what to do next:
*
* a) We can call scsi_requeue_command(). The request
* will be unprepared and put back on the queue. Then
* a new command will be created for it. This should
* be used if we made forward progress, or if we want
* to switch from READ(10) to READ(6) for example.
*
* b) We can call __scsi_queue_insert(). The request will
* be put back on the queue and retried using the same
* command as before, possibly after a delay.
*
* c) We can call scsi_end_request() with -EIO to fail
* the remainder of the request.
*/
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
int result = cmd->result;
struct request_queue *q = cmd->device->request_queue;
struct request *req = cmd->request;
int error = 0;
struct scsi_sense_hdr sshdr;
bool sense_valid = false;
int sense_deferred = 0, level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
if (result) {
sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
if (sense_valid)
sense_deferred = scsi_sense_is_deferred(&sshdr);
}
if (blk_rq_is_passthrough(req)) {
if (result) {
if (sense_valid) {
/*
* SG_IO wants current and deferred errors
*/
scsi_req(req)->sense_len =
min(8 + cmd->sense_buffer[7],
SCSI_SENSE_BUFFERSIZE);
}
if (!sense_deferred)
error = __scsi_error_from_host_byte(cmd, result);
}
/*
* __scsi_error_from_host_byte may have reset the host_byte
*/
req->errors = cmd->result;
scsi_req(req)->resid_len = scsi_get_resid(cmd);
if (scsi_bidi_cmnd(cmd)) {
/*
* Bidi commands Must be complete as a whole,
* both sides at once.
*/
scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid;
if (scsi_end_request(req, 0, blk_rq_bytes(req),
blk_rq_bytes(req->next_rq)))
BUG();
return;
}
} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
/*
* Flush commands do not transfer any data, and thus cannot use
* good_bytes != blk_rq_bytes(req) as the signal for an error.
* This sets the error explicitly for the problem case.
*/
error = __scsi_error_from_host_byte(cmd, result);
}
/* no bidi support for !blk_rq_is_passthrough yet */
BUG_ON(blk_bidi_rq(req));
/*
* Next deal with any sectors which we were able to correctly
* handle.
*/
SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
"%u sectors total, %d bytes done.\n",
blk_rq_sectors(req), good_bytes));
/*
* Recovered errors need reporting, but they're always treated as
* success, so fiddle the result code here. For passthrough requests
* we already took a copy of the original into rq->errors, which
* is what gets returned to the user.
*/
if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
/* If the sense reports ATA PASS-THROUGH INFORMATION AVAILABLE, skip the
* printout since the caller wants the ATA registers. This only occurs
* on SCSI ATA PASS-THROUGH commands when CK_COND=1
*/
if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
;
else if (!(req->rq_flags & RQF_QUIET))
scsi_print_sense(cmd);
result = 0;
/* for passthrough error may be set */
error = 0;