cregit-Linux how code gets into the kernel

Release 4.11 drivers/scsi/scsi_lib.c

Directory: drivers/scsi
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


/* Slab cache for struct scsi_data_buffer (used for bidi second data buffers). */
static struct kmem_cache *scsi_sdb_cache;

/* Slab cache for regular sense buffers. */
static struct kmem_cache *scsi_sense_cache;

/* Slab cache for sense buffers on hosts that need ISA-DMA-able memory. */
static struct kmem_cache *scsi_sense_isadma_cache;
/* Serializes lazy creation of the two sense caches above. */
static DEFINE_MUTEX(scsi_sense_cache_mutex);


static inline struct kmem_cache * scsi_select_sense_cache(struct Scsi_Host *shost) { return shost->unchecked_isa_dma ? scsi_sense_isadma_cache : scsi_sense_cache; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig23100.00%1100.00%
Total23100.00%1100.00%


static void scsi_free_sense_buffer(struct Scsi_Host *shost, unsigned char *sense_buffer) { kmem_cache_free(scsi_select_sense_cache(shost), sense_buffer); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig26100.00%2100.00%
Total26100.00%2100.00%


static unsigned char *scsi_alloc_sense_buffer(struct Scsi_Host *shost, gfp_t gfp_mask, int numa_node) { return kmem_cache_alloc_node(scsi_select_sense_cache(shost), gfp_mask, numa_node); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig32100.00%2100.00%
Total32100.00%2100.00%


int scsi_init_sense_cache(struct Scsi_Host *shost) { struct kmem_cache *cache; int ret = 0; cache = scsi_select_sense_cache(shost); if (cache) return 0; mutex_lock(&scsi_sense_cache_mutex); if (shost->unchecked_isa_dma) { scsi_sense_isadma_cache = kmem_cache_create("scsi_sense_cache(DMA)", SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL); if (!scsi_sense_isadma_cache) ret = -ENOMEM; } else { scsi_sense_cache = kmem_cache_create("scsi_sense_cache", SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL); if (!scsi_sense_cache) ret = -ENOMEM; } mutex_unlock(&scsi_sense_cache_mutex); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig112100.00%1100.00%
Total112100.00%1100.00%

/* * When to reinvoke queueing after a resource shortage. It's 3 msecs to * not change behaviour from the previous unplug mechanism, experimentation * may prove this needs changing. */ #define SCSI_QUEUE_DELAY 3
static void scsi_set_blocked(struct scsi_cmnd *cmd, int reason) { struct Scsi_Host *host = cmd->device->host; struct scsi_device *device = cmd->device; struct scsi_target *starget = scsi_target(device); /* * Set the appropriate busy bit for the device/host. * * If the host/device isn't busy, assume that something actually * completed, and that we should be able to queue a command now. * * Note that the prior mid-layer assumption that any host could * always queue at least one command is now broken. The mid-layer * will implement a user specifiable stall (see * scsi_host.max_host_blocked and scsi_device.max_device_blocked) * if a command is requeued with no other commands outstanding * either for the device or for the host. */ switch (reason) { case SCSI_MLQUEUE_HOST_BUSY: atomic_set(&host->host_blocked, host->max_host_blocked); break; case SCSI_MLQUEUE_DEVICE_BUSY: case SCSI_MLQUEUE_EH_RETRY: atomic_set(&device->device_blocked, device->max_device_blocked); break; case SCSI_MLQUEUE_TARGET_BUSY: atomic_set(&starget->target_blocked, starget->max_target_blocked); break; } }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig6361.76%433.33%
Mike Christie3029.41%18.33%
James Smart32.94%18.33%
Tejun Heo21.96%216.67%
Linus Torvalds10.98%18.33%
Mike Anderson10.98%18.33%
James Bottomley10.98%18.33%
Bart Van Assche10.98%18.33%
Total102100.00%12100.00%


static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; blk_mq_requeue_request(cmd->request, true); put_device(&sdev->sdev_gendev); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig3594.59%150.00%
Bart Van Assche25.41%150.00%
Total37100.00%2100.00%

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	/* Record which resource was busy so the stall logic can throttle. */
	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.  Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		/* blk-mq path has its own requeue mechanism. */
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig8062.50%430.77%
Tejun Heo1310.16%17.69%
Bart Van Assche107.81%17.69%
James Bottomley75.47%215.38%
Alan Stern64.69%17.69%
Jens Axboe53.91%17.69%
Linus Torvalds43.12%17.69%
Patrick Mansfield21.56%17.69%
Linus Torvalds (pre-git)10.78%17.69%
Total128100.00%13100.00%

/* * Function: scsi_queue_insert() * * Purpose: Insert a command in the midlevel queue. * * Arguments: cmd - command that we are adding to queue. * reason - why we are inserting command to queue. * * Lock status: Assumed that lock is not held upon entry. * * Returns: Nothing. * * Notes: We do this for one of two cases. Either the host is busy * and it cannot accept any more commands for the time being, * or the device returned QUEUE_FULL and can accept no more * commands. * Notes: This could be called either from an interrupt context or a * normal process context. */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) { __scsi_queue_insert(cmd, reason, 1); }

Contributors

PersonTokensPropCommitsCommitProp
James Bottomley2090.91%150.00%
Bart Van Assche29.09%150.00%
Total22100.00%2100.00%

/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @sshdr:	optional decoded sense header
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	flags for ->cmd_flags
 * @rq_flags:	flags for ->rq_flags
 * @resid:	optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, struct scsi_sense_hdr *sshdr,
		 int timeout, int retries, u64 flags, req_flags_t rq_flags,
		 int *resid)
{
	struct request *req;
	struct scsi_request *rq;
	int ret = DRIVER_ERROR << 24;	/* default result if we can't even submit */

	req = blk_get_request(sdev->request_queue,
			data_direction == DMA_TO_DEVICE ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, __GFP_RECLAIM);
	if (IS_ERR(req))
		return ret;
	rq = scsi_req(req);
	scsi_req_init(req);

	/* Map the caller's kernel buffer into the request, if any. */
	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
					buffer, bufflen, __GFP_RECLAIM))
		goto out;

	rq->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(rq->cmd, cmd, rq->cmd_len);
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags;
	req->rq_flags |= rq_flags | RQF_QUIET | RQF_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(rq->resid_len > 0 && rq->resid_len <= bufflen))
		memset(buffer + (bufflen - rq->resid_len), 0, rq->resid_len);

	if (resid)
		*resid = rq->resid_len;
	if (sense && rq->sense_len)
		memcpy(sense, rq->sense, SCSI_SENSE_BUFFERSIZE);
	if (sshdr)
		scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
James Bottomley15451.51%317.65%
Christoph Hellwig8026.76%529.41%
Alan Stern299.70%15.88%
FUJITA Tomonori134.35%15.88%
Mike Christie62.01%15.88%
Jens Axboe62.01%211.76%
Tejun Heo51.67%15.88%
Joe Lawrence31.00%15.88%
Mel Gorman20.67%15.88%
Martin K. Petersen10.33%15.88%
Total299100.00%17100.00%

EXPORT_SYMBOL(scsi_execute); /* * Function: scsi_init_cmd_errh() * * Purpose: Initialize cmd fields related to error handling. * * Arguments: cmd - command that is ready to be queued. * * Notes: This function has the job of initializing a number of * fields related to error handling. Typically this will * be called once for each command, as required. */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) { cmd->serial_number = 0; scsi_set_resid(cmd, 0); memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (cmd->cmd_len == 0) cmd->cmd_len = scsi_command_size(cmd->cmnd); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3462.96%111.11%
Christoph Hellwig1018.52%333.33%
Boaz Harrosh59.26%222.22%
Michael Reed35.56%111.11%
Alan Cox11.85%111.11%
FUJITA Tomonori11.85%111.11%
Total54100.00%9100.00%


void scsi_device_unbusy(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct scsi_target *starget = scsi_target(sdev); unsigned long flags; atomic_dec(&shost->host_busy); if (starget->can_queue > 0) atomic_dec(&starget->target_busy); if (unlikely(scsi_host_in_recovery(shost) && (shost->host_failed || shost->host_eh_scheduled))) { spin_lock_irqsave(shost->host_lock, flags); scsi_eh_wakeup(shost); spin_unlock_irqrestore(shost->host_lock, flags); } atomic_dec(&sdev->device_busy); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig8779.09%562.50%
Mike Christie1412.73%112.50%
Tejun Heo65.45%112.50%
James Bottomley32.73%112.50%
Total110100.00%8100.00%


static void scsi_kick_queue(struct request_queue *q) { if (q->mq_ops) blk_mq_start_hw_queues(q); else blk_run_queue(q); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig28100.00%1100.00%
Total28100.00%1100.00%

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	/* Release the target so another LU may become the active user. */
	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev.  We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first.  Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	/* Someone else claimed the target while we ran the queue - done. */
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		/* Hold a reference so the sdev can't vanish outside the lock. */
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Patrick Mansfield8251.25%337.50%
Christoph Hellwig5735.62%337.50%
James Bottomley2012.50%112.50%
Linus Torvalds10.62%112.50%
Total160100.00%8100.00%


static inline bool scsi_device_is_busy(struct scsi_device *sdev) { if (atomic_read(&sdev->device_busy) >= sdev->queue_depth) return true; if (atomic_read(&sdev->device_blocked) > 0) return true; return false; }

Contributors

PersonTokensPropCommitsCommitProp
Kiyoshi Ueda2859.57%133.33%
Christoph Hellwig1940.43%266.67%
Total47100.00%3100.00%


static inline bool scsi_target_is_busy(struct scsi_target *starget) { if (starget->can_queue > 0) { if (atomic_read(&starget->target_busy) >= starget->can_queue) return true; if (atomic_read(&starget->target_blocked) > 0) return true; } return false; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig3052.63%375.00%
Mike Christie2747.37%125.00%
Total57100.00%4100.00%


static inline bool scsi_host_is_busy(struct Scsi_Host *shost) { if (shost->can_queue > 0 && atomic_read(&shost->host_busy) >= shost->can_queue) return true; if (atomic_read(&shost->host_blocked) > 0) return true; if (shost->host_self_blocked) return true; return false; }

Contributors

PersonTokensPropCommitsCommitProp
Kiyoshi Ueda3759.68%133.33%
Christoph Hellwig2540.32%266.67%
Total62100.00%3100.00%


static void scsi_starved_list_run(struct Scsi_Host *shost) { LIST_HEAD(starved_list); struct scsi_device *sdev; unsigned long flags; spin_lock_irqsave(shost->host_lock, flags); list_splice_init(&shost->starved_list, &starved_list); while (!list_empty(&starved_list)) { struct request_queue *slq; /* * As long as shost is accepting commands and we have * starved queues, call blk_run_queue. scsi_request_fn * drops the queue_lock and can add us back to the * starved_list. * * host_lock protects the starved_list and starved_entry. * scsi_request_fn must get the host_lock before checking * or modifying starved_list or starved_entry. */ if (scsi_host_is_busy(shost)) break; sdev = list_entry(starved_list.next, struct scsi_device, starved_entry); list_del_init(&sdev->starved_entry); if (scsi_target_is_busy(scsi_target(sdev))) { list_move_tail(&sdev->starved_entry, &shost->starved_list); continue; } /* * Once we drop the host lock, a racing scsi_remove_device() * call may remove the sdev from the starved list and destroy * it and the queue. Mitigate by taking a reference to the * queue and never touching the sdev again after we drop the * host lock. Note: if __scsi_remove_device() invokes * blk_cleanup_queue() before the queue is run from this * function then blk_run_queue() will return immediately since * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. */ slq = sdev->request_queue; if (!blk_get_queue(slq)) continue; spin_unlock_irqrestore(shost->host_lock, flags); scsi_kick_queue(slq); blk_put_queue(slq); spin_lock_irqsave(shost->host_lock, flags); } /* put any unprocessed entries back */ list_splice(&starved_list, &shost->starved_list); spin_unlock_irqrestore(shost->host_lock, flags); }

Contributors

PersonTokensPropCommitsCommitProp
Mike Christie6937.70%211.76%
Patrick Mansfield3116.94%317.65%
James Bottomley3016.39%15.88%
Christoph Hellwig2111.48%529.41%
Jens Axboe137.10%15.88%
Doug Ledford105.46%211.76%
Linus Torvalds (pre-git)73.83%211.76%
Kiyoshi Ueda21.09%15.88%
Total183100.00%17100.00%

/* * Function: scsi_run_queue() * * Purpose: Select a proper request queue to serve next * * Arguments: q - last request's queue * * Returns: Nothing * * Notes: The previous command was completely finished, start * a new one if possible. */
static void scsi_run_queue(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; if (scsi_target(sdev)->single_lun) scsi_single_lun_run(sdev); if (!list_empty(&sdev->host->starved_list)) scsi_starved_list_run(sdev->host); if (q->mq_ops) blk_mq_run_hw_queues(q, false); else blk_run_queue(q); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig6791.78%350.00%
Linus Torvalds (pre-git)45.48%116.67%
Bart Van Assche11.37%116.67%
Linus Torvalds11.37%116.67%
Total73100.00%6100.00%


void scsi_requeue_run_queue(struct work_struct *work) { struct scsi_device *sdev; struct request_queue *q; sdev = container_of(work, struct scsi_device, requeue_work); q = sdev->request_queue; scsi_run_queue(q); }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe43100.00%1100.00%
Total43100.00%1100.00%

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 * Notes:       Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* Undo prep so the request goes through prep_fn again on reissue. */
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	/* Drop the sdev_gendev reference the command held. */
	put_device(&sdev->sdev_gendev);
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig4447.83%228.57%
Bart Van Assche1718.48%114.29%
Tejun Heo1617.39%114.29%
James Bottomley1516.30%342.86%
Total92100.00%7100.00%


void scsi_run_host_queues(struct Scsi_Host *shost) { struct scsi_device *sdev; shost_for_each_device(sdev, shost) scsi_run_queue(sdev->request_queue); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig28100.00%2100.00%
Total28100.00%2100.00%


static void scsi_uninit_cmd(struct scsi_cmnd *cmd) { if (!blk_rq_is_passthrough(cmd->request)) { struct scsi_driver *drv = scsi_cmd_to_driver(cmd); if (drv->uninit_command) drv->uninit_command(cmd); } }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig3371.74%562.50%
Linus Torvalds (pre-git)817.39%112.50%
Boaz Harrosh510.87%225.00%
Total46100.00%8100.00%


static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd) { struct scsi_data_buffer *sdb; if (cmd->sdb.table.nents) sg_free_table_chained(&cmd->sdb.table, true); if (cmd->request->next_rq) { sdb = cmd->request->next_rq->special; if (sdb) sg_free_table_chained(&sdb->table, true); } if (scsi_prot_sg_count(cmd)) sg_free_table_chained(&cmd->prot_sdb->table, true); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig6369.23%240.00%
Ming Lin2527.47%240.00%
Boaz Harrosh33.30%120.00%
Total91100.00%5100.00%


static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct Scsi_Host *shost = sdev->host; unsigned long flags; scsi_mq_free_sgtables(cmd); scsi_uninit_cmd(cmd); if (shost->use_cmd_list) { BUG_ON(list_empty(&cmd->list)); spin_lock_irqsave(&sdev->list_lock, flags); list_del_init(&cmd->list); spin_unlock_irqrestore(&sdev->list_lock, flags); } }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig6471.11%150.00%
Kashyap Desai2628.89%150.00%
Total90100.00%2100.00%

/* * Function: scsi_release_buffers() * * Purpose: Free resources allocate for a scsi_command. * * Arguments: cmd - command that we are bailing. * * Lock status: Assumed that no lock is held upon entry. * * Returns: Nothing * * Notes: In the event that an upper level driver rejects a * command, we must release resources allocated during * the __init_io() function. Primarily this would involve * the scatter-gather table. */
static void scsi_release_buffers(struct scsi_cmnd *cmd) { if (cmd->sdb.table.nents) sg_free_table_chained(&cmd->sdb.table, false); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); if (scsi_prot_sg_count(cmd)) sg_free_table_chained(&cmd->prot_sdb->table, false); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig6289.86%133.33%
Ming Lin710.14%266.67%
Total69100.00%3100.00%


static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) { struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; sg_free_table_chained(&bidi_sdb->table, false); kmem_cache_free(scsi_sdb_cache, bidi_sdb); cmd->request->next_rq->special = NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig2854.90%120.00%
Boaz Harrosh1835.29%120.00%
Ming Lin47.84%240.00%
Martin K. Petersen11.96%120.00%
Total51100.00%5100.00%


static bool scsi_end_request(struct request *req, int error, unsigned int bytes, unsigned int bidi_bytes) { struct scsi_cmnd *cmd = req->special; struct scsi_device *sdev = cmd->device; struct request_queue *q = sdev->request_queue; if (blk_update_request(req, error, bytes)) return true; /* Bidi request must be completed as a whole */ if (unlikely(bidi_bytes) && blk_update_request(req->next_rq, error, bidi_bytes)) return true; if (blk_queue_add_random(q)) add_disk_randomness(req->rq_disk); if (req->mq_ctx) { /* * In the MQ case the command gets freed by __blk_mq_end_request, * so we have to do all cleanup that depends on it earlier. * * We also can't kick the queues from irq context, so we * will have to defer it to a workqueue. */ scsi_mq_uninit_cmd(cmd); __blk_mq_end_request(req, error); if (scsi_target(sdev)->single_lun || !list_empty(&sdev->host->starved_list)) kblockd_schedule_work(&sdev->requeue_work); else blk_mq_run_hw_queues(q, true); } else { unsigned long flags; if (bidi_bytes) scsi_release_bidi_buffers(cmd); scsi_release_buffers(cmd); scsi_put_command(cmd); spin_lock_irqsave(q->queue_lock, flags); blk_finish_request(req, error); spin_unlock_irqrestore(q->queue_lock, flags); scsi_run_queue(q); } put_device(&sdev->sdev_gendev); return false; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig21395.52%571.43%
Daniel Gryniewicz94.04%114.29%
Bart Van Assche10.45%114.29%
Total223100.00%7100.00%

/** * __scsi_error_from_host_byte - translate SCSI error code into errno * @cmd: SCSI command (unused) * @result: scsi error code * * Translate SCSI error code into standard UNIX errno. * Return values: * -ENOLINK temporary transport failure * -EREMOTEIO permanent target failure, do not retry * -EBADE permanent nexus failure, retry on other path * -ENOSPC No write space available * -ENODATA Medium error * -EIO unspecified I/O error */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) { int error = 0; switch(host_byte(result)) { case DID_TRANSPORT_FAILFAST: error = -ENOLINK; break; case DID_TARGET_FAILURE: set_host_byte(cmd, DID_OK); error = -EREMOTEIO; break; case DID_NEXUS_FAILURE: set_host_byte(cmd, DID_OK); error = -EBADE; break; case DID_ALLOC_FAILURE: set_host_byte(cmd, DID_OK); error = -ENOSPC; break; case DID_MEDIUM_ERROR: set_host_byte(cmd, DID_OK); error = -ENODATA; break; default: error = -EIO; break; } return error; }

Contributors

PersonTokensPropCommitsCommitProp
Hannes Reinecke10392.79%375.00%
Babu Moger87.21%125.00%
Total111100.00%4100.00%

/* * Function: scsi_io_completion() * * Purpose: Completion processing for block device I/O requests. * * Arguments: cmd - command that is finished. * * Lock status: Assumed that no lock is held upon entry. * * Returns: Nothing * * Notes: We will finish off the specified number of sectors. If we * are done, the command block will be released and the queue * function will be goosed. If we are not done then we have to * figure out what to do next: * * a) We can call scsi_requeue_command(). The request * will be unprepared and put back on the queue. Then * a new command will be created for it. This should * be used if we made forward progress, or if we want * to switch from READ(10) to READ(6) for example. * * b) We can call __scsi_queue_insert(). The request will * be put back on the queue and retried using the same * command as before, possibly after a delay. * * c) We can call scsi_end_request() with -EIO to fail * the remainder of the request. */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) { int result = cmd->result; struct request_queue *q = cmd->device->request_queue; struct request *req = cmd->request; int error = 0; struct scsi_sense_hdr sshdr; bool sense_valid = false; int sense_deferred = 0, level = 0; enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, ACTION_DELAYED_RETRY} action; unsigned long wait_for = (cmd->allowed + 1) * req->timeout; if (result) { sense_valid = scsi_command_normalize_sense(cmd, &sshdr); if (sense_valid) sense_deferred = scsi_sense_is_deferred(&sshdr); } if (blk_rq_is_passthrough(req)) { if (result) { if (sense_valid) { /* * SG_IO wants current and deferred errors */ scsi_req(req)->sense_len = min(8 + cmd->sense_buffer[7], SCSI_SENSE_BUFFERSIZE); } if (!sense_deferred) error = __scsi_error_from_host_byte(cmd, result); } /* * __scsi_error_from_host_byte may have reset the host_byte */ req->errors = cmd->result; scsi_req(req)->resid_len = scsi_get_resid(cmd); if (scsi_bidi_cmnd(cmd)) { /* * Bidi commands Must be complete as a whole, * both sides at once. */ scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid; if (scsi_end_request(req, 0, blk_rq_bytes(req), blk_rq_bytes(req->next_rq))) BUG(); return; } } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { /* * Flush commands do not transfers any data, and thus cannot use * good_bytes != blk_rq_bytes(req) as the signal for an error. * This sets the error explicitly for the problem case. */ error = __scsi_error_from_host_byte(cmd, result); } /* no bidi support for !blk_rq_is_passthrough yet */ BUG_ON(blk_bidi_rq(req)); /* * Next deal with any sectors which we were able to correctly * handle. */ SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd, "%u sectors total, %d bytes done.\n", blk_rq_sectors(req), good_bytes)); /* * Recovered errors need reporting, but they're always treated as * success, so fiddle the result code here. 
For passthrough requests * we already took a copy of the original into rq->errors which * is what gets returned to the user */ if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip * print since caller wants ATA registers. Only occurs on * SCSI ATA PASS_THROUGH commands when CK_COND=1 */ if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) ; else if (!(req->rq_flags & RQF_QUIET)) scsi_print_sense(cmd); result = 0; /* for passthrough error may be set */ error = 0;