cregit-Linux how code gets into the kernel

Release 4.11 drivers/scsi/virtio_scsi.c

Directory: drivers/scsi
/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Paolo Bonzini   <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <linux/seqlock.h>
#include <linux/blk-mq-virtio.h>


#define VIRTIO_SCSI_MEMPOOL_SZ 64

#define VIRTIO_SCSI_EVENT_LEN 8

#define VIRTIO_SCSI_VQ_BASE 2

/* Command queue element */

struct virtio_scsi_cmd {
	struct scsi_cmnd *sc;		/* back-pointer to the mid-layer command */
	struct completion *comp;	/* waiter for TMF/AN completion, if any */
	/* Request header, one variant per queue type. */
	union {
		struct virtio_scsi_cmd_req       cmd;
		struct virtio_scsi_cmd_req_pi    cmd_pi;
		struct virtio_scsi_ctrl_tmf_req  tmf;
		struct virtio_scsi_ctrl_an_req   an;
	} req;
	/* Response header, written by the device. */
	union {
		struct virtio_scsi_cmd_resp      cmd;
		struct virtio_scsi_ctrl_tmf_resp tmf;
		struct virtio_scsi_ctrl_an_resp  an;
		struct virtio_scsi_event         evt;
	} resp;
} ____cacheline_aligned_in_smp;


/* One pre-posted event buffer plus the deferred work that processes it. */
struct virtio_scsi_event_node {
	struct virtio_scsi *vscsi;	/* owning adapter instance */
	struct virtio_scsi_event event;	/* buffer handed to the event vq */
	struct work_struct work;	/* runs virtscsi_handle_event() */
};


/* A virtqueue paired with the spinlock that serializes access to it. */
struct virtio_scsi_vq {
	/* Protects vq */
	spinlock_t vq_lock;

	struct virtqueue *vq;
};

/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy.  When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept.  However, if a target was idle, we can
 * choose a queue arbitrarily.  In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs.  This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * tgt_seq is held to serialize reading and writing req_vq.
 *
 * Decrements of reqs are never concurrent with writes of req_vq: before the
 * decrement reqs will be != 0; after the decrement the virtqueue completion
 * routine will not use the req_vq so it can be changed by a new request.
 * Thus they can happen outside the tgt_seq, provided of course we make reqs
 * an atomic_t.
 */

struct virtio_scsi_target_state {
	/* Serializes reads and writes of req_vq (see block comment above). */
	seqcount_t tgt_seq;

	/* Count of outstanding requests. */
	atomic_t reqs;

	/* Currently active virtqueue for requests sent to this target. */
	struct virtio_scsi_vq *req_vq;
};

/* Driver instance state */

struct virtio_scsi {
	struct virtio_device *vdev;

	/* Get some buffers ready for event vq */
	struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

	u32 num_queues;

	/* If the affinity hint is set for virtqueues */
	bool affinity_hint_set;

	/* hlist linkage — presumably for cpuhp callbacks; confirm at setup */
	struct hlist_node node;

	/* Protected by event_vq lock */
	bool stop_events;

	struct virtio_scsi_vq ctrl_vq;
	struct virtio_scsi_vq event_vq;
	/* Request queues; flexible array, num_queues entries. */
	struct virtio_scsi_vq req_vqs[];
};


/* Slab cache for struct virtio_scsi_cmd allocations. */
static struct kmem_cache *virtscsi_cmd_cache;

/* Mempool over virtscsi_cmd_cache — presumably sized by VIRTIO_SCSI_MEMPOOL_SZ; confirm at init (not visible here). */
static mempool_t *virtscsi_cmd_pool;


/* Resolve the Scsi_Host that owns a virtio device; stored in vdev->priv. */
static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
	return vdev->priv;
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini19100.00%1100.00%
Total19100.00%1100.00%


/*
 * Record the device-reported residual byte count on a completed command.
 * For bidirectional commands the residual is charged to the data-in
 * buffer first (capped at its length) and the remainder to data-out.
 */
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
	if (resid == 0)
		return;

	if (!scsi_bidi_cmnd(sc)) {
		scsi_set_resid(sc, resid);
		return;
	}

	scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
	scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini73100.00%1100.00%
Total73100.00%1100.00%

/** * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done * * Called with vq_lock held. */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) { struct virtio_scsi_cmd *cmd = buf; struct scsi_cmnd *sc = cmd->sc; struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; struct virtio_scsi_target_state *tgt = scsi_target(sc->device)->hostdata; dev_dbg(&sc->device->sdev_gendev, "cmd %p response %u status %#02x sense_len %u\n", sc, resp->response, resp->status, resp->sense_len); sc->result = resp->status; virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid)); switch (resp->response) { case VIRTIO_SCSI_S_OK: set_host_byte(sc, DID_OK); break; case VIRTIO_SCSI_S_OVERRUN: set_host_byte(sc, DID_ERROR); break; case VIRTIO_SCSI_S_ABORTED: set_host_byte(sc, DID_ABORT); break; case VIRTIO_SCSI_S_BAD_TARGET: set_host_byte(sc, DID_BAD_TARGET); break; case VIRTIO_SCSI_S_RESET: set_host_byte(sc, DID_RESET); break; case VIRTIO_SCSI_S_BUSY: set_host_byte(sc, DID_BUS_BUSY); break; case VIRTIO_SCSI_S_TRANSPORT_FAILURE: set_host_byte(sc, DID_TRANSPORT_DISRUPTED); break; case VIRTIO_SCSI_S_TARGET_FAILURE: set_host_byte(sc, DID_TARGET_FAILURE); break; case VIRTIO_SCSI_S_NEXUS_FAILURE: set_host_byte(sc, DID_NEXUS_FAILURE); break; default: scmd_printk(KERN_WARNING, sc, "Unknown response %d", resp->response); /* fall through */ case VIRTIO_SCSI_S_FAILURE: set_host_byte(sc, DID_ERROR); break; } WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) > VIRTIO_SCSI_SENSE_SIZE); if (sc->sense_buffer) { memcpy(sc->sense_buffer, resp->sense, min_t(u32, virtio32_to_cpu(vscsi->vdev, resp->sense_len), VIRTIO_SCSI_SENSE_SIZE)); if (resp->sense_len) set_driver_byte(sc, DRIVER_SENSE); } sc->scsi_done(sc); atomic_dec(&tgt->reqs); }

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini30093.46%375.00%
Michael S. Tsirkin216.54%125.00%
Total321100.00%4100.00%


/*
 * Drain a virtqueue under its lock, invoking @fn on every completed buffer.
 * Loops until callbacks can be re-enabled without racing a new completion,
 * or the virtqueue is found broken.
 */
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
			     struct virtio_scsi_vq *virtscsi_vq,
			     void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
	struct virtqueue *vq = virtscsi_vq->vq;
	unsigned long flags;
	unsigned int len;
	void *buf;

	spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
	for (;;) {
		virtqueue_disable_cb(vq);
		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
			fn(vscsi, buf);

		if (unlikely(virtqueue_is_broken(vq)))
			break;
		/* Stop once callbacks are re-armed with no pending buffer. */
		if (virtqueue_enable_cb(vq))
			break;
	}
	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini11391.13%375.00%
Heinz Graalfs118.87%125.00%
Total124100.00%4100.00%


/* Completion IRQ callback for a request virtqueue. */
static void virtscsi_req_done(struct virtqueue *vq)
{
	struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
	struct virtio_scsi *vscsi = shost_priv(sh);
	/* Request vqs start at VIRTIO_SCSI_VQ_BASE in the device's vq array. */
	struct virtio_scsi_vq *req_vq =
		&vscsi->req_vqs[vq->index - VIRTIO_SCSI_VQ_BASE];

	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini64100.00%5100.00%
Total64100.00%5100.00%

;
/* Poll every request virtqueue once, completing anything pending. */
static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
{
	int i;

	for (i = 0; i < vscsi->num_queues; i++)
		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
				 virtscsi_complete_cmd);
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini50100.00%1100.00%
Total50100.00%1100.00%


/* Control-queue completion: wake up the waiter, if one is registered. */
static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_cmd *cmd = buf;

	if (cmd->comp)
		complete(cmd->comp);
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini3497.14%266.67%
Daniel Wagner12.86%133.33%
Total35100.00%3100.00%


/* Completion IRQ callback for the control virtqueue. */
static void virtscsi_ctrl_done(struct virtqueue *vq)
{
	struct virtio_scsi *vscsi = shost_priv(virtio_scsi_host(vq->vdev));

	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini45100.00%4100.00%
Total45100.00%4100.00%

; static void virtscsi_handle_event(struct work_struct *work);
static int virtscsi_kick_event(struct virtio_scsi *vscsi, struct virtio_scsi_event_node *event_node) { int err; struct scatterlist sg; unsigned long flags; INIT_WORK(&event_node->work, virtscsi_handle_event); sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node, GFP_ATOMIC); if (!err) virtqueue_kick(vscsi->event_vq.vq); spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); return err; }

Contributors

PersonTokensPropCommitsCommitProp
Cong Meng9885.22%120.00%
Paolo Bonzini108.70%120.00%
Rusty Russell65.22%240.00%
Richard W.M. Jones10.87%120.00%
Total115100.00%5100.00%


static int virtscsi_kick_event_all(struct virtio_scsi *vscsi) { int i; for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) { vscsi->event_list[i].vscsi = vscsi; virtscsi_kick_event(vscsi, &vscsi->event_list[i]); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Cong Meng56100.00%1100.00%
Total56100.00%1100.00%


/*
 * Tear down event handling: forbid further queueing under the event-vq
 * lock, then synchronously cancel any work already queued.
 */
static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
	int i;

	/* Stop scheduling work before calling cancel_work_sync. */
	spin_lock_irq(&vscsi->event_vq.vq_lock);
	vscsi->stop_events = true;
	spin_unlock_irq(&vscsi->event_vq.vq_lock);

	for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
		cancel_work_sync(&vscsi->event_list[i].work);
}

Contributors

PersonTokensPropCommitsCommitProp
Cong Meng4059.70%150.00%
Michael S. Tsirkin2740.30%150.00%
Total67100.00%2100.00%


static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, struct virtio_scsi_event *event) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned int target = event->lun[1]; unsigned int lun = (event->lun[2] << 8) | event->lun[3]; switch (virtio32_to_cpu(vscsi->vdev, event->reason)) { case VIRTIO_SCSI_EVT_RESET_RESCAN: scsi_add_device(shost, 0, target, lun); break; case VIRTIO_SCSI_EVT_RESET_REMOVED: sdev = scsi_device_lookup(shost, 0, target, lun); if (sdev) { scsi_remove_device(sdev); scsi_device_put(sdev); } else { pr_err("SCSI device %d 0 %d %d not found\n", shost->host_no, target, lun); } break; default: pr_info("Unsupport virtio scsi event reason %x\n", event->reason); } }

Contributors

PersonTokensPropCommitsCommitProp
Cong Meng14895.48%150.00%
Michael S. Tsirkin74.52%150.00%
Total155100.00%2100.00%


static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, struct virtio_scsi_event *event) { struct scsi_device *sdev; struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); unsigned int target = event->lun[1]; unsigned int lun = (event->lun[2] << 8) | event->lun[3]; u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255; u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8; sdev = scsi_device_lookup(shost, 0, target, lun); if (!sdev) { pr_err("SCSI device %d 0 %d %d not found\n", shost->host_no, target, lun); return; } /* Handle "Parameters changed", "Mode parameters changed", and "Capacity data has changed". */ if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) scsi_rescan_device(&sdev->sdev_gendev); scsi_device_put(sdev); }

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini15291.57%150.00%
Michael S. Tsirkin148.43%150.00%
Total166100.00%2100.00%


static void virtscsi_handle_event(struct work_struct *work) { struct virtio_scsi_event_node *event_node = container_of(work, struct virtio_scsi_event_node, work); struct virtio_scsi *vscsi = event_node->vscsi; struct virtio_scsi_event *event = &event_node->event; if (event->event & cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) { event->event &= ~cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED); scsi_scan_host(virtio_scsi_host(vscsi->vdev)); } switch (virtio32_to_cpu(vscsi->vdev, event->event)) { case VIRTIO_SCSI_T_NO_EVENT: break; case VIRTIO_SCSI_T_TRANSPORT_RESET: virtscsi_handle_transport_reset(vscsi, event); break; case VIRTIO_SCSI_T_PARAM_CHANGE: virtscsi_handle_param_change(vscsi, event); break; default: pr_err("Unsupport virtio scsi event %x\n", event->event); } virtscsi_kick_event(vscsi, event_node); }

Contributors

PersonTokensPropCommitsCommitProp
Cong Meng11277.78%133.33%
Michael S. Tsirkin2114.58%133.33%
Paolo Bonzini117.64%133.33%
Total144100.00%3100.00%


/*
 * Event-vq completion: defer handling to the freezable workqueue,
 * unless teardown has already set stop_events (checked under vq_lock
 * by the caller's locking in virtscsi_vq_done).
 */
static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
	struct virtio_scsi_event_node *event_node = buf;

	if (vscsi->stop_events)
		return;

	queue_work(system_freezable_wq, &event_node->work);
}

Contributors

PersonTokensPropCommitsCommitProp
Cong Meng2461.54%125.00%
Paolo Bonzini820.51%250.00%
Michael S. Tsirkin717.95%125.00%
Total39100.00%4100.00%


/* Completion IRQ callback for the event virtqueue. */
static void virtscsi_event_done(struct virtqueue *vq)
{
	struct virtio_scsi *vscsi = shost_priv(virtio_scsi_host(vq->vdev));

	virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini45100.00%4100.00%
Total45100.00%4100.00%

; /** * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue * @vq : the struct virtqueue we're talking about * @cmd : command structure * @req_size : size of the request buffer * @resp_size : size of the response buffer */
/*
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq		: the struct virtqueue we're talking about
 * @cmd		: command structure
 * @req_size	: size of the request buffer
 * @resp_size	: size of the response buffer
 *
 * Builds at most six scatterlist entries: request header, optional
 * protection + data-out payload, response header, optional protection +
 * data-in payload, in that order.
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
			    struct virtio_scsi_cmd *cmd,
			    size_t req_size, size_t resp_size)
{
	struct scsi_cmnd *sc = cmd->sc;
	struct scatterlist *sgs[6], hdr, status;
	struct sg_table *out_tbl = NULL, *in_tbl = NULL;
	unsigned out_num = 0, in_num = 0;

	if (sc && sc->sc_data_direction != DMA_NONE) {
		if (sc->sc_data_direction != DMA_FROM_DEVICE)
			out_tbl = &scsi_out(sc)->table;
		if (sc->sc_data_direction != DMA_TO_DEVICE)
			in_tbl = &scsi_in(sc)->table;
	}

	/* Request header. */
	sg_init_one(&hdr, &cmd->req, req_size);
	sgs[out_num++] = &hdr;

	/* Data-out buffer. */
	if (out_tbl) {
		/* Place WRITE protection SGLs before Data OUT payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num++] = scsi_prot_sglist(sc);
		sgs[out_num++] = out_tbl->sgl;
	}

	/* Response header. */
	sg_init_one(&status, &cmd->resp, resp_size);
	sgs[out_num + in_num++] = &status;

	/* Data-in buffer */
	if (in_tbl) {
		/* Place READ protection SGLs before Data IN payload */
		if (scsi_prot_sg_count(sc))
			sgs[out_num + in_num++] = scsi_prot_sglist(sc);
		sgs[out_num + in_num++] = in_tbl->sgl;
	}

	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
}

Contributors

PersonTokensPropCommitsCommitProp
Wanlong Gao13251.97%125.00%
Paolo Bonzini7629.92%125.00%
Nicholas Bellinger4517.72%125.00%
Rusty Russell10.39%125.00%
Total254100.00%4100.00%


/*
 * Queue a command on @vq under its lock and notify the device outside
 * the lock if the virtqueue asked for a kick.
 */
static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
			     struct virtio_scsi_cmd *cmd,
			     size_t req_size, size_t resp_size)
{
	bool needs_kick = false;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vq->vq_lock, flags);
	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->vq_lock, flags);

	if (needs_kick)
		virtqueue_notify(vq->vq);
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini7981.44%466.67%
Rusty Russell1212.37%116.67%
Wanlong Gao66.19%116.67%
Total97100.00%6100.00%


static void virtio_scsi_init_hdr(struct virtio_device *vdev, struct virtio_scsi_cmd_req *cmd, struct scsi_cmnd *sc) { cmd->lun[0] = 1; cmd->lun[1] = sc->device->id; cmd->lun[2] = (sc->device->lun >> 8) | 0x40; cmd->lun[3] = sc->device->lun & 0xff; cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc); cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; cmd->prio = 0; cmd->crn = 0; }

Contributors

PersonTokensPropCommitsCommitProp
Nicholas Bellinger10090.91%150.00%
Michael S. Tsirkin109.09%150.00%
Total110100.00%2100.00%

#ifdef CONFIG_BLK_DEV_INTEGRITY
static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, struct virtio_scsi_cmd_req_pi *cmd_pi, struct scsi_cmnd *sc) { struct request *rq = sc->request; struct blk_integrity *bi; virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc); if (!rq || !scsi_prot_sg_count(sc)) return; bi = blk_get_integrity(rq->rq_disk); if (sc->sc_data_direction == DMA_TO_DEVICE) cmd_pi->pi_bytesout = cpu_to_virtio32(vdev, blk_rq_sectors(rq) * bi->tuple_size); else if (sc->sc_data_direction == DMA_FROM_DEVICE) cmd_pi->pi_bytesin = cpu_to_virtio32(vdev, blk_rq_sectors(rq) * bi->tuple_size); }

Contributors

PersonTokensPropCommitsCommitProp
Nicholas Bellinger10686.18%150.00%
Michael S. Tsirkin1713.82%150.00%
Total123100.00%2100.00%

#endif
static int virtscsi_queuecommand(struct virtio_scsi *vscsi, struct virtio_scsi_vq *req_vq, struct scsi_cmnd *sc) { struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); unsigned long flags; int req_size; int ret; BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); /* TODO: check feature bit and fail if unsupported? */ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); dev_dbg(&sc->device->sdev_gendev, "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); memset(cmd, 0, sizeof(*cmd)); cmd->sc = sc; BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); #ifdef CONFIG_BLK_DEV_INTEGRITY if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd_pi); } else #endif { virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); req_size = sizeof(cmd->req.cmd); } ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); if (ret == -EIO) { cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; spin_lock_irqsave(&req_vq->vq_lock, flags); virtscsi_complete_cmd(vscsi, cmd); spin_unlock_irqrestore(&req_vq->vq_lock, flags); } else if (ret != 0) { return SCSI_MLQUEUE_HOST_BUSY; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini15247.80%333.33%
Nicholas Bellinger7322.96%111.11%
Eric Farman6219.50%111.11%
Christoph Hellwig206.29%222.22%
Michael S. Tsirkin82.52%111.11%
Linus Torvalds30.94%111.11%
Total318100.00%9100.00%


/* Single-queue queuecommand: everything goes through req_vqs[0]. */
static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
					struct scsi_cmnd *sc)
{
	struct virtio_scsi *vscsi = shost_priv(sh);
	struct virtio_scsi_target_state *tgt =
				scsi_target(sc->device)->hostdata;

	atomic_inc(&tgt->reqs);
	return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}

Contributors

PersonTokensPropCommitsCommitProp
Paolo Bonzini64100.00%3100.00%
Total64100.00%3100.00%


/* blk-mq queue steering: the hardware-queue index encoded in the tag. */
static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi,
						  struct scsi_cmnd *sc)
{
	u32 unique_tag = blk_mq_unique_tag(sc->request);

	return &vscsi->req_vqs[blk_mq_unique_tag_to_hwq(unique_tag)];
}

Contributors

PersonTokensPropCommitsCommitProp
Ming Lei45100.00%1100.00%
Total45100.00%1100.00%


static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, struct virtio_scsi_target_state *tgt) { struct virtio_scsi_vq *vq; unsigned long flags; u32 queue_num; local_irq_save(flags); if (atomic_inc_return(&tgt->reqs) > 1) { unsigned long seq; do { seq = read_seqcount_begin(&tgt->tgt_seq); vq = tgt->req_vq; } while (read_seqcount_retry(&tgt->tgt_seq, seq)); } else { /* no writes can be concurrent because of atomic_t */ write_seqcount_begin(&tgt->tgt_seq); /* keep previous req_vq if a reader just arrived */ if (unlikely(atomic_read(&tgt->reqs) > 1)) { vq = tgt->req_vq; goto unlock; } queue_num = smp_processor_id(); while (unlikely(queue_num >= vscsi->num_queues)) queue_num -= vscsi->num_queues;