Release 4.18 block/bsg.c
/*
* bsg.c - block layer implementation of the sg v4 interface
*
* Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
* Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License version 2. See the file "COPYING" in the main directory of this
* archive for more details.
*
*/
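As a rough illustration of the sg v4 interface this file implements, the hedged sketch below is a minimal userspace example (not part of bsg.c): it fills a struct sg_io_v4 for a 6-byte INQUIRY and submits it synchronously through the SG_IO ioctl, which this driver also services. The /dev/bsg node name, buffer sizes and error handling are placeholders.

/*
 * Hedged userspace sketch (not part of bsg.c): build an sg_io_v4
 * header for an INQUIRY and submit it with the SG_IO ioctl.
 */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/bsg.h>
#include <scsi/sg.h>

int inquiry_example(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte allocation length */
	unsigned char data[96], sense[32];
	struct sg_io_v4 hdr;
	int fd, ret;

	fd = open("/dev/bsg/0:0:0:0", O_RDWR);			/* placeholder device node */
	if (fd < 0)
		return -1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';					/* mandatory for the v4 interface */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len = sizeof(cdb);
	hdr.request = (__u64)(uintptr_t)cdb;			/* CDB pointer */
	hdr.din_xfer_len = sizeof(data);			/* data-in transfer */
	hdr.din_xferp = (__u64)(uintptr_t)data;
	hdr.max_response_len = sizeof(sense);			/* sense buffer */
	hdr.response = (__u64)(uintptr_t)sense;
	hdr.timeout = 30000;					/* milliseconds */

	ret = ioctl(fd, SG_IO, &hdr);				/* synchronous submission */
	close(fd);
	return ret;
}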
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>
#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver"
#define BSG_VERSION "0.4"
#define bsg_dbg(bd, fmt, ...) \
pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)
struct bsg_device {
	struct request_queue *queue;	/* queue the mapped requests are issued on */
	spinlock_t lock;		/* protects the lists and counters below */
	struct list_head busy_list;	/* commands submitted but not yet completed */
	struct list_head done_list;	/* completed commands waiting to be reaped */
	struct hlist_node dev_list;	/* link in the bsg_device_list hash */
	atomic_t ref_count;		/* open references to this device */
	int queued_cmds;		/* commands currently outstanding */
	int done_cmds;			/* entries on done_list */
	wait_queue_head_t wq_done;	/* woken when a command completes */
	wait_queue_head_t wq_free;	/* woken when a queue slot is freed */
	char name[20];			/* device name, used for debug output */
	int max_queue;			/* outstanding-command limit, BSG_DEFAULT_CMDS */
	unsigned long flags;		/* BSG_F_BLOCK */
};
enum {
BSG_F_BLOCK = 1,
};
#define BSG_DEFAULT_CMDS 64
#define BSG_MAX_DEVS 32768
static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);
#define BSG_LIST_ARRAY_SIZE 8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];
static struct class *bsg_class;
static int bsg_major;
static struct kmem_cache *bsg_cmd_cachep;
/*
* our internal command type
*/
struct bsg_command {
struct bsg_device *bd;
struct list_head list;
struct request *rq;
struct bio *bio;
struct bio *bidi_bio;
int err;
struct sg_io_v4 hdr;
};
static void bsg_free_command(struct bsg_command *bc)
{
struct bsg_device *bd = bc->bd;
unsigned long flags;
kmem_cache_free(bsg_cmd_cachep, bc);
spin_lock_irqsave(&bd->lock, flags);
bd->queued_cmds--;
spin_unlock_irqrestore(&bd->lock, flags);
wake_up(&bd->wq_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 64 | 100.00% | 2 | 100.00% |
Total | 64 | 100.00% | 2 | 100.00% |
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
struct bsg_command *bc = ERR_PTR(-EINVAL);
spin_lock_irq(&bd->lock);
if (bd->queued_cmds >= bd->max_queue)
goto out;
bd->queued_cmds++;
spin_unlock_irq(&bd->lock);
bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
if (unlikely(!bc)) {
spin_lock_irq(&bd->lock);
bd->queued_cmds--;
bc = ERR_PTR(-ENOMEM);
goto out;
}
bc->bd = bd;
INIT_LIST_HEAD(&bc->list);
bsg_dbg(bd, "returning free cmd %p\n", bc);
return bc;
out:
spin_unlock_irq(&bd->lock);
return bc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 117 | 83.57% | 3 | 50.00% |
FUJITA Tomonori | 20 | 14.29% | 2 | 33.33% |
Johannes Thumshirn | 3 | 2.14% | 1 | 16.67% |
Total | 140 | 100.00% | 6 | 100.00% |
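bsg_alloc_command() fails with -EINVAL once queued_cmds reaches max_queue (BSG_DEFAULT_CMDS, i.e. 64, by default), so a write() that tries to queue more commands than that is rejected. A hedged userspace sketch, assuming the sg-compatible SG_GET_COMMAND_Q/SG_SET_COMMAND_Q ioctls (handled later in this file) are available to read and raise that per-device limit:

/*
 * Hedged userspace sketch (not part of bsg.c): query and raise the
 * per-device queue depth that bsg_alloc_command() enforces.
 */
#include <sys/ioctl.h>
#include <scsi/sg.h>

static int widen_bsg_queue(int fd, int new_depth)
{
	int depth;

	if (ioctl(fd, SG_GET_COMMAND_Q, &depth) < 0)	/* read current max_queue */
		return -1;
	if (depth >= new_depth)
		return 0;				/* already wide enough */
	return ioctl(fd, SG_SET_COMMAND_Q, &new_depth);	/* raise it */
}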
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 20 | 80.00% | 1 | 50.00% |
Jens Axboe | 5 | 20.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
#define uptr64(val) ((void __user *)(uintptr_t)(val))
static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
{
if (hdr->protocol != BSG_PROTOCOL_SCSI ||
hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
return -EINVAL;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 27 | 84.38% | 1 | 33.33% |
FUJITA Tomonori | 4 | 12.50% | 1 | 33.33% |
Jens Axboe | 1 | 3.12% | 1 | 33.33% |
Total | 32 | 100.00% | 3 | 100.00% |
static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
fmode_t mode)
{
struct scsi_request *sreq = scsi_req(rq);
sreq->cmd_len = hdr->request_len;
if (sreq->cmd_len > BLK_MAX_CDB) {
sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
if (!sreq->cmd)
return -ENOMEM;
}
if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
return -EFAULT;
if (blk_verify_command(sreq->cmd, mode))
return -EPERM;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 72 | 63.72% | 4 | 50.00% |
Christoph Hellwig | 40 | 35.40% | 3 | 37.50% |
Al Viro | 1 | 0.88% | 1 | 12.50% |
Total | 113 | 100.00% | 8 | 100.00% |
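bsg_scsi_fill_hdr() copies the CDB from hdr->request; when request_len exceeds BLK_MAX_CDB (16 bytes), a separate buffer is kzalloc()ed for it. A hedged userspace sketch of a header carrying a 32-byte VARIABLE LENGTH CDB, which exercises that path; everything beyond the opcode and length byte is a placeholder:

/*
 * Hedged userspace sketch (not part of bsg.c): a "long" CDB is simply
 * request_len > 16; the kernel side then allocates a buffer for it in
 * bsg_scsi_fill_hdr().
 */
#include <stdint.h>
#include <string.h>
#include <linux/bsg.h>

static void fill_long_cdb(struct sg_io_v4 *hdr, unsigned char *cdb32)
{
	memset(cdb32, 0, 32);
	cdb32[0] = 0x7f;			/* VARIABLE LENGTH CDB opcode */
	cdb32[7] = 32 - 8;			/* additional CDB length */

	hdr->request_len = 32;			/* > BLK_MAX_CDB, triggers the kzalloc path */
	hdr->request = (__u64)(uintptr_t)cdb32;
}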
static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
struct scsi_request *sreq = scsi_req(rq);
int ret = 0;
/*
* fill in all the output members
*/
hdr->device_status = sreq->result & 0xff;
hdr->transport_status = host_byte(sreq->result);
hdr->driver_status = driver_byte(sreq->result);
hdr->info = 0;
if (hdr->device_status || hdr->transport_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
hdr->response_len = 0;
if (sreq->sense_len && hdr->response) {
int len = min_t(unsigned int, hdr->max_response_len,
sreq->sense_len);
if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
ret = -EFAULT;
else
hdr->response_len = len;
}
if (rq->next_rq) {
hdr->dout_resid = sreq->resid_len;
hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
} else if (rq_data_dir(rq) == READ) {
hdr->din_resid = sreq->resid_len;
} else {
hdr->dout_resid = sreq->resid_len;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 162 | 76.06% | 1 | 16.67% |
FUJITA Tomonori | 34 | 15.96% | 2 | 33.33% |
Jens Axboe | 9 | 4.23% | 1 | 16.67% |
Linus Torvalds | 6 | 2.82% | 1 | 16.67% |
Randy Dunlap | 2 | 0.94% | 1 | 16.67% |
Total | 213 | 100.00% | 6 | 100.00% |
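On the userspace side, the output members filled in here are typically decoded as in the hedged sketch below: SG_INFO_CHECK flags any non-zero status, device_status carries the SCSI status byte, sense data (if any) lands in the response buffer, and the resid fields report untransferred bytes. The helper name is illustrative, not part of any API:

/*
 * Hedged userspace sketch (not part of bsg.c): inspect the output
 * members that bsg_scsi_complete_rq() filled in.  Returns 0 when the
 * command completed cleanly.
 */
#include <stdio.h>
#include <linux/bsg.h>
#include <scsi/sg.h>

static int check_v4_status(const struct sg_io_v4 *hdr)
{
	if (!(hdr->info & SG_INFO_CHECK))
		return 0;				/* nothing was flagged */

	fprintf(stderr, "device=0x%x transport=0x%x driver=0x%x\n",
		hdr->device_status, hdr->transport_status, hdr->driver_status);

	if (hdr->response_len)				/* sense data was copied back */
		fprintf(stderr, "sense: %u bytes in the response buffer\n",
			hdr->response_len);

	fprintf(stderr, "din_resid=%d dout_resid=%d\n",
		hdr->din_resid, hdr->dout_resid);
	return -1;
}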
static void bsg_scsi_free_rq(struct request *rq)
{
scsi_req_free_cmd(scsi_req(rq));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 17 | 89.47% | 1 | 50.00% |
Jens Axboe | 2 | 10.53% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static const struct bsg_ops bsg_scsi_ops = {
.check_proto = bsg_scsi_check_proto,
.fill_hdr = bsg_scsi_fill_hdr,
.complete_rq = bsg_scsi_complete_rq,
.free_rq = bsg_scsi_free_rq,
};
static struct request *
bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
{
struct request *rq, *next_rq = NULL;
int ret;
if (!q->bsg_dev.class_dev)
return ERR_PTR(-ENXIO);
if (hdr->guard != 'Q')
return ERR_PTR(-EINVAL);
ret = q->bsg_dev.ops->check_proto(hdr);
if (ret)
return ERR_PTR(ret);
rq = blk_get_request(q, hdr->dout_xfer_len ?
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
if (IS_ERR(rq))
return rq;
ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
if (ret)
goto out;
rq->timeout = msecs_to_jiffies(hdr->timeout);
if (!rq->timeout)
rq->timeout = q->sg_timeout;
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
if (rq->timeout < BLK_MIN_SG_TIMEOUT)
rq->timeout = BLK_MIN_SG_TIMEOUT;
if (hdr->dout_xfer_len && hdr->din_xfer_len) {
if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
ret = -EOPNOTSUPP;
goto out;
}
next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
if (IS_ERR(next_rq)) {
ret = PTR_ERR(next_rq);
goto out;
}
rq->next_rq = next_rq;
ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
hdr->din_xfer_len, GFP_KERNEL);
if (ret)
goto out_free_nextrq;
}
if (hdr->dout_xfer_len) {
ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
hdr->dout_xfer_len, GFP_KERNEL);
} else if (hdr->din_xfer_len) {
ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
hdr->din_xfer_len, GFP_KERNEL);
}
if (ret)
goto out_unmap_nextrq;
return rq;
out_unmap_nextrq:
if (rq->next_rq)
blk_rq_unmap_user(rq->next_rq->bio);
out_free_nextrq:
if (rq->next_rq)
blk_put_request(rq->next_rq);
out:
q->bsg_dev.ops->free_rq(rq);
blk_put_request(rq);
return ERR_PTR(ret);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 176 | 41.41% | 4 | 26.67% |
FUJITA Tomonori | 121 | 28.47% | 6 | 40.00% |
Jens Axboe | 103 | 24.24% | 2 | 13.33% |
James Smart | 13 | 3.06% | 1 | 6.67% |
Joe Lawrence | 11 | 2.59% | 1 | 6.67% |
Al Viro | 1 | 0.24% | 1 | 6.67% |
Total | 425 | 100.00% | 15 | 100.00% |
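Two details of bsg_map_hdr() are worth illustrating: a header with both dout_xfer_len and din_xfer_len set requests a bidirectional command, which is only accepted when the queue advertises QUEUE_FLAG_BIDI (otherwise the submission fails with -EOPNOTSUPP), and hdr->timeout is in milliseconds, with zero meaning "use the queue's default". A hedged userspace sketch; the helper name and parameters are illustrative:

/*
 * Hedged userspace sketch (not part of bsg.c): request a bidirectional
 * transfer.  Only queues with QUEUE_FLAG_BIDI set accept this.
 */
#include <stdint.h>
#include <linux/bsg.h>

static void fill_bidi_xfer(struct sg_io_v4 *hdr,
			   void *out_buf, unsigned int out_len,
			   void *in_buf, unsigned int in_len)
{
	hdr->dout_xferp = (__u64)(uintptr_t)out_buf;	/* data to the device */
	hdr->dout_xfer_len = out_len;
	hdr->din_xferp = (__u64)(uintptr_t)in_buf;	/* data from the device */
	hdr->din_xfer_len = in_len;
	hdr->timeout = 0;				/* 0: fall back to the queue's sg_timeout */
}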
/*
 * async completion callback from the block layer, invoked when the
 * low-level driver finishes the request
 */
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
struct bsg_command *bc = rq->end_io_data;
struct bsg_device *bd = bc->bd;
unsigned long flags;
bsg_dbg(bd, "finished rq %p bc %p, bio %p\n",
rq, bc, bc->bio);
bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);
spin_lock_irqsave(&bd->lock, flags);
list_move_tail(&bc->list, &bd->done_list);
bd->done_cmds++;
spin_unlock_irqrestore(&bd->lock, flags);
wake_up(&bd->wq_done);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 109 | 95.61% | 2 | 50.00% |
Johannes Thumshirn | 3 | 2.63% | 1 | 25.00% |
Christoph Hellwig | 2 | 1.75% | 1 | 25.00% |
Total | 114 | 100.00% | 4 | 100.00% |
/*
* do final setup of a 'bc' and submit the matching 'rq' to the block
* layer for io
*/
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
struct bsg_command *bc, struct request *rq)
{
int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));
/*
* add bc command to busy queue and submit rq for io
*/
bc->rq = rq;
bc->bio = rq->bio;
if (rq->next_rq)
bc->bidi_bio = rq->next_rq->bio;
bc->hdr.duration = jiffies;
spin_lock_irq(&bd->lock);
list_add_tail(&bc->list, &bd->busy_list);
spin_unlock_irq(&bd->lock);
bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc);
rq->end_io_data = bc;
blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 103 | 73.05% | 2 | 33.33% |
Boaz Harrosh | 18 | 12.77% | 1 | 16.67% |
FUJITA Tomonori | 17 | 12.06% | 2 | 33.33% |
Johannes Thumshirn | 3 | 2.13% | 1 | 16.67% |
Total | 141 | 100.00% | 6 | 100.00% |
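By default bsg inserts commands at the head of the request queue; setting BSG_FLAG_Q_AT_TAIL in the header flags queues them behind normal I/O instead, as in this hedged userspace sketch (helper name is illustrative):

/* Hedged userspace sketch (not part of bsg.c): queue behind normal I/O. */
#include <linux/bsg.h>

static void queue_at_tail(struct sg_io_v4 *hdr)
{
	hdr->flags |= BSG_FLAG_Q_AT_TAIL;
}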
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc = NULL;
spin_lock_irq(&bd->lock);
if (bd->done_cmds) {
bc = list_first_entry(&bd->done_list, struct bsg_command, list);
list_del(&bc->list);
bd->done_cmds--;
}
spin_unlock_irq(&bd->lock);
return bc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 73 | 97.33% | 2 | 66.67% |
FUJITA Tomonori | 2 | 2.67% | 1 | 33.33% |
Total | 75 | 100.00% | 3 | 100.00% |
/*
* Get a finished command from the done list
*/
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret;
do {
bc = bsg_next_done_cmd(bd);
if (bc)
break;
if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
bc = ERR_PTR(-EAGAIN);
break;
}
ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
if (ret) {
bc = ERR_PTR(-ERESTARTSYS);
break;
}
} while (1);
bsg_dbg(bd, "returning done %p\n", bc);
return bc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
FUJITA Tomonori | 40 | 38.10% | 3 | 60.00% |
Jens Axboe | 33 | 31.43% | 1 | 20.00% |
Christoph Hellwig | 32 | 30.48% | 1 | 20.00% |
Total | 105 | 100.00% | 5 | 100.00% |
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
struct bio *bio, struct bio *bidi_bio)
{
int ret;
ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);
if (rq->next_rq) {
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
}
blk_rq_unmap_user(bio);
rq->q->bsg_dev.ops->free_rq(rq);
blk_put_request(rq);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 55 | 59.78% | 2 | 28.57% |
FUJITA Tomonori | 35 | 38.04% | 4 | 57.14% |
James Bottomley | 2 | 2.17% | 1 | 14.29% |
Total | 92 | 100.00% | 7 | 100.00% |
static bool bsg_complete(struct bsg_device *bd)
{
bool ret = false;
bool spin;
do {
spin_lock_irq(&bd->lock);
BUG_ON(bd->done_cmds > bd->queued_cmds);
/*
* All commands consumed.
*/
if (bd->done_cmds == bd->queued_cmds)
ret = true;
spin = !test_bit(BSG_F_BLOCK, &bd->flags);
spin_unlock_irq(&bd->lock);
} while (!ret && spin);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Zijlstra | 88 | 100.00% | 1 | 100.00% |
Total | 88 | 100.00% | 1 | 100.00% |
static int bsg_complete_all_commands(struct bsg_device *bd)
{
struct bsg_command *bc;
int ret, tret;
bsg_dbg(bd, "entered\n");
/*
* wait for all commands to complete
*/
io_wait_event(bd->wq_done, bsg_complete(bd));
/*
* discard done commands
*/
ret = 0;
do {
spin_lock_irq(&bd->lock);
if (!bd->queued_cmds) {
spin_unlock_irq(&bd->lock);
break;
}
spin_unlock_irq(&bd->lock);
bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc))
break;
tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
bc->bidi_bio);
if (!ret)
ret = tret;
bsg_free_command(bc);
} while (1);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 94 | 66.20% | 1 | 14.29% |
FUJITA Tomonori | 37 | 26.06% | 4 | 57.14% |
Peter Zijlstra | 8 | 5.63% | 1 | 14.29% |
Johannes Thumshirn | 3 | 2.11% | 1 | 14.29% |
Total | 142 | 100.00% | 7 | 100.00% |
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
const struct iovec *iov, ssize_t *bytes_read)
{
struct bsg_command *bc;
int nr_commands, ret;
if (count % sizeof(struct sg_io_v4))
return -EINVAL;
ret = 0;
nr_commands = count / sizeof(struct sg_io_v4);
while (nr_commands) {
bc = bsg_get_done_cmd(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
break;
}
/*
 * this is the only case where we need to copy data back
 * to user space after completing the request, so do it
 * here; the completion callback cannot do it for us
 */
ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
bc->bidi_bio);
if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
ret = -EFAULT;
bsg_free_command(bc);
if (ret)
break;
buf += sizeof(struct sg_io_v4);
*bytes_read += sizeof(struct sg_io_v4);
nr_commands--;
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 167 | 94.35% | 2 | 40.00% |
FUJITA Tomonori | 10 | 5.65% | 3 | 60.00% |
Total | 177 | 100.00% | 5 | 100.00% |
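A read() on the bsg file descriptor must be a multiple of sizeof(struct sg_io_v4); each completed command is copied back as one header. A hedged userspace sketch of reaping a single completion (the helper name is illustrative):

/*
 * Hedged userspace sketch (not part of bsg.c): reap one completed
 * command.  Under O_NONBLOCK, read() fails with errno EAGAIN when
 * nothing has completed yet; otherwise it blocks until a command
 * finishes.  Sizes that are not a multiple of sizeof(struct sg_io_v4)
 * are rejected with -EINVAL.
 */
#include <unistd.h>
#include <linux/bsg.h>

static int reap_one(int fd, struct sg_io_v4 *hdr)
{
	ssize_t n = read(fd, hdr, sizeof(*hdr));

	if (n != sizeof(*hdr))
		return -1;		/* error or short read */
	return 0;			/* hdr now holds the completed command */
}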
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
if (file->f_flags & O_NONBLOCK)
clear_bit(BSG_F_BLOCK, &bd->flags);
else
set_bit(BSG_F_BLOCK, &bd->flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 46 | 100.00% | 1 | 100.00% |
Total | 46 | 100.00% | 1 | 100.00% |
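With O_NONBLOCK set, the read and write paths return -EAGAIN instead of sleeping, so an asynchronous submitter usually waits with poll(); a hedged sketch, assuming the bsg fd reports POLLIN once a command has completed (the helper name is illustrative):

/*
 * Hedged userspace sketch (not part of bsg.c): wait for a completion
 * on a non-blocking bsg fd instead of spinning on read()/EAGAIN.
 */
#include <poll.h>

static int wait_for_bsg_completion(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret <= 0)
		return -1;			/* error or timeout */
	return (pfd.revents & POLLIN) ? 0 : -1;
}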
/*
* Check if the error is a "real" error that we should return.
*/
static inline int err_block_err(int ret)
{
if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
return 1;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct bsg_device *bd = file->private_data;
int ret;
ssize_t bytes_read;
bsg_dbg(bd, "read %zd bytes\n", count);
bsg_set_block(bd, file);
bytes_read = 0;
ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
*ppos = bytes_read;
if (!bytes_read || err_block_err(ret))
bytes_read = ret;
return bytes_read;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 93 | 96.88% | 1 | 50.00% |
Johannes Thumshirn | 3 | 3.12% | 1 | 50.00% |
Total | 96 | 100.00% | 2 | 100.00% |
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
size_t count, ssize_t *bytes_written, fmode_t mode)
{
struct bsg_command *bc;
struct request *rq;
int ret, nr_commands;
if (count % sizeof(struct sg_io_v4))
return -EINVAL;
nr_commands = count / sizeof(struct sg_io_v4);
rq = NULL;
bc = NULL;
ret = 0;
while (nr_commands) {
struct request_queue *q = bd->queue;
bc = bsg_alloc_command(bd);
if (IS_ERR(bc)) {
ret = PTR_ERR(bc);
bc = NULL;
break;
}
if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
ret = -EFAULT;
break;
}
/*
* get a request, fill in the blanks, and add to request queue
*/
rq = bsg_map_hdr(bd->queue, &bc->hdr, mode);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
rq = NULL;
break;
}
bsg_add_command(bd, q, bc, rq);
bc = NULL;
rq = NULL;
nr_commands--;
buf += sizeof(struct sg_io_v4);
*bytes_written += sizeof(struct sg_io_v4);
}
if (bc)
bsg_free_command(bc);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 225 | 94.94% | 3 | 33.33% |
FUJITA Tomonori | 7 | 2.95% | 3 | 33.33% |
Christoph Hellwig | 4 | 1.69% | 2 | 22.22% |
Al Viro | 1 | 0.42% | 1 | 11.11% |
Total | 237 | 100.00% | 9 | 100.00% |
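Because __bsg_write() walks the user buffer in sizeof(struct sg_io_v4) steps, userspace can queue several commands with a single write(); a hedged sketch (the helper name is illustrative):

/*
 * Hedged userspace sketch (not part of bsg.c): submit an array of
 * headers in one system call.  Each element is queued asynchronously;
 * completions are reaped later with read().
 */
#include <unistd.h>
#include <linux/bsg.h>

static int submit_batch(int fd, struct sg_io_v4 *hdrs, int n)
{
	ssize_t want = (ssize_t)n * sizeof(struct sg_io_v4);
	ssize_t done = write(fd, hdrs, want);

	if (done < 0)
		return -1;				/* nothing was queued */
	return done / sizeof(struct sg_io_v4);		/* commands accepted */
}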
static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
struct bsg_device *bd = file->private_data;
ssize_t bytes_written;
int ret;
bsg_dbg(bd, "write %zd bytes\n", count);
if (unlikely(uaccess_kernel()))
return -EINVAL;
bsg_set_block(bd, file);
bytes_written = 0;
ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);
*ppos = bytes_written;
/*
* return bytes written on non-fatal errors
*/
if (!bytes_written || err_block_err(ret))
bytes_written = ret;
bsg_dbg(bd, "returning %zd\n", bytes_written);
return bytes_written;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jens Axboe | 99 | 81.82% | 2 | 33.33% |
Al Viro | 12 | 9.92% | 2 | 33.33% |
Johannes Thumshirn | 6 | 4.96% | 1 | 16.67% |
FUJITA Tomonori | 4 | 3.31% | 1 | 16.67% |
Total | 121 | 100.00% | 6 | 100.00% |
static struct bsg_device *bsg_alloc_device(void)
{
struct bsg_device *bd;
bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
if (unlikely(!bd))
return NULL;
spin_lock_init(&bd->lock);
bd->max_queue = BSG_DEFAULT_CMDS;