cregit-Linux how code gets into the kernel

Release 4.18 block/bsg.c

Directory: block
/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License version 2.  See the file "COPYING" in the main directory of this
 *  archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>


#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"

#define BSG_VERSION	"0.4"


#define bsg_dbg(bd, fmt, ...) \
	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)


/*
 * Per-open bsg device state: one of these exists for each bsg character
 * device that has been opened, hashed on bsg_device_list.
 */
struct bsg_device {
	struct request_queue *queue;	/* request queue this device submits to */
	spinlock_t lock;		/* protects the lists and counters below */
	struct list_head busy_list;	/* commands submitted, not yet completed */
	struct list_head done_list;	/* completed commands awaiting read(2) */
	struct hlist_node dev_list;	/* link in the bsg_device_list hash bucket */
	atomic_t ref_count;		/* open references keeping this device alive */
	int queued_cmds;		/* total outstanding commands (busy + done) */
	int done_cmds;			/* number of entries on done_list */
	wait_queue_head_t wq_done;	/* readers waiting for a completed command */
	wait_queue_head_t wq_free;	/* writers waiting for a free command slot */
	char name[20];			/* device name, used by bsg_dbg() */
	int max_queue;			/* cap on queued_cmds before -EINVAL */
	unsigned long flags;		/* BSG_F_* bit flags */
};


/* bit numbers for bsg_device->flags */
enum {
	BSG_F_BLOCK		= 1,	/* fd opened without O_NONBLOCK (blocking I/O) */
};


#define BSG_DEFAULT_CMDS	64

#define BSG_MAX_DEVS		32768

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);


#define BSG_LIST_ARRAY_SIZE	8

static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];


static struct class *bsg_class;

static int bsg_major;


static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */

struct bsg_command {
	struct bsg_device *bd;		/* owning bsg device */
	struct list_head list;		/* entry on bd->busy_list or bd->done_list */
	struct request *rq;		/* block layer request built for this command */
	struct bio *bio;		/* mapped data bio, unmapped at completion */
	struct bio *bidi_bio;		/* bio of rq->next_rq for bidirectional cmds */
	int err;			/* NOTE(review): not obviously written in this
					 * file's visible code — verify users */
	struct sg_io_v4 hdr;		/* header copied from/returned to userspace */
};


static void bsg_free_command(struct bsg_command *bc) { struct bsg_device *bd = bc->bd; unsigned long flags; kmem_cache_free(bsg_cmd_cachep, bc); spin_lock_irqsave(&bd->lock, flags); bd->queued_cmds--; spin_unlock_irqrestore(&bd->lock, flags); wake_up(&bd->wq_free); }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe64100.00%2100.00%
Total64100.00%2100.00%


static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) { struct bsg_command *bc = ERR_PTR(-EINVAL); spin_lock_irq(&bd->lock); if (bd->queued_cmds >= bd->max_queue) goto out; bd->queued_cmds++; spin_unlock_irq(&bd->lock); bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL); if (unlikely(!bc)) { spin_lock_irq(&bd->lock); bd->queued_cmds--; bc = ERR_PTR(-ENOMEM); goto out; } bc->bd = bd; INIT_LIST_HEAD(&bc->list); bsg_dbg(bd, "returning free cmd %p\n", bc); return bc; out: spin_unlock_irq(&bd->lock); return bc; }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe11783.57%350.00%
FUJITA Tomonori2014.29%233.33%
Johannes Thumshirn32.14%116.67%
Total140100.00%6100.00%


static inline struct hlist_head *bsg_dev_idx_hash(int index) { return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; }

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori2080.00%150.00%
Jens Axboe520.00%150.00%
Total25100.00%2100.00%

#define uptr64(val) ((void __user *)(uintptr_t)(val))

/*
 * Validate that the sg v4 header describes a SCSI command before a
 * request is built from it.
 */
static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol == BSG_PROTOCOL_SCSI &&
	    hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD)
		return 0;

	return -EINVAL;
}

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig2784.38%133.33%
FUJITA Tomonori412.50%133.33%
Jens Axboe13.12%133.33%
Total32100.00%3100.00%


static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr, fmode_t mode) { struct scsi_request *sreq = scsi_req(rq); sreq->cmd_len = hdr->request_len; if (sreq->cmd_len > BLK_MAX_CDB) { sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL); if (!sreq->cmd) return -ENOMEM; } if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len)) return -EFAULT; if (blk_verify_command(sreq->cmd, mode)) return -EPERM; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori7263.72%450.00%
Christoph Hellwig4035.40%337.50%
Al Viro10.88%112.50%
Total113100.00%8100.00%


static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr) { struct scsi_request *sreq = scsi_req(rq); int ret = 0; /* * fill in all the output members */ hdr->device_status = sreq->result & 0xff; hdr->transport_status = host_byte(sreq->result); hdr->driver_status = driver_byte(sreq->result); hdr->info = 0; if (hdr->device_status || hdr->transport_status || hdr->driver_status) hdr->info |= SG_INFO_CHECK; hdr->response_len = 0; if (sreq->sense_len && hdr->response) { int len = min_t(unsigned int, hdr->max_response_len, sreq->sense_len); if (copy_to_user(uptr64(hdr->response), sreq->sense, len)) ret = -EFAULT; else hdr->response_len = len; } if (rq->next_rq) { hdr->dout_resid = sreq->resid_len; hdr->din_resid = scsi_req(rq->next_rq)->resid_len; } else if (rq_data_dir(rq) == READ) { hdr->din_resid = sreq->resid_len; } else { hdr->dout_resid = sreq->resid_len; } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig16276.06%116.67%
FUJITA Tomonori3415.96%233.33%
Jens Axboe94.23%116.67%
Linus Torvalds62.82%116.67%
Randy Dunlap20.94%116.67%
Total213100.00%6100.00%


static void bsg_scsi_free_rq(struct request *rq) { scsi_req_free_cmd(scsi_req(rq)); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig1789.47%150.00%
Jens Axboe210.53%150.00%
Total19100.00%2100.00%

static const struct bsg_ops bsg_scsi_ops = { .check_proto = bsg_scsi_check_proto, .fill_hdr = bsg_scsi_fill_hdr, .complete_rq = bsg_scsi_complete_rq, .free_rq = bsg_scsi_free_rq, };
static struct request * bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode) { struct request *rq, *next_rq = NULL; int ret; if (!q->bsg_dev.class_dev) return ERR_PTR(-ENXIO); if (hdr->guard != 'Q') return ERR_PTR(-EINVAL); ret = q->bsg_dev.ops->check_proto(hdr); if (ret) return ERR_PTR(ret); rq = blk_get_request(q, hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); if (IS_ERR(rq)) return rq; ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode); if (ret) goto out; rq->timeout = msecs_to_jiffies(hdr->timeout); if (!rq->timeout) rq->timeout = q->sg_timeout; if (!rq->timeout) rq->timeout = BLK_DEFAULT_SG_TIMEOUT; if (rq->timeout < BLK_MIN_SG_TIMEOUT) rq->timeout = BLK_MIN_SG_TIMEOUT; if (hdr->dout_xfer_len && hdr->din_xfer_len) { if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { ret = -EOPNOTSUPP; goto out; } next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0); if (IS_ERR(next_rq)) { ret = PTR_ERR(next_rq); goto out; } rq->next_rq = next_rq; ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp), hdr->din_xfer_len, GFP_KERNEL); if (ret) goto out_free_nextrq; } if (hdr->dout_xfer_len) { ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp), hdr->dout_xfer_len, GFP_KERNEL); } else if (hdr->din_xfer_len) { ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp), hdr->din_xfer_len, GFP_KERNEL); } if (ret) goto out_unmap_nextrq; return rq; out_unmap_nextrq: if (rq->next_rq) blk_rq_unmap_user(rq->next_rq->bio); out_free_nextrq: if (rq->next_rq) blk_put_request(rq->next_rq); out: q->bsg_dev.ops->free_rq(rq); blk_put_request(rq); return ERR_PTR(ret); }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig17641.41%426.67%
FUJITA Tomonori12128.47%640.00%
Jens Axboe10324.24%213.33%
James Smart133.06%16.67%
Joe Lawrence112.59%16.67%
Al Viro10.24%16.67%
Total425100.00%15100.00%

/*
 * Async completion callback from the block layer, invoked when
 * scsi/ide/whatever finishes the request: move the command to the done
 * list and wake any reader sleeping in bsg_get_done_cmd().
 */
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
	struct bsg_command *cmd = rq->end_io_data;
	struct bsg_device *bsgd = cmd->bd;
	unsigned long irqflags;

	bsg_dbg(bsgd, "finished rq %p bc %p, bio %p\n", rq, cmd, cmd->bio);

	/* hdr.duration held the submit jiffies; turn it into elapsed ms */
	cmd->hdr.duration = jiffies_to_msecs(jiffies - cmd->hdr.duration);

	spin_lock_irqsave(&bsgd->lock, irqflags);
	list_move_tail(&cmd->list, &bsgd->done_list);
	bsgd->done_cmds++;
	spin_unlock_irqrestore(&bsgd->lock, irqflags);

	wake_up(&bsgd->wq_done);
}

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe10995.61%250.00%
Johannes Thumshirn32.63%125.00%
Christoph Hellwig21.75%125.00%
Total114100.00%4100.00%

/* * do final setup of a 'bc' and submit the matching 'rq' to the block * layer for io */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, struct bsg_command *bc, struct request *rq) { int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL)); /* * add bc command to busy queue and submit rq for io */ bc->rq = rq; bc->bio = rq->bio; if (rq->next_rq) bc->bidi_bio = rq->next_rq->bio; bc->hdr.duration = jiffies; spin_lock_irq(&bd->lock); list_add_tail(&bc->list, &bd->busy_list); spin_unlock_irq(&bd->lock); bsg_dbg(bd, "queueing rq %p, bc %p\n", rq, bc); rq->end_io_data = bc; blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io); }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe10373.05%233.33%
Boaz Harrosh1812.77%116.67%
FUJITA Tomonori1712.06%233.33%
Johannes Thumshirn32.13%116.67%
Total141100.00%6100.00%


static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) { struct bsg_command *bc = NULL; spin_lock_irq(&bd->lock); if (bd->done_cmds) { bc = list_first_entry(&bd->done_list, struct bsg_command, list); list_del(&bc->list); bd->done_cmds--; } spin_unlock_irq(&bd->lock); return bc; }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe7397.33%266.67%
FUJITA Tomonori22.67%133.33%
Total75100.00%3100.00%

/* * Get a finished command from the done list */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) { struct bsg_command *bc; int ret; do { bc = bsg_next_done_cmd(bd); if (bc) break; if (!test_bit(BSG_F_BLOCK, &bd->flags)) { bc = ERR_PTR(-EAGAIN); break; } ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); if (ret) { bc = ERR_PTR(-ERESTARTSYS); break; } } while (1); bsg_dbg(bd, "returning done %p\n", bc); return bc; }

Contributors

PersonTokensPropCommitsCommitProp
FUJITA Tomonori4038.10%360.00%
Jens Axboe3331.43%120.00%
Christoph Hellwig3230.48%120.00%
Total105100.00%5100.00%


static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, struct bio *bio, struct bio *bidi_bio) { int ret; ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr); if (rq->next_rq) { blk_rq_unmap_user(bidi_bio); blk_put_request(rq->next_rq); } blk_rq_unmap_user(bio); rq->q->bsg_dev.ops->free_rq(rq); blk_put_request(rq); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Christoph Hellwig5559.78%228.57%
FUJITA Tomonori3538.04%457.14%
James Bottomley22.17%114.29%
Total92100.00%7100.00%


static bool bsg_complete(struct bsg_device *bd) { bool ret = false; bool spin; do { spin_lock_irq(&bd->lock); BUG_ON(bd->done_cmds > bd->queued_cmds); /* * All commands consumed. */ if (bd->done_cmds == bd->queued_cmds) ret = true; spin = !test_bit(BSG_F_BLOCK, &bd->flags); spin_unlock_irq(&bd->lock); } while (!ret && spin); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Peter Zijlstra88100.00%1100.00%
Total88100.00%1100.00%


static int bsg_complete_all_commands(struct bsg_device *bd) { struct bsg_command *bc; int ret, tret; bsg_dbg(bd, "entered\n"); /* * wait for all commands to complete */ io_wait_event(bd->wq_done, bsg_complete(bd)); /* * discard done commands */ ret = 0; do { spin_lock_irq(&bd->lock); if (!bd->queued_cmds) { spin_unlock_irq(&bd->lock); break; } spin_unlock_irq(&bd->lock); bc = bsg_get_done_cmd(bd); if (IS_ERR(bc)) break; tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, bc->bidi_bio); if (!ret) ret = tret; bsg_free_command(bc); } while (1); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe9466.20%114.29%
FUJITA Tomonori3726.06%457.14%
Peter Zijlstra85.63%114.29%
Johannes Thumshirn32.11%114.29%
Total142100.00%7100.00%


static int __bsg_read(char __user *buf, size_t count, struct bsg_device *bd, const struct iovec *iov, ssize_t *bytes_read) { struct bsg_command *bc; int nr_commands, ret; if (count % sizeof(struct sg_io_v4)) return -EINVAL; ret = 0; nr_commands = count / sizeof(struct sg_io_v4); while (nr_commands) { bc = bsg_get_done_cmd(bd); if (IS_ERR(bc)) { ret = PTR_ERR(bc); break; } /* * this is the only case where we need to copy data back * after completing the request. so do that here, * bsg_complete_work() cannot do that for us */ ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, bc->bidi_bio); if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr))) ret = -EFAULT; bsg_free_command(bc); if (ret) break; buf += sizeof(struct sg_io_v4); *bytes_read += sizeof(struct sg_io_v4); nr_commands--; } return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe16794.35%240.00%
FUJITA Tomonori105.65%360.00%
Total177100.00%5100.00%


static inline void bsg_set_block(struct bsg_device *bd, struct file *file) { if (file->f_flags & O_NONBLOCK) clear_bit(BSG_F_BLOCK, &bd->flags); else set_bit(BSG_F_BLOCK, &bd->flags); }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe46100.00%1100.00%
Total46100.00%1100.00%

/*
 * Check if the error is a "real" error that we should return, as
 * opposed to the transient conditions -ENOSPC/-ENODATA/-EAGAIN,
 * which still let partial progress be reported.
 */
static inline int err_block_err(int ret)
{
	switch (ret) {
	case 0:
	case -ENOSPC:
	case -ENODATA:
	case -EAGAIN:
		return 0;
	default:
		return 1;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe35100.00%1100.00%
Total35100.00%1100.00%


static ssize_t bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct bsg_device *bd = file->private_data; int ret; ssize_t bytes_read; bsg_dbg(bd, "read %zd bytes\n", count); bsg_set_block(bd, file); bytes_read = 0; ret = __bsg_read(buf, count, bd, NULL, &bytes_read); *ppos = bytes_read; if (!bytes_read || err_block_err(ret)) bytes_read = ret; return bytes_read; }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe9396.88%150.00%
Johannes Thumshirn33.12%150.00%
Total96100.00%2100.00%


static int __bsg_write(struct bsg_device *bd, const char __user *buf, size_t count, ssize_t *bytes_written, fmode_t mode) { struct bsg_command *bc; struct request *rq; int ret, nr_commands; if (count % sizeof(struct sg_io_v4)) return -EINVAL; nr_commands = count / sizeof(struct sg_io_v4); rq = NULL; bc = NULL; ret = 0; while (nr_commands) { struct request_queue *q = bd->queue; bc = bsg_alloc_command(bd); if (IS_ERR(bc)) { ret = PTR_ERR(bc); bc = NULL; break; } if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { ret = -EFAULT; break; } /* * get a request, fill in the blanks, and add to request queue */ rq = bsg_map_hdr(bd->queue, &bc->hdr, mode); if (IS_ERR(rq)) { ret = PTR_ERR(rq); rq = NULL; break; } bsg_add_command(bd, q, bc, rq); bc = NULL; rq = NULL; nr_commands--; buf += sizeof(struct sg_io_v4); *bytes_written += sizeof(struct sg_io_v4); } if (bc) bsg_free_command(bc); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe22594.94%333.33%
FUJITA Tomonori72.95%333.33%
Christoph Hellwig41.69%222.22%
Al Viro10.42%111.11%
Total237100.00%9100.00%


static ssize_t bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct bsg_device *bd = file->private_data; ssize_t bytes_written; int ret; bsg_dbg(bd, "write %zd bytes\n", count); if (unlikely(uaccess_kernel())) return -EINVAL; bsg_set_block(bd, file); bytes_written = 0; ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode); *ppos = bytes_written; /* * return bytes written on non-fatal errors */ if (!bytes_written || err_block_err(ret)) bytes_written = ret; bsg_dbg(bd, "returning %zd\n", bytes_written); return bytes_written; }

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe9981.82%233.33%
Al Viro129.92%233.33%
Johannes Thumshirn64.96%116.67%
FUJITA Tomonori43.31%116.67%
Total121100.00%6100.00%


static struct bsg_device *bsg_alloc_device(void) { struct bsg_device *bd; bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); if (unlikely(!bd)) return NULL; spin_lock_init(&bd->lock); bd->max_queue = BSG