cregit-Linux: how code gets into the kernel

Release 4.15: block/bsg.c

Directory: block
/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License version 2.  See the file "COPYING" in the main directory of this
 *  archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>


#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"

#define BSG_VERSION	"0.4"


struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
};


#define BSG_DEFAULT_CMDS	64

#define BSG_MAX_DEVS		32768


#undef BSG_DEBUG

#ifdef BSG_DEBUG

#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else

#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);


#define BSG_LIST_ARRAY_SIZE	8

static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];


static struct class *bsg_class;

static int bsg_major;


static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */

struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
};


static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          64      100.00%  2        100.00%
Total               64      100.00%  2        100.00%


static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          122     85.92%   3        60.00%
FUJITA Tomonori     20      14.08%   2        40.00%
Total               142     100.00%  5        100.00%


static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     20      80.00%   1        50.00%
Jens Axboe          5       20.00%   1        50.00%
Total               25      100.00%  2        100.00%


static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t mode)
{
	struct scsi_request *req = scsi_req(rq);

	if (hdr->request_len > BLK_MAX_CDB) {
		req->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!req->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(req->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(req->cmd, mode))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	req->cmd_len = hdr->request_len;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     164     79.61%   4        33.33%
Christoph Hellwig   17      8.25%    2        16.67%
Linus Torvalds      14      6.80%    1        8.33%
Adel Gadllah        4       1.94%    1        8.33%
Randy Dunlap        3       1.46%    1        8.33%
Jens Axboe          2       0.97%    1        8.33%
Namhyung Kim        1       0.49%    1        8.33%
Al Viro             1       0.49%    1        8.33%
Total               206     100.00%  12       100.00%

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int bsg_validate_sgv4_hdr(struct sg_io_v4 *hdr, int *op)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*op = hdr->dout_xfer_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN;
	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     54      63.53%   2        40.00%
Jens Axboe          27      31.76%   2        40.00%
Christoph Hellwig   4       4.71%    1        20.00%
Total               85      100.00%  5        100.00%
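The validator above accepts only headers whose guard byte is 'Q' and whose protocol/subprotocol pair is one of the SCSI variants, and it derives the request direction from dout_xfer_len. As a hedged illustration of what a userspace caller must set up before reaching this check, here is a minimal sketch; the helper name init_sgv4_hdr() is hypothetical, but the structure and constants come from <linux/bsg.h>:

#include <string.h>
#include <linux/bsg.h>	/* struct sg_io_v4, BSG_PROTOCOL_SCSI, ... */

/* Hypothetical helper: fill in the fields bsg_validate_sgv4_hdr() checks. */
static void init_sgv4_hdr(struct sg_io_v4 *hdr)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->guard = 'Q';			/* anything else gets -EINVAL */
	hdr->protocol = BSG_PROTOCOL_SCSI;	/* only SCSI is accepted */
	hdr->subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	/* dout_xfer_len != 0 selects REQ_OP_SCSI_OUT, else REQ_OP_SCSI_IN */
}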

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t mode)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret;
	unsigned int op, dxfer_len;
	void __user *dxferp = NULL;
	struct bsg_class_device *bcd = &q->bsg_dev;

	/* if the LLD has been removed then the bsg_unregister_queue will
	 * eventually be called and the class_dev was freed, so we can no
	 * longer use this request_queue. Return no such address.
	 */
	if (!bcd->class_dev)
		return ERR_PTR(-ENXIO);

	dprintk("map hdr %llx/%u %llx/%u\n",
		(unsigned long long) hdr->dout_xferp, hdr->dout_xfer_len,
		(unsigned long long) hdr->din_xferp, hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(hdr, &op);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, op, GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;

	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, mode);
	if (ret)
		goto out;

	if (op == REQ_OP_SCSI_OUT && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, GFP_KERNEL);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			next_rq = NULL;
			goto out;
		}
		rq->next_rq = next_rq;

		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void __user *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	return rq;
out:
	scsi_req_free_cmd(scsi_req(rq));
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     219     51.17%   6        35.29%
Jens Axboe          154     35.98%   3        17.65%
James Smart         25      5.84%    1        5.88%
Christoph Hellwig   13      3.04%    3        17.65%
Joe Lawrence        11      2.57%    1        5.88%
Namhyung Kim        4       0.93%    1        5.88%
Daniel Mack         1       0.23%    1        5.88%
Al Viro             1       0.23%    1        5.88%
Total               428     100.00%  17       100.00%

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, blk_status_t status)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p\n",
		bd->name, rq, bc, bc->bio);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          113     97.41%   2        66.67%
Christoph Hellwig   3       2.59%    1        33.33%
Total               116     100.00%  3        100.00%

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          108     75.52%   2        40.00%
Boaz Harrosh        18      12.59%   1        20.00%
FUJITA Tomonori     17      11.89%   2        40.00%
Total               143     100.00%  5        100.00%


static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          73      97.33%   2        66.67%
FUJITA Tomonori     2       2.67%    1        33.33%
Total               75      100.00%  3        100.00%

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          74      69.16%   1        50.00%
FUJITA Tomonori     33      30.84%   1        50.00%
Total               107     100.00%  2        100.00%


static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	struct scsi_request *req = scsi_req(rq);
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, req->result);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = req->result & 0xff;
	hdr->transport_status = host_byte(req->result);
	hdr->driver_status = driver_byte(req->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (req->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
				req->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   req->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = req->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = req->resid_len;
	else
		hdr->dout_resid = req->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && req->result < 0)
		ret = req->result;

	blk_rq_unmap_user(bio);
	scsi_req_free_cmd(req);
	blk_put_request(rq);

	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     231     80.21%   5        41.67%
Christoph Hellwig   33      11.46%   2        16.67%
James Bottomley     14      4.86%    1        8.33%
Tejun Heo           4       1.39%    1        8.33%
Jens Axboe          4       1.39%    1        8.33%
Boaz Harrosh        1       0.35%    1        8.33%
Namhyung Kim        1       0.35%    1        8.33%
Total               288     100.00%  12       100.00%


static bool bsg_complete(struct bsg_device *bd)
{
	bool ret = false;
	bool spin;

	do {
		spin_lock_irq(&bd->lock);

		BUG_ON(bd->done_cmds > bd->queued_cmds);

		/*
		 * All commands consumed.
		 */
		if (bd->done_cmds == bd->queued_cmds)
			ret = true;

		spin = !test_bit(BSG_F_BLOCK, &bd->flags);

		spin_unlock_irq(&bd->lock);
	} while (!ret && spin);

	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Peter Zijlstra      88      100.00%  1        100.00%
Total               88      100.00%  1        100.00%


static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	io_wait_event(bd->wq_done, bsg_complete(bd));

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          99      68.75%   1        16.67%
FUJITA Tomonori     37      25.69%   4        66.67%
Peter Zijlstra      8       5.56%    1        16.67%
Total               144     100.00%  6        100.00%


static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          167     94.35%   2        40.00%
FUJITA Tomonori     10      5.65%    3        60.00%
Total               177     100.00%  5        100.00%


static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          46      100.00%  1        100.00%
Total               46      100.00%  1        100.00%

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          35      100.00%  1        100.00%
Total               35      100.00%  1        100.00%


static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || err_block_err(ret))
		bytes_read = ret;

	return bytes_read;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          97      98.98%   1        50.00%
Alexey Dobriyan     1       1.02%    1        50.00%
Total               98      100.00%  2        100.00%


static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written, fmode_t mode)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, mode);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          225     95.74%   3        37.50%
FUJITA Tomonori     7       2.98%    3        37.50%
Christoph Hellwig   2       0.85%    1        12.50%
Al Viro             1       0.43%    1        12.50%
Total               235     100.00%  8        100.00%


static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %zd bytes\n", bd->name, count);

	if (unlikely(uaccess_kernel()))
		return -EINVAL;

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written, file->f_mode);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || err_block_err(ret))
		bytes_written = ret;

	dprintk("%s: returning %zd\n", bd->name, bytes_written);
	return bytes_written;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          107     85.60%   2        33.33%
Al Viro             12      9.60%    2        33.33%
FUJITA Tomonori     4       3.20%    1        16.67%
Alexey Dobriyan     2       1.60%    1        16.67%
Total               125     100.00%  6        100.00%
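Together, __bsg_write() and __bsg_read() give bsg its asynchronous mode: write() queues whole sg_io_v4 headers (the byte count must be a multiple of sizeof(struct sg_io_v4)), and a later read() reaps completed headers. The following userspace sketch of that round trip is an illustration only; the function name and error handling are assumptions, not part of this file:

#include <unistd.h>
#include <linux/bsg.h>

/* Hedged sketch: queue one command via write(), reap it via read(). */
int queue_and_reap(int fd, struct sg_io_v4 *hdr)
{
	/* write() queues the command; count must be a header multiple */
	if (write(fd, hdr, sizeof(*hdr)) != (ssize_t)sizeof(*hdr))
		return -1;
	/* a blocking read() returns the completed header (see __bsg_read) */
	if (read(fd, hdr, sizeof(*hdr)) != (ssize_t)sizeof(*hdr))
		return -1;
	return hdr->device_status ? -1 : 0;
}

With O_NONBLOCK set on the fd, read() instead fails with -EAGAIN until a completion is available, and poll() (serviced by bsg_poll() below) can be used to wait.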


static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          96      100.00%  2        100.00%
Total               96      100.00%  2        100.00%


static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     46      83.64%   1        50.00%
James Bottomley     9       16.36%   1        50.00%
Total               55      100.00%  2        100.00%


static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          71      53.79%   2        40.00%
FUJITA Tomonori     61      46.21%   3        60.00%
Total               132     100.00%  5        100.00%


static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	if (!blk_queue_scsi_passthrough(rq)) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		return ERR_PTR(-EINVAL);
	}

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev),
		sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          136     68.00%   1        11.11%
FUJITA Tomonori     34      17.00%   5        55.56%
Bart Van Assche     24      12.00%   1        11.11%
Tejun Heo           3       1.50%    1        11.11%
Kay Sievers         3       1.50%    1        11.11%
Total               200     100.00%  9        100.00%


static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          54      73.97%   1        25.00%
FUJITA Tomonori     19      26.03%   3        75.00%
Total               73      100.00%  4        100.00%


static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     67      50.76%   5        83.33%
Jens Axboe          65      49.24%   1        16.67%
Total               132     100.00%  6        100.00%


static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          48      92.31%   1        50.00%
Jonathan Corbet     4       7.69%    1        50.00%
Total               52      100.00%  2        100.00%


static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          37      100.00%  1        100.00%
Total               37      100.00%  1        100.00%


static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          104     99.05%   1        50.00%
Namhyung Kim        1       0.95%    1        50.00%
Total               105     100.00%  2        100.00%


static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	default:
		return -ENOTTY;
	}
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          174     51.18%   3        25.00%
FUJITA Tomonori     138     40.59%   6        50.00%
Boaz Harrosh        18      5.29%    1        8.33%
James Bottomley     6       1.76%    1        8.33%
Al Viro             4       1.18%    1        8.33%
Total               340     100.00%  12       100.00%
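The SG_IO branch of bsg_ioctl() is the synchronous path: it maps the header, executes the request with blk_execute_rq(), and copies the completed header back. The userspace sketch below issues a standard 6-byte SCSI INQUIRY through it; the device path, function name, and INQ_LEN are assumptions for illustration, and real nodes appear as /dev/bsg/<name> via bsg_devnode() further down:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO, SG_INFO_CHECK */
#include <linux/bsg.h>

#define INQ_LEN 96

/* Hedged sketch: synchronous INQUIRY through a bsg node. */
int bsg_inquiry(const char *dev, unsigned char buf[INQ_LEN])
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, INQ_LEN, 0 };	/* INQUIRY */
	unsigned char sense[32];
	struct sg_io_v4 hdr;
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (__u64)(unsigned long)cdb;
	hdr.request_len = sizeof(cdb);
	hdr.din_xferp = (__u64)(unsigned long)buf;	/* device-to-host data */
	hdr.din_xfer_len = INQ_LEN;
	hdr.response = (__u64)(unsigned long)sense;	/* sense buffer */
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 30000;	/* msecs, see blk_fill_sgv4_hdr_rq() */

	if (ioctl(fd, SG_IO, &hdr) < 0 || (hdr.info & SG_INFO_CHECK)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}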

static const struct file_operations bsg_fops = {
	.read		= bsg_read,
	.write		= bsg_write,
	.poll		= bsg_poll,
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}

Contributors

PersonTokensPropCommitsCommitProp
Jens Axboe5156.04%112.50%
FUJITA Tomonori2931.87%450.00%
Stanislaw Gruszka88.79%112.50%
James Bottomley22.20%112.50%
Tony Jones11.10%112.50%
Total91100.00%8100.00%

EXPORT_SYMBOL_GPL(bsg_unregister_queue);
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
FUJITA Tomonori     140     43.34%   6        33.33%
Jens Axboe          123     38.08%   4        22.22%
James Bottomley     35      10.84%   2        11.11%
Tejun Heo           16      4.95%    1        5.56%
Greg Kroah-Hartman  3       0.93%    2        11.11%
Kay Sievers         3       0.93%    1        5.56%
Tony Jones          2       0.62%    1        5.56%
Linus Torvalds      1       0.31%    1        5.56%
Total               323     100.00%  18       100.00%

EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;
static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Kay Sievers         28      96.55%   2        66.67%
Al Viro             1       3.45%    1        33.33%
Total               29      100.00%  3        100.00%


static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;

unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          128     59.81%   5        45.45%
FUJITA Tomonori     80      37.38%   4        36.36%
Kay Sievers         6       2.80%    2        18.18%
Total               214     100.00%  11       100.00%

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);

Overall Contributors

Person              Tokens  Prop     Commits  CommitProp
Jens Axboe          2869    59.38%   9        10.98%
FUJITA Tomonori     1484    30.71%   30       36.59%
Peter Zijlstra      96      1.99%    1        1.22%
Christoph Hellwig   72      1.49%    5        6.10%
James Bottomley     66      1.37%    4        4.88%
Kay Sievers         41      0.85%    3        3.66%
Boaz Harrosh        37      0.77%    2        2.44%
Tejun Heo           26      0.54%    4        4.88%
James Smart         25      0.52%    1        1.22%
Bart Van Assche     24      0.50%    1        1.22%
Al Viro             20      0.41%    5        6.10%
Linus Torvalds      15      0.31%    2        2.44%
Joe Lawrence        11      0.23%    1        1.22%
Stanislaw Gruszka   8       0.17%    1        1.22%
Namhyung Kim        7       0.14%    2        2.44%
Randy Dunlap        6       0.12%    1        1.22%
Arnd Bergmann       5       0.10%    1        1.22%
Adel Gadllah        4       0.08%    1        1.22%
Jonathan Corbet     4       0.08%    1        1.22%
Tony Jones          3       0.06%    1        1.22%
Alexey Dobriyan     3       0.06%    1        1.22%
Greg Kroah-Hartman  3       0.06%    2        2.44%
Harvey Harrison     1       0.02%    1        1.22%
Daniel Mack         1       0.02%    1        1.22%
Arjan van de Ven    1       0.02%    1        1.22%
Total               4832    100.00%  82       100.00%
Created with cregit.