Release 4.12 drivers/char/raw.c
/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
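The binding protocol described above is driven entirely from userspace via ioctl()s on the control node. As a minimal, illustrative sketch (the block device path /dev/sdb1 and raw minor 1 are arbitrary assumptions), binding /dev/raw/raw1 to a block device with RAW_SETBIND looks roughly like this:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <linux/raw.h>
#include <unistd.h>

int main(void)
{
	struct raw_config_request rq;
	struct stat st;
	int ctl;

	/* Look up the major/minor numbers of the block device to bind. */
	if (stat("/dev/sdb1", &st) < 0)		/* hypothetical block device */
		return 1;

	ctl = open("/dev/rawctl", O_RDWR);	/* minor 0: the control device */
	if (ctl < 0)
		return 1;

	rq.raw_minor   = 1;			/* bind /dev/raw/raw1 */
	rq.block_major = major(st.st_rdev);
	rq.block_minor = minor(st.st_rdev);

	if (ioctl(ctl, RAW_SETBIND, &rq) < 0)
		return 1;

	close(ctl);
	return 0;
}

Passing a zero major/minor pair through the same ioctl unbinds the minor again; the raw(8) utility is a thin wrapper around exactly this interface.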
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
struct raw_device_data {
	struct block_device *binding;
	int inuse;
};
static struct class *raw_class;
static struct raw_device_data *raw_devices;
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops;	/* forward declaration */
static int max_raw_minors = MAX_RAW_MINORS;
module_param(max_raw_minors, int, 0);
MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;
	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}
	mutex_lock(&raw_mutex);
	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
	bdgrab(bdev);
	err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
	if (err)
		goto out;
	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
	if (err)
		goto out1;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		file_inode(filp)->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	return 0;
out1:
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
	mutex_unlock(&raw_mutex);
	return err;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Andrew Morton | 92 | 44.44% | 8 | 40.00% | 
| Linus Torvalds (pre-git) | 70 | 33.82% | 2 | 10.00% | 
| Linus Torvalds | 19 | 9.18% | 1 | 5.00% | 
| Al Viro | 15 | 7.25% | 5 | 25.00% | 
| Tejun Heo | 6 | 2.90% | 1 | 5.00% | 
| Arjan van de Ven | 3 | 1.45% | 1 | 5.00% | 
| Ilya Dryomov | 1 | 0.48% | 1 | 5.00% | 
| Martin K. Petersen | 1 | 0.48% | 1 | 5.00% | 
| Total | 207 | 100.00% | 20 | 100.00% | 
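Because raw_open() forces O_DIRECT and drops the soft blocksize to the device's logical block size, userspace I/O against a bound /dev/raw/rawN node must keep the buffer address, file offset and transfer length aligned to that block size. A minimal sketch of one aligned read, assuming /dev/raw/raw1 is already bound and the logical block size is 512 bytes:

#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	int fd;

	/* O_DIRECT needs a buffer aligned to the logical block size. */
	if (posix_memalign(&buf, 512, 4096))
		return 1;

	fd = open("/dev/raw/raw1", O_RDONLY);	/* O_DIRECT is added by raw_open() */
	if (fd < 0)
		return 1;

	/* Offset and length must also be multiples of the block size. */
	if (pread(fd, buf, 4096, 0) < 0)
		return 1;

	close(fd);
	free(buf);
	return 0;
}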
/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0)
		/* Here  inode->i_mapping == bdev->bd_inode->i_mapping  */
		inode->i_mapping = &inode->i_data;
	mutex_unlock(&raw_mutex);
	blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Linus Torvalds (pre-git) | 31 | 35.63% | 2 | 20.00% | 
| Andrew Morton | 31 | 35.63% | 3 | 30.00% | 
| Linus Torvalds | 16 | 18.39% | 1 | 10.00% | 
| Al Viro | 5 | 5.75% | 2 | 20.00% | 
| Arjan van de Ven | 2 | 2.30% | 1 | 10.00% | 
| Tejun Heo | 2 | 2.30% | 1 | 10.00% | 
| Total | 87 | 100.00% | 10 | 100.00% | 
/*
 * Forward ioctls to the underlying block device.
 */
static long
raw_ioctl(struct file *filp, unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;
	return blkdev_ioctl(bdev, 0, command, arg);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Dave Jones | 27 | 67.50% | 1 | 12.50% | 
| Al Viro | 6 | 15.00% | 3 | 37.50% | 
| Andrew Morton | 4 | 10.00% | 2 | 25.00% | 
| Stephen C. Tweedie | 2 | 5.00% | 1 | 12.50% | 
| Arnd Bergmann | 1 | 2.50% | 1 | 12.50% | 
| Total | 40 | 100.00% | 8 | 100.00% | 
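Since raw_ioctl() hands the command straight to blkdev_ioctl(), ordinary block-device ioctls work unchanged on a raw file descriptor. A small sketch, assuming /dev/raw/raw1 is bound, that queries the size of the underlying device:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <unistd.h>

int main(void)
{
	uint64_t size = 0;
	int fd = open("/dev/raw/raw1", O_RDONLY);

	if (fd < 0)
		return 1;

	/* BLKGETSIZE64 is answered by the bound block device. */
	if (ioctl(fd, BLKGETSIZE64, &size) < 0)
		return 1;

	printf("bound device: %llu bytes\n", (unsigned long long)size);
	close(fd);
	return 0;
}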
static int bind_set(int number, u64 major, u64 minor)
{
	dev_t dev = MKDEV(major, minor);
	struct raw_device_data *rawdev;
	int err = 0;
	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;
	if (MAJOR(dev) != major || MINOR(dev) != minor)
		return -EINVAL;
	rawdev = &raw_devices[number];
	/*
	 * This is like making block devices, so demand the
	 * same capability
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * For now, we don't need to check that the underlying
	 * block device is present or not: we can do that when
	 * the raw device is opened.  Just check that the
	 * major/minor numbers make sense.
	 */
	if (MAJOR(dev) == 0 && dev != 0)
		return -EINVAL;
	mutex_lock(&raw_mutex);
	if (rawdev->inuse) {
		mutex_unlock(&raw_mutex);
		return -EBUSY;
	}
	if (rawdev->binding) {
		bdput(rawdev->binding);
		module_put(THIS_MODULE);
	}
	if (!dev) {
		/* unbind */
		rawdev->binding = NULL;
		device_destroy(raw_class, MKDEV(RAW_MAJOR, number));
	} else {
		rawdev->binding = bdget(dev);
		if (rawdev->binding == NULL) {
			err = -ENOMEM;
		} else {
			dev_t raw = MKDEV(RAW_MAJOR, number);
			__module_get(THIS_MODULE);
			device_destroy(raw_class, raw);
			device_create(raw_class, NULL, raw, NULL,
				      "raw%d", number);
		}
	}
	mutex_unlock(&raw_mutex);
	return err;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Al Viro | 89 | 35.04% | 1 | 6.67% | 
| Andrew Morton | 83 | 32.68% | 4 | 26.67% | 
| Linus Torvalds (pre-git) | 37 | 14.57% | 3 | 20.00% | 
| Greg Kroah-Hartman | 24 | 9.45% | 3 | 20.00% | 
| Linus Torvalds | 16 | 6.30% | 1 | 6.67% | 
| Arjan van de Ven | 3 | 1.18% | 1 | 6.67% | 
| Jeff Moyer | 1 | 0.39% | 1 | 6.67% | 
| Jan Kara | 1 | 0.39% | 1 | 6.67% | 
| Total | 254 | 100.00% | 15 | 100.00% | 
static int bind_get(int number, dev_t *dev)
{
	struct raw_device_data *rawdev;
	struct block_device *bdev;
	if (number <= 0 || number >= max_raw_minors)
		return -EINVAL;
	rawdev = &raw_devices[number];
	mutex_lock(&raw_mutex);
	bdev = rawdev->binding;
	*dev = bdev ? bdev->bd_dev : 0;
	mutex_unlock(&raw_mutex);
	return 0;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Al Viro | 55 | 71.43% | 2 | 25.00% | 
| Linus Torvalds (pre-git) | 12 | 15.58% | 2 | 25.00% | 
| Andrew Morton | 7 | 9.09% | 1 | 12.50% | 
| Paul Bolle | 1 | 1.30% | 1 | 12.50% | 
| Linus Torvalds | 1 | 1.30% | 1 | 12.50% | 
| Arjan van de Ven | 1 | 1.30% | 1 | 12.50% | 
| Total | 77 | 100.00% | 8 | 100.00% | 
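bind_get() is the kernel half of RAW_GETBIND: userspace asks which block device a raw minor is bound to and reads the major/minor numbers back out of the same request structure. An illustrative sketch, again assuming raw minor 1:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/raw.h>
#include <unistd.h>

int main(void)
{
	struct raw_config_request rq;
	int ctl = open("/dev/rawctl", O_RDWR);

	if (ctl < 0)
		return 1;

	memset(&rq, 0, sizeof(rq));
	rq.raw_minor = 1;

	/* On return, block_major/block_minor are filled in (0/0 if unbound). */
	if (ioctl(ctl, RAW_GETBIND, &rq) < 0)
		return 1;

	printf("raw1 -> %llu:%llu\n",
	       (unsigned long long)rq.block_major,
	       (unsigned long long)rq.block_minor);
	close(ctl);
	return 0;
}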
/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static long raw_ctl_ioctl(struct file *filp, unsigned int command,
			  unsigned long arg)
{
	struct raw_config_request rq;
	dev_t dev;
	int err;
	switch (command) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;
		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
	case RAW_GETBIND:
		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq)))
			return -EFAULT;
		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;
		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);
		if (copy_to_user((void __user *)arg, &rq, sizeof(rq)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Al Viro | 136 | 78.61% | 2 | 28.57% | 
| Linus Torvalds (pre-git) | 31 | 17.92% | 2 | 28.57% | 
| Andrew Morton | 5 | 2.89% | 2 | 28.57% | 
| Arnaldo Carvalho de Melo | 1 | 0.58% | 1 | 14.29% | 
| Total | 173 | 100.00% | 7 | 100.00% | 
#ifdef CONFIG_COMPAT
struct raw32_config_request {
	compat_int_t	raw_minor;
	compat_u64	block_major;
	compat_u64	block_minor;
};
static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct raw32_config_request __user *user_req = compat_ptr(arg);
	struct raw32_config_request rq;
	dev_t dev;
	int err = 0;
	switch (cmd) {
	case RAW_SETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;
		return bind_set(rq.raw_minor, rq.block_major, rq.block_minor);
	case RAW_GETBIND:
		if (copy_from_user(&rq, user_req, sizeof(rq)))
			return -EFAULT;
		err = bind_get(rq.raw_minor, &dev);
		if (err)
			return err;
		rq.block_major = MAJOR(dev);
		rq.block_minor = MINOR(dev);
		if (copy_to_user(user_req, &rq, sizeof(rq)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Al Viro | 160 | 93.57% | 1 | 25.00% | 
| Linus Torvalds (pre-git) | 6 | 3.51% | 1 | 25.00% | 
| Andrew Morton | 4 | 2.34% | 1 | 25.00% | 
| Arnd Bergmann | 1 | 0.58% | 1 | 25.00% | 
| Total | 171 | 100.00% | 4 | 100.00% | 
#endif
static const struct file_operations raw_fops = {
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.fsync		= blkdev_fsync,
	.open		= raw_open,
	.release	= raw_release,
	.unlocked_ioctl = raw_ioctl,
	.llseek		= default_llseek,
	.owner		= THIS_MODULE,
};
static const struct file_operations raw_ctl_fops = {
	.unlocked_ioctl = raw_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= raw_ctl_compat_ioctl,
#endif
	.open		= raw_open,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};
static struct cdev raw_cdev;
static char *raw_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Kay Sievers | 28 | 96.55% | 2 | 66.67% | 
| Al Viro | 1 | 3.45% | 1 | 33.33% | 
| Total | 29 | 100.00% | 3 | 100.00% | 
static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;
	if (max_raw_minors < 1 || max_raw_minors > 65536) {
		printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
			" between 1 and 65536), using %d\n", MAX_RAW_MINORS);
		max_raw_minors = MAX_RAW_MINORS;
	}
	raw_devices = vzalloc(sizeof(struct raw_device_data) * max_raw_minors);
	if (!raw_devices) {
		printk(KERN_ERR "Not enough memory for raw device structures\n");
		ret = -ENOMEM;
		goto error;
	}
	ret = register_chrdev_region(dev, max_raw_minors, "raw");
	if (ret)
		goto error;
	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, max_raw_minors);
	if (ret)
		goto error_region;
	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	raw_class->devnode = raw_devnode;
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");
	return 0;
error_region:
	unregister_chrdev_region(dev, max_raw_minors);
error:
	vfree(raw_devices);
	return ret;
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Jan Kara | 66 | 30.99% | 1 | 8.33% | 
| Andrew Morton | 59 | 27.70% | 2 | 16.67% | 
| Greg Kroah-Hartman | 53 | 24.88% | 5 | 41.67% | 
| Rolf Eike Beer | 28 | 13.15% | 1 | 8.33% | 
| Kay Sievers | 6 | 2.82% | 2 | 16.67% | 
| Joe Perches | 1 | 0.47% | 1 | 8.33% | 
| Total | 213 | 100.00% | 12 | 100.00% | 
static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
}
Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Andrew Morton | 26 | 59.09% | 2 | 33.33% | 
| Greg Kroah-Hartman | 17 | 38.64% | 3 | 50.00% | 
| Jan Kara | 1 | 2.27% | 1 | 16.67% | 
| Total | 44 | 100.00% | 6 | 100.00% | 
module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");
Overall Contributors
| Person | Tokens | Token % | Commits | Commit % |
| Al Viro | 501 | 32.68% | 12 | 18.75% | 
| Andrew Morton | 401 | 26.16% | 15 | 23.44% | 
| Linus Torvalds (pre-git) | 220 | 14.35% | 5 | 7.81% | 
| Greg Kroah-Hartman | 103 | 6.72% | 6 | 9.38% | 
| Jan Kara | 94 | 6.13% | 1 | 1.56% | 
| Linus Torvalds | 59 | 3.85% | 2 | 3.12% | 
| Kay Sievers | 34 | 2.22% | 2 | 3.12% | 
| Rolf Eike Beer | 28 | 1.83% | 1 | 1.56% | 
| Dave Jones | 27 | 1.76% | 1 | 1.56% | 
| Arjan van de Ven | 16 | 1.04% | 2 | 3.12% | 
| Arnd Bergmann | 14 | 0.91% | 2 | 3.12% | 
| Tejun Heo | 13 | 0.85% | 3 | 4.69% | 
| Randy Dunlap | 5 | 0.33% | 1 | 1.56% | 
| Anton Blanchard | 4 | 0.26% | 1 | 1.56% | 
| Manfred Spraul | 3 | 0.20% | 1 | 1.56% | 
| Stephen C. Tweedie | 2 | 0.13% | 1 | 1.56% | 
| Jonathan Corbet | 2 | 0.13% | 1 | 1.56% | 
| Martin K. Petersen | 1 | 0.07% | 1 | 1.56% | 
| David Jeffery | 1 | 0.07% | 1 | 1.56% | 
| Ilya Dryomov | 1 | 0.07% | 1 | 1.56% | 
| Arnaldo Carvalho de Melo | 1 | 0.07% | 1 | 1.56% | 
| Jeff Moyer | 1 | 0.07% | 1 | 1.56% | 
| Paul Bolle | 1 | 0.07% | 1 | 1.56% | 
| Joe Perches | 1 | 0.07% | 1 | 1.56% | 
| Total | 1533 | 100.00% | 64 | 100.00% | 