Release 4.11: drivers/staging/vme/devices/vme_user.c

/*
 * VMEbus User access driver
 *
 * Author: Martyn Welch <martyn.welch@ge.com>
 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
 *
 * Based on work by:
 *   Tom Armistead and Ajit Prem
 *     Copyright 2004 Motorola Inc.
 *
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/types.h>

#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/vme.h>

#include "vme_user.h"


static const char driver_name[] = "vme_user";


static int bus[VME_USER_BUS_MAX];

static unsigned int bus_num;

/* Currently Documentation/admin-guide/devices.rst defines the
 * following for VME:
 *
 * 221 char     VME bus
 *                0 = /dev/bus/vme/m0           First master image
 *                1 = /dev/bus/vme/m1           Second master image
 *                2 = /dev/bus/vme/m2           Third master image
 *                3 = /dev/bus/vme/m3           Fourth master image
 *                4 = /dev/bus/vme/s0           First slave image
 *                5 = /dev/bus/vme/s1           Second slave image
 *                6 = /dev/bus/vme/s2           Third slave image
 *                7 = /dev/bus/vme/s3           Fourth slave image
 *                8 = /dev/bus/vme/ctl          Control
 *
 *              It is expected that all VME bus drivers will use the
 *              same interface.  For interface documentation see
 *              http://www.vmelinux.org/.
 *
 * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
 * even support the tsi148 chipset (which has 8 master and 8 slave windows).
 * We'll run with this for now as far as possible, however it probably makes
 * sense to get rid of the old mappings and just do everything dynamically.
 *
 * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
 * defined above and try to support at least some of the interface from
 * http://www.vmelinux.org/; as an alternative, the driver can be rewritten
 * later to provide a saner interface.
 *
 * The vmelinux.org driver never supported slave images, the devices reserved
 * for slaves were repurposed to support all 8 master images on the UniverseII!
 * We shall support 4 masters and 4 slaves with this driver.
 */
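As an illustration of the device layout described above, a minimal user-space sketch for reading through the first master window might look like the following. This is only a sketch: it assumes udev has created the /dev/bus/vme/m0 node from the sysfs name used by this driver, that the window has already been configured (for example via VME_SET_MASTER), and the buffer size is an arbitrary example value.

/* Illustrative user-space sketch (not part of the driver).
 * Assumes /dev/bus/vme/m0 exists and the master window is configured.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char data[64];
	int fd = open("/dev/bus/vme/m0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Serviced by vme_user_read() below for MASTER_MINOR devices */
	if (read(fd, data, sizeof(data)) < 0)
		perror("read");
	close(fd);
	return 0;
}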

#define VME_MAJOR	221	/* VME Major Device Number */
#define VME_DEVS	9	/* Number of dev entries */


#define MASTER_MINOR	0

#define MASTER_MAX	3

#define SLAVE_MINOR	4

#define SLAVE_MAX	7

#define CONTROL_MINOR	8


#define PCI_BUF_SIZE  0x20000	/* Size of one slave image buffer */

/*
 * Structure to handle image related parameters.
 */

struct image_desc {
	void *kern_buf;	/* Buffer address in kernel space */
	dma_addr_t pci_buf;	/* Buffer address in PCI address space */
	unsigned long long size_buf;	/* Buffer size */
	struct mutex mutex;	/* Mutex for locking image */
	struct device *device;	/* Sysfs device */
	struct vme_resource *resource;	/* VME resource */
	int mmap_count;		/* Number of current mmap's */
};


static struct image_desc image[VME_DEVS];


static struct cdev *vme_user_cdev;		/* Character device */
static struct class *vme_user_sysfs_class;	/* Sysfs class */
static struct vme_dev *vme_user_bridge;		/* Pointer to user device */


static const int type[VME_DEVS] = {	MASTER_MINOR,	MASTER_MINOR,
					MASTER_MINOR,	MASTER_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					SLAVE_MINOR,	SLAVE_MINOR,
					CONTROL_MINOR
				};


struct vme_user_vma_priv {
	unsigned int minor;
	atomic_t refcnt;
};


static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
				loff_t *ppos)
{
	ssize_t copied = 0;

	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	copied = vme_master_read(image[minor].resource, image[minor].kern_buf,
				 count, *ppos);
	if (copied < 0)
		return (int)copied;

	if (__copy_to_user(buf, image[minor].kern_buf, (unsigned long)copied))
		return -EFAULT;

	return copied;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch          96   88.07%         1        33.33%
Dmitry Kalinkin       13   11.93%         2        66.67%
Total                109  100.00%         3       100.00%


static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	if (count > image[minor].size_buf)
		count = image[minor].size_buf;

	if (__copy_from_user(image[minor].kern_buf, buf, (unsigned long)count))
		return -EFAULT;

	return vme_master_write(image[minor].resource, image[minor].kern_buf,
				count, *ppos);
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch          72   80.00%         1        20.00%
Dmitry Kalinkin       17   18.89%         3        60.00%
Emilio G. Cota         1    1.11%         1        20.00%
Total                 90  100.00%         5       100.00%


static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
			      size_t count, loff_t *ppos)
{
	void *image_ptr;

	image_ptr = image[minor].kern_buf + *ppos;
	if (__copy_to_user(buf, image_ptr, (unsigned long)count))
		return -EFAULT;

	return count;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch          54   90.00%         1        50.00%
Dmitry Kalinkin        6   10.00%         1        50.00%
Total                 60  100.00%         2       100.00%


static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
				size_t count, loff_t *ppos)
{
	void *image_ptr;

	image_ptr = image[minor].kern_buf + *ppos;
	if (__copy_from_user(image_ptr, buf, (unsigned long)count))
		return -EFAULT;

	return count;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch          54   88.52%         1        33.33%
Dmitry Kalinkin        6    9.84%         1        33.33%
Emilio G. Cota         1    1.64%         1        33.33%
Total                 61  100.00%         3       100.00%


static ssize_t vme_user_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	/* XXX Do we *really* want this helper - we can use vme_*_get ? */
	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not reading past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_to_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_to_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch         182   89.22%         1        16.67%
Vincent Bossier        9    4.41%         1        16.67%
Santosh Nayak          6    2.94%         1        16.67%
Al Viro                3    1.47%         1        16.67%
Dmitry Kalinkin        3    1.47%         1        16.67%
Emilio G. Cota         1    0.49%         1        16.67%
Total                204  100.00%         6       100.00%


static ssize_t vme_user_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	ssize_t retval;
	size_t image_size;

	if (minor == CONTROL_MINOR)
		return 0;

	mutex_lock(&image[minor].mutex);

	image_size = vme_get_size(image[minor].resource);

	/* Ensure we are starting at a valid location */
	if ((*ppos < 0) || (*ppos > (image_size - 1))) {
		mutex_unlock(&image[minor].mutex);
		return 0;
	}

	/* Ensure not writing past end of the image */
	if (*ppos + count > image_size)
		count = image_size - *ppos;

	switch (type[minor]) {
	case MASTER_MINOR:
		retval = resource_from_user(minor, buf, count, ppos);
		break;
	case SLAVE_MINOR:
		retval = buffer_from_user(minor, buf, count, ppos);
		break;
	default:
		retval = -EINVAL;
	}

	mutex_unlock(&image[minor].mutex);

	if (retval > 0)
		*ppos += retval;

	return retval;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch         182   89.22%         1        16.67%
Vincent Bossier        9    4.41%         1        16.67%
Santosh Nayak          6    2.94%         1        16.67%
Al Viro                3    1.47%         1        16.67%
Dmitry Kalinkin        3    1.47%         1        16.67%
Emilio G. Cota         1    0.49%         1        16.67%
Total                204  100.00%         6       100.00%


static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);
	size_t image_size;
	loff_t res;

	switch (type[minor]) {
	case MASTER_MINOR:
	case SLAVE_MINOR:
		mutex_lock(&image[minor].mutex);
		image_size = vme_get_size(image[minor].resource);
		res = fixed_size_llseek(file, off, whence, image_size);
		mutex_unlock(&image[minor].mutex);
		return res;
	}

	return -EINVAL;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Arthur Benilov        50   47.17%         1        14.29%
Martyn Welch          19   17.92%         1        14.29%
Dmitry Kalinkin       16   15.09%         1        14.29%
Al Viro               14   13.21%         2        28.57%
Santosh Nayak          4    3.77%         1        14.29%
Vincent Bossier        3    2.83%         1        14.29%
Total                106  100.00%         7       100.00%

/*
 * The ioctls provided by the old VME access method (the one at vmelinux.org)
 * are most certainly wrong as they effectively push the register layout
 * through to user space. Given that the VME core can handle multiple bridges
 * with different register layouts, this is most certainly not the way to go.
 *
 * We aren't using the structures defined in the Motorola driver either - these
 * are also quite low level, however we should use the definitions that have
 * already been defined.
 */
static int vme_user_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct vme_master master;
	struct vme_slave slave;
	struct vme_irq_id irq_req;
	unsigned long copied;
	unsigned int minor = MINOR(inode->i_rdev);
	int retval;
	dma_addr_t pci_addr;
	void __user *argp = (void __user *)arg;

	switch (type[minor]) {
	case CONTROL_MINOR:
		switch (cmd) {
		case VME_IRQ_GEN:
			copied = copy_from_user(&irq_req, argp,
						sizeof(irq_req));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			return vme_irq_generate(vme_user_bridge,
						irq_req.level,
						irq_req.statid);
		}
		break;
	case MASTER_MINOR:
		switch (cmd) {
		case VME_GET_MASTER:
			memset(&master, 0, sizeof(master));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_master_get(image[minor].resource,
						&master.enable,
						&master.vme_addr,
						&master.size, &master.aspace,
						&master.cycle, &master.dwidth);

			copied = copy_to_user(argp, &master, sizeof(master));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_MASTER:

			if (image[minor].mmap_count != 0) {
				pr_warn("Can't adjust mapped window\n");
				return -EPERM;
			}

			copied = copy_from_user(&master, argp, sizeof(master));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_master_set(image[minor].resource,
					      master.enable, master.vme_addr,
					      master.size, master.aspace,
					      master.cycle, master.dwidth);

			break;
		}
		break;
	case SLAVE_MINOR:
		switch (cmd) {
		case VME_GET_SLAVE:
			memset(&slave, 0, sizeof(slave));

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			retval = vme_slave_get(image[minor].resource,
					       &slave.enable, &slave.vme_addr,
					       &slave.size, &pci_addr,
					       &slave.aspace, &slave.cycle);

			copied = copy_to_user(argp, &slave, sizeof(slave));
			if (copied) {
				pr_warn("Partial copy to userspace\n");
				return -EFAULT;
			}

			return retval;

		case VME_SET_SLAVE:

			copied = copy_from_user(&slave, argp, sizeof(slave));
			if (copied) {
				pr_warn("Partial copy from userspace\n");
				return -EFAULT;
			}

			/* XXX We do not want to push aspace, cycle and width
			 * to userspace as they are
			 */
			return vme_slave_set(image[minor].resource,
					     slave.enable, slave.vme_addr,
					     slave.size, image[minor].pci_buf,
					     slave.aspace, slave.cycle);

			break;
		}
		break;
	}

	return -EINVAL;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch         399   78.85%         2        22.22%
Vincent Bossier       53   10.47%         1        11.11%
Dmitry Kalinkin       22    4.35%         1        11.11%
Emilio G. Cota        16    3.16%         1        11.11%
Toshiaki Yamane        9    1.78%         1        11.11%
Egor Uleyskiy          5    0.99%         1        11.11%
Dan Carpenter          1    0.20%         1        11.11%
Mahati Chamarthy       1    0.20%         1        11.11%
Total                506  100.00%         9       100.00%
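For context, a user-space caller would drive these ioctls roughly as follows. This is a sketch only: it assumes the struct vme_master layout and the VME_SET_MASTER ioctl number from vme_user.h, that the VME_A32/VME_SCT/VME_D32 constants are visible to user space, and the address and size values are arbitrary examples.

/* Illustrative user-space sketch: configure the first master window.
 * Field names follow vme_user_ioctl() above; values are example placeholders.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include "vme_user.h"

static int setup_window(int fd)
{
	struct vme_master master = {
		.enable   = 1,
		.vme_addr = 0x20000000,	/* example VME base address */
		.size     = 0x10000,	/* example window size */
		.aspace   = VME_A32,
		.cycle    = VME_SCT,
		.dwidth   = VME_D32,
	};

	/* Handled by the VME_SET_MASTER case in vme_user_ioctl() above */
	return ioctl(fd, VME_SET_MASTER, &master);
}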


static long
vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct inode *inode = file_inode(file);
	unsigned int minor = MINOR(inode->i_rdev);

	mutex_lock(&image[minor].mutex);
	ret = vme_user_ioctl(inode, file, cmd, arg);
	mutex_unlock(&image[minor].mutex);

	return ret;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Arnd Bergmann         47   58.02%         2        66.67%
Dmitry Kalinkin       34   41.98%         1        33.33%
Total                 81  100.00%         3       100.00%


static void vme_user_vm_open(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;

	atomic_inc(&vma_priv->refcnt);
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Dmitry Kalinkin       28  100.00%         1       100.00%
Total                 28  100.00%         1       100.00%


static void vme_user_vm_close(struct vm_area_struct *vma)
{
	struct vme_user_vma_priv *vma_priv = vma->vm_private_data;
	unsigned int minor = vma_priv->minor;

	if (!atomic_dec_and_test(&vma_priv->refcnt))
		return;

	mutex_lock(&image[minor].mutex);
	image[minor].mmap_count--;
	mutex_unlock(&image[minor].mutex);

	kfree(vma_priv);
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Dmitry Kalinkin       75  100.00%         1       100.00%
Total                 75  100.00%         1       100.00%

static const struct vm_operations_struct vme_user_vm_ops = {
	.open = vme_user_vm_open,
	.close = vme_user_vm_close,
};
static int vme_user_master_mmap(unsigned int minor, struct vm_area_struct *vma)
{
	int err;
	struct vme_user_vma_priv *vma_priv;

	mutex_lock(&image[minor].mutex);

	err = vme_master_mmap(image[minor].resource, vma);
	if (err) {
		mutex_unlock(&image[minor].mutex);
		return err;
	}

	vma_priv = kmalloc(sizeof(*vma_priv), GFP_KERNEL);
	if (!vma_priv) {
		mutex_unlock(&image[minor].mutex);
		return -ENOMEM;
	}

	vma_priv->minor = minor;
	atomic_set(&vma_priv->refcnt, 1);
	vma->vm_ops = &vme_user_vm_ops;
	vma->vm_private_data = vma_priv;

	image[minor].mmap_count++;

	mutex_unlock(&image[minor].mutex);

	return 0;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Dmitry Kalinkin      154  100.00%         3       100.00%
Total                154  100.00%         3       100.00%


static int vme_user_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned int minor = MINOR(file_inode(file)->i_rdev);

	if (type[minor] == MASTER_MINOR)
		return vme_user_master_mmap(minor, vma);

	return -ENODEV;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Dmitry Kalinkin       51  100.00%         1       100.00%
Total                 51  100.00%         1       100.00%

static const struct file_operations vme_user_fops = {
	.read = vme_user_read,
	.write = vme_user_write,
	.llseek = vme_user_llseek,
	.unlocked_ioctl = vme_user_unlocked_ioctl,
	.compat_ioctl = vme_user_unlocked_ioctl,
	.mmap = vme_user_mmap,
};
static int vme_user_match(struct vme_dev *vdev)
{
	int i;

	int cur_bus = vme_bus_num(vdev);
	int cur_slot = vme_slot_num(vdev);

	for (i = 0; i < bus_num; i++)
		if ((cur_bus == bus[i]) && (cur_slot == vdev->num))
			return 1;

	return 0;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch          47   69.12%         2        66.67%
Manohar Vanga         21   30.88%         1        33.33%
Total                 68  100.00%         3       100.00%

/*
 * In this simple access driver, the old behaviour is being preserved as much
 * as practical. We will therefore reserve the buffers and request the images
 * here so that we don't have to do it later.
 */
static int vme_user_probe(struct vme_dev *vdev)
{
	int i, err;
	char *name;

	/* Save pointer to the bridge device */
	if (vme_user_bridge) {
		dev_err(&vdev->dev, "Driver can only be loaded for 1 device\n");
		err = -EINVAL;
		goto err_dev;
	}
	vme_user_bridge = vdev;

	/* Initialise descriptors */
	for (i = 0; i < VME_DEVS; i++) {
		image[i].kern_buf = NULL;
		image[i].pci_buf = 0;
		mutex_init(&image[i].mutex);
		image[i].device = NULL;
		image[i].resource = NULL;
	}

	/* Assign major and minor numbers for the driver */
	err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
				     driver_name);
	if (err) {
		dev_warn(&vdev->dev, "Error getting Major Number %d for driver.\n",
			 VME_MAJOR);
		goto err_region;
	}

	/* Register the driver as a char device */
	vme_user_cdev = cdev_alloc();
	if (!vme_user_cdev) {
		err = -ENOMEM;
		goto err_char;
	}
	vme_user_cdev->ops = &vme_user_fops;
	vme_user_cdev->owner = THIS_MODULE;
	err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
	if (err)
		goto err_char;

	/* Request slave resources and allocate buffers (128kB wide) */
	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		/* For ca91cx42 bridge there are only two slave windows
		 * supporting A16 addressing, so we request A24 supported
		 * by all windows.
		 */
		image[i].resource = vme_slave_request(vme_user_bridge,
						      VME_A24, VME_SCT);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate slave resource\n");
			err = -ENOMEM;
			goto err_slave;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = vme_alloc_consistent(image[i].resource,
							 image[i].size_buf,
							 &image[i].pci_buf);
		if (!image[i].kern_buf) {
			dev_warn(&vdev->dev,
				 "Unable to allocate memory for buffer\n");
			image[i].pci_buf = 0;
			vme_slave_free(image[i].resource);
			err = -ENOMEM;
			goto err_slave;
		}
	}

	/*
	 * Request master resources allocate page sized buffers for small
	 * reads and writes
	 */
	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		/* XXX Need to properly request attributes */
		image[i].resource = vme_master_request(vme_user_bridge,
						       VME_A32, VME_SCT,
						       VME_D32);
		if (!image[i].resource) {
			dev_warn(&vdev->dev,
				 "Unable to allocate master resource\n");
			err = -ENOMEM;
			goto err_master;
		}
		image[i].size_buf = PCI_BUF_SIZE;
		image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
		if (!image[i].kern_buf) {
			err = -ENOMEM;
			vme_master_free(image[i].resource);
			goto err_master;
		}
	}

	/* Create sysfs entries - on udev systems this creates the dev files */
	vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
	if (IS_ERR(vme_user_sysfs_class)) {
		dev_err(&vdev->dev, "Error creating vme_user class.\n");
		err = PTR_ERR(vme_user_sysfs_class);
		goto err_class;
	}

	/* Add sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		int num;

		switch (type[i]) {
		case MASTER_MINOR:
			name = "bus/vme/m%d";
			break;
		case CONTROL_MINOR:
			name = "bus/vme/ctl";
			break;
		case SLAVE_MINOR:
			name = "bus/vme/s%d";
			break;
		default:
			err = -EINVAL;
			goto err_sysfs;
		}

		num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
		image[i].device = device_create(vme_user_sysfs_class, NULL,
						MKDEV(VME_MAJOR, i), NULL,
						name, num);
		if (IS_ERR(image[i].device)) {
			dev_info(&vdev->dev, "Error creating sysfs device\n");
			err = PTR_ERR(image[i].device);
			goto err_sysfs;
		}
	}

	return 0;

err_sysfs:
	while (i > 0) {
		i--;
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	/* Ensure counter set correctly to unalloc all master windows */
	i = MASTER_MAX + 1;
err_master:
	while (i > MASTER_MINOR) {
		i--;
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	/*
	 * Ensure counter set correctly to unalloc all slave windows and buffers
	 */
	i = SLAVE_MAX + 1;
err_slave:
	while (i > SLAVE_MINOR) {
		i--;
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}
err_class:
	cdev_del(vme_user_cdev);
err_char:
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
err_region:
err_dev:
	return err;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch         617   73.37%         2        12.50%
Arthur Benilov        62    7.37%         2        12.50%
Toshiaki Yamane       45    5.35%         1         6.25%
Dmitry Kalinkin       31    3.69%         2        12.50%
Vincent Bossier       25    2.97%         1         6.25%
DaeSeok Youn          19    2.26%         1         6.25%
Kumar Amit Mehta      15    1.78%         1         6.25%
Wei Yongjun           10    1.19%         1         6.25%
Bojan Prtvar           6    0.71%         1         6.25%
Emilio G. Cota         4    0.48%         1         6.25%
Manohar Vanga          3    0.36%         1         6.25%
Santosh Nayak          2    0.24%         1         6.25%
Walt Feasel            2    0.24%         1         6.25%
Total                841  100.00%        16       100.00%


static int vme_user_remove(struct vme_dev *dev)
{
	int i;

	/* Remove sysfs Entries */
	for (i = 0; i < VME_DEVS; i++) {
		mutex_destroy(&image[i].mutex);
		device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
	}
	class_destroy(vme_user_sysfs_class);

	for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
		kfree(image[i].kern_buf);
		vme_master_free(image[i].resource);
	}

	for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
		vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
		vme_free_consistent(image[i].resource, image[i].size_buf,
				    image[i].kern_buf, image[i].pci_buf);
		vme_slave_free(image[i].resource);
	}

	/* Unregister device driver */
	cdev_del(vme_user_cdev);

	/* Unregister the major and minor device numbers */
	unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);

	return 0;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch         116   57.71%         2        22.22%
Arthur Benilov        27   13.43%         1        11.11%
Dmitry Kalinkin       27   13.43%         1        11.11%
Emilio G. Cota        16    7.96%         2        22.22%
Santosh Nayak         13    6.47%         1        11.11%
Walt Feasel            1    0.50%         1        11.11%
Manohar Vanga          1    0.50%         1        11.11%
Total                201  100.00%         9       100.00%

static struct vme_driver vme_user_driver = {
	.name = driver_name,
	.match = vme_user_match,
	.probe = vme_user_probe,
	.remove = vme_user_remove,
};
static int __init vme_user_init(void)
{
	int retval = 0;

	pr_info("VME User Space Access Driver\n");

	if (bus_num == 0) {
		pr_err("No cards, skipping registration\n");
		retval = -ENODEV;
		goto err_nocard;
	}

	/* Let's start by supporting one bus, we can support more than one
	 * in future revisions if that ever becomes necessary.
	 */
	if (bus_num > VME_USER_BUS_MAX) {
		pr_err("Driver only able to handle %d buses\n",
		       VME_USER_BUS_MAX);
		bus_num = VME_USER_BUS_MAX;
	}

	/*
	 * Here we just register the maximum number of devices we can and
	 * leave vme_user_match() to allow only 1 to go through to probe().
	 * This way, if we later want to allow multiple user access devices,
	 * we just change the code in vme_user_match().
	 */
	retval = vme_register_driver(&vme_user_driver, VME_MAX_SLOTS);
	if (retval)
		goto err_reg;

	return retval;

err_reg:
err_nocard:
	return retval;
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Dmitry Kalinkin       88  100.00%         1       100.00%
Total                 88  100.00%         1       100.00%


static void __exit vme_user_exit(void)
{
	vme_unregister_driver(&vme_user_driver);
}

Contributors

Person            Tokens     Prop   Commits   Commit Prop
Martyn Welch          15  100.00%         2       100.00%
Total                 15  100.00%         2       100.00%

MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
module_param_array(bus, int, &bus_num, 0000);

MODULE_DESCRIPTION("VME User Space Access Driver");
MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
MODULE_LICENSE("GPL");

module_init(vme_user_init);
module_exit(vme_user_exit);
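Given the `bus` module parameter declared above, the driver would typically be bound to a specific bridge at load time, for example with `modprobe vme_user bus=0` (illustrative only; the value is the VME bus number assigned by the bridge driver, and several buses can be listed as a comma-separated array).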

Overall Contributors

Person                 Tokens     Prop   Commits   Commit Prop
Martyn Welch             2092   63.16%         5        10.20%
Dmitry Kalinkin           674   20.35%        11        22.45%
Arthur Benilov            139    4.20%         3         6.12%
Vincent Bossier           104    3.14%         3         6.12%
Toshiaki Yamane            61    1.84%         1         2.04%
Arnd Bergmann              47    1.42%         2         4.08%
Emilio G. Cota             44    1.33%         5        10.20%
Santosh Nayak              35    1.06%         1         2.04%
Manohar Vanga              28    0.85%         3         6.12%
Al Viro                    20    0.60%         2         4.08%
DaeSeok Youn               19    0.57%         1         2.04%
Kumar Amit Mehta           15    0.45%         1         2.04%
Wei Yongjun                10    0.30%         1         2.04%
Bojan Prtvar                6    0.18%         1         2.04%
Egor Uleyskiy               5    0.15%         1         2.04%
Tejun Heo                   3    0.09%         1         2.04%
Walt Feasel                 3    0.09%         1         2.04%
Nanakos Chrysostomos        2    0.06%         1         2.04%
Philip Thiemann             1    0.03%         1         2.04%
Greg Kroah-Hartman          1    0.03%         1         2.04%
Ryan Swan                   1    0.03%         1         2.04%
Mahati Chamarthy            1    0.03%         1         2.04%
Dan Carpenter               1    0.03%         1         2.04%
Total                    3312  100.00%        49       100.00%