Release 4.12 drivers/usb/usb-skeleton.c
  
  
  
/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>
/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0
/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
/* export the table so userspace tooling can autoload this module on hotplug */
MODULE_DEVICE_TABLE(usb, skel_table);
/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192
/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/* MAX_TRANSFER is chosen so that the VM is not stressed by
   allocations > PAGE_SIZE and the number of packets in a page
   is an integer. 512 is the largest possible packet on EHCI. */
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */
/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	spinlock_t		err_lock;		/* lock protecting errors and ongoing_read */
	struct kref		kref;			/* reference count for this object */
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)
/* forward declarations: the driver struct and draw_down are defined below */
static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);
/*
 * Final release of a struct usb_skel, invoked by kref_put() when the
 * last reference is dropped.  Frees the read URB and buffer, drops the
 * reference to the USB device and then the state itself.
 */
static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	kfree(dev->bulk_in_buffer);
	usb_put_dev(dev->udev);
	kfree(dev);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Greg Kroah-Hartman | 33 | 70.21% | 1 | 33.33% | 
| Oliver Neukum | 7 | 14.89% | 1 | 33.33% | 
| Linus Torvalds | 7 | 14.89% | 1 | 33.33% | 
| Total | 47 | 100.00% | 3 | 100.00% | 
/*
 * open() handler: map the minor number back to its usb_interface, grab a
 * runtime-PM reference and a kref on the device state, and stash the state
 * in file->private_data for the other file operations.
 */
static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor = iminor(inode);
	int retval;

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
			__func__, subminor);
		return -ENODEV;
	}

	dev = usb_get_intfdata(interface);
	if (!dev)
		return -ENODEV;

	/* keep the device awake while the file is open */
	retval = usb_autopm_get_interface(interface);
	if (retval)
		return retval;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	file->private_data = dev;
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Linus Torvalds | 53 | 40.15% | 1 | 8.33% | 
| Greg Kroah-Hartman | 39 | 29.55% | 4 | 33.33% | 
| Alan Stern | 24 | 18.18% | 2 | 16.67% | 
| Constantine Shulyupin | 10 | 7.58% | 1 | 8.33% | 
| Oliver Neukum | 4 | 3.03% | 2 | 16.67% | 
| Al Viro | 1 | 0.76% | 1 | 8.33% | 
| Harvey Harrison | 1 | 0.76% | 1 | 8.33% | 
| Total | 132 | 100.00% | 12 | 100.00% | 
/*
 * release() handler: drop the runtime-PM reference taken in open() (only
 * if the device is still connected) and put our kref, which frees the
 * state via skel_delete() once disconnect has also dropped its reference.
 */
static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev = file->private_data;

	if (!dev)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Linus Torvalds | 35 | 43.21% | 2 | 50.00% | 
| Alan Stern | 30 | 37.04% | 1 | 25.00% | 
| Greg Kroah-Hartman | 16 | 19.75% | 1 | 25.00% | 
| Total | 81 | 100.00% | 4 | 100.00% | 
/*
 * flush() handler: wait for all outstanding I/O to drain, then report and
 * clear any error recorded by the URB completion handlers so a subsequent
 * open starts with a clean slate.  -EPIPE (stall) is preserved; any other
 * error is collapsed to -EIO.
 */
static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev = file->private_data;
	int res = 0;

	if (!dev)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	if (dev->errors)
		res = (dev->errors == -EPIPE) ? -EPIPE : -EIO;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);
	return res;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 109 | 100.00% | 2 | 100.00% | 
| Total | 109 | 100.00% | 2 | 100.00% | 
/*
 * Completion handler for the bulk-in URB.
 *
 * Records the completion status under err_lock: on success the number of
 * received bytes goes into bulk_in_filled; on failure the status is stored
 * in dev->errors for skel_read()/skel_write() to report.  Either way the
 * ongoing_read flag is cleared and any reader sleeping in skel_read() is
 * woken.  Runs in interrupt context, hence spin_lock() without irq-save.
 */
static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	wake_up_interruptible(&dev->bulk_in_wait);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 113 | 91.13% | 1 | 33.33% | 
| Greg Kroah-Hartman | 9 | 7.26% | 1 | 33.33% | 
| Du Xing | 2 | 1.61% | 1 | 33.33% | 
| Total | 124 | 100.00% | 3 | 100.00% | 
/*
 * Kick off an asynchronous bulk-in read of up to min(bulk_in_size, count)
 * bytes into dev->bulk_in_buffer.  Called from skel_read() with
 * dev->io_mutex held.
 *
 * ongoing_read is raised (under err_lock) before submission so that no
 * other path touches the buffer while the URB is in flight; the completion
 * handler clears it.  On submission failure it is cleared here, since no
 * completion will ever fire.
 *
 * Returns 0 on successful submission, otherwise -ENOMEM or -EIO.
 */
static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;
	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);
	/* submit bulk in urb, which means no data to deliver */
	dev->bulk_in_filled = 0;
	dev->bulk_in_copied = 0;
	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		/* keep -ENOMEM distinct; map everything else to -EIO */
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}
	return rv;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 146 | 86.90% | 2 | 50.00% | 
| Du Xing | 13 | 7.74% | 1 | 25.00% | 
| Greg Kroah-Hartman | 9 | 5.36% | 1 | 25.00% | 
| Total | 168 | 100.00% | 4 | 100.00% | 
/*
 * read() handler.
 *
 * Data is staged in dev->bulk_in_buffer by asynchronous bulk-in URBs;
 * bulk_in_filled says how many bytes are valid and bulk_in_copied how many
 * have already been handed to user space.  The function loops (via the
 * retry label) between "wait for in-flight I/O", "report stored errors",
 * "copy buffered data out" and "start new I/O" until it can return data,
 * EOF, or an error.  Serialized against writers and disconnect by io_mutex.
 */
static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;
	dev = file->private_data;
	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;
	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;
	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}
	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);
	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_event_interruptible(dev->bulk_in_wait, (!dev->ongoing_read));
		if (rv < 0)
			goto exit;
	}
	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* report it */
		goto exit;
	}
	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */
	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);
		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */
		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;
		dev->bulk_in_copied += chunk;
		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 * (submission errors here are intentionally ignored:
		 * this read already has data to return)
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else
			goto retry;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 234 | 65.36% | 2 | 20.00% | 
| Linus Torvalds | 64 | 17.88% | 1 | 10.00% | 
| Alan Stern | 37 | 10.34% | 2 | 20.00% | 
| Greg Kroah-Hartman | 12 | 3.35% | 2 | 20.00% | 
| Du Xing | 9 | 2.51% | 1 | 10.00% | 
| Al Viro | 1 | 0.28% | 1 | 10.00% | 
| Chen Wang | 1 | 0.28% | 1 | 10.00% | 
| Total | 358 | 100.00% | 10 | 100.00% | 
/*
 * Completion handler for bulk-out (write) URBs.
 *
 * Stores any genuine error (unlink/shutdown statuses are expected and not
 * errors) in dev->errors for later reporting, frees the DMA-coherent
 * transfer buffer allocated in skel_write(), and releases one slot of the
 * writes-in-flight semaphore.  Runs in interrupt context.
 */
static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;
	dev = urb->context;
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}
	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	/* one more write may now be submitted */
	up(&dev->limit_sem);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Greg Kroah-Hartman | 72 | 56.69% | 3 | 37.50% | 
| Oliver Neukum | 36 | 28.35% | 2 | 25.00% | 
| Linus Torvalds | 17 | 13.39% | 1 | 12.50% | 
| Harvey Harrison | 1 | 0.79% | 1 | 12.50% | 
| Daniel Mack | 1 | 0.79% | 1 | 12.50% | 
| Total | 127 | 100.00% | 8 | 100.00% | 
/*
 * write() handler.
 *
 * Copies up to MAX_TRANSFER bytes from user space into a freshly allocated
 * DMA-coherent buffer, wraps it in a bulk-out URB and submits it
 * asynchronously; the completion handler frees the buffer.  The number of
 * URBs in flight is bounded by limit_sem (WRITES_IN_FLIGHT) so user space
 * cannot exhaust memory.  Returns the number of bytes queued, or a
 * negative errno.
 */
static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);
	dev = file->private_data;
	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;
	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}
	/* report (and clear) any error left behind by a previous completion */
	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;
	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}
	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}
	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}
	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}
	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	/* anchor so disconnect/flush can retract it */
	usb_anchor_urb(urb, &dev->submitted);
	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}
	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);
	return writesize;
error_unanchor:
	usb_unanchor_urb(urb);
error:
	/* on success the completion handler frees buf and ups limit_sem */
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);
exit:
	return retval;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 181 | 40.04% | 4 | 23.53% | 
| Greg Kroah-Hartman | 171 | 37.83% | 4 | 23.53% | 
| Linus Torvalds | 58 | 12.83% | 1 | 5.88% | 
| Alan Stern | 19 | 4.20% | 2 | 11.76% | 
| Sam Bishop | 16 | 3.54% | 1 | 5.88% | 
| Julia Lawall | 2 | 0.44% | 1 | 5.88% | 
| Daniel Mack | 2 | 0.44% | 1 | 5.88% | 
| Harvey Harrison | 1 | 0.22% | 1 | 5.88% | 
| Kay Sievers | 1 | 0.22% | 1 | 5.88% | 
| Olav Kongas | 1 | 0.22% | 1 | 5.88% | 
| Total | 452 | 100.00% | 17 | 100.00% | 
/* file operations for the character device created by usb_register_dev() */
static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,	/* device data is a stream; seeking is a no-op */
};
/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",		/* device node name template */
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,	/* first minor assigned to this driver */
};
/*
 * probe() handler: called by USB core when an interface matching skel_table
 * is bound to this driver.
 *
 * Allocates and initializes the per-device state, locates the first
 * bulk-in and bulk-out endpoints, allocates the read buffer and URB, and
 * registers a character device.  All error paths funnel through a single
 * kref_put(), which lets skel_delete() free whatever was allocated so far.
 */
static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int retval;
	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_waitqueue_head(&dev->bulk_in_wait);
	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;
	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	retval = usb_find_common_endpoints(interface->cur_altsetting,
			&bulk_in, &bulk_out, NULL, NULL);
	if (retval) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}
	dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
	dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
	dev->bulk_in_buffer = kmalloc(dev->bulk_in_size, GFP_KERNEL);
	if (!dev->bulk_in_buffer) {
		retval = -ENOMEM;
		goto error;
	}
	dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->bulk_in_urb) {
		retval = -ENOMEM;
		goto error;
	}
	dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;
	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);
	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}
	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;
error:
	/* this frees allocated memory */
	kref_put(&dev->kref, skel_delete);
	return retval;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Linus Torvalds | 100 | 30.58% | 1 | 4.35% | 
| Oliver Neukum | 83 | 25.38% | 5 | 21.74% | 
| Greg Kroah-Hartman | 62 | 18.96% | 9 | 39.13% | 
| Johan Hovold | 55 | 16.82% | 1 | 4.35% | 
| Alan Stern | 16 | 4.89% | 2 | 8.70% | 
| Matt Kraai | 6 | 1.83% | 1 | 4.35% | 
| Du Xing | 2 | 0.61% | 1 | 4.35% | 
| Michael Hayes | 1 | 0.31% | 1 | 4.35% | 
| Luiz Fernando N. Capitulino | 1 | 0.31% | 1 | 4.35% | 
| Kay Sievers | 1 | 0.31% | 1 | 4.35% | 
| Total | 327 | 100.00% | 23 | 100.00% | 
/*
 * disconnect() handler: unregister the character device, mark the
 * interface as gone so new I/O fails with -ENODEV, stop all in-flight
 * URBs, and drop the probe() reference on the device state.
 *
 * The read URB is not anchored in dev->submitted (only writes are), so it
 * must be killed explicitly here; otherwise a still-running bulk-in
 * completion could dereference dev->interface after we set it to NULL.
 */
static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;
	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);
	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);
	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);
	/* stop the unanchored read URB before retracting the writes */
	usb_kill_urb(dev->bulk_in_urb);
	usb_kill_anchored_urbs(&dev->submitted);
	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);
	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Greg Kroah-Hartman | 30 | 30.00% | 6 | 50.00% | 
| Linus Torvalds | 29 | 29.00% | 1 | 8.33% | 
| Alan Stern | 26 | 26.00% | 2 | 16.67% | 
| Oliver Neukum | 8 | 8.00% | 1 | 8.33% | 
| Matt Kraai | 6 | 6.00% | 1 | 8.33% | 
| Kay Sievers | 1 | 1.00% | 1 | 8.33% | 
| Total | 100 | 100.00% | 12 | 100.00% | 
/*
 * Drain all outstanding I/O: give anchored (write) URBs up to one second
 * to complete on their own, kill whatever is left, then kill the read URB.
 */
static void skel_draw_down(struct usb_skel *dev)
{
	int time_left;

	time_left = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time_left)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 46 | 100.00% | 2 | 100.00% | 
| Total | 46 | 100.00% | 2 | 100.00% | 
/* suspend() handler: quiesce all I/O before the device is powered down */
static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (dev)
		skel_draw_down(dev);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 40 | 100.00% | 1 | 100.00% | 
| Total | 40 | 100.00% | 1 | 100.00% | 
/* resume() handler: nothing to restore; reads/writes are resubmitted on demand */
static int skel_resume(struct usb_interface *intf)
{
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 14 | 100.00% | 1 | 100.00% | 
| Total | 14 | 100.00% | 1 | 100.00% | 
/*
 * pre_reset() handler: block new I/O and drain what is in flight.
 * NOTE: io_mutex is intentionally left held here; skel_post_reset()
 * releases it once the reset has completed.
 */
static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 37 | 100.00% | 1 | 100.00% | 
| Total | 37 | 100.00% | 1 | 100.00% | 
/*
 * post_reset() handler: record -EPIPE so the next read/write reports that
 * a reset happened, then release the io_mutex taken in skel_pre_reset().
 */
static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);
	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 40 | 100.00% | 1 | 100.00% | 
| Total | 40 | 100.00% | 1 | 100.00% | 
/* the USB driver structure tying all the callbacks above together */
static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,	/* open()/release() manage PM references */
};
/* register with the USB core; expands to module_init()/module_exit() */
module_usb_driver(skel_driver);
MODULE_LICENSE("GPL");
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp | 
| Oliver Neukum | 1187 | 47.16% | 11 | 20.00% | 
| Greg Kroah-Hartman | 551 | 21.89% | 18 | 32.73% | 
| Linus Torvalds | 469 | 18.63% | 3 | 5.45% | 
| Alan Stern | 161 | 6.40% | 3 | 5.45% | 
| Johan Hovold | 55 | 2.19% | 1 | 1.82% | 
| Du Xing | 28 | 1.11% | 1 | 1.82% | 
| Sam Bishop | 16 | 0.64% | 1 | 1.82% | 
| Matt Kraai | 12 | 0.48% | 1 | 1.82% | 
| Constantine Shulyupin | 10 | 0.40% | 1 | 1.82% | 
| Luiz Fernando N. Capitulino | 5 | 0.20% | 2 | 3.64% | 
| Arnd Bergmann | 5 | 0.20% | 1 | 1.82% | 
| Harvey Harrison | 3 | 0.12% | 1 | 1.82% | 
| Daniel Mack | 3 | 0.12% | 1 | 1.82% | 
| Kay Sievers | 3 | 0.12% | 2 | 3.64% | 
| Julia Lawall | 2 | 0.08% | 1 | 1.82% | 
| Al Viro | 2 | 0.08% | 2 | 3.64% | 
| Chen Wang | 1 | 0.04% | 1 | 1.82% | 
| David Brownell | 1 | 0.04% | 1 | 1.82% | 
| Michael Hayes | 1 | 0.04% | 1 | 1.82% | 
| Márton Németh | 1 | 0.04% | 1 | 1.82% | 
| Olav Kongas | 1 | 0.04% | 1 | 1.82% | 
| Total | 2517 | 100.00% | 55 | 100.00% | 
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.