cregit-Linux: how code gets into the kernel

Release 4.11: drivers/usb/usb-skeleton.c

/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License as
 *      published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>


/* Define these values to match your devices */

#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);
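
The ID table is what the USB core uses to match this driver to hardware at hotplug time, and MODULE_DEVICE_TABLE() exports it so that userspace tools such as modprobe can autoload the module. A driver for real hardware replaces the placeholder IDs, and it can also match a whole device class rather than a single vendor/product pair. A minimal sketch; the 0x1234/0x5678 values and the my_table name are illustrative only:

/* hypothetical ID table -- not part of the skeleton driver itself */
static const struct usb_device_id my_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* one specific vendor/product */
	{ USB_INTERFACE_INFO(USB_CLASS_VENDOR_SPEC, 0, 0) },
					/* ...or every vendor-specific interface */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, my_table);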


/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/* MAX_TRANSFER is chosen so that the VM is not stressed by
 * allocations > PAGE_SIZE and so that the number of packets in a page
 * is an integer. 512 is the largest possible packet on EHCI, so with
 * 4 KiB pages this gives 3584 bytes, exactly seven full packets. */

#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */

/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	wait_queue_head_t	bulk_in_wait;		/* to wait for an ongoing read */
};

#define to_skel_dev(d) container_of(d, struct usb_skel, kref)


static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);


static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Greg Kroah-Hartman                34   72.34%        1      33.33%
Oliver Neukum                      7   14.89%        1      33.33%
Linus Torvalds                     6   12.77%        1      33.33%
Total                             47  100.00%        3     100.00%


static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
			__func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	retval = usb_autopm_get_interface(interface);
	if (retval)
		goto exit;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	file->private_data = dev;

exit:
	return retval;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Greg Kroah-Hartman                49   37.12%        5      38.46%
Linus Torvalds                    43   32.58%        1       7.69%
Alan Stern                        24   18.18%        2      15.38%
Constantine Shulyupin             10    7.58%        1       7.69%
Oliver Neukum                      4    3.03%        2      15.38%
Kay Sievers                        1    0.76%        1       7.69%
Harvey Harrison                    1    0.76%        1       7.69%
Total                            132  100.00%       13     100.00%


static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Linus Torvalds                    47   58.02%        2      40.00%
Alan Stern                        30   37.04%        1      20.00%
Greg Kroah-Hartman                 4    4.94%        2      40.00%
Total                             81  100.00%        5     100.00%


static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                    109  100.00%        2     100.00%
Total                            109  100.00%        2     100.00%


static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	wake_up_interruptible(&dev->bulk_in_wait);
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                    113   91.13%        1      33.33%
Greg Kroah-Hartman                 9    7.26%        1      33.33%
Du Xing                            2    1.61%        1      33.33%
Total                            124  100.00%        3     100.00%


static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev, dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* submit bulk in urb, which means no data to deliver */
	dev->bulk_in_filled = 0;
	dev->bulk_in_copied = 0;

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                    146   86.90%        2      50.00%
Du Xing                           13    7.74%        1      25.00%
Greg Kroah-Hartman                 9    5.36%        1      25.00%
Total                            168  100.00%        4     100.00%


static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_event_interruptible(dev->bulk_in_wait,
					      (!dev->ongoing_read));
		if (rv < 0)
			goto exit;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else
			goto retry;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                    234   65.36%        2      20.00%
Linus Torvalds                    69   19.27%        1      10.00%
Alan Stern                        37   10.34%        2      20.00%
Du Xing                            9    2.51%        1      10.00%
Greg Kroah-Hartman                 7    1.96%        2      20.00%
Chen Wang                          1    0.28%        1      10.00%
Al Viro                            1    0.28%        1      10.00%
Total                            358  100.00%       10     100.00%


static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Greg Kroah-Hartman                83   65.35%        2      28.57%
Oliver Neukum                     36   28.35%        2      28.57%
Linus Torvalds                     6    4.72%        1      14.29%
Daniel Mack                        1    0.79%        1      14.29%
Harvey Harrison                    1    0.79%        1      14.29%
Total                            127  100.00%        7     100.00%


static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                    181   40.04%        4      23.53%
Greg Kroah-Hartman               121   26.77%        5      29.41%
Linus Torvalds                   105   23.23%        1       5.88%
Alan Stern                        23    5.09%        2      11.76%
Sam Bishop                        16    3.54%        1       5.88%
Julia Lawall                       2    0.44%        1       5.88%
Daniel Mack                        2    0.44%        1       5.88%
Olav Kongas                        1    0.22%        1       5.88%
Harvey Harrison                    1    0.22%        1       5.88%
Total                            452  100.00%       17     100.00%

static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};
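
Once skel_probe() registers skel_class with usb_register_dev(), the USB core allocates a minor number and creates a character device node from the "skel%d" template, so ordinary file I/O from userspace lands in the fops above. A minimal userspace sketch, assuming the first device shows up as /dev/skel0:

/* userspace sketch -- assumes the node was created as /dev/skel0 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/dev/skel0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "ping", 4) != 4)	/* serviced by skel_write() */
		perror("write");
	n = read(fd, buf, sizeof(buf));	/* serviced by skel_read() */
	if (n < 0)
		perror("read");
	close(fd);
	return 0;
}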
static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	size_t buffer_size;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		goto error;

	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_waitqueue_head(&dev->bulk_in_wait);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (!dev->bulk_in_endpointAddr &&
		    usb_endpoint_is_bulk_in(endpoint)) {
			/* we found a bulk in endpoint */
			buffer_size = usb_endpoint_maxp(endpoint);
			dev->bulk_in_size = buffer_size;
			dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
			dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
			if (!dev->bulk_in_buffer)
				goto error;
			dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!dev->bulk_in_urb)
				goto error;
		}

		if (!dev->bulk_out_endpointAddr &&
		    usb_endpoint_is_bulk_out(endpoint)) {
			/* we found a bulk out endpoint */
			dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
		}
	}
	if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, skel_delete);
	return retval;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Linus Torvalds                   154   39.90%        1       4.00%
Oliver Neukum                     86   22.28%        5      20.00%
Greg Kroah-Hartman                77   19.95%       10      40.00%
Alan Stern                        49   12.69%        2       8.00%
Luiz Fernando N. Capitulino        7    1.81%        1       4.00%
Matt Kraai                         6    1.55%        1       4.00%
David Brownell                     2    0.52%        1       4.00%
Du Xing                            2    0.52%        1       4.00%
Kay Sievers                        1    0.26%        1       4.00%
Michael Hayes                      1    0.26%        1       4.00%
Kuninori Morimoto                  1    0.26%        1       4.00%
Total                            386  100.00%       25     100.00%
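
The endpoint-scanning loop in skel_probe() is common boilerplate; kernels after this release (4.12 onward) replace it with the usb_find_common_endpoints() helper. A sketch of the equivalent lookup inside skel_probe(), assuming that helper is available in your tree:

	/* sketch: drop-in replacement for the endpoint loop in skel_probe() */
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;

	retval = usb_find_common_endpoints(interface->cur_altsetting,
					   &bulk_in, &bulk_out, NULL, NULL);
	if (retval) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	dev->bulk_in_size = usb_endpoint_maxp(bulk_in);
	dev->bulk_in_endpointAddr = bulk_in->bEndpointAddress;
	dev->bulk_out_endpointAddr = bulk_out->bEndpointAddress;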


static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Greg Kroah-Hartman                30   30.00%        6      50.00%
Linus Torvalds                    29   29.00%        1       8.33%
Alan Stern                        26   26.00%        2      16.67%
Oliver Neukum                      8    8.00%        1       8.33%
Matt Kraai                         6    6.00%        1       8.33%
Kay Sievers                        1    1.00%        1       8.33%
Total                            100  100.00%       12     100.00%


static void skel_draw_down(struct usb_skel *dev)
{
	int time;

	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                     46  100.00%        2     100.00%
Total                             46  100.00%        2     100.00%


static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                     40  100.00%        1     100.00%
Total                             40  100.00%        1     100.00%


static int skel_resume(struct usb_interface *intf)
{
	return 0;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                     14  100.00%        1     100.00%
Total                             14  100.00%        1     100.00%


static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                     37  100.00%        1     100.00%
Total                             37  100.00%        1     100.00%


static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}

Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                     40  100.00%        1     100.00%
Total                             40  100.00%        1     100.00%

static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};

module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");
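
module_usb_driver() removes the module init/exit boilerplate that earlier versions of this file spelled out by hand; it expands to roughly the explicit registration pair below (the function names here are illustrative):

/* roughly what module_usb_driver(skel_driver) expands to */
static int __init usb_skel_init(void)
{
	/* register this driver with the USB subsystem */
	return usb_register(&skel_driver);
}

static void __exit usb_skel_exit(void)
{
	/* deregister this driver with the USB subsystem */
	usb_deregister(&skel_driver);
}

module_init(usb_skel_init);
module_exit(usb_skel_exit);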

Overall Contributors

Person                        Tokens     Prop  Commits  CommitProp
Oliver Neukum                   1190   46.20%       11      20.00%
Linus Torvalds                   565   21.93%        3       5.45%
Greg Kroah-Hartman               521   20.23%       19      34.55%
Alan Stern                       198    7.69%        3       5.45%
Du Xing                           28    1.09%        1       1.82%
Sam Bishop                        16    0.62%        1       1.82%
Matt Kraai                        12    0.47%        1       1.82%
Luiz Fernando N. Capitulino       11    0.43%        2       3.64%
Constantine Shulyupin             10    0.39%        1       1.82%
Arnd Bergmann                      5    0.19%        1       1.82%
David Brownell                     3    0.12%        2       3.64%
Kay Sievers                        3    0.12%        1       1.82%
Daniel Mack                        3    0.12%        1       1.82%
Harvey Harrison                    3    0.12%        1       1.82%
Julia Lawall                       2    0.08%        1       1.82%
Márton Németh                      1    0.04%        1       1.82%
Chen Wang                          1    0.04%        1       1.82%
Olav Kongas                        1    0.04%        1       1.82%
Michael Hayes                      1    0.04%        1       1.82%
Kuninori Morimoto                  1    0.04%        1       1.82%
Al Viro                            1    0.04%        1       1.82%
Total                           2576  100.00%       55     100.00%