cregit-Linux how code gets into the kernel

Release 4.7 drivers/s390/char/tape_core.c

/*
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Michael Holzheu <holzheu@de.ibm.com>
 *               Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               Stefan Bader <shbader@de.ibm.com>
 */


#define KMSG_COMPONENT "tape"

#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>	     // for kernel parameters
#include <linux/kmod.h>	     // for requesting modules
#include <linux/spinlock.h>  // for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>	     // for variable types


#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"


#define LONG_BUSY_TIMEOUT 180	/* seconds */

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major
 * The list is protected by the rwlock
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */

debug_info_t *TAPE_DBF_AREA = NULL;

EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */

/* Indexed by enum tape_state; entries are padded for aligned trace output. */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};


/* Three-letter mnemonics for tape operations, indexed by enum tape_op. */
const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",  [TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
};


static int devid_to_int(struct ccw_dev_id *dev_id) { return dev_id->devno + (dev_id->ssid << 16); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton1250.00%150.00%
cornelia huckcornelia huck1250.00%150.00%
Total24100.00%2100.00%

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *	  replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct tape_device *tdev = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton2554.35%240.00%
martin schwidefskymartin schwidefsky1328.26%120.00%
yani ioannouyani ioannou510.87%120.00%
greg kroah-hartmangreg kroah-hartman36.52%120.00%
Total46100.00%5100.00%

static DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
static ssize_t tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; tdev = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton3269.57%240.00%
martin schwidefskymartin schwidefsky613.04%120.00%
yani ioannouyani ioannou510.87%120.00%
greg kroah-hartmangreg kroah-hartman36.52%120.00%
Total46100.00%5100.00%

static DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
static ssize_t tape_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; tdev = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ? "OFFLINE" : tape_state_verbose[tdev->tape_state]); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton3966.10%240.00%
martin schwidefskymartin schwidefsky1220.34%120.00%
yani ioannouyani ioannou58.47%120.00%
greg kroah-hartmangreg kroah-hartman35.08%120.00%
Total59100.00%5100.00%

static DEVICE_ATTR(state, 0444, tape_state_show, NULL);
static ssize_t tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; ssize_t rc; tdev = dev_get_drvdata(dev); if (tdev->first_minor < 0) return scnprintf(buf, PAGE_SIZE, "N/A\n"); spin_lock_irq(get_ccwdev_lock(tdev->cdev)); if (list_empty(&tdev->req_queue)) rc = scnprintf(buf, PAGE_SIZE, "---\n"); else { struct tape_request *req; req = list_entry(tdev->req_queue.next, struct tape_request, list); rc = scnprintf(buf,PAGE_SIZE, "%s\n", tape_op_verbose[req->op]); } spin_unlock_irq(get_ccwdev_lock(tdev->cdev)); return rc; }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton13093.53%240.00%
yani ioannouyani ioannou53.60%120.00%
greg kroah-hartmangreg kroah-hartman32.16%120.00%
martin schwidefskymartin schwidefsky10.72%120.00%
Total139100.00%5100.00%

static DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
static ssize_t tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tape_device *tdev; tdev = dev_get_drvdata(dev); return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton4083.33%250.00%
yani ioannouyani ioannou510.42%125.00%
greg kroah-hartmangreg kroah-hartman36.25%125.00%
Total48100.00%4100.00%

static DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL); static struct attribute *tape_attrs[] = { &dev_attr_medium_state.attr, &dev_attr_first_minor.attr, &dev_attr_state.attr, &dev_attr_operation.attr, &dev_attr_blocksize.attr, NULL }; static struct attribute_group tape_attr_group = { .attrs = tape_attrs, }; /* * Tape state functions */
void tape_state_set(struct tape_device *device, enum tape_state newstate) { const char *str; if (device->tape_state == TS_NOT_OPER) { DBF_EVENT(3, "ts_set err: not oper\n"); return; } DBF_EVENT(4, "ts. dev: %x\n", device->first_minor); DBF_EVENT(4, "old ts:\t\n"); if (device->tape_state < TS_SIZE && device->tape_state >=0 ) str = tape_state_verbose[device->tape_state]; else str = "UNKNOWN TS"; DBF_EVENT(4, "%s\n", str); DBF_EVENT(4, "new ts:\t\n"); if (newstate < TS_SIZE && newstate >= 0) str = tape_state_verbose[newstate]; else str = "UNKNOWN TS"; DBF_EVENT(4, "%s\n", str); device->tape_state = newstate; wake_up(&device->state_change_wq); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton12888.89%133.33%
martin schwidefskymartin schwidefsky139.03%133.33%
peter oberparleiterpeter oberparleiter32.08%133.33%
Total144100.00%3100.00%

struct tape_med_state_work_data { struct tape_device *device; enum tape_medium_state state; struct work_struct work; };
static void tape_med_state_work_handler(struct work_struct *work) { static char env_state_loaded[] = "MEDIUM_STATE=LOADED"; static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED"; struct tape_med_state_work_data *p = container_of(work, struct tape_med_state_work_data, work); struct tape_device *device = p->device; char *envp[] = { NULL, NULL }; switch (p->state) { case MS_UNLOADED: pr_info("%s: The tape cartridge has been successfully " "unloaded\n", dev_name(&device->cdev->dev)); envp[0] = env_state_unloaded; kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); break; case MS_LOADED: pr_info("%s: A tape cartridge has been mounted\n", dev_name(&device->cdev->dev)); envp[0] = env_state_loaded; kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); break; default: break; } tape_put_device(device); kfree(p); }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky165100.00%1100.00%
Total165100.00%1100.00%


static void tape_med_state_work(struct tape_device *device, enum tape_medium_state state) { struct tape_med_state_work_data *p; p = kzalloc(sizeof(*p), GFP_ATOMIC); if (p) { INIT_WORK(&p->work, tape_med_state_work_handler); p->device = tape_get_device(device); p->state = state; schedule_work(&p->work); } }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky72100.00%1100.00%
Total72100.00%1100.00%


void tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) { enum tape_medium_state oldstate; oldstate = device->medium_state; if (oldstate == newstate) return; device->medium_state = newstate; switch(newstate){ case MS_UNLOADED: device->tape_generic_status |= GMT_DR_OPEN(~0); if (oldstate == MS_LOADED) tape_med_state_work(device, MS_UNLOADED); break; case MS_LOADED: device->tape_generic_status &= ~GMT_DR_OPEN(~0); if (oldstate == MS_UNLOADED) tape_med_state_work(device, MS_LOADED); break; default: break; } wake_up(&device->state_change_wq); }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky9688.89%250.00%
michael holzheumichael holzheu109.26%125.00%
kay sieverskay sievers21.85%125.00%
Total108100.00%4100.00%

/*
 * Stop running ccw. Has to be called with the device lock held.
 * Returns 0 when the request is done or its cancellation has been
 * handed off, otherwise the last error from ccw_device_clear.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			/* Common I/O layer is busy: mark the request
			 * cancelled and let the delayed work retry. */
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky9669.06%250.00%
stefan baderstefan bader3021.58%125.00%
andrew mortonandrew morton139.35%125.00%
Total139100.00%4100.00%

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	/* Walk the sorted list until a gap of TAPE_MINORS_PER_DEV appears. */
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	/* Minor numbers are limited to 256 per major. */
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	/* Inserting before 'tmp' keeps the list sorted by first_minor. */
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky9798.98%150.00%
al viroal viro11.02%150.00%
Total98100.00%2100.00%

/*
 * Take the device off the global device list and invalidate its
 * minor number assignment.
 */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	device->first_minor = -1;
	list_del_init(&device->node);
	write_unlock(&tape_device_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky38100.00%1100.00%
Total38100.00%1100.00%

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		    struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	/* Prepare the long-busy timer; it is armed elsewhere. */
	init_timer(&device->lb_timeout);
	device->lb_timeout.function = tape_long_busy_timeout;

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

	/* Error path: undo the setup steps in reverse order. */
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky11257.73%116.67%
andrew mortonandrew morton3618.56%233.33%
heiko carstensheiko carstens2311.86%116.67%
michael holzheumichael holzheu168.25%116.67%
roel kluinroel kluin73.61%116.67%
Total194100.00%6100.00%


static void tape_cleanup_device(struct tape_device *device) { tapechar_cleanup_device(device); device->discipline->cleanup_device(device); module_put(device->discipline->owner); tape_remove_minor(device); tape_med_state_set(device, MS_UNKNOWN); }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton3780.43%150.00%
heiko carstensheiko carstens919.57%150.00%
Total46100.00%2100.00%

/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * resume the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 */
int
tape_generic_pm_suspend(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
		device->cdev_id, device);

	/* Refuse if a cartridge is loaded (see comment above). */
	if (device->medium_state != MS_UNLOADED) {
		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));
		return -EBUSY;
	}

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
	case TS_UNUSED:
		/* Idle states: suspending is safe. */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	default:
		pr_err("Tape device %s is busy, refusing to "
		       "suspend\n", dev_name(&cdev->dev));
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
frank munzertfrank munzert15497.47%150.00%
martin schwidefskymartin schwidefsky42.53%150.00%
Total158100.00%2100.00%

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		/* Nothing was set up yet; nothing to release. */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/* Device idle: release it. tape_cleanup_device must be
		 * called without the ccw device lock held. */
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton9663.58%350.00%
martin schwidefskymartin schwidefsky3825.17%116.67%
frank munzertfrank munzert138.61%116.67%
greg kroah-hartmangreg kroah-hartman42.65%116.67%
Total151100.00%6100.00%

/*
 * Allocate memory for a new device structure.
 * Returns the device with a reference count of 1, or ERR_PTR(-ENOMEM).
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* Single byte used in channel programs; must be DMA-capable. */
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	/* Initial reference owned by the caller. */
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky15485.08%457.14%
andrew mortonandrew morton179.39%114.29%
stefan baderstefan bader94.97%114.29%
eric sesterhenneric sesterhenn10.55%114.29%
Total181100.00%7100.00%

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int new_count = atomic_inc_return(&device->ref_count);

	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, new_count);
	return device;
}

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton2358.97%150.00%
martin schwidefskymartin schwidefsky1641.03%150.00%
Total39100.00%2100.00%

/*
 * Drop one reference to the device structure. When the last reference
 * is gone the modeset byte and the structure itself are freed.
 */
void
tape_put_device(struct tape_device *device)
{
	int remaining = atomic_dec_return(&device->ref_count);

	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remaining);
	BUG_ON(remaining < 0);
	if (remaining)
		return;
	kfree(device->modeset_byte);
	kfree(device);
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky3354.10%266.67%
andrew mortonandrew morton2845.90%133.33%
Total61100.00%3100.00%

/*
 * Find tape device by a device index. On success the returned device
 * carries an additional reference; otherwise ERR_PTR(-ENODEV).
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *found = ERR_PTR(-ENODEV);
	struct tape_device *tmp;

	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV != devindex)
			continue;
		found = tape_get_device(tmp);
		break;
	}
	read_unlock(&tape_device_lock);
	return found;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky6591.55%250.00%
andrew mortonandrew morton68.45%250.00%
Total71100.00%4100.00%

/*
 * Driverfs tape probe function.
 * Allocates the tape_device, creates the sysfs attribute group and wires
 * the interrupt handler. The device reference allocated here is owned by
 * the driver-data pointer of the ccw device.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton5243.70%120.00%
heiko carstensheiko carstens4537.82%120.00%
cornelia huckcornelia huck1512.61%120.00%
greg kroah-hartmangreg kroah-hartman54.20%120.00%
peter oberparleiterpeter oberparleiter21.68%120.00%
Total119100.00%5100.00%


static void __tape_discard_requests(struct tape_device *device) { struct tape_request * request; struct list_head * l, *n; list_for_each_safe(l, n, &device->req_queue) { request = list_entry(l, struct tape_request, list); if (request->status == TAPE_REQUEST_IN_IO) request->status = TAPE_REQUEST_DONE; list_del(&request->list); /* Decrease ref_count for removed request. */ request->device = NULL; tape_put_device(device); request->rc = -EIO; if (request->callback != NULL) request->callback(request, request->callback_data); } }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton10498.11%266.67%
martin schwidefskymartin schwidefsky21.89%133.33%
Total106100.00%3100.00%

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *	device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fall through */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warn("%s: A tape unit was detached while in use\n",
			dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		/* Drop the reference held by the driver-data pointer. */
		tape_put_device(device);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton15069.77%330.00%
martin schwidefskymartin schwidefsky4520.93%330.00%
greg kroah-hartmangreg kroah-hartman125.58%110.00%
michael holzheumichael holzheu41.86%110.00%
joe perchesjoe perches20.93%110.00%
kay sieverskay sievers20.93%110.00%
Total215100.00%10100.00%

/*
 * Allocate a new tape ccw request with room for a channel program of
 * cplength ccws and a data area of datasize bytes. Both must fit into
 * one page. Returns the request or ERR_PTR(-ENOMEM).
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		/* Channel programs must reside in DMA-capable memory. */
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky18082.57%125.00%
andrew mortonandrew morton3114.22%125.00%
eric sesterhenneric sesterhenn41.83%125.00%
stoyan gaydarovstoyan gaydarov31.38%125.00%
Total218100.00%4100.00%

/*
 * Free a tape ccw request: drop the device reference it may still hold
 * and release channel program, data area and the request itself.
 */
void
tape_free_request(struct tape_request *request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky3262.75%150.00%
andrew mortonandrew morton1937.25%150.00%
Total51100.00%2100.00%


static int __tape_start_io(struct tape_device *device, struct tape_request *request) { int rc; rc = ccw_device_start( device->cdev, request->cpaddr, (unsigned long) request, 0x00, request->options ); if (rc == 0) { request->status = TAPE_REQUEST_IN_IO; } else if (rc == -EBUSY) { /* The common I/O subsystem is currently busy. Retry later. */ request->status = TAPE_REQUEST_QUEUED; schedule_delayed_work(&device->tape_dnr, 0); rc = 0; } else { /* Start failed. Remove request and indicate failure. */ DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc); } return rc; }

Contributors

PersonTokensPropCommitsCommitProp
andrew mortonandrew morton6662.86%133.33%
stefan baderstefan bader3634.29%133.33%
martin schwidefskymartin schwidefsky32.86%133.33%
Total105100.00%3100.00%


static void __tape_start_next_request(struct tape_device *device) { struct list_head *l, *n; struct tape_request *request; int rc; DBF_LH(6, "__tape_start_next_request(%p)\n", device); /* * Try to start each request on request queue until one is * started successful. */ list_for_each_safe(l, n, &device->req_queue) { request = list_entry(l, struct tape_request, list); /* * Avoid race condition if bottom-half was triggered more than * once. */ if (request->status == TAPE_REQUEST_IN_IO) return; /* * Request has already been stopped. We have to wait until * the request is removed from the queue in the interrupt * handling. */ if (request->status == TAPE_REQUEST_DONE) return; /* * We wanted to cancel the request but the common I/O layer * was busy at that time. This can only happen if this * function is called by delayed_next_request. * Otherwise we start the next request on the queue. */ if (request->status == TAPE_REQUEST_CANCEL) { rc = __tape_cancel_io(device, request); } else { rc = __tape_start_io(device, request); } if (rc == 0) return; /* Set ending status. */ request->rc = rc; request->status = TAPE_REQUEST_DONE; /* Remove from request queue. */ list_del(&request->list); /* Do callback. */ if (request->callback != NULL) request->callback(request, request->callback_data); } }

Contributors

PersonTokensPropCommitsCommitProp
stefan baderstefan bader11773.12%125.00%
andrew mortonandrew morton2918.12%125.00%
michael holzheumichael holzheu106.25%125.00%
martin schwidefskymartin schwidefsky42.50%125.00%
Total160100.00%4100.00%


static void tape_delayed_next_request(struct work_struct *work) { struct tape_device *device = container_of(work, struct tape_device, tape_dnr.work); DBF_LH(6, "tape_delayed_next_request(%p)\n", device); spin_lock_irq(get_ccwdev_lock(device->cdev)); __tape_start_next_request(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); }

Contributors

PersonTokensPropCommitsCommitProp
stefan baderstefan bader4877.42%150.00%
martin schwidefskymartin schwidefsky1422.58%150.00%
Total62100.00%2100.00%


static void tape_long_busy_timeout(unsigned long data) { struct tape_request *request; struct tape_device *device; device = (struct tape_device *) data; spin_lock_irq(get_ccwdev_lock(device->cdev)); request = list_entry(device->req_queue.next, struct tape_request, list); BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY); DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id); __tape_start_next_request(device); device->lb_timeout.data = 0UL; tape_put_device(device); spin_unlock_irq(get_ccwdev_lock(device->cdev)); }

Contributors

PersonTokensPropCommitsCommitProp
michael holzheumichael holzheu9895.15%133.33%
stoyan gaydarovstoyan gaydarov32.91%133.33%
martin schwidefskymartin schwidefsky21.94%133.33%
Total103100.00%3100.00%


static void __tape_end_request( struct tape_device * device, struct tape_request * request, int rc) { DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc); if (request) { request->rc = rc; request->status = TAPE_REQUEST_DONE; /* Remove from request queue. */ list_del(&request->list); /* Do callback. */ if (request->callback != NULL) request->callback(request, request->callback_data); } /* Start next request. */ if (!list_empty(&device->req_queue)) __tape_start_next_request(device); }

Contributors

PersonTokensPropCommitsCommitProp
stefan baderstefan bader96100.00%1100.00%
Total96100.00%1100.00%

/*
 * Write sense data to dbf: device status, current operation and the
 * 32 bytes of extended control word (sense) data from the irb.
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char* op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x  CSTAT: %02x\n",
		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky15996.36%133.33%
peter oberparleiterpeter oberparleiter42.42%133.33%
andrew mortonandrew morton21.21%133.33%
Total165100.00%3100.00%

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		/* These operations are also allowed before the device
		 * is fully in use. */
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		/* fall through */
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky9855.37%240.00%
andrew mortonandrew morton6838.42%120.00%
stefan baderstefan bader84.52%120.00%
michael holzheumichael holzheu31.69%120.00%
Total177100.00%5100.00%

/*
 * Queue a request for asynchronous execution: add it to the request
 * queue, start it if the tape is idle and return without waiting for
 * end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky5080.65%133.33%
andrew mortonandrew morton1117.74%133.33%
stefan baderstefan bader11.61%133.33%
Total62100.00%3100.00%

/* * tape_do_io/__tape_wake_up * Add the request to the request queue, try to start it if the * tape is idle and wait uninterruptible for its completion. */
static void __tape_wake_up(struct tape_request *request, void *data) { request->callback = NULL; wake_up((wait_queue_head_t *) data); }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky30100.00%1100.00%
Total30100.00%1100.00%


int tape_do_io(struct tape_device *device, struct tape_request *request) { int rc; spin_lock_irq(get_ccwdev_lock(device->cdev)); /* Setup callback */ request->callback = __tape_wake_up; request->callback_data = &device->wait_queue; /* Add request to request queue and try to start it. */ rc = __tape_start_request(device, request); spin_unlock_irq(get_ccwdev_lock(device->cdev)); if (rc) return rc; /* Request added to the queue. Wait for its completion. */ wait_event(device->wait_queue, (request->callback == NULL)); /* Get rc from request */ return request->rc; }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky9298.92%266.67%
stefan baderstefan bader11.08%133.33%
Total93100.00%3100.00%

/* * tape_do_io_interruptible/__tape_wake_up_interruptible * Add the request to the request queue, try to start it if the * tape is idle and wait uninterruptible for its completion. */
static void __tape_wake_up_interruptible(struct tape_request *request, void *data) { request->callback = NULL; wake_up_interruptible((wait_queue_head_t *) data); }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky30100.00%1100.00%
Total30100.00%1100.00%


/*
 * tape_do_io_interruptible
 * Queue the request, start it if the tape is idle and wait
 * interruptibly for its completion. If the wait is interrupted by a
 * signal, the in-flight ccw is cancelled and we wait (retrying on
 * further signals) for the halt to be acknowledged before returning
 * -ERESTARTSYS. Returns the request's completion code otherwise.
 */
int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback; must happen under the lock, before the request
	 * is started and could complete. */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt.
		 * This wait must not be aborted by further signals, so
		 * retry until it completes. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky12566.84%233.33%
stefan baderstefan bader3719.79%116.67%
andrew mortonandrew morton2412.83%233.33%
michael holzheumichael holzheu10.53%116.67%
Total187100.00%6100.00%

/*
 * Stop a running ccw. Locked wrapper around __tape_cancel_io, which
 * must be called with the ccw device lock held.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	ret = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
michael holzheumichael holzheu50100.00%1100.00%
Total50100.00%1100.00%

/*
 * Tape interrupt routine, called from the ccw_device layer.
 *
 * Handles error-pointer irbs, deferred condition codes, unsolicited
 * interrupts (including the not-ready-to-ready transition after a long
 * busy), and finally hands the interrupt to the discipline's irq
 * handler, acting on its return code.
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = dev_get_drvdata(&cdev->dev);
	if (device == NULL) {
		return;
	}
	/* intparm carries the request this interrupt belongs to; it is
	 * NULL for unsolicited interrupts. */
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			DBF_LH(1, "(%08x): Request timed out\n",
			       device->cdev_id);
			/* fallthrough: a timeout is finished off as -EIO */
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			DBF_LH(1, "(%08x): Unexpected i/o error %li\n",
			       device->cdev_id, PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is an deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cmd.cc != 0 &&
	    (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
			  device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		/* Retry after one second via the delayed-next-request worker. */
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.cmd.count;
	else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy ? */
		struct tape_request *req;
		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				/* Timer was pending: drop the device
				 * reference the timer held and restart
				 * the queued request. */
				device->lb_timeout.data = 0UL;
				tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	/* 0x0c is presumably channel end + device end, i.e. normal
	 * completion -- NOTE(review): confirm against DEV_STAT_* flags. */
	if (irb->scsw.cmd.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Request that were canceled still come back with an interrupt.
	 * To detect these request the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_LONG_BUSY:
		/* The timer takes a device reference; it is dropped when
		 * the timer is deleted or fires. */
		device->lb_timeout.data =
			(unsigned long) tape_get_device(device);
		device->lb_timeout.expires = jiffies +
			LONG_BUSY_TIMEOUT * HZ;
		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
		add_timer(&device->lb_timeout);
		request->status = TAPE_REQUEST_LONG_BUSY;
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			/* Positive unknown return codes are mapped to -EIO. */
			DBF_EVENT(6, "xunknownrc\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky26338.91%216.67%
michael holzheumichael holzheu18226.92%216.67%
andrew mortonandrew morton10315.24%216.67%
stefan baderstefan bader9614.20%216.67%
peter oberparleiterpeter oberparleiter162.37%18.33%
sebastian ottsebastian ott60.89%18.33%
carsten ottecarsten otte60.89%18.33%
greg kroah-hartmangreg kroah-hartman40.59%18.33%
Total676100.00%12100.00%

/* * Tape device open function used by tape_char frontend. */
int tape_open(struct tape_device *device) { int rc; spin_lock_irq(get_ccwdev_lock(device->cdev)); if (device->tape_state == TS_NOT_OPER) { DBF_EVENT(6, "TAPE:nodev\n"); rc = -ENODEV; } else if (device->tape_state == TS_IN_USE) { DBF_EVENT(6, "TAPE:dbusy\n"); rc = -EBUSY; } else if (device->tape_state == TS_BLKUSE) { DBF_EVENT(6, "TAPE:dbusy\n"); rc = -EBUSY; } else if (device->discipline != NULL && !try_module_get(device->discipline->owner)) { DBF_EVENT(6, "TAPE:nodisc\n"); rc = -ENODEV; } else { tape_state_set(device, TS_IN_USE); rc = 0; } spin_unlock_irq(get_ccwdev_lock(device->cdev)); return rc; }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky12582.78%125.00%
andrew mortonandrew morton2315.23%125.00%
frank munzertfrank munzert21.32%125.00%
christoph hellwigchristoph hellwig10.66%125.00%
Total151100.00%4100.00%

/* * Tape device release function used by tape_char frontend. */
int tape_release(struct tape_device *device) { spin_lock_irq(get_ccwdev_lock(device->cdev)); if (device->tape_state == TS_IN_USE) tape_state_set(device, TS_UNUSED); module_put(device->discipline->owner); spin_unlock_irq(get_ccwdev_lock(device->cdev)); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky5494.74%133.33%
frank munzertfrank munzert23.51%133.33%
christoph hellwigchristoph hellwig11.75%133.33%
Total57100.00%3100.00%

/* * Execute a magnetic tape command a number of times. */
int tape_mtop(struct tape_device *device, int mt_op, int mt_count) { tape_mtop_fn fn; int rc; DBF_EVENT(6, "TAPE:mtio\n"); DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op); DBF_EVENT(6, "TAPE:arg: %x\n", mt_count); if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS) return -EINVAL; fn = device->discipline->mtop_array[mt_op]; if (fn == NULL) return -EINVAL; /* We assume that the backends can handle count up to 500. */ if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF || mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) { rc = 0; for (; mt_count > 500; mt_count -= 500) if ((rc = fn(device, 500)) != 0) break; if (rc == 0) rc = fn(device, mt_count); } else rc = fn(device, mt_count); return rc; }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky16999.41%150.00%
andrew mortonandrew morton10.59%150.00%
Total170100.00%2100.00%

/* * Tape init function. */
static int tape_init (void) { TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long)); debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view); #ifdef DBF_LIKE_HELL debug_set_level(TAPE_DBF_AREA, 6); #endif DBF_EVENT(3, "tape init\n"); tape_proc_init(); tapechar_init (); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky4572.58%120.00%
andrew mortonandrew morton1524.19%240.00%
heiko carstensheiko carstens11.61%120.00%
michael holzheumichael holzheu11.61%120.00%
Total62100.00%5100.00%

/*
 * Tape exit function. Tears down the frontends first, then the debug
 * area they may still log to.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
}

Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky2696.30%150.00%
andrew mortonandrew morton13.70%150.00%
Total27100.00%2100.00%

/* Module metadata. */
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

/* Exported for use by the tape discipline modules and frontends. */
EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);

Overall Contributors

PersonTokensPropCommitsCommitProp
martin schwidefskymartin schwidefsky296051.03%917.65%
andrew mortonandrew morton150025.86%59.80%
stefan baderstefan bader4808.27%23.92%
michael holzheumichael holzheu4337.46%815.69%
frank munzertfrank munzert1773.05%35.88%
heiko carstensheiko carstens821.41%59.80%
greg kroah-hartmangreg kroah-hartman400.69%11.96%
cornelia huckcornelia huck270.47%11.96%
peter oberparleiterpeter oberparleiter250.43%35.88%
yani ioannouyani ioannou250.43%11.96%
carsten ottecarsten otte100.17%11.96%
roel kluinroel kluin70.12%11.96%
stoyan gaydarovstoyan gaydarov60.10%11.96%
sebastian ottsebastian ott60.10%11.96%
eric sesterhenneric sesterhenn50.09%11.96%
kay sieverskay sievers40.07%11.96%
thomas gleixnerthomas gleixner40.07%11.96%
tejun heotejun heo30.05%11.96%
cheng renquancheng renquan20.03%11.96%
christoph hellwigchristoph hellwig20.03%23.92%
joe perchesjoe perches20.03%11.96%
al viroal viro10.02%11.96%
Total5801100.00%51100.00%
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
{% endraw %}