
Release 4.7 drivers/net/irda/sir_dev.c

Directory: drivers/net/irda
/*********************************************************************
 *
 *      sir_dev.c:      irda sir network device
 * 
 *      Copyright (c) 2002 Martin Diehl
 * 
 *      This program is free software; you can redistribute it and/or 
 *      modify it under the terms of the GNU General Public License as 
 *      published by the Free Software Foundation; either version 2 of 
 *      the License, or (at your option) any later version.
 *
 ********************************************************************/    

#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "sir-dev.h"



static struct workqueue_struct *irda_sir_wq;

/* STATE MACHINE */

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */


static int sirdev_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {
		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			net_err_ratelimited("%s - undefined state\n", __func__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
christoph hellwig    237      99.16%    1         33.33%
harvey harrison      1        0.42%     1         33.33%
joe perches          1        0.42%     1         33.33%
Total                239      100.00%   3         100.00%
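
For orientation, here is the delay arithmetic from SIRDEV_STATE_WAIT_XMIT as a standalone, hedged sketch (the helper below is hypothetical and not part of the kernel tree): each byte on a SIR line costs 10 bit times (8 data bits plus start/stop), so the fsm estimates the remaining drain time in microseconds, busy-waits if it is under 100 usec, and otherwise rounds up to milliseconds and reschedules itself.

#include <stdio.h>

/* Hypothetical user-space mirror of the WAIT_XMIT estimate above. */
static unsigned tx_drain_estimate_usec(unsigned bytes_left, unsigned speed)
{
	if (speed > 115200)
		return (bytes_left * 8 * 10000) / (speed / 100);
	if (speed > 0)
		return (bytes_left * 10 * 10000) / (speed / 100);
	return 0;
}

int main(void)
{
	/* 12 bytes pending at 9600 baud: 12 * 10 / 9600 s = 12.5 ms */
	unsigned usec = tx_drain_estimate_usec(12, 9600);	/* 12500 */
	unsigned msec = (usec + 999) / 1000;			/* 13, rounded up */

	printf("%u usec -> reschedule in %u msec\n", usec, msec);
	return 0;
}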

/*
 * Function sirdev_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler is scheduled for execution in kIrDAd
 * context, so we can sleep. However, kIrDAd is shared by all sir_dev
 * devices so we better don't sleep there too long. Instead, for longer
 * delays we start a timer to reschedule us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
static void sirdev_config_fsm(struct work_struct *work)
{
	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	pr_debug("%s(), <%ld>\n", __func__, jiffies);

	do {
		pr_debug("%s - state=0x%04x / substate=0x%04x\n",
			 __func__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch(fsm->state) {
		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */
			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			fsm->param = 9600;
			break;

		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
					(fsm->param&0x02) ? TRUE : FALSE,
					(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = sirdev_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay=ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			if ((delay=ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			if ((delay=ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			net_err_ratelimited("%s - undefined state\n", __func__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			net_err_ratelimited("%s - error: %d\n", __func__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while(!delay);

	queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
christoph hellwig    647      96.28%    1         16.67%
david howells        16       2.38%     1         16.67%
harvey harrison      4        0.60%     1         16.67%
joe perches          4        0.60%     2         33.33%
alexander shiyan     1        0.15%     1         16.67%
Total                672      100.00%   6         100.00%
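
To make the control flow above concrete, this is the state sequence a plain speed change (no dongle attached) walks through, as inferred from the switch cases; it is a reading aid, not text from the original file:

	SIRDEV_STATE_SET_SPEED      substate = WAIT_XMIT, next state DONGLE_CHECK
	SIRDEV_STATE_DONGLE_CHECK   sirdev_tx_complete_fsm() drains tx, then
	                            dev->speed = fsm->param, next state PORT_SPEED
	SIRDEV_STATE_PORT_SPEED     drv->set_speed(dev, dev->speed), next state DONE
	SIRDEV_STATE_DONE           netif_wake_queue(), next state COMPLETE
	SIRDEV_STATE_COMPLETE       sirdev_enable_rx(), up(&fsm->sem), exit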

/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */
int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;

	pr_debug("%s - state=0x%04x / param=%u\n", __func__,
		 initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt()  ||  in_atomic()  ||  irqs_disabled()) {
			pr_debug("%s(), state machine busy!\n", __func__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_close should never happen */
		net_err_ratelimited("%s(), instance staled!\n", __func__);
		up(&fsm->sem);
		return -ESTALE;		/* or better EPIPE? */
	}

	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_DELAYED_WORK(&fsm->work, sirdev_config_fsm);
	queue_delayed_work(irda_sir_wq, &fsm->work, 0);
	return 0;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
christoph hellwig    163      94.22%    1         20.00%
david howells        4        2.31%     1         20.00%
joe perches          3        1.73%     2         40.00%
harvey harrison      3        1.73%     1         20.00%
Total                173      100.00%   5         100.00%
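
The sirdev_schedule_speed(), sirdev_schedule_dongle_open() and sirdev_schedule_dtr_rts() calls used throughout this file are presumably thin wrappers around sirdev_schedule_request() (they live in sir-dev.h, which is not shown here). A minimal caller sketch under that assumption; the retry policy and the function name are illustrative:

/* Hedged sketch: request a speed change and cope with a busy fsm.
 * sirdev_schedule_speed() is assumed to map to
 * sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, speed).
 */
static int example_request_speed(struct sir_dev *dev, unsigned speed)
{
	int err = sirdev_schedule_speed(dev, speed);

	if (err == -EWOULDBLOCK)
		return err;	/* atomic context and fsm busy: retry later */
	if (err)
		return err;	/* -ESTALE: instance is already dead */

	/* queued: kIrDAd runs sirdev_config_fsm(), which re-enables rx
	 * and wakes the netdev queue on final completion */
	return 0;
}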

/***************************************************************************/
void sirdev_enable_rx(struct sir_dev *dev)
{
	if (unlikely(atomic_read(&dev->enable_rx)))
		return;

	/* flush rx-buffer - should also help in case of problems with echo cancelation */
	dev->rx_buff.data = dev->rx_buff.head;
	dev->rx_buff.len = 0;
	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	atomic_set(&dev->enable_rx, 1);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      71       100.00%   2         100.00%
Total                71       100.00%   2         100.00%


static int sirdev_is_receiving(struct sir_dev *dev)
{
	if (!atomic_read(&dev->enable_rx))
		return 0;

	return dev->rx_buff.state != OUTSIDE_FRAME;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      34       100.00%   1         100.00%
Total                34       100.00%   1         100.00%


int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
	int err;

	pr_debug("%s : requesting dongle %d.\n", __func__, type);

	err = sirdev_schedule_dongle_open(dev, type);
	if (unlikely(err))
		return err;
	down(&dev->fsm.sem);		/* block until config change completed */
	err = dev->fsm.result;
	up(&dev->fsm.sem);
	return err;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      74       97.37%    1         33.33%
harvey harrison      1        1.32%     1         33.33%
joe perches          1        1.32%     1         33.33%
Total                76       100.00%   3         100.00%

EXPORT_SYMBOL(sirdev_set_dongle);

/* used by dongle drivers for dongle programming */
int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len)
{
	unsigned long flags;
	int ret;

	if (unlikely(len > dev->tx_buff.truesize))
		return -ENOSPC;

	spin_lock_irqsave(&dev->tx_lock, flags);	/* serialize with other tx operations */
	while (dev->tx_buff.len > 0) {			/* wait until tx idle */
		spin_unlock_irqrestore(&dev->tx_lock, flags);
		msleep(10);
		spin_lock_irqsave(&dev->tx_lock, flags);
	}

	dev->tx_buff.data = dev->tx_buff.head;
	memcpy(dev->tx_buff.data, buf, len);
	dev->tx_buff.len = len;

	ret = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);
	if (ret > 0) {
		pr_debug("%s(), raw-tx started\n", __func__);

		dev->tx_buff.data += ret;
		dev->tx_buff.len -= ret;
		dev->raw_tx = 1;
		ret = len;		/* all data is going to be sent */
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);
	return ret;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      199      98.51%    3         50.00%
domen puncer         1        0.50%     1         16.67%
harvey harrison      1        0.50%     1         16.67%
joe perches          1        0.50%     1         16.67%
Total                202      100.00%   6         100.00%
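
A hedged sketch of how a dongle driver might use the raw path during programming; the command bytes and the function name are invented for illustration:

static int example_program_dongle(struct sir_dev *dev)
{
	static const char cmd[2] = { 0x01, 0x02 };	/* hypothetical command */
	int ret;

	ret = sirdev_raw_write(dev, cmd, sizeof(cmd));
	if (ret < 0)
		return ret;	/* -ENOSPC, or the driver's do_write failed */

	/* dev->raw_tx is now set, so sirdev_write_complete() will skip
	 * the per-frame post-processing and leave the queue stopped */
	return 0;
}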

EXPORT_SYMBOL(sirdev_raw_write);

/* seems some dongle drivers may need this */
int sirdev_raw_read(struct sir_dev *dev, char *buf, int len)
{
	int count;

	if (atomic_read(&dev->enable_rx))
		return -EIO;		/* fail if we expect irda-frames */

	count = (len < dev->rx_buff.len) ? len : dev->rx_buff.len;

	if (count > 0) {
		memcpy(buf, dev->rx_buff.data, count);
		dev->rx_buff.data += count;
		dev->rx_buff.len -= count;
	}

	/* remaining stuff gets flushed when re-enabling normal rx */

	return count;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      96       100.00%   2         100.00%
Total                96       100.00%   2         100.00%

EXPORT_SYMBOL(sirdev_raw_read);
int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
{
	int ret = -ENXIO;
	if (dev->drv->set_dtr_rts)
		ret = dev->drv->set_dtr_rts(dev, dtr, rts);
	return ret;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      48       100.00%   1         100.00%
Total                48       100.00%   1         100.00%

EXPORT_SYMBOL(sirdev_set_dtr_rts);

/**********************************************************************/

/* called from client driver - likely with bh-context - to indicate
 * it made some progress with transmission. Hence we send the next
 * chunk, if any, or complete the skb otherwise
 */
void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;

	spin_lock_irqsave(&dev->tx_lock, flags);

	pr_debug("%s() - dev->tx_buff.len = %d\n",
		 __func__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0))  {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data,
					    dev->tx_buff.len);

		if (likely(actual>0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len  -= actual;
		}
		else if (unlikely(actual<0)) {
			/* could be dropped later when we have tx_timeout to recover */
			net_err_ratelimited("%s: drv->do_write failed (%d)\n",
					    __func__, actual);
			if ((skb=dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->netdev->stats.tx_errors++;
				dev->netdev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */
		pr_debug("%s(), raw-tx done\n", __func__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware has really
	 * finished sending all data (xmit fifo drained f.e.)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */
	pr_debug("%s(), finished with frame!\n", __func__);

	if ((skb=dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->netdev->stats.tx_packets++;
		dev->netdev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		pr_debug("%s(), Changing speed!\n", __func__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			net_err_ratelimited("%s - schedule speed change failed: %d\n",
					    __func__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      343      91.96%    3         37.50%
andrew morton        10       2.68%     1         12.50%
stephen hemminger    8        2.14%     1         12.50%
harvey harrison      6        1.61%     1         12.50%
joe perches          6        1.61%     2         25.00%
Total                373      100.00%   8         100.00%

EXPORT_SYMBOL(sirdev_write_complete);

/* called from client driver - likely with bh-context - to give us
 * some more received bytes. We put them into the rx-buffer,
 * normally unwrapping and building LAP-skb's (unless rx disabled)
 */
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count)
{
	if (!dev || !dev->netdev) {
		net_warn_ratelimited("%s(), not ready yet!\n", __func__);
		return -1;
	}

	if (!dev->irlap) {
		net_warn_ratelimited("%s - too early: %p / %zd!\n",
				     __func__, cp, count);
		return -1;
	}

	if (cp==NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->netdev->stats.rx_dropped++;
		pr_debug("%s; rx-drop: %zd\n", __func__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->netdev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      196      94.23%    2         28.57%
stephen hemminger    4        1.92%     1         14.29%
harvey harrison      3        1.44%     1         14.29%
joe perches          3        1.44%     2         28.57%
andrew morton        2        0.96%     1         14.29%
Total                208      100.00%   7         100.00%
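
From a client driver's perspective the receive side reduces to feeding raw UART bytes into sirdev_receive(); a hedged sketch (the handler name and the way bytes are obtained are illustrative):

/* Called from the driver's irq/bh path with freshly received bytes. */
static void example_rx_ready(struct sir_dev *dev,
			     const unsigned char *buf, size_t n)
{
	/* While rx is enabled this unwraps/destuffs into IrLAP skbs;
	 * during dongle programming the bytes are merely buffered for
	 * sirdev_raw_read(). */
	if (sirdev_receive(dev, buf, n) < 0)
		pr_debug("example: device not ready, %zu bytes dropped\n", n);
}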

EXPORT_SYMBOL(sirdev_receive);

/**********************************************************************/

/* callbacks from network layer */
static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	unsigned long flags;
	int actual = 0;
	int err;
	s32 speed;

	IRDA_ASSERT(dev != NULL, return NETDEV_TX_OK;);

	netif_stop_queue(ndev);

	pr_debug("%s(), skb->len = %d\n", __func__, skb->len);

	speed = irda_get_next_speed(skb);
	if ((speed != dev->speed) && (speed != -1)) {
		if (!skb->len) {
			err = sirdev_schedule_speed(dev, speed);
			if (unlikely(err == -EWOULDBLOCK)) {
				/* Failed to initiate the speed change, likely the fsm
				 * is still busy (pretty unlikely, but...)
				 * We refuse to accept the skb and return with the queue
				 * stopped so the network layer will retry after the
				 * fsm completes and wakes the queue.
				 */
				return NETDEV_TX_BUSY;
			}
			else if (unlikely(err)) {
				/* other fatal error - forget the speed change and
				 * hope the stack will recover somehow
				 */
				netif_start_queue(ndev);
			}
			/* else: success
			 *	speed change in progress now
			 *	on completion the queue gets restarted
			 */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		} else
			dev->new_speed = speed;
	}

	/* Init tx buffer*/
	dev->tx_buff.data = dev->tx_buff.head;

	/* Check problems */
	if(spin_is_locked(&dev->tx_lock)) {
		pr_debug("%s(), write not completed\n", __func__);
	}

	/* serialize with write completion */
	spin_lock_irqsave(&dev->tx_lock, flags);

	/* Copy skb to tx_buff while wrapping, stuffing and making CRC */
	dev->tx_buff.len = async_wrap_skb(skb, dev->tx_buff.data, dev->tx_buff.truesize);

	/* transmission will start now - disable receive.
	 * if we are just in the middle of an incoming frame,
	 * treat it as collision. probably it's a good idea to
	 * reset the rx_buf OUTSIDE_FRAME in this case too?
	 */
	atomic_set(&dev->enable_rx, 0);
	if (unlikely(sirdev_is_receiving(dev)))
		dev->netdev->stats.collisions++;

	actual = dev->drv->do_write(dev, dev->tx_buff.data, dev->tx_buff.len);

	if (likely(actual > 0)) {
		dev->tx_skb = skb;
		dev->tx_buff.data += actual;
		dev->tx_buff.len -= actual;
	}
	else if (unlikely(actual < 0)) {
		/* could be dropped later when we have tx_timeout to recover */
		net_err_ratelimited("%s: drv->do_write failed (%d)\n",
				    __func__, actual);
		dev_kfree_skb_any(skb);
		dev->netdev->stats.tx_errors++;
		dev->netdev->stats.tx_dropped++;
		netif_wake_queue(ndev);
	}
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return NETDEV_TX_OK;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      353      94.64%    3         25.00%
stephen hemminger    7        1.88%     2         16.67%
patrick mchardy      4        1.07%     3         25.00%
wang chen            3        0.80%     1         8.33%
joe perches          3        0.80%     2         16.67%
harvey harrison      3        0.80%     1         8.33%
Total                373      100.00%   12        100.00%

/* called from network layer with rtnl hold */
static int sirdev_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *) rq;
	struct sir_dev *dev = netdev_priv(ndev);
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, ndev->name, cmd);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_speed(dev, irq->ifr_baudrate);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSDONGLE: /* Set dongle */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dongle_open(dev, irq->ifr_dongle);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			irda_device_set_media_busy(dev->netdev, TRUE);
		break;

	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = sirdev_is_receiving(dev);
		break;

	case SIOCSDTRRTS:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_dtr_rts(dev, irq->ifr_dtr, irq->ifr_rts);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;

	case SIOCSMODE:
#if 0
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			ret = sirdev_schedule_mode(dev, irq->ifr_mode);
		/* cannot sleep here for completion
		 * we are called from network layer with rtnl hold
		 */
		break;
#endif
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      227      97.84%    2         40.00%
wang chen            3        1.29%     1         20.00%
harvey harrison      1        0.43%     1         20.00%
joe perches          1        0.43%     1         20.00%
Total                232      100.00%   5         100.00%

/* ----------------------------------------------------------------------------- */

#define SIRBUF_ALLOCSIZE 4269	/* worst case size of a wrapped IrLAP frame */
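
The original source does not break the 4269 down; a hedged reading of the arithmetic: a maximal IrLAP frame is roughly 2050 bytes (2048-byte payload plus 16-bit FCS), worst-case SIR byte stuffing can double all of it to about 4100 bytes, and the remainder covers the BOF/EOF flags plus the leading XBOF padding that async_wrap_skb() emits.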
static int sirdev_alloc_buffers(struct sir_dev *dev)
{
	dev->tx_buff.truesize = SIRBUF_ALLOCSIZE;
	dev->rx_buff.truesize = IRDA_SKB_MAX_MTU;

	/* Bootstrap ZeroCopy Rx */
	dev->rx_buff.skb = __netdev_alloc_skb(dev->netdev, dev->rx_buff.truesize,
					      GFP_KERNEL);
	if (dev->rx_buff.skb == NULL)
		return -ENOMEM;
	skb_reserve(dev->rx_buff.skb, 1);
	dev->rx_buff.head = dev->rx_buff.skb->data;

	dev->tx_buff.head = kmalloc(dev->tx_buff.truesize, GFP_KERNEL);
	if (dev->tx_buff.head == NULL) {
		kfree_skb(dev->rx_buff.skb);
		dev->rx_buff.skb = NULL;
		dev->rx_buff.head = NULL;
		return -ENOMEM;
	}

	dev->tx_buff.data = dev->tx_buff.head;
	dev->rx_buff.data = dev->rx_buff.head;
	dev->tx_buff.len = 0;
	dev->rx_buff.len = 0;

	dev->rx_buff.in_frame = FALSE;
	dev->rx_buff.state = OUTSIDE_FRAME;
	return 0;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      200      97.56%    2         66.67%
david s. miller      5        2.44%     1         33.33%
Total                205      100.00%   3         100.00%

static void sirdev_free_buffers(struct sir_dev *dev)
{
	kfree_skb(dev->rx_buff.skb);
	kfree(dev->tx_buff.head);
	dev->rx_buff.head = dev->tx_buff.head = NULL;
	dev->rx_buff.skb = NULL;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      51       100.00%   2         100.00%
Total                51       100.00%   2         100.00%


static int sirdev_open(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv = dev->drv;

	if (!drv)
		return -ENODEV;

	/* increase the reference count of the driver module before doing serious stuff */
	if (!try_module_get(drv->owner))
		return -ESTALE;

	if (sirdev_alloc_buffers(dev))
		goto errout_dec;

	if (!dev->drv->start_dev || dev->drv->start_dev(dev))
		goto errout_free;

	sirdev_enable_rx(dev);
	dev->raw_tx = 0;

	netif_start_queue(ndev);
	dev->irlap = irlap_open(ndev, &dev->qos, dev->hwname);
	if (!dev->irlap)
		goto errout_stop;

	netif_wake_queue(ndev);

	pr_debug("%s - done, speed = %d\n", __func__, dev->speed);

	return 0;

errout_stop:
	atomic_set(&dev->enable_rx, 0);
	if (dev->drv->stop_dev)
		dev->drv->stop_dev(dev);
errout_free:
	sirdev_free_buffers(dev);
errout_dec:
	module_put(drv->owner);
	return -EAGAIN;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      191      96.46%    2         28.57%
wang chen            3        1.52%     1         14.29%
christoph hellwig    2        1.01%     2         28.57%
joe perches          1        0.51%     1         14.29%
harvey harrison      1        0.51%     1         14.29%
Total                198      100.00%   7         100.00%


static int sirdev_close(struct net_device *ndev)
{
	struct sir_dev *dev = netdev_priv(ndev);
	const struct sir_driver *drv;

	/* pr_debug("%s\n", __func__); */

	netif_stop_queue(ndev);

	down(&dev->fsm.sem);		/* block on pending config completion */

	atomic_set(&dev->enable_rx, 0);

	if (unlikely(!dev->irlap))
		goto out;
	irlap_close(dev->irlap);
	dev->irlap = NULL;

	drv = dev->drv;
	if (unlikely(!drv || !dev->priv))
		goto out;

	if (drv->stop_dev)
		drv->stop_dev(dev);

	sirdev_free_buffers(dev);
	module_put(drv->owner);

out:
	dev->speed = 0;
	up(&dev->fsm.sem);
	return 0;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      143      96.62%    1         25.00%
wang chen            3        2.03%     1         25.00%
joe perches          1        0.68%     1         25.00%
christoph hellwig    1        0.68%     1         25.00%
Total                148      100.00%   4         100.00%

static const struct net_device_ops sirdev_ops = {
	.ndo_start_xmit	= sirdev_hard_xmit,
	.ndo_open	= sirdev_open,
	.ndo_stop	= sirdev_close,
	.ndo_do_ioctl	= sirdev_ioctl,
};

/* ----------------------------------------------------------------------------- */
struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name)
{
	struct net_device *ndev;
	struct sir_dev *dev;

	pr_debug("%s - %s\n", __func__, name);

	/* instead of adding tests to protect against drv->do_write==NULL
	 * at several places we refuse to create a sir_dev instance for
	 * drivers which don't implement do_write.
	 */
	if (!drv || !drv->do_write)
		return NULL;

	/*
	 * Allocate new instance of the device
	 */
	ndev = alloc_irdadev(sizeof(*dev));
	if (ndev == NULL) {
		net_err_ratelimited("%s - Can't allocate memory for IrDA control block!\n",
				    __func__);
		goto out;
	}
	dev = netdev_priv(ndev);

	irda_init_max_qos_capabilies(&dev->qos);
	dev->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	dev->qos.min_turn_time.bits = drv->qos_mtt_bits;
	irda_qos_bits_to_value(&dev->qos);

	strncpy(dev->hwname, name, sizeof(dev->hwname)-1);

	atomic_set(&dev->enable_rx, 0);
	dev->tx_skb = NULL;

	spin_lock_init(&dev->tx_lock);
	sema_init(&dev->fsm.sem, 1);

	dev->drv = drv;
	dev->netdev = ndev;

	/* Override the network functions we need to use */
	ndev->netdev_ops = &sirdev_ops;

	if (register_netdev(ndev)) {
		net_err_ratelimited("%s(), register_netdev() failed!\n", __func__);
		goto out_freenetdev;
	}

	return dev;

out_freenetdev:
	free_netdev(ndev);
out:
	return NULL;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      216      89.26%    1         10.00%
stephen hemminger    11       4.55%     2         20.00%
thomas gleixner      3        1.24%     1         10.00%
wang chen            3        1.24%     1         10.00%
joe perches          3        1.24%     2         20.00%
harvey harrison      3        1.24%     1         10.00%
martin diehl         2        0.83%     1         10.00%
michael hayes        1        0.41%     1         10.00%
Total                242      100.00%   10        100.00%

EXPORT_SYMBOL(sirdev_get_instance);
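
To tie the pieces together, a hedged sketch of a minimal client driver registering with the SIR core. struct sir_driver is defined in sir-dev.h (not shown here); only callbacks this file actually invokes are filled in, the signatures are inferred from the call sites above, and the my_* functions plus the qos_mtt_bits value are placeholders:

static int my_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t len);
static int my_start_dev(struct sir_dev *dev);
static int my_stop_dev(struct sir_dev *dev);
static int my_set_speed(struct sir_dev *dev, unsigned speed);
static int my_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);

static const struct sir_driver my_sir_driver = {
	.owner		= THIS_MODULE,
	.qos_mtt_bits	= 0x07,		/* illustrative min-turn-time bits */
	.do_write	= my_do_write,	/* mandatory - checked above */
	.start_dev	= my_start_dev,
	.stop_dev	= my_stop_dev,
	.set_speed	= my_set_speed,
	.set_dtr_rts	= my_set_dtr_rts,
};

static int __init my_probe(void)
{
	struct sir_dev *dev = sirdev_get_instance(&my_sir_driver, "my-uart");

	return dev ? 0 : -ENOMEM;	/* a netdev was registered on success */
}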
int sirdev_put_instance(struct sir_dev *dev)
{
	int err = 0;

	pr_debug("%s\n", __func__);

	atomic_set(&dev->enable_rx, 0);

	netif_carrier_off(dev->netdev);
	netif_device_detach(dev->netdev);

	if (dev->dongle_drv)
		err = sirdev_schedule_dongle_close(dev);
	if (err)
		net_err_ratelimited("%s - error %d\n", __func__, err);

	sirdev_close(dev->netdev);

	down(&dev->fsm.sem);
	dev->fsm.state = SIRDEV_STATE_DEAD;	/* mark staled */
	dev->dongle_drv = NULL;
	dev->priv = NULL;
	up(&dev->fsm.sem);

	/* Remove netdevice */
	unregister_netdev(dev->netdev);

	free_netdev(dev->netdev);

	return 0;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      131      94.93%    1         20.00%
stephen hemminger    3        2.17%     1         20.00%
harvey harrison      2        1.45%     1         20.00%
joe perches          2        1.45%     2         40.00%
Total                138      100.00%   5         100.00%

EXPORT_SYMBOL(sirdev_put_instance);
static int __init sir_wq_init(void)
{
	irda_sir_wq = create_singlethread_workqueue("irda_sir_wq");
	if (!irda_sir_wq)
		return -ENOMEM;
	return 0;
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
christoph hellwig    28       100.00%   1         100.00%
Total                28       100.00%   1         100.00%


static void __exit sir_wq_exit(void)
{
	destroy_workqueue(irda_sir_wq);
}

Contributors

Person               Tokens   Prop      Commits   CommitProp
christoph hellwig    14       100.00%   1         100.00%
Total                14       100.00%   1         100.00%

module_init(sir_wq_init);
module_exit(sir_wq_exit);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");

Overall Contributors

Person               Tokens   Prop      Commits   CommitProp
jean tourrilhes      2612     65.76%    6         18.75%
christoph hellwig    1127     28.37%    3         9.38%
stephen hemminger    61       1.54%     4         12.50%
adrian bunk          40       1.01%     1         3.12%
joe perches          30       0.76%     2         6.25%
harvey harrison      29       0.73%     1         3.12%
david howells        20       0.50%     1         3.12%
wang chen            15       0.38%     1         3.12%
andrew morton        12       0.30%     2         6.25%
david s. miller      5        0.13%     1         3.12%
domen puncer         4        0.10%     1         3.12%
patrick mchardy      4        0.10%     3         9.38%
alexey dobriyan      3        0.08%     1         3.12%
thomas gleixner      3        0.08%     1         3.12%
tejun heo            3        0.08%     1         3.12%
martin diehl         2        0.05%     1         3.12%
alexander shiyan     1        0.03%     1         3.12%
michael hayes        1        0.03%     1         3.12%
Total                3972     100.00%   32        100.00%