Release 4.11 drivers/net/ifb.c
/* drivers/net/ifb.c:

        The purpose of this driver is to provide a device that allows
        for sharing of resources:

        1) qdiscs/policies that are per device as opposed to system wide.
        ifb allows for a device which can be redirected to, thus providing
        an impression of sharing.

        2) Allows for queueing incoming traffic for shaping instead of
        dropping.

        The original concept is based on what is known as the IMQ
        driver, initially written by Martin Devera, later rewritten
        by Patrick McHardy and then maintained by Andre Correa.

        You need the tc action mirror or redirect to feed this device
        packets.

        This program is free software; you can redistribute it and/or
        modify it under the terms of the GNU General Public License
        as published by the Free Software Foundation; either version
        2 of the License, or (at your option) any later version.

        Authors:        Jamal Hadi Salim (2005)
*/
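As the header comment notes, packets only reach an ifb device through the tc mirred action. A minimal usage sketch follows; the interface names (eth0, ifb0), the catch-all u32 match, and the sfq qdisc are illustrative choices, not taken from this file:

        modprobe ifb numifbs=1
        ip link set dev ifb0 up
        tc qdisc add dev eth0 handle ffff: ingress
        tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
                action mirred egress redirect dev ifb0
        tc qdisc add dev ifb0 root sfq

Ingress traffic arriving on eth0 is then redirected to ifb0, where the egress qdisc installed on ifb0 (here sfq) can queue and shape it before ifb_ri_tasklet hands it back to the stack.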
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
#define TX_Q_LIMIT 32
struct ifb_q_private {
        struct net_device       *dev;
        struct tasklet_struct   ifb_tasklet;
        int                     tasklet_pending;
        int                     txqnum;
        struct sk_buff_head     rq;
        u64                     rx_packets;
        u64                     rx_bytes;
        struct u64_stats_sync   rsync;

        struct u64_stats_sync   tsync;
        u64                     tx_packets;
        u64                     tx_bytes;
        struct sk_buff_head     tq;
} ____cacheline_aligned_in_smp;

struct ifb_dev_private {
        struct ifb_q_private *tx_private;
};
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
static void ifb_ri_tasklet(unsigned long _txp)
{
        struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
        struct netdev_queue *txq;
        struct sk_buff *skb;

        txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
        skb = skb_peek(&txp->tq);
        if (!skb) {
                if (!__netif_tx_trylock(txq))
                        goto resched;
                skb_queue_splice_tail_init(&txp->rq, &txp->tq);
                __netif_tx_unlock(txq);
        }

        while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
                skb->tc_redirected = 0;
                skb->tc_skip_classify = 1;

                u64_stats_update_begin(&txp->tsync);
                txp->tx_packets++;
                txp->tx_bytes += skb->len;
                u64_stats_update_end(&txp->tsync);

                rcu_read_lock();
                skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
                if (!skb->dev) {
                        rcu_read_unlock();
                        dev_kfree_skb(skb);
                        txp->dev->stats.tx_dropped++;
                        if (skb_queue_len(&txp->tq) != 0)
                                goto resched;
                        break;
                }
                rcu_read_unlock();
                skb->skb_iif = txp->dev->ifindex;

                if (!skb->tc_from_ingress) {
                        dev_queue_xmit(skb);
                } else {
                        skb_pull(skb, skb->mac_len);
                        netif_receive_skb(skb);
                }
        }

        if (__netif_tx_trylock(txq)) {
                skb = skb_peek(&txp->rq);
                if (!skb) {
                        txp->tasklet_pending = 0;
                        if (netif_tx_queue_stopped(txq))
                                netif_tx_wake_queue(txq);
                } else {
                        __netif_tx_unlock(txq);
                        goto resched;
                }
                __netif_tx_unlock(txq);
        } else {
resched:
                txp->tasklet_pending = 1;
                tasklet_schedule(&txp->ifb_tasklet);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 163 | 48.51% | 1 | 5.88% |
Eric Dumazet | 69 | 20.54% | 7 | 41.18% |
Patrick McHardy | 38 | 11.31% | 1 | 5.88% |
David S. Miller | 22 | 6.55% | 1 | 5.88% |
Stephen Hemminger | 16 | 4.76% | 1 | 5.88% |
Changli Gao | 16 | 4.76% | 2 | 11.76% |
Willem de Bruijn | 10 | 2.98% | 2 | 11.76% |
Herbert Xu | 1 | 0.30% | 1 | 5.88% |
Eric W. Biedermann | 1 | 0.30% | 1 | 5.88% |
Total | 336 | 100.00% | 17 | 100.00% |
static void ifb_stats64(struct net_device *dev,
                        struct rtnl_link_stats64 *stats)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private;
        unsigned int start;
        u64 packets, bytes;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++, txp++) {
                do {
                        start = u64_stats_fetch_begin_irq(&txp->rsync);
                        packets = txp->rx_packets;
                        bytes = txp->rx_bytes;
                } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
                stats->rx_packets += packets;
                stats->rx_bytes += bytes;

                do {
                        start = u64_stats_fetch_begin_irq(&txp->tsync);
                        packets = txp->tx_packets;
                        bytes = txp->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
                stats->tx_packets += packets;
                stats->tx_bytes += bytes;
        }
        stats->rx_dropped = dev->stats.rx_dropped;
        stats->tx_dropped = dev->stats.tx_dropped;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 109 | 58.29% | 2 | 50.00% |
Eric Dumazet | 74 | 39.57% | 1 | 25.00% |
Eric W. Biedermann | 4 | 2.14% | 1 | 25.00% |
Total | 187 | 100.00% | 4 | 100.00% |
static int ifb_dev_init(struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp;
        int i;

        txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
        if (!txp)
                return -ENOMEM;
        dp->tx_private = txp;
        for (i = 0; i < dev->num_tx_queues; i++, txp++) {
                txp->txqnum = i;
                txp->dev = dev;
                __skb_queue_head_init(&txp->rq);
                __skb_queue_head_init(&txp->tq);
                u64_stats_init(&txp->rsync);
                u64_stats_init(&txp->tsync);
                tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
                             (unsigned long)txp);
                netif_tx_start_queue(netdev_get_tx_queue(dev, i));
        }
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 154 | 100.00% | 1 | 100.00% |
Total | 154 | 100.00% | 1 | 100.00% |
static const struct net_device_ops ifb_netdev_ops = {
        .ndo_open          = ifb_open,
        .ndo_stop          = ifb_close,
        .ndo_get_stats64   = ifb_stats64,
        .ndo_start_xmit    = ifb_xmit,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_init          = ifb_dev_init,
};

#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
                      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 |    \
                      NETIF_F_GSO_ENCAP_ALL |                           \
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX |       \
                      NETIF_F_HW_VLAN_STAG_TX)
static void ifb_dev_free(struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++, txp++) {
                tasklet_kill(&txp->ifb_tasklet);
                __skb_queue_purge(&txp->rq);
                __skb_queue_purge(&txp->tq);
        }
        kfree(dp->tx_private);
        free_netdev(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 75 | 84.27% | 1 | 33.33% |
Jamal Hadi Salim | 13 | 14.61% | 1 | 33.33% |
Stephen Hemminger | 1 | 1.12% | 1 | 33.33% |
Total | 89 | 100.00% | 3 | 100.00% |
static void ifb_setup(struct net_device *dev)
{
        /* Initialize the device structure. */
        dev->netdev_ops = &ifb_netdev_ops;

        /* Fill in device structure with ethernet-generic values. */
        ether_setup(dev);
        dev->tx_queue_len = TX_Q_LIMIT;

        dev->features |= IFB_FEATURES;
        dev->hw_features |= dev->features;
        dev->hw_enc_features |= dev->features;
        dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);

        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
        eth_hw_addr_random(dev);
        dev->destructor = ifb_dev_free;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 56 | 54.90% | 5 | 45.45% |
Jamal Hadi Salim | 34 | 33.33% | 1 | 9.09% |
Vlad Yasevich | 7 | 6.86% | 1 | 9.09% |
Stephen Hemminger | 2 | 1.96% | 1 | 9.09% |
Neil Horman | 1 | 0.98% | 1 | 9.09% |
Danny Kukawka | 1 | 0.98% | 1 | 9.09% |
Patrick McHardy | 1 | 0.98% | 1 | 9.09% |
Total | 102 | 100.00% | 11 | 100.00% |
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

        u64_stats_update_begin(&txp->rsync);
        txp->rx_packets++;
        txp->rx_bytes += skb->len;
        u64_stats_update_end(&txp->rsync);

        if (!skb->tc_redirected || !skb->skb_iif) {
                dev_kfree_skb(skb);
                dev->stats.rx_dropped++;
                return NETDEV_TX_OK;
        }

        if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
                netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

        __skb_queue_tail(&txp->rq, skb);
        if (!txp->tasklet_pending) {
                txp->tasklet_pending = 1;
                tasklet_schedule(&txp->ifb_tasklet);
        }

        return NETDEV_TX_OK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 100 | 62.50% | 1 | 11.11% |
Eric Dumazet | 33 | 20.62% | 2 | 22.22% |
Stephen Hemminger | 20 | 12.50% | 2 | 22.22% |
Willem de Bruijn | 4 | 2.50% | 2 | 22.22% |
Dean Gaudet | 2 | 1.25% | 1 | 11.11% |
Changli Gao | 1 | 0.62% | 1 | 11.11% |
Total | 160 | 100.00% | 9 | 100.00% |
static int ifb_close(struct net_device *dev)
{
        netif_tx_stop_all_queues(dev);
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 18 | 94.74% | 1 | 50.00% |
Eric Dumazet | 1 | 5.26% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static int ifb_open(struct net_device *dev)
{
        netif_tx_start_all_queues(dev);
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 18 | 94.74% | 1 | 50.00% |
Eric Dumazet | 1 | 5.26% | 1 | 50.00% |
Total | 19 | 100.00% | 2 | 100.00% |
static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
        .kind      = "ifb",
        .priv_size = sizeof(struct ifb_dev_private),
        .setup     = ifb_setup,
        .validate  = ifb_validate,
};
/* Number of ifb devices to be set up by this module.
* Note that these legacy devices have one queue.
* Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
*/
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
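For example, loading the module with a larger count (a hypothetical invocation, not shown in this file) creates that many legacy single-queue devices:

        modprobe ifb numifbs=4        # creates ifb0 .. ifb3, one tx queue each

As the comment above recommends, multi-queue instances are better created at runtime, e.g. ip link add ifb10 numtxqueues 8 type ifb.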
static int __init ifb_init_one(int index)
{
        struct net_device *dev_ifb;
        int err;

        dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
                               NET_NAME_UNKNOWN, ifb_setup);
        if (!dev_ifb)
                return -ENOMEM;

        dev_ifb->rtnl_link_ops = &ifb_link_ops;
        err = register_netdevice(dev_ifb);
        if (err < 0)
                goto err;

        return 0;

err:
        free_netdev(dev_ifb);
        return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 50 | 62.50% | 1 | 25.00% |
Patrick McHardy | 27 | 33.75% | 1 | 25.00% |
Tom Gundersen | 2 | 2.50% | 1 | 25.00% |
Eric Dumazet | 1 | 1.25% | 1 | 25.00% |
Total | 80 | 100.00% | 4 | 100.00% |
static int __init ifb_init_module(void)
{
        int i, err;

        rtnl_lock();
        err = __rtnl_link_register(&ifb_link_ops);
        if (err < 0)
                goto out;

        for (i = 0; i < numifbs && !err; i++) {
                err = ifb_init_one(i);
                cond_resched();
        }
        if (err)
                __rtnl_link_unregister(&ifb_link_ops);

out:
        rtnl_unlock();

        return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 44 | 55.00% | 1 | 25.00% |
Patrick McHardy | 20 | 25.00% | 1 | 25.00% |
Ding Tianhong | 16 | 20.00% | 2 | 50.00% |
Total | 80 | 100.00% | 4 | 100.00% |
static void __exit ifb_cleanup_module(void)
{
        rtnl_link_unregister(&ifb_link_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 9 | 60.00% | 1 | 33.33% |
Patrick McHardy | 6 | 40.00% | 2 | 66.67% |
Total | 15 | 100.00% | 3 | 100.00% |
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 546 | 35.16% | 1 | 2.56% |
Eric Dumazet | 506 | 32.58% | 11 | 28.21% |
Patrick McHardy | 208 | 13.39% | 4 | 10.26% |
Stephen Hemminger | 198 | 12.75% | 5 | 12.82% |
David S. Miller | 22 | 1.42% | 1 | 2.56% |
Changli Gao | 17 | 1.09% | 2 | 5.13% |
Ding Tianhong | 16 | 1.03% | 2 | 5.13% |
Willem de Bruijn | 14 | 0.90% | 3 | 7.69% |
Eric W. Biedermann | 8 | 0.52% | 2 | 5.13% |
Vlad Yasevich | 7 | 0.45% | 1 | 2.56% |
Alexey Dobriyan | 3 | 0.19% | 1 | 2.56% |
Tom Gundersen | 2 | 0.13% | 1 | 2.56% |
Dean Gaudet | 2 | 0.13% | 1 | 2.56% |
Danny Kukawka | 1 | 0.06% | 1 | 2.56% |
Neil Horman | 1 | 0.06% | 1 | 2.56% |
Jeff Garzik | 1 | 0.06% | 1 | 2.56% |
Herbert Xu | 1 | 0.06% | 1 | 2.56% |
Total | 1553 | 100.00% | 39 | 100.00% |