Release 4.11 net/sched/sch_generic.c
/*
* net/sched/sch_generic.c Generic packet scheduler routines.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
* Jamal Hadi Salim, <hadi@cyberus.ca> 990601
* - Ingress support
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
* qdisc_lock(qdisc) spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via qdisc root lock
* - ingress filtering is also serialized via qdisc root lock
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
        q->gso_skb = skb;
        q->qstats.requeues++;
        qdisc_qstats_backlog_inc(q, skb);
        q->q.qlen++;    /* it's still part of the queue */
        __netif_schedule(q);

        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 19 | 35.85% | 1 | 11.11% |
Jarek Poplawski | 13 | 24.53% | 2 | 22.22% |
Krishna Kumar | 9 | 16.98% | 2 | 22.22% |
Américo Wang | 7 | 13.21% | 1 | 11.11% |
David S. Miller | 2 | 3.77% | 1 | 11.11% |
Linus Torvalds (pre-git) | 2 | 3.77% | 1 | 11.11% |
Andi Kleen | 1 | 1.89% | 1 | 11.11% |
Total | 53 | 100.00% | 9 | 100.00% |
static void try_bulk_dequeue_skb(struct Qdisc *q,
                                 struct sk_buff *skb,
                                 const struct netdev_queue *txq,
                                 int *packets)
{
        int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;

        while (bytelimit > 0) {
                struct sk_buff *nskb = q->dequeue(q);

                if (!nskb)
                        break;

                bytelimit -= nskb->len; /* covers GSO len */
                skb->next = nskb;
                skb = nskb;
                (*packets)++; /* GSO counts as one pkt */
        }
        skb->next = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 62 | 65.96% | 3 | 75.00% |
Eric Dumazet | 32 | 34.04% | 1 | 25.00% |
Total | 94 | 100.00% | 4 | 100.00% |
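The byte budget above comes from BQL via qdisc_avail_bulklimit(), and the loop may overshoot by at most one packet because the budget is checked before the next dequeue. Below is a minimal user-space sketch of that accounting only; the pkt struct, the helper names and the budget value are illustrative stand-ins for sk_buff, the qdisc and qdisc_avail_bulklimit(), not kernel APIs.

#include <stdio.h>
#include <stddef.h>

struct pkt {
        int len;
        struct pkt *next;
};

static struct pkt *dequeue_one(struct pkt **head)
{
        struct pkt *p = *head;

        if (p)
                *head = p->next;
        return p;
}

/* Chain more packets behind 'first' while the byte budget lasts;
 * mirrors the bytelimit bookkeeping in try_bulk_dequeue_skb(). */
static void bulk_dequeue(struct pkt **head, struct pkt *first,
                         int budget, int *packets)
{
        int bytelimit = budget - first->len;
        struct pkt *tail = first;

        while (bytelimit > 0) {
                struct pkt *nskb = dequeue_one(head);

                if (!nskb)
                        break;
                bytelimit -= nskb->len;
                tail->next = nskb;
                tail = nskb;
                (*packets)++;
        }
        tail->next = NULL;
}

int main(void)
{
        struct pkt p3 = { 700, NULL }, p2 = { 400, &p3 }, p1 = { 1000, &p2 };
        struct pkt *head = &p1;
        struct pkt *first = dequeue_one(&head);
        int packets = 1;

        bulk_dequeue(&head, first, 1300, &packets);
        for (; first; first = first->next)
                printf("chained %d bytes\n", first->len);       /* p1 and p2 */
        printf("packets=%d, still queued: %s\n", packets,
               head ? "yes" : "no");                            /* p3 stays queued */
        return 0;
}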
/* This variant of try_bulk_dequeue_skb() makes sure
 * all skbs in the chain are for the same txq
 */
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
                                      struct sk_buff *skb,
                                      int *packets)
{
        int mapping = skb_get_queue_mapping(skb);
        struct sk_buff *nskb;
        int cnt = 0;

        do {
                nskb = q->dequeue(q);
                if (!nskb)
                        break;
                if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
                        q->skb_bad_txq = nskb;
                        qdisc_qstats_backlog_inc(q, nskb);
                        q->q.qlen++;
                        break;
                }
                skb->next = nskb;
                skb = nskb;
        } while (++cnt < 8);
        (*packets) += cnt;
        skb->next = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 122 | 100.00% | 1 | 100.00% |
Total | 122 | 100.00% | 1 | 100.00% |
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be a SKB list.
 */
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
                                   int *packets)
{
        struct sk_buff *skb = q->gso_skb;
        const struct netdev_queue *txq = q->dev_queue;

        *packets = 1;
        if (unlikely(skb)) {
                /* skbs in gso_skb were already validated */
                *validate = false;
                /* check the reason for requeuing without taking the tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        qdisc_qstats_backlog_dec(q, skb);
                        q->q.qlen--;
                } else
                        skb = NULL;
                return skb;
        }
        *validate = true;
        skb = q->skb_bad_txq;
        if (unlikely(skb)) {
                /* check the reason for requeuing without taking the tx lock first */
                txq = skb_get_tx_queue(txq->dev, skb);
                if (!netif_xmit_frozen_or_stopped(txq)) {
                        q->skb_bad_txq = NULL;
                        qdisc_qstats_backlog_dec(q, skb);
                        q->q.qlen--;
                        goto bulk;
                }
                return NULL;
        }
        if (!(q->flags & TCQ_F_ONETXQUEUE) ||
            !netif_xmit_frozen_or_stopped(txq))
                skb = q->dequeue(q);
        if (skb) {
bulk:
                if (qdisc_may_bulk(q))
                        try_bulk_dequeue_skb(q, skb, txq, packets);
                else
                        try_bulk_dequeue_skb_slow(q, skb, packets);
        }
        return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 128 | 52.67% | 3 | 20.00% |
Jarek Poplawski | 37 | 15.23% | 2 | 13.33% |
Jamal Hadi Salim | 29 | 11.93% | 1 | 6.67% |
Jesper Dangaard Brouer | 23 | 9.47% | 2 | 13.33% |
Krishna Kumar | 9 | 3.70% | 1 | 6.67% |
David S. Miller | 8 | 3.29% | 3 | 20.00% |
Américo Wang | 7 | 2.88% | 1 | 6.67% |
Daniel Borkmann | 1 | 0.41% | 1 | 6.67% |
Tom Herbert | 1 | 0.41% | 1 | 6.67% |
Total | 243 | 100.00% | 15 | 100.00% |
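dequeue_skb() tries three sources in a fixed order: a previously requeued gso_skb (already validated, so *validate is cleared), then a packet parked in skb_bad_txq, and only then the qdisc's own ->dequeue(); the first two are handed back only once their tx queue is no longer frozen or stopped. The user-space sketch below models just that decision ladder; the struct, strings and the boolean standing in for netif_xmit_frozen_or_stopped() are illustrative, not kernel types.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct q {
        const char *gso_skb;     /* requeued packet, if any */
        const char *skb_bad_txq; /* packet parked for a different txq */
        const char *queued;      /* what ->dequeue() would return */
        bool txq_stopped;        /* stand-in for netif_xmit_frozen_or_stopped() */
};

static const char *dequeue_skb(struct q *q, bool *validate)
{
        if (q->gso_skb) {
                *validate = false;              /* was validated before the requeue */
                if (q->txq_stopped)
                        return NULL;            /* leave it requeued for later */
                const char *skb = q->gso_skb;
                q->gso_skb = NULL;
                return skb;
        }
        *validate = true;
        if (q->skb_bad_txq) {
                if (q->txq_stopped)
                        return NULL;
                const char *skb = q->skb_bad_txq;
                q->skb_bad_txq = NULL;
                return skb;
        }
        const char *skb = q->queued;
        q->queued = NULL;
        return skb;
}

int main(void)
{
        struct q q = { "requeued", "bad-txq", "fresh", false };
        bool validate;

        /* Drains in priority order: requeued, then bad-txq, then fresh. */
        for (int i = 0; i < 3; i++)
                printf("%s (validate=%d)\n", dequeue_skb(&q, &validate), validate);
        return 0;
}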
/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the running seqcount guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *      0  - queue is empty or throttled.
 *      >0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                    struct net_device *dev, struct netdev_queue *txq,
                    spinlock_t *root_lock, bool validate)
{
        int ret = NETDEV_TX_BUSY;

        /* And release qdisc */
        spin_unlock(root_lock);

        /* Note that we validate skb (GSO, checksum, ...) outside of locks */
        if (validate)
                skb = validate_xmit_skb_list(skb, dev);

        if (likely(skb)) {
                HARD_TX_LOCK(dev, txq, smp_processor_id());
                if (!netif_xmit_frozen_or_stopped(txq))
                        skb = dev_hard_start_xmit(skb, dev, txq, &ret);

                HARD_TX_UNLOCK(dev, txq);
        } else {
                spin_lock(root_lock);
                return qdisc_qlen(q);
        }
        spin_lock(root_lock);

        if (dev_xmit_complete(ret)) {
                /* Driver sent out skb successfully or skb was consumed */
                ret = qdisc_qlen(q);
        } else {
                /* Driver returned NETDEV_TX_BUSY - requeue skb */
                if (unlikely(ret != NETDEV_TX_BUSY))
                        net_warn_ratelimited("BUG %s code %d qlen %d\n",
                                             dev->name, ret, q->q.qlen);

                ret = dev_requeue_skb(skb, q);
        }

        if (ret && netif_xmit_frozen_or_stopped(txq))
                ret = 0;

        return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 45 | 22.96% | 2 | 7.69% |
David S. Miller | 31 | 15.82% | 6 | 23.08% |
Krishna Kumar | 31 | 15.82% | 2 | 7.69% |
Eric Dumazet | 25 | 12.76% | 2 | 7.69% |
Lars Persson | 16 | 8.16% | 1 | 3.85% |
Linus Torvalds (pre-git) | 15 | 7.65% | 4 | 15.38% |
Jarek Poplawski | 8 | 4.08% | 1 | 3.85% |
Stephen Hemminger | 8 | 4.08% | 1 | 3.85% |
Peter P. Waskiewicz Jr | 7 | 3.57% | 1 | 3.85% |
Herbert Xu | 5 | 2.55% | 3 | 11.54% |
Tom Herbert | 2 | 1.02% | 1 | 3.85% |
Thomas Graf | 2 | 1.02% | 1 | 3.85% |
Joe Perches | 1 | 0.51% | 1 | 3.85% |
Total | 196 | 100.00% | 26 | 100.00% |
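The return handling above distinguishes dev_xmit_complete() codes (the skb was sent or otherwise consumed, so the remaining queue length is reported) from NETDEV_TX_BUSY and anything else, which both end in a requeue, the latter with a ratelimited warning; a non-zero result is finally squashed to 0 when the tx queue is frozen or stopped so the caller backs off. The sketch below models only that classification, with local enum values standing in for the kernel's return codes; note that in this 4.11 code dev_requeue_skb() returns 0 and reschedules the qdisc, which is what the requeue branch mirrors.

#include <stdio.h>
#include <stdbool.h>

enum tx_rc { TX_OK, TX_BUSY, TX_WEIRD };        /* stand-ins for NETDEV_TX_* codes */

/* Returns what sch_direct_xmit() would hand back: 0 = stop for now,
 * >0 = more packets waiting, keep calling qdisc_restart(). */
static int handle_tx_status(enum tx_rc rc, int qlen, bool txq_stopped)
{
        int ret;

        if (rc == TX_OK) {
                ret = qlen;             /* driver consumed the packet: report remaining qlen */
        } else {
                if (rc != TX_BUSY)
                        fprintf(stderr, "unexpected driver code %d\n", rc);
                ret = 0;                /* requeued; dev_requeue_skb() reschedules the qdisc */
        }
        if (ret && txq_stopped)
                ret = 0;                /* throttled: the driver will wake the queue later */
        return ret;
}

int main(void)
{
        printf("%d\n", handle_tx_status(TX_OK, 3, false));      /* 3: keep going */
        printf("%d\n", handle_tx_status(TX_BUSY, 3, false));    /* 0: skb requeued */
        printf("%d\n", handle_tx_status(TX_OK, 3, true));       /* 0: queue stopped */
        return 0;
}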
/*
 * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
 * The running seqcount guarantees that only one CPU can process
 * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
 * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *      0  - queue is empty or throttled.
 *      >0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
        struct netdev_queue *txq;
        struct net_device *dev;
        spinlock_t *root_lock;
        struct sk_buff *skb;
        bool validate;

        /* Dequeue packet */
        skb = dequeue_skb(q, &validate, packets);
        if (unlikely(!skb))
                return 0;

        root_lock = qdisc_lock(q);
        dev = qdisc_dev(q);
        txq = skb_get_tx_queue(dev, skb);

        return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 86 | 85.15% | 1 | 25.00% |
Eric Dumazet | 8 | 7.92% | 1 | 25.00% |
Jesper Dangaard Brouer | 6 | 5.94% | 1 | 25.00% |
Daniel Borkmann | 1 | 0.99% | 1 | 25.00% |
Total | 101 | 100.00% | 4 | 100.00% |
void __qdisc_run(struct Qdisc *q)
{
        int quota = dev_tx_weight;
        int packets;

        while (qdisc_restart(q, &packets)) {
                /*
                 * Ordered by possible occurrence: Postpone processing if
                 * 1. we've exceeded packet quota
                 * 2. another process needs the CPU;
                 */
                quota -= packets;
                if (quota <= 0 || need_resched()) {
                        __netif_schedule(q);
                        break;
                }
        }

        qdisc_run_end(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 31 | 54.39% | 3 | 30.00% |
Jesper Dangaard Brouer | 10 | 17.54% | 1 | 10.00% |
David S. Miller | 6 | 10.53% | 2 | 20.00% |
Jamal Hadi Salim | 6 | 10.53% | 1 | 10.00% |
Krishna Kumar | 2 | 3.51% | 1 | 10.00% |
Matthias Tafelmeier | 1 | 1.75% | 1 | 10.00% |
Eric Dumazet | 1 | 1.75% | 1 | 10.00% |
Total | 57 | 100.00% | 10 | 100.00% |
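__qdisc_run() keeps calling qdisc_restart() until the queue drains, the dev_tx_weight quota is spent, or the CPU is needed elsewhere; in the last two cases it defers the remaining work via __netif_schedule(). A user-space sketch of that budget loop, with stand-in restart/reschedule helpers and an arbitrary quota value (the kernel takes its quota from the dev_tx_weight sysctl):

#include <stdio.h>
#include <stdbool.h>

static int backlog = 10;        /* packets sitting in the fake qdisc */

/* Stand-in for qdisc_restart(): sends a small burst, reports its size,
 * and returns whether the queue still has packets. */
static bool qdisc_restart_sim(int *packets)
{
        int burst = backlog < 3 ? backlog : 3;

        backlog -= burst;
        *packets = burst;
        return backlog > 0;
}

static bool need_resched_sim(void) { return false; }

static void qdisc_run_sim(int quota)
{
        int packets;

        while (qdisc_restart_sim(&packets)) {
                quota -= packets;
                if (quota <= 0 || need_resched_sim()) {
                        printf("quota spent, deferring via __netif_schedule()\n");
                        break;
                }
        }
        printf("done for now, backlog=%d\n", backlog);
}

int main(void)
{
        qdisc_run_sim(8);       /* illustrative quota */
        return 0;
}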
unsigned long dev_trans_start(struct net_device *dev)
{
        unsigned long val, res;
        unsigned int i;

        if (is_vlan_dev(dev))
                dev = vlan_dev_real_dev(dev);
        res = netdev_get_tx_queue(dev, 0)->trans_start;
        for (i = 1; i < dev->num_tx_queues; i++) {
                val = netdev_get_tx_queue(dev, i)->trans_start;
                if (val && time_after(val, res))
                        res = val;
        }

        return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 66 | 71.74% | 1 | 33.33% |
Nikolay Aleksandrov | 20 | 21.74% | 1 | 33.33% |
Florian Westphal | 6 | 6.52% | 1 | 33.33% |
Total | 92 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(dev_trans_start);
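dev_trans_start() reports the most recent trans_start across all tx queues, resolving a VLAN device to its real device first; time_after() is the wraparound-safe jiffies comparison. A small user-space sketch of that scan, using the usual signed-difference trick in place of time_after() (the per-queue timestamps below are made up):

#include <stdio.h>

/* Wraparound-safe "a is later than b", the same trick time_after() relies on. */
static int after(unsigned long a, unsigned long b)
{
        return (long)(a - b) > 0;
}

int main(void)
{
        /* Fake per-queue trans_start timestamps in jiffies; 0 means "never sent". */
        unsigned long trans_start[] = { 1000, 0, 1100, 1050 };
        unsigned int num_tx_queues = 4, i;
        unsigned long res = trans_start[0];

        for (i = 1; i < num_tx_queues; i++) {
                unsigned long val = trans_start[i];

                if (val && after(val, res))
                        res = val;
        }
        printf("most recent trans_start: %lu\n", res);  /* 1100 */
        return 0;
}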
static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        netif_tx_lock(dev);
        if (!qdisc_tx_is_noop(dev)) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        int some_queue_timedout = 0;
                        unsigned int i;
                        unsigned long trans_start;

                        for (i = 0; i < dev->num_tx_queues; i++) {
                                struct netdev_queue *txq;

                                txq = netdev_get_tx_queue(dev, i);
                                trans_start = txq->trans_start;
                                if (netif_xmit_stopped(txq) &&
                                    time_after(jiffies, (trans_start +
                                                         dev->watchdog_timeo))) {
                                        some_queue_timedout = 1;
                                        txq->trans_timeout++;
                                        break;
                                }
                        }

                        if (some_queue_timedout) {
                                WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
                                          dev->name, netdev_drivername(dev), i);
                                dev->netdev_ops->ndo_tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer,
                                       round_jiffies(jiffies +
                                                     dev->watchdog_timeo)))
                                dev_hold(dev);
                }
        }
        netif_tx_unlock(dev);

        dev_put(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 108 | 52.17% | 6 | 37.50% |
David S. Miller | 46 | 22.22% | 1 | 6.25% |
Eric Dumazet | 26 | 12.56% | 1 | 6.25% |
Arjan van de Ven | 11 | 5.31% | 3 | 18.75% |
Stephen Hemminger | 8 | 3.86% | 2 | 12.50% |
David Decotigny | 5 | 2.42% | 1 | 6.25% |
Herbert Xu | 2 | 0.97% | 1 | 6.25% |
Tom Herbert | 1 | 0.48% | 1 | 6.25% |
Total | 207 | 100.00% | 16 | 100.00% |
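The watchdog only fires when a queue is both stopped by the driver and has not had its trans_start refreshed for longer than watchdog_timeo; the scan breaks at the first such queue and its index is reported in the warning. A user-space sketch of that per-queue test (the queue states below are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

struct txq {
        bool stopped;                   /* netif_xmit_stopped() stand-in */
        unsigned long trans_start;      /* last tx completion, in jiffies */
};

static int time_is_after(unsigned long a, unsigned long b)
{
        return (long)(a - b) > 0;       /* wraparound-safe, like time_after() */
}

int main(void)
{
        unsigned long jiffies = 10000, watchdog_timeo = 500;
        struct txq queues[] = {
                { false, 9990 },        /* running: never times out */
                { true,  9800 },        /* stopped, but still within the timeout */
                { true,  9000 },        /* stopped and stale: trips the watchdog */
        };
        unsigned int i, n = sizeof(queues) / sizeof(queues[0]);
        int some_queue_timedout = 0;

        for (i = 0; i < n; i++) {
                if (queues[i].stopped &&
                    time_is_after(jiffies, queues[i].trans_start + watchdog_timeo)) {
                        some_queue_timedout = 1;
                        break;
                }
        }
        if (some_queue_timedout)
                printf("NETDEV WATCHDOG: queue %u timed out\n", i);
        return 0;
}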
void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->netdev_ops->ndo_tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer,
                               round_jiffies(jiffies + dev->watchdog_timeo)))
                        dev_hold(dev);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 55 | 90.16% | 7 | 77.78% |
Venkatesh Pallipadi | 3 | 4.92% | 1 | 11.11% |
Stephen Hemminger | 3 | 4.92% | 1 | 11.11% |
Total | 61 | 100.00% | 9 | 100.00% |
static void dev_watchdog_up(struct net_device *dev)
{
        __netdev_watchdog_up(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 16 | 100.00% | 5 | 100.00% |
Total | 16 | 100.00% | 5 | 100.00% |
static void dev_watchdog_down(struct net_device *dev)
{
        netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        netif_tx_unlock_bh(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 33 | 91.67% | 5 | 71.43% |
Herbert Xu | 2 | 5.56% | 1 | 14.29% |
Stephen Hemminger | 1 | 2.78% | 1 | 14.29% |
Total | 36 | 100.00% | 7 | 100.00% |
/**
* netif_carrier_on - set carrier
* @dev: network device
*
 * Device has detected acquisition of carrier.
*/
void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
                if (netif_running(dev))
                        __netdev_watchdog_up(dev);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 39 | 67.24% | 1 | 25.00% |
David S. Miller | 9 | 15.52% | 1 | 25.00% |
David Decotigny | 8 | 13.79% | 1 | 25.00% |
Jeff Garzik | 2 | 3.45% | 1 | 25.00% |
Total | 58 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(netif_carrier_on);
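Because netif_carrier_on() and netif_carrier_off() are gated on test_and_clear_bit()/test_and_set_bit(), only genuine transitions bump carrier_changes and fire a linkwatch event; repeated calls in the same state are no-ops. A user-space sketch of that edge-triggered behaviour, with a plain flag in place of the atomic bit operations (illustrative only, not the kernel primitives):

#include <stdio.h>
#include <stdbool.h>

static bool no_carrier = true;  /* mirrors __LINK_STATE_NOCARRIER being set */
static int carrier_changes;

static void carrier_on(void)
{
        if (no_carrier) {               /* test_and_clear_bit() saw the bit set */
                no_carrier = false;
                carrier_changes++;
                printf("link up event\n");
        }
}

static void carrier_off(void)
{
        if (!no_carrier) {              /* test_and_set_bit() saw the bit clear */
                no_carrier = true;
                carrier_changes++;
                printf("link down event\n");
        }
}

int main(void)
{
        carrier_on();
        carrier_on();   /* duplicate call: no event, no counter bump */
        carrier_off();
        printf("carrier_changes=%d\n", carrier_changes);        /* 2 */
        return 0;
}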
/**
* netif_carrier_off - clear carrier
* @dev: network device
*
* Device has detected loss of carrier.
*/
void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
                if (dev->reg_state == NETREG_UNINITIALIZED)
                        return;
                atomic_inc(&dev->carrier_changes);
                linkwatch_fire_event(dev);
        }
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 28 | 59.57% | 1 | 33.33% |
David S. Miller | 11 | 23.40% | 1 | 33.33% |
David Decotigny | 8 | 17.02% | 1 | 33.33% |
Total | 47 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(netif_carrier_off);
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
*/
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                        struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        return NET_XMIT_CN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 23 | 71.88% | 2 | 66.67% |
Eric Dumazet | 9 | 28.12% | 1 | 33.33% |
Total | 32 | 100.00% | 3 | 100.00% |
static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
struct Qdisc_ops noop_qdisc_ops __read_mostly = {
        .id             = "noop",
        .priv_size      = 0,
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .peek           = noop_dequeue,
        .owner          = THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
        .qdisc          = &noop_qdisc,
        .qdisc_sleeping = &noop_qdisc,
};

struct Qdisc noop_qdisc = {
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .flags          = TCQ_F_BUILTIN,
        .ops            = &noop_qdisc_ops,
        .q.lock         = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
        .dev_queue      = &noop_netdev_queue,
        .running        = SEQCNT_ZERO(noop_qdisc.running),
        .busylock       = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);
static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        /* register_qdisc() assigns a default of noop_enqueue if unset,
         * but __dev_queue_xmit() treats noqueue only as such
         * if this is NULL - so clear it here. */
        qdisc->enqueue = NULL;
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Phil Sutter | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
        .id             = "noqueue",
        .priv_size      = 0,
        .init           = noqueue_init,
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .peek           = noop_dequeue,
        .owner          = THIS_MODULE,
};

static const u8 prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
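prio2band[] folds the 16 TC_PRIO_* values into the three pfifo_fast bands, with band 0 served first; the enqueue path (further down) masks skb->priority with TC_PRIO_MAX before the lookup, so arbitrary priority values still index safely. A quick user-space check of that mapping, duplicating the table above (TC_PRIO_MAX is 15):

#include <stdio.h>

#define TC_PRIO_MAX 15

static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
        1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

int main(void)
{
        /* skb->priority values; the last one exceeds TC_PRIO_MAX on purpose. */
        unsigned int prios[] = { 0, 2, 6, 7, 22 };
        unsigned int i;

        for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++)
                printf("priority %2u -> band %u\n", prios[i],
                       (unsigned int)prio2band[prios[i] & TC_PRIO_MAX]);
        /* prints bands 1, 2, 0, 0 and 0 (22 & 15 == 6) */
        return 0;
}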
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
*/
#define PFIFO_FAST_BANDS 3
/*
 * Private data for a pfifo_fast scheduler containing:
 *      - queues for the three bands
 *      - bitmap indicating which of the bands contain skbs
 */
struct pfifo_fast_priv {
        u32 bitmap;
        struct qdisc_skb_head q[PFIFO_FAST_BANDS];
};
/*
* Convert a bitmap to the first band number where an skb is queued, where:
* bitmap=0 means there are no skbs on any band.
* bitmap=1 means there is an skb on band 0.
* bitmap=7 means there are skbs on all 3 bands, etc.
*/
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
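bitmap2band[] is simply a 3-bit lookup table for "index of the lowest set bit", so band 0 wins whenever it has packets and -1 means every band is empty; it is equivalent to ffs(bitmap) - 1. A quick user-space check of that equivalence (ffs() is the POSIX helper from <strings.h>):

#include <stdio.h>
#include <strings.h>    /* ffs() */

static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

int main(void)
{
        int bitmap;

        for (bitmap = 0; bitmap < 8; bitmap++)
                printf("bitmap=%d%d%d -> band %d (ffs-1 = %d)\n",
                       (bitmap >> 2) & 1, (bitmap >> 1) & 1, bitmap & 1,
                       bitmap2band[bitmap], ffs(bitmap) - 1);
        return 0;
}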
static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
                                               int band)
{
        return priv->q + band;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 9 | 37.50% | 1 | 20.00% |
David S. Miller | 7 | 29.17% | 2 | 40.00% |
Thomas Graf | 7 | 29.17% | 1 | 20.00% |
Florian Westphal | 1 | 4.17% | 1 | 20.00% |
Total | 24 | 100.00% | 5 | 100.00% |
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                              struct sk_buff **to_free)
{
        if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
                int band = prio2band[skb->priority & TC_PRIO_MAX];
                struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
                struct qdisc_skb_head *list = band2list(priv, band);

                priv->bitmap |= (1 << band);
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc, to_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 49 | 44.55% | 2 | 20.00% |
David S. Miller | 30 | 27.27% | 1 | 10.00% |
Thomas Graf | 12 | 10.91% | 2 | 20.00% |
Eric Dumazet | 8 | 7.27% | 1 | 10.00% |
Linus Torvalds (pre-git) | 6 | 5.45% | 1 | 10.00% |
Florian Westphal | 3 | 2.73% | 2 | 20.00% |
Jamal Hadi Salim | 2 | 1.82% | 1 | 10.00% |
Total | 110 | 100.00% | 10 | 100.00% |
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (likely(band >= 0)) {
                struct qdisc_skb_head *qh = band2list(priv, band);
                struct sk_buff *skb = __qdisc_dequeue_head(qh);

                if (likely(skb != NULL)) {
                        qdisc_qstats_backlog_dec(qdisc, skb);
                        qdisc_bstats_update(qdisc, skb);
                }

                qdisc->q.qlen--;
                if (qh->qlen == 0)
                        priv->bitmap &= ~(1 << band);

                return skb;
        }

        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 51 | 41.46% | 1 | 16.67% |
Florian Westphal | 33 | 26.83% | 2 | 33.33% |
Linus Torvalds (pre-git) | 24 | 19.51% | 1 | 16.67% |
David S. Miller | 13 | 10.57% | 1 | 16.67% |
Thomas Graf | 2 | 1.63% | 1 | 16.67% |
Total | 123 | 100.00% | 6 | 100.00% |
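Taken together, pfifo_fast_enqueue() and pfifo_fast_dequeue() keep one FIFO per band plus a bitmap of non-empty bands, so dequeue is a table lookup followed by a head removal, and the band's bit is cleared when it drains. Below is a condensed user-space model of that data structure using fixed-size ring buffers with a made-up capacity; it is not the kernel qdisc_skb_head API, just the same bitmap bookkeeping.

#include <stdio.h>

#define BANDS 3
#define QLEN  16

struct band_fifo {
        int buf[QLEN];
        int head, tail, len;
};

static struct band_fifo q[BANDS];
static unsigned int bitmap;
static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

static int enqueue(int band, int pkt)
{
        struct band_fifo *f = &q[band];

        if (f->len == QLEN)
                return -1;              /* the kernel would qdisc_drop() here */
        f->buf[f->tail] = pkt;
        f->tail = (f->tail + 1) % QLEN;
        f->len++;
        bitmap |= 1u << band;           /* mark the band non-empty */
        return 0;
}

static int dequeue(int *pkt)
{
        int band = bitmap2band[bitmap];
        struct band_fifo *f;

        if (band < 0)
                return -1;              /* all bands empty */
        f = &q[band];
        *pkt = f->buf[f->head];
        f->head = (f->head + 1) % QLEN;
        if (--f->len == 0)
                bitmap &= ~(1u << band);        /* band drained */
        return band;
}

int main(void)
{
        int pkt, band;

        enqueue(1, 101);        /* best-effort traffic */
        enqueue(2, 201);        /* bulk */
        enqueue(0, 1);          /* highest-priority band */

        while ((band = dequeue(&pkt)) >= 0)
                printf("band %d -> packet %d\n", band, pkt);
        /* drains band 0 first, then 1, then 2 */
        return 0;
}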
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
        int band = bitmap2band[priv->bitmap];

        if (band >= 0) {
                struct qdisc_skb_head *qh = band2list(priv, band);

                return qh->head;
        }

        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jarek Poplawski | 30 | 49.18% | 1 | 33.33% |
Krishna Kumar | 26 | 42.62% | 1 | 33.33% |
Florian Westphal | 5 | 8.20% | 1 | 33.33% |
Total | 61 | 100.00% | 3 | 100.00% |
static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(band2list(priv, prio));

        priv->bitmap = 0;
        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 35 | 50.72% | 1 | 16.67% |
Linus Torvalds (pre-git) | 14 | 20.29% | 3 | 50.00% |
Krishna Kumar | 13 | 18.84% | 1 | 16.67% |
Thomas Graf | 7 | 10.14% | 1 | 16.67% |
Total | 69 | 100.00% | 6 | 100.00% |
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                goto nla_put_failure;
        return skb->len;

nla_put_failure:
        return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 72 | 100.00% | 2 | 100.00% |
Total | 72 | 100.00% | 2 | 100.00% |
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
        int prio;
        struct pfifo_fast_priv *priv = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                qdisc_skb_head_init(band2list(priv, prio));

        /* Can by-pass the queue discipline */
        qdisc->flags |= TCQ_F_CAN_BYPASS;
        return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 46 | 74.19% | 1 | 20.00% |
Krishna Kumar | 7 | 11.29% | 1 | 20.00% |
Eric Dumazet | 7 | 11.29% | 1 | 20.00% |
Linus Torvalds (pre-git) | 1 | 1.61% | 1 | 20.00% |
Florian Westphal | 1 | 1.61% | 1 | 20.00% |
Total | 62 | 100.00% | 5 | 100.00% |
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
        .id             = "pfifo_fast",
        .priv_size      = sizeof(struct pfifo_fast_priv),
        .enqueue        = pfifo_fast_enqueue,
        .dequeue        = pfifo_fast_dequeue,
        .peek           = pfifo_fast_peek,
        .init           = pfifo_fast_init,
        .reset          = pfifo_fast_reset,
        .dump           = pfifo_fast_dump,
        .owner          = THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
        int err = -ENOBUFS;
        struct net_device *dev = dev_queue->dev;

        p = kzalloc_node(size, GFP_KERNEL,
                         netdev_queue_numa_node_read(dev_queue));
        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        /* if we got non-aligned memory, ask for more and do the alignment ourselves */
        if (sch != p) {
                kfree(p);
                p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
                                 netdev_queue_numa_node_read(dev_queue));
                if (!p)
                        goto errout;
                sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
                sch->padded = (char *) sch - (char *) p;
        }
        qdisc_skb_head_init(&sch->q);
        spin_lock_init(&sch->q.lock);

        spin_lock_init(&sch->busylock);
        lockdep_set_class(&sch->busylock,
                          dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);

        seqcount_init(&sch->running);
        lockdep_set_class(&sch->running,
                          dev->qdisc_running_key ?: &qdisc_running_key);

        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev_queue = dev_queue;
        dev_hold(dev);
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 124 | 42.91% | 5 | 35.71% |
Linus Torvalds (pre-git) | 71 | 24.57% | 3 | 21.43% |
Stephen Hemminger | 40 | 13.84% | 2 | 14.29% |
Thomas Graf | 30 | 10.38% | 1 | 7.14% |
Florian Westphal | 11 | 3.81% | 1 | 7.14% |
David S. Miller | 9 | 3.11% | 1 | 7.14% |
Patrick McHardy | 4 | 1.38% | 1 | 7.14% |
Total | 289 | 100.00% | 14 | 100.00% |
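qdisc_alloc() wants the Qdisc aligned to QDISC_ALIGNTO; if kzalloc_node() happens to return unaligned memory it re-allocates with QDISC_ALIGNTO - 1 extra bytes, rounds the pointer up, and records the offset in sch->padded so qdisc_rcu_free() below can kfree() the original block. The user-space sketch below shows the align-and-remember-the-padding trick with malloc; the 32-byte alignment and struct are illustrative, and unlike the kernel it always over-allocates instead of retrying, which is the simpler variant of the same idea.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

#define ALIGNTO 32UL            /* illustrative; the kernel uses QDISC_ALIGNTO */
#define ALIGN_UP(x) (((x) + (ALIGNTO - 1)) & ~(uintptr_t)(ALIGNTO - 1))

struct obj {
        unsigned int padded;    /* bytes between the raw block and this struct */
        char payload[40];
};

static struct obj *obj_alloc(void)
{
        size_t size = ALIGN_UP(sizeof(struct obj));
        /* Ask for ALIGNTO - 1 extra bytes so an aligned start always fits. */
        char *p = calloc(1, size + ALIGNTO - 1);
        struct obj *o;

        if (!p)
                return NULL;
        o = (struct obj *)ALIGN_UP((uintptr_t)p);
        o->padded = (unsigned int)((char *)o - p);
        return o;
}

static void obj_free(struct obj *o)
{
        /* Walk back over the padding to recover the pointer calloc returned. */
        free((char *)o - o->padded);
}

int main(void)
{
        struct obj *o = obj_alloc();

        if (!o)
                return 1;
        printf("obj at %p, padded by %u bytes\n", (void *)o, o->padded);
        strcpy(o->payload, "aligned");
        obj_free(o);
        return 0;
}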
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops,
                                unsigned int parentid)
{
        struct Qdisc *sch;

        if (!try_module_get(ops->owner))
                return NULL;

        sch = qdisc_alloc(dev_queue, ops);
        if (IS_ERR(sch)) {
                module_put(ops->owner);
                return NULL;
        }
        sch->parent = parentid;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
        return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 35 | 33.98% | 2 | 20.00% |
Linus Torvalds (pre-git) | 25 | 24.27% | 3 | 30.00% |
Eric Dumazet | 15 | 14.56% | 1 | 10.00% |
Stephen Hemminger | 11 | 10.68% | 2 | 20.00% |
Patrick McHardy | 10 | 9.71% | 1 | 10.00% |
David S. Miller | 7 | 6.80% | 1 | 10.00% |
Total | 103 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(qdisc_create_dflt);
/* Under qdisc_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
        const struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);

        kfree_skb(qdisc->skb_bad_txq);
        qdisc->skb_bad_txq = NULL;

        if (qdisc->gso_skb) {
                kfree_skb_list(qdisc->gso_skb);
                qdisc->gso_skb = NULL;
        }
        qdisc->q.qlen = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 31 | 41.33% | 1 | 16.67% |
Krishna Kumar | 16 | 21.33% | 1 | 16.67% |
Eric Dumazet | 15 | 20.00% | 2 | 33.33% |
Jarek Poplawski | 12 | 16.00% | 1 | 16.67% |
Jesper Dangaard Brouer | 1 | 1.33% | 1 | 16.67% |
Total | 75 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(qdisc_reset);
static void qdisc_rcu_free(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);

        if (qdisc_is_percpu_stats(qdisc)) {
                free_percpu(qdisc->cpu_bstats);
                free_percpu(qdisc->cpu_qstats);
        }

        kfree((char *) qdisc - qdisc->padded);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
|