Release 4.11 net/sched/sch_generic.c
/*
* net/sched/sch_generic.c Generic packet scheduler routines.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
* Jamal Hadi Salim, <hadi@cyberus.ca> 990601
* - Ingress support
*/
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
/* Qdisc to use by default */
const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
EXPORT_SYMBOL(default_qdisc_ops);
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
* qdisc_lock(qdisc) spinlock.
*
* The idea is the following:
* - enqueue, dequeue are serialized via qdisc root lock
* - ingress filtering is also serialized via qdisc root lock
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
q->gso_skb = skb;
q->qstats.requeues++;
qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++; /* it's still part of the queue */
__netif_schedule(q);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 19 | 35.85% | 1 | 11.11% |
Jarek Poplawski | 13 | 24.53% | 2 | 22.22% |
Krishna Kumar | 9 | 16.98% | 2 | 22.22% |
Américo Wang | 7 | 13.21% | 1 | 11.11% |
David S. Miller | 2 | 3.77% | 1 | 11.11% |
Linus Torvalds (pre-git) | 2 | 3.77% | 1 | 11.11% |
Andi Kleen | 1 | 1.89% | 1 | 11.11% |
Total | 53 | 100.00% | 9 | 100.00% |
static void try_bulk_dequeue_skb(struct Qdisc *q,
struct sk_buff *skb,
const struct netdev_queue *txq,
int *packets)
{
int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
while (bytelimit > 0) {
struct sk_buff *nskb = q->dequeue(q);
if (!nskb)
break;
bytelimit -= nskb->len; /* covers GSO len */
skb->next = nskb;
skb = nskb;
(*packets)++; /* GSO counts as one pkt */
}
skb->next = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 62 | 65.96% | 3 | 75.00% |
Eric Dumazet | 32 | 34.04% | 1 | 25.00% |
Total | 94 | 100.00% | 4 | 100.00% |
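The bulk dequeue above keeps pulling packets while a byte budget remains and links them through skb->next, so a single driver call can push a whole burst. Below is a minimal userspace sketch of the same budget-and-chain pattern; the pkt/pkt_queue types, the pq_dequeue() helper and the 2000-byte budget are invented for the example and only stand in for sk_buff, the qdisc dequeue hook and qdisc_avail_bulklimit().

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for sk_buff and the qdisc dequeue hook. */
struct pkt {
    int len;
    struct pkt *next;
};

struct pkt_queue {
    struct pkt *head;
};

static struct pkt *pq_dequeue(struct pkt_queue *q)
{
    struct pkt *p = q->head;
    if (p)
        q->head = p->next;
    return p;
}

/* Same shape as try_bulk_dequeue_skb(): keep dequeuing while a byte
 * budget remains, chaining packets through ->next. */
static void bulk_dequeue(struct pkt_queue *q, struct pkt *first,
                         int bytelimit, int *packets)
{
    struct pkt *tail = first;

    bytelimit -= first->len;           /* first packet already counted */
    while (bytelimit > 0) {
        struct pkt *n = pq_dequeue(q);
        if (!n)
            break;
        bytelimit -= n->len;
        tail->next = n;
        tail = n;
        (*packets)++;
    }
    tail->next = NULL;
}

int main(void)
{
    struct pkt pkts[4] = {
        { .len = 600 }, { .len = 700 }, { .len = 800 }, { .len = 900 }
    };
    struct pkt_queue q = { .head = &pkts[1] };
    int packets = 1;

    pkts[1].next = &pkts[2];
    pkts[2].next = &pkts[3];
    pkts[3].next = NULL;

    /* Assume a 2000-byte budget, roughly what qdisc_avail_bulklimit()
     * would hand back for one txq. */
    bulk_dequeue(&q, &pkts[0], 2000, &packets);

    for (struct pkt *p = &pkts[0]; p; p = p->next)
        printf("sent %d bytes\n", p->len);
    printf("packets in burst: %d\n", packets);
    return 0;
}

As in the kernel code, the packet that exhausts the budget is still added to the burst; the limit only stops further dequeues.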
/* This variant of try_bulk_dequeue_skb() makes sure
* all skbs in the chain are for the same txq
*/
static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
struct sk_buff *skb,
int *packets)
{
int mapping = skb_get_queue_mapping(skb);
struct sk_buff *nskb;
int cnt = 0;
do {
nskb = q->dequeue(q);
if (!nskb)
break;
if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
q->skb_bad_txq = nskb;
qdisc_qstats_backlog_inc(q, nskb);
q->q.qlen++;
break;
}
skb->next = nskb;
skb = nskb;
} while (++cnt < 8);
(*packets) += cnt;
skb->next = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 122 | 100.00% | 1 | 100.00% |
Total | 122 | 100.00% | 1 | 100.00% |
/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
* A requeued skb (via q->gso_skb) can also be a SKB list.
*/
static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
int *packets)
{
struct sk_buff *skb = q->gso_skb;
const struct netdev_queue *txq = q->dev_queue;
*packets = 1;
if (unlikely(skb)) {
/* skbs stashed in gso_skb were already validated */
*validate = false;
/* check the reason for requeuing without taking the tx lock first */
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
} else
skb = NULL;
return skb;
}
*validate = true;
skb = q->skb_bad_txq;
if (unlikely(skb)) {
/* check the reason for requeuing without taking the tx lock first */
txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
q->skb_bad_txq = NULL;
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
goto bulk;
}
return NULL;
}
if (!(q->flags & TCQ_F_ONETXQUEUE) ||
!netif_xmit_frozen_or_stopped(txq))
skb = q->dequeue(q);
if (skb) {
bulk:
if (qdisc_may_bulk(q))
try_bulk_dequeue_skb(q, skb, txq, packets);
else
try_bulk_dequeue_skb_slow(q, skb, packets);
}
return skb;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 128 | 52.67% | 3 | 20.00% |
Jarek Poplawski | 37 | 15.23% | 2 | 13.33% |
Jamal Hadi Salim | 29 | 11.93% | 1 | 6.67% |
Jesper Dangaard Brouer | 23 | 9.47% | 2 | 13.33% |
Krishna Kumar | 9 | 3.70% | 1 | 6.67% |
David S. Miller | 8 | 3.29% | 3 | 20.00% |
Américo Wang | 7 | 2.88% | 1 | 6.67% |
Daniel Borkmann | 1 | 0.41% | 1 | 6.67% |
Tom Herbert | 1 | 0.41% | 1 | 6.67% |
Total | 243 | 100.00% | 15 | 100.00% |
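dequeue_skb() consults up to three sources in a fixed order: a previously requeued packet in q->gso_skb, a packet parked in q->skb_bad_txq because it targeted the wrong (stopped) queue, and only then the qdisc's own dequeue hook. A hedged sketch of just that ordering, with the txq-state checks and statistics left out and every name (mini_qdisc, ring_dequeue, ...) invented for the illustration:

#include <stdio.h>
#include <stddef.h>

struct pkt { const char *name; };

/* Hypothetical mini-qdisc: two stash slots plus a plain array queue,
 * mirroring the gso_skb / skb_bad_txq / q->dequeue() priority. */
struct mini_qdisc {
    struct pkt *requeued;    /* like q->gso_skb     */
    struct pkt *bad_txq;     /* like q->skb_bad_txq */
    struct pkt *ring[4];
    int head, tail;
};

static struct pkt *ring_dequeue(struct mini_qdisc *q)
{
    if (q->head == q->tail)
        return NULL;
    return q->ring[q->head++];
}

static struct pkt *mini_dequeue(struct mini_qdisc *q)
{
    struct pkt *p;

    /* 1. a previously requeued packet always goes first */
    p = q->requeued;
    if (p) {
        q->requeued = NULL;
        return p;
    }
    /* 2. then a packet parked because it hit the wrong txq */
    p = q->bad_txq;
    if (p) {
        q->bad_txq = NULL;
        return p;
    }
    /* 3. finally the normal queue */
    return ring_dequeue(q);
}

int main(void)
{
    struct pkt a = { "requeued" }, b = { "bad-txq" }, c = { "queued" };
    struct mini_qdisc q = { .requeued = &a, .bad_txq = &b };
    struct pkt *p;

    q.ring[q.tail++] = &c;

    while ((p = mini_dequeue(&q)) != NULL)
        printf("dequeued: %s\n", p->name);
    return 0;
}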
/*
* Transmit possibly several skbs, and handle the return status as
* required. Owning the running seqcount guarantees that
* only one CPU can execute this function.
*
* Returns to the caller:
* 0 - queue is empty or throttled.
* >0 - queue is not empty.
*/
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev, struct netdev_queue *txq,
spinlock_t *root_lock, bool validate)
{
int ret = NETDEV_TX_BUSY;
/* And release qdisc */
spin_unlock(root_lock);
/* Note that we validate skb (GSO, checksum, ...) outside of locks */
if (validate)
skb = validate_xmit_skb_list(skb, dev);
if (likely(skb)) {
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
skb = dev_hard_start_xmit(skb, dev, txq, &ret);
HARD_TX_UNLOCK(dev, txq);
} else {
spin_lock(root_lock);
return qdisc_qlen(q);
}
spin_lock(root_lock);
if (dev_xmit_complete(ret)) {
/* Driver sent out skb successfully or skb was consumed */
ret = qdisc_qlen(q);
} else {
/* Driver returned NETDEV_TX_BUSY - requeue skb */
if (unlikely(ret != NETDEV_TX_BUSY))
net_warn_ratelimited("BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
ret = dev_requeue_skb(skb, q);
}
if (ret && netif_xmit_frozen_or_stopped(txq))
ret = 0;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jamal Hadi Salim | 45 | 22.96% | 2 | 7.69% |
David S. Miller | 31 | 15.82% | 6 | 23.08% |
Krishna Kumar | 31 | 15.82% | 2 | 7.69% |
Eric Dumazet | 25 | 12.76% | 2 | 7.69% |
Lars Persson | 16 | 8.16% | 1 | 3.85% |
Linus Torvalds (pre-git) | 15 | 7.65% | 4 | 15.38% |
Jarek Poplawski | 8 | 4.08% | 1 | 3.85% |
Stephen Hemminger | 8 | 4.08% | 1 | 3.85% |
Peter P. Waskiewicz Jr | 7 | 3.57% | 1 | 3.85% |
Herbert Xu | 5 | 2.55% | 3 | 11.54% |
Tom Herbert | 2 | 1.02% | 1 | 3.85% |
Thomas Graf | 2 | 1.02% | 1 | 3.85% |
Joe Perches | 1 | 0.51% | 1 | 3.85% |
Total | 196 | 100.00% | 26 | 100.00% |
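sch_direct_xmit() is careful never to hold the qdisc root lock and the driver TX lock at the same time: it drops root_lock, transmits under HARD_TX_LOCK, then re-takes root_lock to update queue state. A compressed userspace sketch of that hand-over-hand locking, using pthread mutexes in place of the kernel spinlocks; the function and variable names are illustrative and the requeue/NETDEV_TX_BUSY path is omitted.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER; /* qdisc lock  */
static pthread_mutex_t tx_lock   = PTHREAD_MUTEX_INITIALIZER; /* driver lock */
static int qlen = 3;

/* Stand-in for dev_hard_start_xmit(): pretend the driver accepted it. */
static int driver_xmit(int pkt)
{
    printf("xmit pkt %d\n", pkt);
    return 0; /* like NETDEV_TX_OK */
}

/* Mirrors the locking shape of sch_direct_xmit(): the caller enters with
 * root_lock held, and we drop it around the driver call. */
static int direct_xmit(int pkt)
{
    int ret;

    pthread_mutex_unlock(&root_lock);      /* "And release qdisc"       */

    pthread_mutex_lock(&tx_lock);          /* HARD_TX_LOCK              */
    ret = driver_xmit(pkt);
    pthread_mutex_unlock(&tx_lock);        /* HARD_TX_UNLOCK            */

    pthread_mutex_lock(&root_lock);        /* back under the qdisc lock */
    if (ret == 0)
        qlen--;                            /* driver consumed the skb   */
    return qlen;                           /* >0: queue not empty yet   */
}

int main(void)
{
    pthread_mutex_lock(&root_lock);
    while (direct_xmit(qlen) > 0)
        ;
    pthread_mutex_unlock(&root_lock);
    printf("queue drained\n");
    return 0;
}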
/*
* NOTE: Called under qdisc_lock(q) with locally disabled BH.
*
* running seqcount guarantees only one CPU can process
* this qdisc at a time. qdisc_lock(q) serializes queue accesses for
* this queue.
*
* netif_tx_lock serializes accesses to device driver.
*
* qdisc_lock(q) and netif_tx_lock are mutually exclusive:
* if one is grabbed, the other must be free.
*
* Note that this procedure can be called by a watchdog timer
*
* Returns to the caller:
* 0 - queue is empty or throttled.
* >0 - queue is not empty.
*
*/
static inline int qdisc_restart(struct Qdisc *q, int *packets)
{
struct netdev_queue *txq;
struct net_device *dev;
spinlock_t *root_lock;
struct sk_buff *skb;
bool validate;
/* Dequeue packet */
skb = dequeue_skb(q, &validate, packets);
if (unlikely(!skb))
return 0;
root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
txq = skb_get_tx_queue(dev, skb);
return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 86 | 85.15% | 1 | 25.00% |
Eric Dumazet | 8 | 7.92% | 1 | 25.00% |
Jesper Dangaard Brouer | 6 | 5.94% | 1 | 25.00% |
Daniel Borkmann | 1 | 0.99% | 1 | 25.00% |
Total | 101 | 100.00% | 4 | 100.00% |
void __qdisc_run(struct Qdisc *q)
{
int quota = dev_tx_weight;
int packets;
while (qdisc_restart(q, &packets)) {
/*
* Ordered by possible occurrence: Postpone processing if
* 1. we've exceeded packet quota
* 2. another process needs the CPU;
*/
quota -= packets;
if (quota <= 0 || need_resched()) {
__netif_schedule(q);
break;
}
}
qdisc_run_end(q);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 31 | 54.39% | 3 | 30.00% |
Jesper Dangaard Brouer | 10 | 17.54% | 1 | 10.00% |
David S. Miller | 6 | 10.53% | 2 | 20.00% |
Jamal Hadi Salim | 6 | 10.53% | 1 | 10.00% |
Krishna Kumar | 2 | 3.51% | 1 | 10.00% |
Matthias Tafelmeier | 1 | 1.75% | 1 | 10.00% |
Eric Dumazet | 1 | 1.75% | 1 | 10.00% |
Total | 57 | 100.00% | 10 | 100.00% |
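__qdisc_run() drains the queue in a loop but yields once a packet quota (dev_tx_weight) is spent or another task needs the CPU. A toy version of the same budget loop, with need_resched() dropped and the softirq reschedule reduced to a print; the backlog size and burst length are arbitrary.

#include <stdio.h>
#include <stdbool.h>

static int backlog = 200;          /* packets waiting in the toy qdisc */

/* Stand-in for qdisc_restart(): "send" up to an 8-packet burst and
 * report how many packets that step consumed. */
static bool toy_restart(int *packets)
{
    if (backlog == 0)
        return false;
    *packets = backlog < 8 ? backlog : 8;
    backlog -= *packets;
    return true;
}

static void toy_qdisc_run(void)
{
    int quota = 64;                /* like dev_tx_weight */
    int packets;

    while (toy_restart(&packets)) {
        quota -= packets;
        if (quota <= 0) {
            /* real code: __netif_schedule(q) defers the rest to softirq */
            printf("quota spent, %d packets still queued\n", backlog);
            return;
        }
    }
    printf("queue empty\n");
}

int main(void)
{
    while (backlog > 0)
        toy_qdisc_run();           /* each call is one "softirq pass" */
    return 0;
}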
unsigned long dev_trans_start(struct net_device *dev)
{
unsigned long val, res;
unsigned int i;
if (is_vlan_dev(dev))
dev = vlan_dev_real_dev(dev);
res = netdev_get_tx_queue(dev, 0)->trans_start;
for (i = 1; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
if (val && time_after(val, res))
res = val;
}
return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 66 | 71.74% | 1 | 33.33% |
Nikolay Aleksandrov | 20 | 21.74% | 1 | 33.33% |
Florian Westphal | 6 | 6.52% | 1 | 33.33% |
Total | 92 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(dev_trans_start);
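dev_trans_start() and the watchdog below compare jiffies values with time_after(), which stays correct across counter wraparound by testing the sign of the difference rather than using a plain >. A standalone illustration; TIME_AFTER is a local re-spelling of the kernel macro for the example.

#include <stdio.h>

typedef unsigned long ulong;

/* Wraparound-safe "a is later than b", same trick as the kernel's
 * time_after(): cast the difference to a signed type and check its sign. */
#define TIME_AFTER(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
    ulong before_wrap = (ulong)-10;  /* 10 ticks before the counter wraps */
    ulong after_wrap  = 5;           /* 5 ticks after the wrap            */

    /* Naive comparison gets this backwards... */
    printf("naive   : after_wrap > before_wrap -> %d\n",
           after_wrap > before_wrap);
    /* ...the signed-difference form does not. */
    printf("wrapsafe: TIME_AFTER(after_wrap, before_wrap) -> %d\n",
           TIME_AFTER(after_wrap, before_wrap));
    return 0;
}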
static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
netif_tx_lock(dev);
if (!qdisc_tx_is_noop(dev)) {
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
int some_queue_timedout = 0;
unsigned int i;
unsigned long trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
trans_start = txq->trans_start;
if (netif_xmit_stopped(txq) &&
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
some_queue_timedout = 1;
txq->trans_timeout++;
break;
}
}
if (some_queue_timedout) {
WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
dev->name, netdev_drivername(dev), i);
dev->netdev_ops->ndo_tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies +
dev->watchdog_timeo)))
dev_hold(dev);
}
}
netif_tx_unlock(dev);
dev_put(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 108 | 52.17% | 6 | 37.50% |
David S. Miller | 46 | 22.22% | 1 | 6.25% |
Eric Dumazet | 26 | 12.56% | 1 | 6.25% |
Arjan van de Ven | 11 | 5.31% | 3 | 18.75% |
Stephen Hemminger | 8 | 3.86% | 2 | 12.50% |
David Decotigny | 5 | 2.42% | 1 | 6.25% |
Herbert Xu | 2 | 0.97% | 1 | 6.25% |
Tom Herbert | 1 | 0.48% | 1 | 6.25% |
Total | 207 | 100.00% | 16 | 100.00% |
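The watchdog flags a queue only if it is both stopped and has gone watchdog_timeo ticks without a transmit. A self-contained sketch of that per-queue scan; the toy_txq struct, tick values and 5-second timeout are illustrative, and the wraparound-safe comparison is the one shown after dev_trans_start() above.

#include <stdio.h>
#include <stdbool.h>

#define TIME_AFTER(a, b) ((long)((b) - (a)) < 0)
#define HZ 100
#define WATCHDOG_TIMEO (5 * HZ)

struct toy_txq {
    bool stopped;                  /* like netif_xmit_stopped()    */
    unsigned long trans_start;     /* last transmit start, jiffies */
};

/* Return the index of the first timed-out queue, or -1. */
static int find_timed_out_queue(const struct toy_txq *txq, int n,
                                unsigned long jiffies)
{
    for (int i = 0; i < n; i++) {
        if (txq[i].stopped &&
            TIME_AFTER(jiffies, txq[i].trans_start + WATCHDOG_TIMEO))
            return i;
    }
    return -1;
}

int main(void)
{
    unsigned long jiffies = 10000;
    struct toy_txq txq[3] = {
        { .stopped = false, .trans_start = 9000 }, /* running: never times out */
        { .stopped = true,  .trans_start = 9700 }, /* stopped, but recent      */
        { .stopped = true,  .trans_start = 9400 }, /* stopped for 600 ticks    */
    };
    int i = find_timed_out_queue(txq, 3, jiffies);

    if (i >= 0)
        printf("NETDEV WATCHDOG: queue %d timed out\n", i);
    else
        printf("all queues healthy\n");
    return 0;
}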
void __netdev_watchdog_up(struct net_device *dev)
{
if (dev->netdev_ops->ndo_tx_timeout) {
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = 5*HZ;
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies + dev->watchdog_timeo)))
dev_hold(dev);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 55 | 90.16% | 7 | 77.78% |
Venkatesh Pallipadi | 3 | 4.92% | 1 | 11.11% |
Stephen Hemminger | 3 | 4.92% | 1 | 11.11% |
Total | 61 | 100.00% | 9 | 100.00% |
static void dev_watchdog_up(struct net_device *dev)
{
__netdev_watchdog_up(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 16 | 100.00% | 5 | 100.00% |
Total | 16 | 100.00% | 5 | 100.00% |
static void dev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
dev_put(dev);
netif_tx_unlock_bh(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 33 | 91.67% | 5 | 71.43% |
Herbert Xu | 2 | 5.56% | 1 | 14.29% |
Stephen Hemminger | 1 | 2.78% | 1 | 14.29% |
Total | 36 | 100.00% | 7 | 100.00% |
/**
* netif_carrier_on - set carrier
* @dev: network device
*
* Device has detected acquisition of carrier.
*/
void netif_carrier_on(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
if (dev->reg_state == NETREG_UNINITIALIZED)
return;
atomic_inc(&dev->carrier_changes);
linkwatch_fire_event(dev);
if (netif_running(dev))
__netdev_watchdog_up(dev);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 39 | 67.24% | 1 | 25.00% |
David S. Miller | 9 | 15.52% | 1 | 25.00% |
David Decotigny | 8 | 13.79% | 1 | 25.00% |
Jeff Garzik | 2 | 3.45% | 1 | 25.00% |
Total | 58 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(netif_carrier_on);
/**
* netif_carrier_off - clear carrier
* @dev: network device
*
* Device has detected loss of carrier.
*/
void netif_carrier_off(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
if (dev->reg_state == NETREG_UNINITIALIZED)
return;
atomic_inc(&dev->carrier_changes);
linkwatch_fire_event(dev);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 28 | 59.57% | 1 | 33.33% |
David S. Miller | 11 | 23.40% | 1 | 33.33% |
David Decotigny | 8 | 17.02% | 1 | 33.33% |
Total | 47 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(netif_carrier_off);
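Both carrier helpers act only on a real state change: test_and_clear_bit() and test_and_set_bit() return the previous value of __LINK_STATE_NOCARRIER, so repeated calls in the same state do nothing. A userspace sketch of that idempotent-transition pattern using C11 atomics; the mask value, state word and event function are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

#define NOCARRIER_MASK 0x1UL      /* plays the role of __LINK_STATE_NOCARRIER */

static atomic_ulong dev_state = NOCARRIER_MASK;  /* start with no carrier */

static void fire_linkwatch_event(const char *what)
{
    printf("link event: %s\n", what);
}

static void carrier_on(void)
{
    /* atomic_fetch_and returns the old value, like test_and_clear_bit() */
    if (atomic_fetch_and(&dev_state, ~NOCARRIER_MASK) & NOCARRIER_MASK)
        fire_linkwatch_event("carrier on");
}

static void carrier_off(void)
{
    /* atomic_fetch_or returns the old value, like test_and_set_bit() */
    if (!(atomic_fetch_or(&dev_state, NOCARRIER_MASK) & NOCARRIER_MASK))
        fire_linkwatch_event("carrier off");
}

int main(void)
{
    carrier_on();    /* transition: fires an event   */
    carrier_on();    /* already on: silently ignored */
    carrier_off();   /* transition: fires an event   */
    carrier_off();   /* already off: ignored         */
    return 0;
}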
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
under all circumstances. It is difficult to invent anything faster or
cheaper.
*/
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
__qdisc_drop(skb, to_free);
return NET_XMIT_CN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 23 | 71.88% | 2 | 66.67% |
Eric Dumazet | 9 | 28.12% | 1 | 33.33% |
Total | 32 | 100.00% | 3 | 100.00% |
static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
struct Qdisc_ops noop_qdisc_ops __read_mostly = {
.id = "noop",
.priv_size = 0,
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.peek = noop_dequeue,
.owner = THIS_MODULE,
};
static struct netdev_queue noop_netdev_queue = {
.qdisc = &noop_qdisc,
.qdisc_sleeping = &noop_qdisc,
};
struct Qdisc noop_qdisc = {
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.flags = TCQ_F_BUILTIN,
.ops = &noop_qdisc_ops,
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
.running = SEQCNT_ZERO(noop_qdisc.running),
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);
static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt)
{
/* register_qdisc() assigns a default of noop_enqueue if unset,
* but __dev_queue_xmit() treats noqueue only as such
* if this is NULL - so clear it here. */
qdisc->enqueue = NULL;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Phil Sutter | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
.id = "noqueue",
.priv_size = 0,
.init = noqueue_init,
.enqueue = noop_enqueue,
.dequeue = noop_dequeue,
.peek = noop_dequeue,
.owner = THIS_MODULE,
};
static const u8 prio2band[TC_PRIO_MAX + 1] = {
1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};
/* 3-band FIFO queue: old style, but should be a bit faster than
generic prio+fifo combination.
*/
#define PFIFO_FAST_BANDS 3
/*
* Private data for a pfifo_fast scheduler containing:
* - queues for the three bands
* - bitmap indicating which of the bands contain skbs
*/
struct pfifo_fast_priv {
u32 bitmap;
struct qdisc_skb_head q[PFIFO_FAST_BANDS];
};
/*
* Convert a bitmap to the first band number where an skb is queued, where:
* bitmap=0 means there are no skbs on any band.
* bitmap=1 means there is an skb on band 0.
* bitmap=7 means there are skbs on all 3 bands, etc.
*/
static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
static inline struct qdisc_skb_head *band2list(struct pfifo_fast_priv *priv,
int band)
{
return priv->q + band;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 9 | 37.50% | 1 | 20.00% |
David S. Miller | 7 | 29.17% | 2 | 40.00% |
Thomas Graf | 7 | 29.17% | 1 | 20.00% |
Florian Westphal | 1 | 4.17% | 1 | 20.00% |
Total | 24 | 100.00% | 5 | 100.00% |
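pfifo_fast leans on the two lookup tables above: prio2band maps skb->priority to one of three bands, and bitmap2band maps the occupancy bitmap straight to the lowest-numbered non-empty band, so band 0 always drains first. The small program below simply exercises copies of those tables outside the kernel:

#include <stdio.h>

#define TC_PRIO_MAX 15

/* Same tables as in the scheduler above. */
static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
    1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* Index = occupancy bitmap (bit n set => band n non-empty),
 * value = lowest non-empty band, -1 when everything is empty. */
static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

int main(void)
{
    unsigned int prio;
    int bitmap;

    for (prio = 0; prio <= TC_PRIO_MAX; prio++)
        printf("priority %2u -> band %d\n",
               prio, prio2band[prio & TC_PRIO_MAX]);

    for (bitmap = 0; bitmap < 8; bitmap++)
        printf("bitmap %d%d%d -> dequeue band %d\n",
               (bitmap >> 2) & 1, (bitmap >> 1) & 1, bitmap & 1,
               bitmap2band[bitmap]);
    return 0;
}

bitmap2band is nothing more than a precomputed find-first-set for a 3-bit mask, with -1 standing for "all bands empty".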
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
struct sk_buff **to_free)
{
if (qdisc->q.qlen < qdisc_dev(qdisc)->tx_queue_len) {
int band = prio2band[skb->priority & TC_PRIO_MAX];
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
struct qdisc_skb_head *list = band2list(priv, band);
priv->bitmap |= (1 << band);
qdisc->q.qlen++;
return __qdisc_enqueue_tail(skb, qdisc, list);
}
return qdisc_drop(skb, qdisc, to_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 49 | 44.55% | 2 | 20.00% |
David S. Miller | 30 | 27.27% | 1 | 10.00% |
Thomas Graf | 12 | 10.91% | 2 | 20.00% |
Eric Dumazet | 8 | 7.27% | 1 | 10.00% |
Linus Torvalds (pre-git) | 6 | 5.45% | 1 | 10.00% |
Florian Westphal | 3 | 2.73% | 2 | 20.00% |
Jamal Hadi Salim | 2 | 1.82% | 1 | 10.00% |
Total | 110 | 100.00% | 10 | 100.00% |
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
if (likely(band >= 0)) {
struct qdisc_skb_head *qh = band2list(priv, band);
struct sk_buff *skb = __qdisc_dequeue_head(qh);
if (likely(skb != NULL)) {
qdisc_qstats_backlog_dec(qdisc, skb);
qdisc_bstats_update(qdisc, skb);
}
qdisc->q.qlen--;
if (qh->qlen == 0)
priv->bitmap &= ~(1 << band);
return skb;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Krishna Kumar | 51 | 41.46% | 1 | 16.67% |
Florian Westphal | 33 | 26.83% | 2 | 33.33% |
Linus Torvalds (pre-git) | 24 | 19.51% | 1 | 16.67% |
David S. Miller | 13 | 10.57% | 1 | 16.67% |
Thomas Graf | 2 | 1.63% | 1 | 16.67% |
Total | 123 | 100.00% | 6 | 100.00% |
static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
{
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
int band = bitmap2band[priv->bitmap];
if (band >= 0) {
struct qdisc_skb_head *qh = band2list(priv, band);
return qh->head;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jarek Poplawski | 30 | 49.18% | 1 | 33.33% |
Krishna Kumar | 26 | 42.62% | 1 | 33.33% |
Florian Westphal | 5 | 8.20% | 1 | 33.33% |
Total | 61 | 100.00% | 3 | 100.00% |
static void pfifo_fast_reset(struct Qdisc *qdisc)
{
int prio;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
__qdisc_reset_queue(band2list(priv, prio));
priv->bitmap = 0;
qdisc->qstats.backlog = 0;
qdisc->q.qlen = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 35 | 50.72% | 1 | 16.67% |
Linus Torvalds (pre-git) | 14 | 20.29% | 3 | 50.00% |
Krishna Kumar | 13 | 18.84% | 1 | 16.67% |
Thomas Graf | 7 | 10.14% | 1 | 16.67% |
Total | 69 | 100.00% | 6 | 100.00% |
static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
nla_put_failure:
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 72 | 100.00% | 2 | 100.00% |
Total | 72 | 100.00% | 2 | 100.00% |
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
int prio;
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
qdisc_skb_head_init(band2list(priv, prio));
/* Can by-pass the queue discipline */
qdisc->flags |= TCQ_F_CAN_BYPASS;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 46 | 74.19% | 1 | 20.00% |
Krishna Kumar | 7 | 11.29% | 1 | 20.00% |
Eric Dumazet | 7 | 11.29% | 1 | 20.00% |
Linus Torvalds (pre-git) | 1 | 1.61% | 1 | 20.00% |
Florian Westphal | 1 | 1.61% | 1 | 20.00% |
Total | 62 | 100.00% | 5 | 100.00% |
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.id = "pfifo_fast",
.priv_size = sizeof(struct pfifo_fast_priv),
.enqueue = pfifo_fast_enqueue,
.dequeue = pfifo_fast_dequeue,
.peek = pfifo_fast_peek,
.init = pfifo_fast_init,
.reset = pfifo_fast_reset,
.dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
static struct lock_class_key qdisc_running_key;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops)
{
void *p;
struct Qdisc *sch;
unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
int err = -ENOBUFS;
struct net_device *dev = dev_queue->dev;
p = kzalloc_node(size, GFP_KERNEL,
netdev_queue_numa_node_read(dev_queue));
if (!p)
goto errout;
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
/* if we got non-aligned memory, ask for more and do the alignment ourselves */
if (sch != p) {
kfree(p);
p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
netdev_queue_numa_node_read(dev_queue));
if (!p)
goto errout;
sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
sch->padded = (char *) sch - (char *) p;
}
qdisc_skb_head_init(&sch->q);
spin_lock_init(&sch->q.lock);
spin_lock_init(&sch->busylock);
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
seqcount_init(&sch->running);
lockdep_set_class(&sch->running,
dev->qdisc_running_key ?: &qdisc_running_key);
sch->ops = ops;
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
dev_hold(dev);
atomic_set(&sch->refcnt, 1);
return sch;
errout:
return ERR_PTR(err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 124 | 42.91% | 5 | 35.71% |
Linus Torvalds (pre-git) | 71 | 24.57% | 3 | 21.43% |
Stephen Hemminger | 40 | 13.84% | 2 | 14.29% |
Thomas Graf | 30 | 10.38% | 1 | 7.14% |
Florian Westphal | 11 | 3.81% | 1 | 7.14% |
David S. Miller | 9 | 3.11% | 1 | 7.14% |
Patrick McHardy | 4 | 1.38% | 1 | 7.14% |
Total | 289 | 100.00% | 14 | 100.00% |
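qdisc_alloc() wants the Qdisc aligned to QDISC_ALIGNTO: if the first allocation happens to come back unaligned, it re-allocates with QDISC_ALIGNTO - 1 bytes of slack, rounds the pointer up and records the offset in sch->padded so qdisc_rcu_free() can hand the original pointer back to kfree(). The sketch below shows the same padded-alignment trick in userspace; it always over-allocates instead of retrying, and the 64-byte alignment and toy_qdisc struct are assumptions for the example.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define ALIGNTO 64u                               /* like QDISC_ALIGNTO */
#define ALIGN_UP(x) (((x) + ALIGNTO - 1) & ~(uintptr_t)(ALIGNTO - 1))

struct toy_qdisc {
    unsigned int padded;     /* offset back to the raw allocation */
    char priv[100];          /* pretend private area              */
};

static struct toy_qdisc *toy_alloc(void)
{
    size_t size = sizeof(struct toy_qdisc);
    /* Over-allocate so an aligned object always fits in the block. */
    char *p = calloc(1, size + ALIGNTO - 1);
    struct toy_qdisc *sch;

    if (!p)
        return NULL;
    sch = (struct toy_qdisc *)ALIGN_UP((uintptr_t)p);
    sch->padded = (unsigned int)((char *)sch - p);
    return sch;
}

static void toy_free(struct toy_qdisc *sch)
{
    /* Same move as qdisc_rcu_free(): step back over the padding. */
    free((char *)sch - sch->padded);
}

int main(void)
{
    struct toy_qdisc *sch = toy_alloc();

    if (!sch)
        return 1;
    printf("object at %p, padding %u bytes, aligned: %s\n",
           (void *)sch, sch->padded,
           ((uintptr_t)sch % ALIGNTO) == 0 ? "yes" : "no");
    toy_free(sch);
    return 0;
}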
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
unsigned int parentid)
{
struct Qdisc *sch;
if (!try_module_get(ops->owner))
return NULL;
sch = qdisc_alloc(dev_queue, ops);
if (IS_ERR(sch)) {
module_put(ops->owner);
return NULL;
}
sch->parent = parentid;
if (!ops->init || ops->init(sch, NULL) == 0)
return sch;
qdisc_destroy(sch);
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 35 | 33.98% | 2 | 20.00% |
Linus Torvalds (pre-git) | 25 | 24.27% | 3 | 30.00% |
Eric Dumazet | 15 | 14.56% | 1 | 10.00% |
Stephen Hemminger | 11 | 10.68% | 2 | 20.00% |
Patrick McHardy | 10 | 9.71% | 1 | 10.00% |
David S. Miller | 7 | 6.80% | 1 | 10.00% |
Total | 103 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(qdisc_create_dflt);
/* Under qdisc_lock(qdisc) and BH! */
void qdisc_reset(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
if (ops->reset)
ops->reset(qdisc);
kfree_skb(qdisc->skb_bad_txq);
qdisc->skb_bad_txq = NULL;
if (qdisc->gso_skb) {
kfree_skb_list(qdisc->gso_skb);
qdisc->gso_skb = NULL;
}
qdisc->q.qlen = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 31 | 41.33% | 1 | 16.67% |
Krishna Kumar | 16 | 21.33% | 1 | 16.67% |
Eric Dumazet | 15 | 20.00% | 2 | 33.33% |
Jarek Poplawski | 12 | 16.00% | 1 | 16.67% |
Jesper Dangaard Brouer | 1 | 1.33% | 1 | 16.67% |
Total | 75 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(qdisc_reset);
static void qdisc_rcu_free(struct rcu_head *head)
{
struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
if (qdisc_is_percpu_stats(qdisc)) {
free_percpu(qdisc->cpu_bstats);
free_percpu(qdisc->cpu_qstats);
}
kfree((char *) qdisc - qdisc->padded);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 39 | 62.90% | 1 | 33.33% |
John Fastabend | 23 | 37.10% | 2 | 66.67% |
Total | 62 | 100.00% | 3 | 100.00% |
void qdisc_destroy(struct Qdisc *qdisc)
{
const struct Qdisc_ops *ops = qdisc->ops;
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
return;
#ifdef CONFIG_NET_SCHED
qdisc_hash_del(qdisc);
qdisc_put_stab(rtnl_dereference(qdisc->stab));
#endif
gen_kill_estimator(&qdisc->rate_est);
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
ops->destroy(qdisc);
module_put(ops->owner);
dev_put(qdisc_dev(qdisc));
kfree_skb_list(qdisc->gso_skb);
kfree_skb(qdisc->skb_bad_txq);
/*
* gen_estimator est_timer() might access qdisc->q.lock,
* so wait an RCU grace period before freeing the qdisc.
*/
call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 55 | 41.67% | 2 | 11.76% |
David S. Miller | 36 | 27.27% | 4 | 23.53% |
Eric Dumazet | 17 | 12.88% | 4 | 23.53% |
Jarek Poplawski | 9 | 6.82% | 2 | 11.76% |
Jussi Kivilinna | 7 | 5.30% | 1 | 5.88% |
Linus Torvalds (pre-git) | 5 | 3.79% | 1 | 5.88% |
Andrew Morton | 1 | 0.76% | 1 | 5.88% |
Jiri Kosina | 1 | 0.76% | 1 | 5.88% |
Jesper Dangaard Brouer | 1 | 0.76% | 1 | 5.88% |
Total | 132 | 100.00% | 17 | 100.00% |
EXPORT_SYMBOL(qdisc_destroy);
/* Attach toplevel qdisc to device queue. */
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *qdisc)
{
struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
spinlock_t *root_lock;
root_lock = qdisc_lock(oqdisc);
spin_lock_bh(root_lock);
/* Prune old scheduler */
if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
qdisc_reset(oqdisc);
/* ... and graft new one */
if (qdisc == NULL)
qdisc = &noop_qdisc;
dev_queue->qdisc_sleeping = qdisc;
rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
spin_unlock_bh(root_lock);
return oqdisc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 98 | 100.00% | 1 | 100.00% |
Total | 98 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(dev_graft_qdisc);
static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
{
struct Qdisc *qdisc;
const struct Qdisc_ops *ops = default_qdisc_ops;
if (dev->priv_flags & IFF_NO_QUEUE)
ops = &noqueue_qdisc_ops;
qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT);
if (!qdisc) {
netdev_info(dev, "activation failed\n");
return;
}
if (!netif_is_multiqueue(dev))
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
dev_queue->qdisc_sleeping = qdisc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 32 | 34.04% | 3 | 27.27% |
David S. Miller | 22 | 23.40% | 2 | 18.18% |
Eric Dumazet | 20 | 21.28% | 3 | 27.27% |
Phil Sutter | 18 | 19.15% | 2 | 18.18% |
Patrick McHardy | 2 | 2.13% | 1 | 9.09% |
Total | 94 | 100.00% | 11 | 100.00% |
static void attach_default_qdiscs(struct net_device *dev)
{
struct netdev_queue *txq;
struct Qdisc *qdisc;
txq = netdev_get_tx_queue(dev, 0);
if (!netif_is_multiqueue(dev) ||
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
dev->qdisc = txq->qdisc_sleeping;
atomic_inc(&dev->qdisc->refcnt);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
if (qdisc) {
dev->qdisc = qdisc;
qdisc->ops->attach(qdisc);
}
}
#ifdef CONFIG_NET_SCHED
if (dev->qdisc != &noop_qdisc)
qdisc_hash_add(dev->qdisc);
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 100 | 76.92% | 1 | 20.00% |
Jiri Kosina | 18 | 13.85% | 1 | 20.00% |
Eric Dumazet | 6 | 4.62% | 1 | 20.00% |
Phil Sutter | 3 | 2.31% | 1 | 20.00% |
Américo Wang | 3 | 2.31% | 1 | 20.00% |
Total | 130 | 100.00% | 5 | 100.00% |
static void transition_one_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_need_watchdog)
{
struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
int *need_watchdog_p = _need_watchdog;
if (!(new_qdisc->flags & TCQ_F_BUILTIN))
clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
if (need_watchdog_p) {
dev_queue->trans_start = 0;
*need_watchdog_p = 1;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 63 | 76.83% | 4 | 50.00% |
Eric Dumazet | 8 | 9.76% | 1 | 12.50% |
Patrick McHardy | 7 | 8.54% | 1 | 12.50% |
Linus Torvalds (pre-git) | 3 | 3.66% | 1 | 12.50% |
Tommy S. Christensen | 1 | 1.22% | 1 | 12.50% |
Total | 82 | 100.00% | 8 | 100.00% |
void dev_activate(struct net_device *dev)
{
int need_watchdog;
/* No queueing discipline is attached to the device;
* create a default one for devices that need queueing,
* and noqueue_qdisc for virtual interfaces
*/
if (dev->qdisc == &noop_qdisc)
attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
/* Delay activation until next carrier-on event */
return;
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
if (dev_ingress_queue(dev))
transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
if (need_watchdog) {
netif_trans_update(dev);
dev_watchdog_up(dev);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 58 | 66.67% | 3 | 33.33% |
Linus Torvalds (pre-git) | 10 | 11.49% | 2 | 22.22% |
Eric Dumazet | 10 | 11.49% | 1 | 11.11% |
Patrick McHardy | 5 | 5.75% | 1 | 11.11% |
Florian Westphal | 3 | 3.45% | 1 | 11.11% |
Stephen Hemminger | 1 | 1.15% | 1 | 11.11% |
Total | 87 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(dev_activate);
static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc_default)
{
struct Qdisc *qdisc_default = _qdisc_default;
struct Qdisc *qdisc;
qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
if (!(qdisc->flags & TCQ_F_BUILTIN))
set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 87 | 88.78% | 6 | 66.67% |
Linus Torvalds (pre-git) | 4 | 4.08% | 1 | 11.11% |
Jarek Poplawski | 4 | 4.08% | 1 | 11.11% |
John Fastabend | 3 | 3.06% | 1 | 11.11% |
Total | 98 | 100.00% | 9 | 100.00% |
static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *dev_queue;
spinlock_t *root_lock;
struct Qdisc *q;
int val;
dev_queue = netdev_get_tx_queue(dev, i);
q = dev_queue->qdisc_sleeping;
root_lock = qdisc_lock(q);
spin_lock_bh(root_lock);
val = (qdisc_is_running(q) ||
test_bit(__QDISC_STATE_SCHED, &q->state));
spin_unlock_bh(root_lock);
if (val)
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 107 | 97.27% | 7 | 77.78% |
Herbert Xu | 2 | 1.82% | 1 | 11.11% |
Eric Dumazet | 1 | 0.91% | 1 | 11.11% |
Total | 110 | 100.00% | 9 | 100.00% |
/**
* dev_deactivate_many - deactivate transmissions on several devices
* @head: list of devices to deactivate
*
* This function returns only when all outstanding transmissions
* have completed, unless all devices are in dismantle phase.
*/
void dev_deactivate_many(struct list_head *head)
{
struct net_device *dev;
bool sync_needed = false;
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
&noop_qdisc);
if (dev_ingress_queue(dev))
dev_deactivate_queue(dev, dev_ingress_queue(dev),
&noop_qdisc);
dev_watchdog_down(dev);
sync_needed |= !dev->dismantle;
}
/* Wait for outstanding qdisc-less dev_queue_xmit calls.
* This is avoided if all devices are in dismantle phase:
* the caller will call synchronize_net() for us
*/
if (sync_needed)
synchronize_net();
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
yield();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 28 | 28.28% | 2 | 14.29% |
David S. Miller | 25 | 25.25% | 4 | 28.57% |
Octavian Purdila | 23 | 23.23% | 1 | 7.14% |
Linus Torvalds (pre-git) | 17 | 17.17% | 4 | 28.57% |
Herbert Xu | 3 | 3.03% | 1 | 7.14% |
Eric W. Biedermann | 2 | 2.02% | 1 | 7.14% |
Linus Torvalds | 1 | 1.01% | 1 | 7.14% |
Total | 99 | 100.00% | 14 | 100.00% |
void dev_deactivate(struct net_device *dev)
{
LIST_HEAD(single);
list_add(&dev->close_list, &single);
dev_deactivate_many(&single);
list_del(&single);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Octavian Purdila | 31 | 81.58% | 1 | 33.33% |
Eric W. Biedermann | 7 | 18.42% | 2 | 66.67% |
Total | 38 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(dev_deactivate);
static void dev_init_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc)
{
struct Qdisc *qdisc = _qdisc;
rcu_assign_pointer(dev_queue->qdisc, qdisc);
dev_queue->qdisc_sleeping = qdisc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 38 | 90.48% | 2 | 66.67% |
John Fastabend | 4 | 9.52% | 1 | 33.33% |
Total | 42 | 100.00% | 3 | 100.00% |
void dev_init_scheduler(struct net_device *dev)
{
dev->qdisc = &noop_qdisc;
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 20 | 31.75% | 4 | 36.36% |
Pavel Emelyanov | 12 | 19.05% | 1 | 9.09% |
David S. Miller | 12 | 19.05% | 3 | 27.27% |
Eric Dumazet | 10 | 15.87% | 1 | 9.09% |
Patrick McHardy | 9 | 14.29% | 2 | 18.18% |
Total | 63 | 100.00% | 11 | 100.00% |
static void shutdown_scheduler_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_qdisc_default)
{
struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
dev_queue->qdisc_sleeping = qdisc_default;
qdisc_destroy(qdisc);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 32 | 51.61% | 2 | 33.33% |
Linus Torvalds (pre-git) | 26 | 41.94% | 3 | 50.00% |
Jarek Poplawski | 4 | 6.45% | 1 | 16.67% |
Total | 62 | 100.00% | 6 | 100.00% |
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
qdisc_destroy(dev->qdisc);
dev->qdisc = &noop_qdisc;
WARN_ON(timer_pending(&dev->watchdog_timer));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 23 | 35.38% | 3 | 25.00% |
Linus Torvalds (pre-git) | 17 | 26.15% | 6 | 50.00% |
Patrick McHardy | 14 | 21.54% | 1 | 8.33% |
Eric Dumazet | 10 | 15.38% | 1 | 8.33% |
Ilpo Järvinen | 1 | 1.54% | 1 | 8.33% |
Total | 65 | 100.00% | 12 | 100.00% |
void psched_ratecfg_precompute(struct psched_ratecfg *r,
const struct tc_ratespec *conf,
u64 rate64)
{
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
r->mult = 1;
/*
* The deal here is to replace a divide by a reciprocal one
* in fast path (a reciprocal divide is a multiply and a shift)
*
* Normal formula would be :
* time_in_ns = (NSEC_PER_SEC * len) / rate_bps
*
* We compute mult/shift to use instead :
* time_in_ns = (len * mult) >> shift;
*
* We try to get the highest possible mult value for accuracy,
* but have to make sure no overflows will ever happen.
*/
if (r->rate_bytes_ps > 0) {
u64 factor = NSEC_PER_SEC;
for (;;) {
r->mult = div64_u64(factor, r->rate_bytes_ps);
if (r->mult & (1U << 31) || factor & (1ULL << 63))
break;
factor <<= 1;
r->shift++;
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 66 | 47.48% | 3 | 60.00% |
Jiri Pirko | 61 | 43.88% | 1 | 20.00% |
Jesper Dangaard Brouer | 12 | 8.63% | 1 | 20.00% |
Total | 139 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(psched_ratecfg_precompute);
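psched_ratecfg_precompute() trades the per-packet division NSEC_PER_SEC * len / rate for a multiply and a shift, searching for the largest mult that will not overflow. The program below reproduces that search in userspace and compares both formulas; the 1 Gbit/s rate and the packet lengths are arbitrary test values, not anything taken from the kernel.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratecfg {
    uint64_t rate_bytes_ps;
    uint32_t mult;
    uint8_t  shift;
};

/* Same search as psched_ratecfg_precompute(): grow the factor until
 * either mult would exceed 31 bits or the factor itself would overflow. */
static void precompute(struct ratecfg *r, uint64_t rate_bytes_ps)
{
    r->rate_bytes_ps = rate_bytes_ps;
    r->mult = 1;
    r->shift = 0;
    if (rate_bytes_ps == 0)
        return;

    uint64_t factor = NSEC_PER_SEC;
    for (;;) {
        r->mult = (uint32_t)(factor / rate_bytes_ps);
        if ((r->mult & (1U << 31)) || (factor & (1ULL << 63)))
            break;
        factor <<= 1;
        r->shift++;
    }
}

/* Fast path: time = (len * mult) >> shift, no division. */
static uint64_t len_to_ns(const struct ratecfg *r, unsigned int len)
{
    return ((uint64_t)len * r->mult) >> r->shift;
}

int main(void)
{
    struct ratecfg r;
    unsigned int lens[] = { 64, 1500, 65536 };
    uint64_t rate = 125000000;         /* 1 Gbit/s in bytes per second */

    precompute(&r, rate);
    printf("mult=%u shift=%u\n", r.mult, r.shift);

    for (unsigned int i = 0; i < 3; i++) {
        uint64_t exact = NSEC_PER_SEC * lens[i] / rate;
        printf("len %6u: reciprocal %8llu ns, exact %8llu ns\n",
               lens[i],
               (unsigned long long)len_to_ns(&r, lens[i]),
               (unsigned long long)exact);
    }
    return 0;
}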
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 1046 | 24.27% | 31 | 17.61% |
Eric Dumazet | 851 | 19.74% | 28 | 15.91% |
Linus Torvalds (pre-git) | 662 | 15.36% | 15 | 8.52% |
Krishna Kumar | 350 | 8.12% | 5 | 2.84% |
Patrick McHardy | 238 | 5.52% | 9 | 5.11% |
Jarek Poplawski | 138 | 3.20% | 9 | 5.11% |
Jesper Dangaard Brouer | 116 | 2.69% | 5 | 2.84% |
Jamal Hadi Salim | 104 | 2.41% | 4 | 2.27% |
Thomas Graf | 98 | 2.27% | 7 | 3.98% |
Stephen Hemminger | 89 | 2.06% | 9 | 5.11% |
Jiri Pirko | 69 | 1.60% | 1 | 0.57% |
Denys Vlasenko | 67 | 1.55% | 1 | 0.57% |
Florian Westphal | 64 | 1.48% | 5 | 2.84% |
Dave Jones | 55 | 1.28% | 1 | 0.57% |
Octavian Purdila | 54 | 1.25% | 1 | 0.57% |
Phil Sutter | 52 | 1.21% | 3 | 1.70% |
Herbert Xu | 45 | 1.04% | 9 | 5.11% |
John Fastabend | 45 | 1.04% | 4 | 2.27% |
Nikolay Aleksandrov | 23 | 0.53% | 1 | 0.57% |
David Decotigny | 21 | 0.49% | 2 | 1.14% |
Jiri Kosina | 19 | 0.44% | 1 | 0.57% |
Américo Wang | 17 | 0.39% | 2 | 1.14% |
Lars Persson | 16 | 0.37% | 1 | 0.57% |
Pavel Emelyanov | 12 | 0.28% | 1 | 0.57% |
Arjan van de Ven | 11 | 0.26% | 3 | 1.70% |
Eric W. Biedermann | 9 | 0.21% | 2 | 1.14% |
Jussi Kivilinna | 7 | 0.16% | 1 | 0.57% |
Peter P. Waskiewicz Jr | 7 | 0.16% | 1 | 0.57% |
Tom Herbert | 4 | 0.09% | 1 | 0.57% |
Tejun Heo | 3 | 0.07% | 1 | 0.57% |
Arthur Kepner | 3 | 0.07% | 1 | 0.57% |
Venkatesh Pallipadi | 3 | 0.07% | 1 | 0.57% |
Daniel Borkmann | 2 | 0.05% | 1 | 0.57% |
Jeff Garzik | 2 | 0.05% | 1 | 0.57% |
Joe Perches | 1 | 0.02% | 1 | 0.57% |
Linus Torvalds | 1 | 0.02% | 1 | 0.57% |
Ilpo Järvinen | 1 | 0.02% | 1 | 0.57% |
Tommy S. Christensen | 1 | 0.02% | 1 | 0.57% |
Andi Kleen | 1 | 0.02% | 1 | 0.57% |
Andrew Morton | 1 | 0.02% | 1 | 0.57% |
Matthias Tafelmeier | 1 | 0.02% | 1 | 0.57% |
Adrian Bunk | 1 | 0.02% | 1 | 0.57% |
Total | 4310 | 100.00% | 176 | 100.00% |