Release 4.7 net/sched/sch_cbq.c
/*
* net/sched/sch_cbq.c Class-Based Queueing discipline.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/* Class-Based Queueing (CBQ) algorithm.
=======================================
Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
Management Models for Packet Networks",
IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
[2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
[3] Sally Floyd, "Notes on Class-Based Queueing: Setting
Parameters", 1996
[4] Sally Floyd and Michael Speer, "Experimental Results
for Class-Based Queueing", 1998, not published.
-----------------------------------------------------------------------
Algorithm skeleton was taken from the NS simulator's cbq.cc.
Anyone who wants to check this code against the LBL version
should take into account that ONLY the skeleton was borrowed;
the implementation is different. In particular:
--- The WRR algorithm is different. Our version looks more
reasonable (I hope) and works when quanta are allowed to be
less than the MTU, which is always the case when real-time classes
have small rates. Note that the statement in [3] is
incomplete: delay may actually be estimated even if the class
per-round allotment is less than the MTU. Namely, if the per-round
allotment is W*r_i, and r_1+...+r_k = r < 1, then
delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
In the worst case we have the IntServ estimate with D = W*r+k*MTU
and C = MTU*r. The proof (if correct at all) is trivial.
--- It seems that cbq-2.0 is not very accurate. At least, I cannot
interpret some places, which look like wrong translations
from NS. Anyone is advised to find these differences
and explain to me why I am wrong 8).
--- Linux has no EOI event, so we cannot estimate the true class
idle time. The workaround is to treat the next dequeue event
as a sign that the previous packet has finished. This is wrong because of
internal device queueing, but it holds on a permanently loaded link.
Moreover, combined with the clock integrator, this scheme looks
very close to an ideal solution. */
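The delay bound above can be sanity-checked numerically. The following is a
minimal userspace sketch, not kernel code: wrr_delay_bound is a hypothetical
helper, the bracket in [MTU/(W*r_i)] is taken as a ceiling, W is scaled so
that W*r_i is the per-round allotment in bytes, and B is the link bandwidth
in bytes/sec.

#include <stdio.h>
#include <math.h>

/* Hypothetical helper: worst-case delay for class i from the comment
 * above. W*r_i is the per-round allotment in bytes (it may be < MTU),
 * r = r_1+...+r_k < 1, k is the number of classes, B is bytes/sec. */
static double wrr_delay_bound(double W, double r_i, double r,
                              int k, double mtu, double B)
{
        return (ceil(mtu / (W * r_i)) * W * r + W * r + k * mtu) / B;
}

int main(void)
{
        /* 10 Mbit/s link, 1500-byte MTU, 4 classes; class i gets 5%,
         * so its per-round allotment 20000*0.05 = 1000 bytes < MTU. */
        printf("delay_i <= %.4f sec\n",
               wrr_delay_bound(20000.0, 0.05, 0.5, 4, 1500.0, 1.25e6));
        return 0;
}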
struct cbq_sched_data;
struct cbq_class {
struct Qdisc_class_common common;
struct cbq_class *next_alive; /* next class with backlog in this priority band */
/* Parameters */
unsigned char priority; /* class priority */
unsigned char priority2; /* priority to be used after overlimit */
unsigned char ewma_log; /* time constant for idle time calculation */
unsigned char ovl_strategy;
#ifdef CONFIG_NET_CLS_ACT
unsigned char police;
#endif
u32 defmap;
/* Link-sharing scheduler parameters */
long maxidle; /* Class parameters: see below. */
long offtime;
long minidle;
u32 avpkt;
struct qdisc_rate_table *R_tab;
/* Overlimit strategy parameters */
void (*overlimit)(struct cbq_class *cl);
psched_tdiff_t penalty;
/* General scheduler (WRR) parameters */
long allot;
long quantum; /* Allotment per WRR round */
long weight; /* Relative allotment: see below */
struct Qdisc *qdisc; /* Ptr to CBQ discipline */
struct cbq_class *split; /* Ptr to split node */
struct cbq_class *share; /* Ptr to LS parent in the class tree */
struct cbq_class *tparent; /* Ptr to tree parent in the class tree */
struct cbq_class *borrow; /* NULL if class is bandwidth limited;
parent otherwise */
struct cbq_class *sibling; /* Sibling chain */
struct cbq_class *children; /* Pointer to children chain */
struct Qdisc *q; /* Elementary queueing discipline */
/* Variables */
unsigned char cpriority; /* Effective priority */
unsigned char delayed;
unsigned char level; /* level of the class in hierarchy:
0 for leaf classes, and maximal
level of children + 1 for nodes.
*/
psched_time_t last; /* Last end of service */
psched_time_t undertime;
long avgidle;
long deficit; /* Saved deficit for WRR */
psched_time_t penalized;
struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est64 rate_est;
struct tc_cbq_xstats xstats;
struct tcf_proto __rcu *filter_list;
int refcnt;
int filters;
struct cbq_class *defaults[TC_PRIO_MAX + 1];
};
struct cbq_sched_data {
struct Qdisc_class_hash clhash; /* Hash table of all classes */
int nclasses[TC_CBQ_MAXPRIO + 1];
unsigned int quanta[TC_CBQ_MAXPRIO + 1];
struct cbq_class link;
unsigned int activemask;
struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
with backlog */
#ifdef CONFIG_NET_CLS_ACT
struct cbq_class *rx_class;
#endif
struct cbq_class *tx_class;
struct cbq_class *tx_borrowed;
int tx_len;
psched_time_t now; /* Cached timestamp */
unsigned int pmask;
struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer,
started when CBQ has
backlog, but cannot
transmit just now */
psched_tdiff_t wd_expires;
int toplevel;
u32 hgenerator;
};
#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
struct Qdisc_class_common *clc;
clc = qdisc_class_find(&q->clhash, classid);
if (clc == NULL)
return NULL;
return container_of(clc, struct cbq_class, common);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 20 | 37.04% | 1 | 25.00% |
| eric dumazet | 18 | 33.33% | 1 | 25.00% |
| pre-git | 16 | 29.63% | 2 | 50.00% |
| Total | 54 | 100.00% | 4 | 100.00% |
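cbq_class_lookup() relies on the kernel's container_of pattern: the class
hash stores only the embedded Qdisc_class_common node, and the outer
cbq_class is recovered by pointer arithmetic. A self-contained userspace
sketch of the same pattern (the struct names here are illustrative):

#include <stdio.h>
#include <stddef.h>

/* Userspace restatement of container_of: subtract the member's offset
 * from the member pointer to get back to the enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct common { unsigned int classid; };
struct my_class { int level; struct common common; };

int main(void)
{
        struct my_class c = { .level = 2, .common = { .classid = 0x10001 } };
        struct common *clc = &c.common;   /* what the hash lookup returns */
        struct my_class *cl = container_of(clc, struct my_class, common);
        printf("level=%d classid=%#x\n", cl->level, cl->common.classid);
        return 0;
}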
#ifdef CONFIG_NET_CLS_ACT
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
struct cbq_class *cl;
for (cl = this->tparent; cl; cl = cl->tparent) {
struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
if (new != NULL && new != this)
return new;
}
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 58 | 84.06% | 2 | 66.67% |
| eric dumazet | 11 | 15.94% | 1 | 33.33% |
| Total | 69 | 100.00% | 3 | 100.00% |
#endif
/* Classify packet. The procedure is pretty complicated, but
* it allows us to combine link sharing and priority scheduling
* transparently.
*
* Namely, you can put link-sharing rules (e.g. route-based) at the root of CBQ,
* so that it resolves to split nodes. Then packets are classified
* by logical priority, or a more specific classifier may be attached
* to the split node.
*/
static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *head = &q->link;
struct cbq_class **defmap;
struct cbq_class *cl = NULL;
u32 prio = skb->priority;
struct tcf_proto *fl;
struct tcf_result res;
/*
* Step 1. If skb->priority points to one of our classes, use it.
*/
if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
(cl = cbq_class_lookup(q, prio)) != NULL)
return cl;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
for (;;) {
int result = 0;
defmap = head->defaults;
fl = rcu_dereference_bh(head->filter_list);
/*
* Step 2+n. Apply classifier.
*/
result = tc_classify(skb, fl, &res, true);
if (!fl || result < 0)
goto fallback;
cl = (void *)res.class;
if (!cl) {
if (TC_H_MAJ(res.classid))
cl = cbq_class_lookup(q, res.classid);
else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
cl = defmap[TC_PRIO_BESTEFFORT];
if (cl == NULL)
goto fallback;
}
if (cl->level >= head->level)
goto fallback;
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
case TC_ACT_STOLEN:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
case TC_ACT_SHOT:
return NULL;
case TC_ACT_RECLASSIFY:
return cbq_reclassify(skb, cl);
}
#endif
if (cl->level == 0)
return cl;
/*
* Step 3+n. If the classifier selected a link-sharing class,
* apply its agency-specific classifier.
* Repeat this procedure until we hit a leaf node.
*/
head = cl;
}
fallback:
cl = head;
/*
* Step 4. No success...
*/
if (TC_H_MAJ(prio) == 0 &&
!(cl = head->defaults[prio & TC_PRIO_MAX]) &&
!(cl = head->defaults[TC_PRIO_BESTEFFORT]))
return head;
return cl;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 265 | 75.50% | 4 | 28.57% |
| jamal hadi salim | 23 | 6.55% | 1 | 7.14% |
| john fastabend | 20 | 5.70% | 1 | 7.14% |
| patrick mchardy | 20 | 5.70% | 2 | 14.29% |
| eric dumazet | 12 | 3.42% | 2 | 14.29% |
| jarek poplawski | 5 | 1.42% | 2 | 14.29% |
| stephen hemminger | 3 | 0.85% | 1 | 7.14% |
| daniel borkmann | 3 | 0.85% | 1 | 7.14% |
| Total | 351 | 100.00% | 14 | 100.00% |
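Step 1 of cbq_classify() compares only the major (qdisc) part of
skb->priority against the qdisc handle. A small sketch using the
TC_H_MAJ/TC_H_MIN bit layout from <linux/pkt_sched.h> (the handle values
below are made up):

#include <stdio.h>

/* Handle layout is major:minor, 16 bits each. */
#define TC_H_MAJ_MASK (0xFFFF0000U)
#define TC_H_MIN_MASK (0x0000FFFFU)
#define TC_H_MAJ(h) ((h) & TC_H_MAJ_MASK)
#define TC_H_MIN(h) ((h) & TC_H_MIN_MASK)

int main(void)
{
        unsigned int sch_handle = 0x00010000;   /* qdisc 1: */
        unsigned int prio = 0x00010005;         /* class 1:5 in skb->priority */

        /* Step 1 test from cbq_classify(): same major, so prio names
         * one of our classes and we can look up its minor directly. */
        if (TC_H_MAJ(prio ^ sch_handle) == 0)
                printf("lookup class minor %#x\n", TC_H_MIN(prio));
        return 0;
}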
/*
* A packet has just been enqueued on an empty class.
* cbq_activate_class adds it to the tail of the active class list
* of its priority band.
*/
static inline void cbq_activate_class(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
int prio = cl->cpriority;
struct cbq_class *cl_tail;
cl_tail = q->active[prio];
q->active[prio] = cl;
if (cl_tail != NULL) {
cl->next_alive = cl_tail->next_alive;
cl_tail->next_alive = cl;
} else {
cl->next_alive = cl;
q->activemask |= (1<<prio);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 91 | 95.79% | 2 | 50.00% |
| stephen hemminger | 3 | 3.16% | 1 | 25.00% |
| eric dumazet | 1 | 1.05% | 1 | 25.00% |
| Total | 95 | 100.00% | 4 | 100.00% |
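The next_alive pointers form a circular singly-linked ring per priority
band, with q->active[prio] pointing at the tail so that both the head
(tail->next_alive) and the tail are reachable in O(1). A toy userspace
model of the insertion done by cbq_activate_class() (a sketch, not the
kernel structures):

#include <stdio.h>

struct node { int id; struct node *next_alive; };

/* Insert n as the new tail of the ring whose tail is 'tail'. */
static struct node *activate(struct node *tail, struct node *n)
{
        if (tail) {
                n->next_alive = tail->next_alive; /* new tail points at head */
                tail->next_alive = n;
        } else {
                n->next_alive = n;                /* single-element ring */
        }
        return n;                                 /* n is the new tail */
}

int main(void)
{
        struct node a = {1}, b = {2}, c = {3};
        struct node *tail = NULL;
        tail = activate(tail, &a);
        tail = activate(tail, &b);
        tail = activate(tail, &c);
        /* Walk one full round starting at the head; prints: 1 2 3 */
        struct node *p = tail->next_alive;
        do {
                printf("%d ", p->id);
        } while ((p = p->next_alive) != tail->next_alive);
        printf("\n");
        return 0;
}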
/*
* Unlink the class from the active chain.
* Note that the same unlinking is also done directly in cbq_dequeue*
* during the round-robin procedure.
*/
static void cbq_deactivate_class(struct cbq_class *this)
{
struct cbq_sched_data *q = qdisc_priv(this->qdisc);
int prio = this->cpriority;
struct cbq_class *cl;
struct cbq_class *cl_prev = q->active[prio];
do {
cl = cl_prev->next_alive;
if (cl == this) {
cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL;
if (cl == q->active[prio]) {
q->active[prio] = cl_prev;
if (cl == q->active[prio]) {
q->active[prio] = NULL;
q->activemask &= ~(1<<prio);
return;
}
}
return;
}
} while ((cl_prev = cl) != q->active[prio]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 148 | 98.01% | 2 | 66.67% |
| stephen hemminger | 3 | 1.99% | 1 | 33.33% |
| Total | 151 | 100.00% | 3 | 100.00% |
static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
int toplevel = q->toplevel;
if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
psched_time_t now = psched_get_time();
do {
if (cl->undertime < now) {
q->toplevel = cl->level;
return;
}
} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 82 | 91.11% | 3 | 42.86% |
| patrick mchardy | 3 | 3.33% | 2 | 28.57% |
| eric dumazet | 3 | 3.33% | 1 | 14.29% |
| vasily averin | 2 | 2.22% | 1 | 14.29% |
| Total | 90 | 100.00% | 7 | 100.00% |
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
int uninitialized_var(ret);
struct cbq_class *cl = cbq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
q->rx_class = cl;
#endif
if (cl == NULL) {
if (ret & __NET_XMIT_BYPASS)
qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
#ifdef CONFIG_NET_CLS_ACT
cl->q->__parent = sch;
#endif
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
cbq_mark_toplevel(q, cl);
if (!cl->next_alive)
cbq_activate_class(cl);
return ret;
}
if (net_xmit_drop_count(ret)) {
qdisc_qstats_drop(sch);
cbq_mark_toplevel(q, cl);
cl->qstats.drops++;
}
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 102 | 57.63% | 4 | 26.67% |
| patrick mchardy | 23 | 12.99% | 3 | 20.00% |
| jamal hadi salim | 23 | 12.99% | 1 | 6.67% |
| jarek poplawski | 11 | 6.21% | 2 | 13.33% |
| john fastabend | 6 | 3.39% | 1 | 6.67% |
| jussi kivilinna | 5 | 2.82% | 1 | 6.67% |
| satyam sharma | 3 | 1.69% | 1 | 6.67% |
| stephen hemminger | 3 | 1.69% | 1 | 6.67% |
| thomas graf | 1 | 0.56% | 1 | 6.67% |
| Total | 177 | 100.00% | 15 | 100.00% |
/* Overlimit actions */
/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
static void cbq_ovl_classic(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = cl->undertime - q->now;
if (!cl->delayed) {
delay += cl->offtime;
/*
* The class goes to sleep, so it will have no
* chance to work off avgidle. Let's forgive it 8)
*
* BTW cbq-2.0 has a bug here: apparently they
* forgot to shift by cl->ewma_log.
*/
if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
if (cl->avgidle < cl->minidle)
cl->avgidle = cl->minidle;
if (delay <= 0)
delay = 1;
cl->undertime = q->now + delay;
cl->xstats.overactions++;
cl->delayed = 1;
}
if (q->wd_expires == 0 || q->wd_expires > delay)
q->wd_expires = delay;
/* Dirty work! We must schedule wakeups based on
* real available rate, rather than leaf rate,
* which may be tiny (even zero).
*/
if (q->toplevel == TC_CBQ_MAXLEVEL) {
struct cbq_class *b;
psched_tdiff_t base_delay = q->wd_expires;
for (b = cl->borrow; b; b = b->borrow) {
delay = b->undertime - q->now;
if (delay < base_delay) {
if (delay <= 0)
delay = 1;
base_delay = delay;
}
}
q->wd_expires = base_delay;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 217 | 94.35% | 3 | 37.50% |
| patrick mchardy | 7 | 3.04% | 2 | 25.00% |
| stephen hemminger | 3 | 1.30% | 1 | 12.50% |
| eric dumazet | 2 | 0.87% | 1 | 12.50% |
| linus torvalds | 1 | 0.43% | 1 | 12.50% |
| Total | 230 | 100.00% | 8 | 100.00% |
/* TC_CBQ_OVL_RCLASSIC: penalize classes in the hierarchy by offtime
 * when they go overlimit
*/
static void cbq_ovl_rclassic(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *this = cl;
do {
if (cl->level > q->toplevel) {
cl = NULL;
break;
}
} while ((cl = cl->borrow) != NULL);
if (cl == NULL)
cl = this;
cbq_ovl_classic(cl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 75 | 96.15% | 3 | 75.00% |
| stephen hemminger | 3 | 3.85% | 1 | 25.00% |
| Total | 78 | 100.00% | 4 | 100.00% |
/* TC_CBQ_OVL_DELAY: delay until the class goes underlimit */
static void cbq_ovl_delay(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = cl->undertime - q->now;
if (test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(cl->qdisc)->state))
return;
if (!cl->delayed) {
psched_time_t sched = q->now;
ktime_t expires;
delay += cl->offtime;
if (cl->avgidle < 0)
delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
if (cl->avgidle < cl->minidle)
cl->avgidle = cl->minidle;
cl->undertime = q->now + delay;
if (delay > 0) {
sched += delay + cl->penalty;
cl->penalized = sched;
cl->cpriority = TC_CBQ_MAXPRIO;
q->pmask |= (1<<TC_CBQ_MAXPRIO);
expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
if (hrtimer_try_to_cancel(&q->delay_timer) &&
ktime_to_ns(ktime_sub(
hrtimer_get_expires(&q->delay_timer),
expires)) > 0)
hrtimer_set_expires(&q->delay_timer, expires);
hrtimer_restart(&q->delay_timer);
cl->delayed = 1;
cl->xstats.overactions++;
return;
}
delay = 1;
}
if (q->wd_expires == 0 || q->wd_expires > delay)
q->wd_expires = delay;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 188 | 70.41% | 3 | 23.08% |
| patrick mchardy | 29 | 10.86% | 3 | 23.08% |
| david s. miller | 20 | 7.49% | 2 | 15.38% |
| jarek poplawski | 19 | 7.12% | 2 | 15.38% |
| arjan van de ven | 7 | 2.62% | 1 | 7.69% |
| stephen hemminger | 3 | 1.12% | 1 | 7.69% |
| eric dumazet | 1 | 0.37% | 1 | 7.69% |
| Total | 267 | 100.00% | 13 | 100.00% |
/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
static void cbq_ovl_lowprio(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
cl->penalized = q->now + cl->penalty;
if (cl->cpriority != cl->priority2) {
cl->cpriority = cl->priority2;
q->pmask |= (1<<cl->cpriority);
cl->xstats.overactions++;
}
cbq_ovl_classic(cl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 73 | 92.41% | 2 | 50.00% |
| patrick mchardy | 3 | 3.80% | 1 | 25.00% |
| stephen hemminger | 3 | 3.80% | 1 | 25.00% |
| Total | 79 | 100.00% | 4 | 100.00% |
/* TC_CBQ_OVL_DROP: penalize class by dropping */
static void cbq_ovl_drop(struct cbq_class *cl)
{
if (cl->q->ops->drop)
if (cl->q->ops->drop(cl->q))
cl->qdisc->q.qlen--;
cl->xstats.overactions++;
cbq_ovl_classic(cl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 57 | 100.00% | 2 | 100.00% |
| Total | 57 | 100.00% | 2 | 100.00% |
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
psched_time_t now)
{
struct cbq_class *cl;
struct cbq_class *cl_prev = q->active[prio];
psched_time_t sched = now;
if (cl_prev == NULL)
return 0;
do {
cl = cl_prev->next_alive;
if (now - cl->penalized > 0) {
cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL;
cl->cpriority = cl->priority;
cl->delayed = 0;
cbq_activate_class(cl);
if (cl == q->active[prio]) {
q->active[prio] = cl_prev;
if (cl == q->active[prio]) {
q->active[prio] = NULL;
return 0;
}
}
cl = cl_prev->next_alive;
} else if (sched - cl->penalized > 0)
sched = cl->penalized;
} while ((cl_prev = cl) != q->active[prio]);
return sched - now;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 187 | 96.89% | 2 | 50.00% |
| patrick mchardy | 6 | 3.11% | 2 | 50.00% |
| Total | 193 | 100.00% | 4 | 100.00% |
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
delay_timer);
struct Qdisc *sch = q->watchdog.qdisc;
psched_time_t now;
psched_tdiff_t delay = 0;
unsigned int pmask;
now = psched_get_time();
pmask = q->pmask;
q->pmask = 0;
while (pmask) {
int prio = ffz(~pmask);
psched_tdiff_t tmp;
pmask &= ~(1<<prio);
tmp = cbq_undelay_prio(q, prio, now);
if (tmp > 0) {
q->pmask |= 1<<prio;
if (tmp < delay || delay == 0)
delay = tmp;
}
}
if (delay) {
ktime_t time;
time = ktime_set(0, 0);
time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
}
qdisc_unthrottled(sch);
__netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 121 | 62.05% | 3 | 23.08% |
| patrick mchardy | 62 | 31.79% | 2 | 15.38% |
| david s. miller | 6 | 3.08% | 4 | 30.77% |
| eric dumazet | 5 | 2.56% | 3 | 23.08% |
| jarek poplawski | 1 | 0.51% | 1 | 7.69% |
| Total | 195 | 100.00% | 13 | 100.00% |
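cbq_undelay() walks q->pmask by repeatedly taking ffz(~pmask), i.e. the
lowest set bit, so lower-numbered (higher-priority) bands are undelayed
first. A userspace sketch of that iteration (ffz_ is a stand-in built on
a GCC builtin, not the kernel's ffz):

#include <stdio.h>

/* Index of the first zero bit of x; first zero of x == first one of ~x. */
static int ffz_(unsigned long x)
{
        return __builtin_ctzl(~x);
}

int main(void)
{
        unsigned long pmask = (1UL << 1) | (1UL << 4) | (1UL << 6);
        while (pmask) {
                int prio = ffz_(~pmask);  /* lowest set bit of pmask */
                pmask &= ~(1UL << prio);
                printf("servicing prio %d\n", prio); /* 1, then 4, then 6 */
        }
        return 0;
}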
#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
struct Qdisc *sch = child->__parent;
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = q->rx_class;
q->rx_class = NULL;
if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
int ret;
cbq_mark_toplevel(q, cl);
q->rx_class = cl;
cl->q->__parent = sch;
ret = qdisc_enqueue(skb, cl->q);
if (ret == NET_XMIT_SUCCESS) {
sch->q.qlen++;
if (!cl->next_alive)
cbq_activate_class(cl);
return 0;
}
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
return 0;
}
qdisc_qstats_drop(sch);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 131 | 82.91% | 2 | 33.33% |
| jarek poplawski | 17 | 10.76% | 1 | 16.67% |
| john fastabend | 6 | 3.80% | 1 | 16.67% |
| stephen hemminger | 3 | 1.90% | 1 | 16.67% |
| jussi kivilinna | 1 | 0.63% | 1 | 16.67% |
| Total | 158 | 100.00% | 6 | 100.00% |
#endif
/*
* This is a mission-critical procedure.
*
* We "regenerate" the toplevel cutoff if the transmitting class
* has backlog and is not regulated. This is not part of the
* original CBQ description, but it looks more reasonable.
* It is probably wrong; the question needs further investigation.
*/
static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
struct cbq_class *borrowed)
{
if (cl && q->toplevel >= borrowed->level) {
if (cl->q->q.qlen > 1) {
do {
if (borrowed->undertime == PSCHED_PASTPERFECT) {
q->toplevel = borrowed->level;
return;
}
} while ((borrowed = borrowed->borrow) != NULL);
}
#if 0
/* It is not necessary now. Uncommenting it
will save CPU cycles, but decrease fairness.
*/
q->toplevel = TC_CBQ_MAXLEVEL;
#endif
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 89 | 96.74% | 2 | 50.00% |
| patrick mchardy | 2 | 2.17% | 1 | 25.00% |
| eric dumazet | 1 | 1.09% | 1 | 25.00% |
| Total | 92 | 100.00% | 4 | 100.00% |
static void
cbq_update(struct cbq_sched_data *q)
{
struct cbq_class *this = q->tx_class;
struct cbq_class *cl = this;
int len = q->tx_len;
psched_time_t now;
q->tx_class = NULL;
/* Time integrator. We calculate EOS time
* by adding expected packet transmission time.
*/
now = q->now + L2T(&q->link, len);
for ( ; cl; cl = cl->share) {
long avgidle = cl->avgidle;
long idle;
cl->bstats.packets++;
cl->bstats.bytes += len;
/*
* (now - last) is total time between packet right edges.
* (last_pktlen/rate) is "virtual" busy time, so that
*
* idle = (now - last) - last_pktlen/rate
*/
idle = now - cl->last;
if ((unsigned long)idle > 128*1024*1024) {
avgidle = cl->maxidle;
} else {
idle -= L2T(cl, len);
/* true_avgidle := (1-W)*true_avgidle + W*idle,
* where W=2^{-ewma_log}. But cl->avgidle is scaled:
* cl->avgidle == true_avgidle/W,
* hence:
*/
avgidle += idle - (avgidle>>cl->ewma_log);
}
if (avgidle <= 0) {
/* Overlimit or at-limit */
if (avgidle < cl->minidle)
avgidle = cl->minidle;
cl->avgidle = avgidle;
/* Calculate the expected time when this class
 * will be allowed to send.
 * It occurs when:
* (1-W)*true_avgidle + W*delay = 0, i.e.
* idle = (1/W - 1)*(-true_avgidle)
* or
* idle = (1 - W)*(-cl->avgidle);
*/
idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
/*
* That is not all.
* To maintain the rate allocated to the class,
* we add to undertime the virtual clock time
* necessary to complete the transmitted packet.
* (len/phys_bandwidth has already elapsed by
* the moment cbq_update runs.)
*/
idle -= L2T(&q->link, len);
idle += L2T(cl, len);
cl->undertime = now + idle;
} else {
/* Underlimit */
cl->undertime = PSCHED_PASTPERFECT;
if (avgidle > cl->maxidle)
cl->avgidle = cl->maxidle;
else
cl->avgidle = avgidle;
}
if ((s64)(now - cl->last) > 0)
cl->last = now;
}
cbq_update_toplevel(q, this, q->tx_borrowed);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 248 | 83.50% | 3 | 33.33% |
| vasily averin | 35 | 11.78% | 1 | 11.11% |
| patrick mchardy | 8 | 2.69% | 3 | 33.33% |
| eric dumazet | 4 | 1.35% | 1 | 11.11% |
| thomas graf | 2 | 0.67% | 1 | 11.11% |
| Total | 297 | 100.00% | 9 | 100.00% |
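The scaled EWMA in cbq_update() stores avgidle = true_avgidle/W with
W = 2^-ewma_log, which turns the update
true_avgidle = (1-W)*true_avgidle + W*idle into the shift-only form
avgidle += idle - (avgidle >> ewma_log). A quick numeric check of that
equivalence (a sketch; the constants are arbitrary):

#include <stdio.h>

int main(void)
{
        const int ewma_log = 5;   /* W = 1/32 */
        long avgidle = 3200;      /* scaled; true_avgidle = 3200/32 = 100 */
        long idle = 64;

        /* Shift-only update actually used by the kernel code above. */
        long scaled = avgidle + idle - (avgidle >> ewma_log);

        /* The textbook EWMA, rescaled back by 1/W for comparison. */
        double W = 1.0 / (1 << ewma_log);
        double true_avg = (1.0 - W) * (avgidle * W) + W * idle;

        printf("scaled=%ld  rescaled=%.0f\n", scaled, true_avg / W);
        return 0;                 /* both print 3164 */
}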
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *this_cl = cl;
if (cl->tparent == NULL)
return cl;
if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
cl->delayed = 0;
return cl;
}
do {
/* This is a very suspicious place. Currently the overlimit
 * action is generated for non-bounded classes
 * only if the link is completely congested.
 * Though this agrees with the ancestor-only paradigm,
 * it looks very stupid. In particular,
 * it means that this chunk of code will either
 * never be called or will strongly amplify
 * burstiness. Dangerous, silly, and yet
 * no other solution exists.
*/
cl = cl->borrow;
if (!cl) {
this_cl->qstats.overlimits++;
this_cl->overlimit(this_cl);
return NULL;
}
if (cl->level > q->toplevel)
return NULL;
} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
cl->delayed = 0;
return cl;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 109 | 75.69% | 2 | 28.57% |
| eric dumazet | 25 | 17.36% | 1 | 14.29% |
| patrick mchardy | 6 | 4.17% | 2 | 28.57% |
| stephen hemminger | 3 | 2.08% | 1 | 14.29% |
| thomas graf | 1 | 0.69% | 1 | 14.29% |
| Total | 144 | 100.00% | 7 | 100.00% |
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl_tail, *cl_prev, *cl;
struct sk_buff *skb;
int deficit;
cl_tail = cl_prev = q->active[prio];
cl = cl_prev->next_alive;
do {
deficit = 0;
/* Start round */
do {
struct cbq_class *borrow = cl;
if (cl->q->q.qlen &&
(borrow = cbq_under_limit(cl)) == NULL)
goto skip_class;
if (cl->deficit <= 0) {
/* Class exhausted its allotment per
* this round. Switch to the next one.
*/
deficit = 1;
cl->deficit += cl->quantum;
goto next_class;
}
skb = cl->q->dequeue(cl->q);
/* The class did not give us any skb :-(
 * This can occur even if cl->q->q.qlen != 0,
 * e.g. if cl->q == "tbf"
*/
if (skb == NULL)
goto skip_class;
cl->deficit -= qdisc_pkt_len(skb);
q->tx_class = cl;
q->tx_borrowed = borrow;
if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
borrow->xstats.borrows++;
cl->xstats.borrows++;
#else
borrow->xstats.borrows += qdisc_pkt_len(skb);
cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
}
q->tx_len = qdisc_pkt_len(skb);
if (cl->deficit <= 0) {
q->active[prio] = cl;
cl = cl->next_alive;
cl->deficit += cl->quantum;
}
return skb;
skip_class:
if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
/* Class is empty or penalized.
* Unlink it from active chain.
*/
cl_prev->next_alive = cl->next_alive;
cl->next_alive = NULL;
/* Did cl_tail point to it? */
if (cl == cl_tail) {
/* Repair it! */
cl_tail = cl_prev;
/* Was it the last class in this band? */
if (cl == cl_tail) {
/* Kill the band! */
q->active[prio] = NULL;
q->activemask &= ~(1<<prio);
if (cl->q->q.qlen)
cbq_activate_class(cl);
return NULL;
}
q->active[prio] = cl_tail;
}
if (cl->q->q.qlen)
cbq_activate_class(cl);
cl = cl_prev;
}
next_class:
cl_prev = cl;
cl = cl->next_alive;
} while (cl_prev != cl_tail);
} while (deficit);
q->active[prio] = cl_prev;
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 414 | 95.39% | 3 | 50.00% |
| jussi kivilinna | 12 | 2.76% | 1 | 16.67% |
| eric dumazet | 5 | 1.15% | 1 | 16.67% |
| stephen hemminger | 3 | 0.69% | 1 | 16.67% |
| Total | 434 | 100.00% | 6 | 100.00% |
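The inner loop above is a deficit-round-robin variant: a class spends its
deficit on packets and, once the deficit goes non-positive, is recharged
with its quantum while the scheduler moves on to the next class. A toy
two-class model of that accounting (packet sizes and quanta are made up):

#include <stdio.h>

int main(void)
{
        long deficit[2] = { 0, 0 }, quantum[2] = { 1500, 3000 };
        int pkts[2] = { 4, 4 };   /* 1000-byte packets queued per class */
        int cl = 0;

        while (pkts[0] || pkts[1]) {
                if (!pkts[cl]) {          /* empty class: skip it */
                        cl ^= 1;
                        continue;
                }
                if (deficit[cl] <= 0) {   /* allotment exhausted: recharge */
                        deficit[cl] += quantum[cl];
                        cl ^= 1;          /* next class in the round */
                        continue;
                }
                deficit[cl] -= 1000;      /* "send" one packet */
                pkts[cl]--;
                printf("sent 1000B from class %d (deficit now %ld)\n",
                       cl, deficit[cl]);
        }
        return 0;   /* class 1 gets roughly twice class 0's share */
}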
static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
unsigned int activemask;
activemask = q->activemask & 0xFF;
while (activemask) {
int prio = ffz(~activemask);
activemask &= ~(1<<prio);
skb = cbq_dequeue_prio(sch, prio);
if (skb)
return skb;
}
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 60 | 71.43% | 1 | 33.33% |
| eric dumazet | 21 | 25.00% | 1 | 33.33% |
| stephen hemminger | 3 | 3.57% | 1 | 33.33% |
| Total | 84 | 100.00% | 3 | 100.00% |
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
struct sk_buff *skb;
struct cbq_sched_data *q = qdisc_priv(sch);
psched_time_t now;
now = psched_get_time();
if (q->tx_class)
cbq_update(q);
q->now = now;
for (;;) {
q->wd_expires = 0;
skb = cbq_dequeue_1(sch);
if (skb) {
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
qdisc_unthrottled(sch);
return skb;
}
/* All the classes are overlimit.
*
* This is possible if:
*
* 1. The scheduler is empty.
* 2. The toplevel cutoff inhibited borrowing.
* 3. The root class is overlimit.
*
* Reset the 2nd and 3rd conditions and retry.
*
* Note that NS and cbq-2.0 are buggy here: peeking at
* an arbitrary class is appropriate for ancestor-only
* sharing, but not for the toplevel algorithm.
*
* Our version is better but slower, because it requires
* two passes; that is unavoidable with top-level sharing.
*/
if (q->toplevel == TC_CBQ_MAXLEVEL &&
q->link.undertime == PSCHED_PASTPERFECT)
break;
q->toplevel = TC_CBQ_MAXLEVEL;
q->link.undertime = PSCHED_PASTPERFECT;
}
/* No packets in the scheduler, or nobody wants to give them to us :-(
 * Sigh... start the watchdog timer in the latter case.
*/
if (sch->q.qlen) {
qdisc_qstats_overlimit(sch);
if (q->wd_expires)
qdisc_watchdog_schedule(&q->watchdog,
now + q->wd_expires);
}
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 131 | 76.16% | 2 | 16.67% |
| patrick mchardy | 13 | 7.56% | 3 | 25.00% |
| eric dumazet | 13 | 7.56% | 3 | 25.00% |
| vasily averin | 6 | 3.49% | 1 | 8.33% |
| john fastabend | 3 | 1.74% | 1 | 8.33% |
| vinay k nallamothu | 3 | 1.74% | 1 | 8.33% |
| stephen hemminger | 3 | 1.74% | 1 | 8.33% |
| Total | 172 | 100.00% | 12 | 100.00% |
/* CBQ class maintenance routines */
static void cbq_adjust_levels(struct cbq_class *this)
{
if (this == NULL)
return;
do {
int level = 0;
struct cbq_class *cl;
cl = this->children;
if (cl) {
do {
if (cl->level > level)
level = cl->level;
} while ((cl = cl->sibling) != this->children);
}
this->level = level + 1;
} while ((this = this->tparent) != NULL);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 92 | 95.83% | 1 | 50.00% |
| eric dumazet | 4 | 4.17% | 1 | 50.00% |
| Total | 96 | 100.00% | 2 | 100.00% |
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
struct cbq_class *cl;
unsigned int h;
if (q->quanta[prio] == 0)
return;
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
/* BUGGGG... Beware! This expression suffers from
 * arithmetic overflow!
*/
if (cl->priority == prio) {
cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
q->quanta[prio];
}
if (cl->quantum <= 0 ||
cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
cl->common.classid, cl->quantum);
cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
}
}
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 136 | 85.00% | 2 | 33.33% |
| patrick mchardy | 16 | 10.00% | 1 | 16.67% |
| david s. miller | 6 | 3.75% | 1 | 16.67% |
| yang yingliang | 1 | 0.62% | 1 | 16.67% |
| eric dumazet | 1 | 0.62% | 1 | 16.67% |
| Total | 160 | 100.00% | 6 | 100.00% |
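A worked example of the normalization formula
quantum = weight * allot * nclasses[prio] / quanta[prio], where
quanta[prio] is the sum of the weights in the band, so each class's quantum
scales its allot by its weight relative to the band average (the values
below are made up):

#include <stdio.h>

int main(void)
{
        long allot = 1514;
        long weights[] = { 1, 2, 5 };
        long quanta = 1 + 2 + 5;  /* sum of weights in this band */
        int nclasses = 3;

        for (int i = 0; i < 3; i++)
                printf("class %d quantum = %ld\n", i,
                       weights[i] * allot * nclasses / quanta);
        return 0;                 /* 567, 1135, 2838 bytes per round */
}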
static void cbq_sync_defmap(struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
struct cbq_class *split = cl->split;
unsigned int h;
int i;
if (split == NULL)
return;
for (i = 0; i <= TC_PRIO_MAX; i++) {
if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
split->defaults[i] = NULL;
}
for (i = 0; i <= TC_PRIO_MAX; i++) {
int level = split->level;
if (split->defaults[i])
continue;
for (h = 0; h < q->clhash.hashsize; h++) {
struct cbq_class *c;
hlist_for_each_entry(c, &q->clhash.hash[h],
common.hnode) {
if (c->split == split && c->level < level &&
c->defmap & (1<<i)) {
split->defaults[i] = c;
level = c->level;
}
}
}
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 184 | 91.54% | 1 | 25.00% |
| patrick mchardy | 13 | 6.47% | 1 | 25.00% |
| stephen hemminger | 3 | 1.49% | 1 | 25.00% |
| eric dumazet | 1 | 0.50% | 1 | 25.00% |
| Total | 201 | 100.00% | 4 | 100.00% |
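A toy restatement of the defmap synchronization above: for each TC_PRIO_*
value, the split node's default class becomes the lowest-level class whose
defmap bit for that priority is set (the class names and levels below are
made up):

#include <stdio.h>

#define TC_PRIO_MAX 15

struct cls { const char *name; int level; unsigned int defmap; };

int main(void)
{
        struct cls a = { "1:10", 0, (1 << 0) | (1 << 2) };
        struct cls b = { "1:20", 1, (1 << 0) | (1 << 3) };
        struct cls *classes[] = { &a, &b };
        struct cls *defaults[TC_PRIO_MAX + 1] = { 0 };

        for (int i = 0; i <= TC_PRIO_MAX; i++) {
                int level = 99;   /* stand-in for split->level */
                for (int c = 0; c < 2; c++)
                        if ((classes[c]->defmap & (1 << i)) &&
                            classes[c]->level < level) {
                                defaults[i] = classes[c];
                                level = classes[c]->level;
                        }
                if (defaults[i])  /* prio 0,2 -> 1:10; prio 3 -> 1:20 */
                        printf("prio %2d -> %s\n", i, defaults[i]->name);
        }
        return 0;
}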
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
struct cbq_class *split = NULL;
if (splitid == 0) {
split = cl->split;
if (!split)
return;
splitid = split->common.classid;
}
if (split == NULL || split->common.classid != splitid) {
for (split = cl->tparent; split; split = split->tparent)
if (split->common.classid == splitid)
break;
}
if (split == NULL)
return;
if (cl->split != split) {
cl->defmap = 0;
cbq_sync_defmap(cl);
cl->split = split;
cl->defmap = def & mask;
} else
cl->defmap = (cl->defmap & ~mask) | (def & mask);
cbq_sync_defmap(cl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 154 | 93.33% | 1 | 33.33% |
| patrick mchardy | 6 | 3.64% | 1 | 33.33% |
| eric dumazet | 5 | 3.03% | 1 | 33.33% |
| Total | 165 | 100.00% | 3 | 100.00% |
static void cbq_unlink_class(struct cbq_class *this)
{
struct cbq_class *cl, **clp;
struct cbq_sched_data *q = qdisc_priv(this->qdisc);
qdisc_class_hash_remove(&q->clhash, &this->common);
if (this->tparent) {
clp = &this->sibling;
cl = *clp;
do {
if (cl == this) {
*clp = cl->sibling;
break;
}
clp = &cl->sibling;
} while ((cl = *clp) != this->sibling);
if (this->tparent->children == this) {
this->tparent->children = this->sibling;
if (this->sibling == this)
this->tparent->children = NULL;
}
} else {
WARN_ON(this->sibling != this);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 144 | 92.90% | 1 | 25.00% |
| patrick mchardy | 6 | 3.87% | 1 | 25.00% |
| stephen hemminger | 3 | 1.94% | 1 | 25.00% |
| ilpo jarvinen | 2 | 1.29% | 1 | 25.00% |
| Total | 155 | 100.00% | 4 | 100.00% |
static void cbq_link_class(struct cbq_class *this)
{
struct cbq_sched_data *q = qdisc_priv(this->qdisc);
struct cbq_class *parent = this->tparent;
this->sibling = this;
qdisc_class_hash_insert(&q->clhash, &this->common);
if (parent == NULL)
return;
if (parent->children == NULL) {
parent->children = this;
} else {
this->sibling = parent->children->sibling;
parent->children->sibling = this;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 83 | 87.37% | 2 | 50.00% |
| patrick mchardy | 9 | 9.47% | 1 | 25.00% |
| stephen hemminger | 3 | 3.16% | 1 | 25.00% |
| Total | 95 | 100.00% | 4 | 100.00% |
static unsigned int cbq_drop(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl, *cl_head;
int prio;
unsigned int len;
for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
cl_head = q->active[prio];
if (!cl_head)
continue;
cl = cl_head;
do {
if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
sch->q.qlen--;
if (!cl->q->q.qlen)
cbq_deactivate_class(cl);
return len;
}
} while ((cl = cl->next_alive) != cl_head);
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 102 | 70.34% | 3 | 37.50% |
| jarek poplawski | 16 | 11.03% | 1 | 12.50% |
| dmitry torokhov | 10 | 6.90% | 1 | 12.50% |
| linus torvalds | 9 | 6.21% | 1 | 12.50% |
| eric dumazet | 5 | 3.45% | 1 | 12.50% |
| stephen hemminger | 3 | 2.07% | 1 | 12.50% |
| Total | 145 | 100.00% | 8 | 100.00% |
static void
cbq_reset(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
int prio;
unsigned int h;
q->activemask = 0;
q->pmask = 0;
q->tx_class = NULL;
q->tx_borrowed = NULL;
qdisc_watchdog_cancel(&q->watchdog);
hrtimer_cancel(&q->delay_timer);
q->toplevel = TC_CBQ_MAXLEVEL;
q->now = psched_get_time();
for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
q->active[prio] = NULL;
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
qdisc_reset(cl->q);
cl->next_alive = NULL;
cl->undertime = PSCHED_PASTPERFECT;
cl->avgidle = cl->maxidle;
cl->deficit = cl->quantum;
cl->cpriority = cl->priority;
}
}
sch->q.qlen = 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 163 | 86.70% | 3 | 30.00% |
| patrick mchardy | 20 | 10.64% | 4 | 40.00% |
| stephen hemminger | 3 | 1.60% | 1 | 10.00% |
| david s. miller | 1 | 0.53% | 1 | 10.00% |
| eric dumazet | 1 | 0.53% | 1 | 10.00% |
| Total | 188 | 100.00% | 10 | 100.00% |
static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
if (lss->change & TCF_CBQ_LSS_FLAGS) {
cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
}
if (lss->change & TCF_CBQ_LSS_EWMA)
cl->ewma_log = lss->ewma_log;
if (lss->change & TCF_CBQ_LSS_AVPKT)
cl->avpkt = lss->avpkt;
if (lss->change & TCF_CBQ_LSS_MINIDLE)
cl->minidle = -(long)lss->minidle;
if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
cl->maxidle = lss->maxidle;
cl->avgidle = lss->maxidle;
}
if (lss->change & TCF_CBQ_LSS_OFFTIME)
cl->offtime = lss->offtime;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 159 | 100.00% | 2 | 100.00% |
| Total | 159 | 100.00% | 2 | 100.00% |
static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
q->nclasses[cl->priority]--;
q->quanta[cl->priority] -= cl->weight;
cbq_normalize_quanta(q, cl->priority);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 48 | 100.00% | 1 | 100.00% |
| Total | 48 | 100.00% | 1 | 100.00% |
static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
q->nclasses[cl->priority]++;
q->quanta[cl->priority] += cl->weight;
cbq_normalize_quanta(q, cl->priority);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 48 | 100.00% | 1 | 100.00% |
| Total | 48 | 100.00% | 1 | 100.00% |
static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
if (wrr->allot)
cl->allot = wrr->allot;
if (wrr->weight)
cl->weight = wrr->weight;
if (wrr->priority) {
cl->priority = wrr->priority - 1;
cl->cpriority = cl->priority;
if (cl->priority >= cl->priority2)
cl->priority2 = TC_CBQ_MAXPRIO - 1;
}
cbq_addprio(q, cl);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 107 | 97.27% | 1 | 50.00% |
| stephen hemminger | 3 | 2.73% | 1 | 50.00% |
| Total | 110 | 100.00% | 2 | 100.00% |
static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
switch (ovl->strategy) {
case TC_CBQ_OVL_CLASSIC:
cl->overlimit = cbq_ovl_classic;
break;
case TC_CBQ_OVL_DELAY:
cl->overlimit = cbq_ovl_delay;
break;
case TC_CBQ_OVL_LOWPRIO:
if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
ovl->priority2 - 1 <= cl->priority)
return -EINVAL;
cl->priority2 = ovl->priority2 - 1;
cl->overlimit = cbq_ovl_lowprio;
break;
case TC_CBQ_OVL_DROP:
cl->overlimit = cbq_ovl_drop;
break;
case TC_CBQ_OVL_RCLASSIC:
cl->overlimit = cbq_ovl_rclassic;
break;
default:
return -EINVAL;
}
cl->penalty = ovl->penalty;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 124 | 100.00% | 1 | 100.00% |
| Total | 124 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
cl->police = p->police;
if (cl->q->handle) {
if (p->police == TC_POLICE_RECLASSIFY)
cl->q->reshape_fail = cbq_reshape_fail;
else
cl->q->reshape_fail = NULL;
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 62 | 100.00% | 2 | 100.00% |
| Total | 62 | 100.00% | 2 | 100.00% |
#endif
static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 36 | 100.00% | 1 | 100.00% |
| Total | 36 | 100.00% | 1 | 100.00% |
static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
[TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
[TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
[TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
[TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
[TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
[TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
[TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
};
static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_CBQ_MAX + 1];
struct tc_ratespec *r;
int err;
err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
if (err < 0)
return err;
if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
return -EINVAL;
r = nla_data(tb[TCA_CBQ_RATE]);
if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
return -EINVAL;
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
goto put_rtab;
q->link.refcnt = 1;
q->link.sibling = &q->link;
q->link.common.classid = sch->handle;
q->link.qdisc = sch;
q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
sch->handle);
if (!q->link.q)
q->link.q = &noop_qdisc;
q->link.priority = TC_CBQ_MAXPRIO - 1;
q->link.priority2 = TC_CBQ_MAXPRIO - 1;
q->link.cpriority = TC_CBQ_MAXPRIO - 1;
q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
q->link.overlimit = cbq_ovl_classic;
q->link.allot = psched_mtu(qdisc_dev(sch));
q->link.quantum = q->link.allot;
q->link.weight = q->link.R_tab->rate.rate;
q->link.ewma_log = TC_CBQ_DEF_EWMA;
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
qdisc_watchdog_init(&q->watchdog, sch);
hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
q->delay_timer.function = cbq_undelay;
q->toplevel = TC_CBQ_MAXLEVEL;
q->now = psched_get_time();
cbq_link_class(&q->link);
if (tb[TCA_CBQ_LSSOPT])
cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
cbq_addprio(q, &q->link);
return 0;
put_rtab:
qdisc_put_rtab(q->link.R_tab);
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 344 | 78.54% | 3 | 17.65% |
| patrick mchardy | 73 | 16.67% | 8 | 47.06% |
| changli gao | 9 | 2.05% | 1 | 5.88% |
| david s. miller | 8 | 1.83% | 3 | 17.65% |
| stephen hemminger | 3 | 0.68% | 1 | 5.88% |
| eric dumazet | 1 | 0.23% | 1 | 5.88% |
| Total | 438 | 100.00% | 17 | 100.00% |
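For reference, a typical configuration that exercises this init path and
cbq_change_class() below, along the lines of the examples in the LARTC
documentation (the device name and rates are illustrative):

# Root qdisc: 10 Mbit link, average packet 1000 bytes
tc qdisc add dev eth0 root handle 1: cbq bandwidth 10Mbit avpkt 1000

# A bounded 1 Mbit leaf class under the link class
tc class add dev eth0 parent 1: classid 1:1 cbq bandwidth 10Mbit \
        rate 1Mbit allot 1514 prio 5 bounded avpkt 1000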
static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 60 | 83.33% | 1 | 20.00% |
| david s. miller | 7 | 9.72% | 1 | 20.00% |
| arnaldo carvalho de melo | 4 | 5.56% | 2 | 40.00% |
| patrick mchardy | 1 | 1.39% | 1 | 20.00% |
| Total | 72 | 100.00% | 5 | 100.00% |
static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_lssopt opt;
opt.flags = 0;
if (cl->borrow == NULL)
opt.flags |= TCF_CBQ_LSS_BOUNDED;
if (cl->share == NULL)
opt.flags |= TCF_CBQ_LSS_ISOLATED;
opt.ewma_log = cl->ewma_log;
opt.level = cl->level;
opt.avpkt = cl->avpkt;
opt.maxidle = cl->maxidle;
opt.minidle = (u32)(-cl->minidle);
opt.offtime = cl->offtime;
opt.change = ~0;
if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 151 | 92.64% | 1 | 20.00% |
| david s. miller | 7 | 4.29% | 1 | 20.00% |
| arnaldo carvalho de melo | 4 | 2.45% | 2 | 40.00% |
| patrick mchardy | 1 | 0.61% | 1 | 20.00% |
| Total | 163 | 100.00% | 5 | 100.00% |
static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt;
memset(&opt, 0, sizeof(opt));
opt.flags = 0;
opt.allot = cl->allot;
opt.priority = cl->priority + 1;
opt.cpriority = cl->cpriority + 1;
opt.weight = cl->weight;
if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 98 | 79.67% | 1 | 16.67% |
| david s. miller | 20 | 16.26% | 2 | 33.33% |
| arnaldo carvalho de melo | 4 | 3.25% | 2 | 33.33% |
| patrick mchardy | 1 | 0.81% | 1 | 16.67% |
| Total | 123 | 100.00% | 6 | 100.00% |
static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_ovl opt;
opt.strategy = cl->ovl_strategy;
opt.priority2 = cl->priority2 + 1;
opt.pad = 0;
opt.penalty = cl->penalty;
if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
goto nla_put_failure;
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 82 | 82.00% | 1 | 16.67% |
| patrick mchardy | 7 | 7.00% | 2 | 33.33% |
| david s. miller | 7 | 7.00% | 1 | 16.67% |
| arnaldo carvalho de melo | 4 | 4.00% | 2 | 33.33% |
| Total | 100 | 100.00% | 6 | 100.00% |
static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_fopt opt;
if (cl->split || cl->defmap) {
opt.split = cl->split ? cl->split->common.classid : 0;
opt.defmap = cl->defmap;
opt.defchange = ~0;
if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
goto nla_put_failure;
}
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 99 | 87.61% | 1 | 16.67% |
| david s. miller | 7 | 6.19% | 1 | 16.67% |
| arnaldo carvalho de melo | 4 | 3.54% | 2 | 33.33% |
| patrick mchardy | 3 | 2.65% | 2 | 33.33% |
| Total | 113 | 100.00% | 6 | 100.00% |
#ifdef CONFIG_NET_CLS_ACT
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_police opt;
if (cl->police) {
opt.police = cl->police;
opt.__res1 = 0;
opt.__res2 = 0;
if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
goto nla_put_failure;
}
return skb->len;
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 72 | 75.00% | 2 | 28.57% |
| patrick mchardy | 13 | 13.54% | 2 | 28.57% |
| david s. miller | 7 | 7.29% | 1 | 14.29% |
| arnaldo carvalho de melo | 4 | 4.17% | 2 | 28.57% |
| Total | 96 | 100.00% | 7 | 100.00% |
#endif
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
if (cbq_dump_lss(skb, cl) < 0 ||
cbq_dump_rate(skb, cl) < 0 ||
cbq_dump_wrr(skb, cl) < 0 ||
cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
cbq_dump_police(skb, cl) < 0 ||
#endif
cbq_dump_fopt(skb, cl) < 0)
return -1;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 81 | 96.43% | 1 | 33.33% |
| patrick mchardy | 3 | 3.57% | 2 | 66.67% |
| Total | 84 | 100.00% | 3 | 100.00% |
static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct nlattr *nest;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (cbq_dump_attr(skb, &q->link) < 0)
goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 51 | 58.62% | 2 | 25.00% |
| patrick mchardy | 20 | 22.99% | 2 | 25.00% |
| thomas graf | 10 | 11.49% | 1 | 12.50% |
| stephen hemminger | 3 | 3.45% | 1 | 12.50% |
| arnaldo carvalho de melo | 2 | 2.30% | 1 | 12.50% |
| yang yingliang | 1 | 1.15% | 1 | 12.50% |
| Total | 87 | 100.00% | 8 | 100.00% |
static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct cbq_sched_data *q = qdisc_priv(sch);
q->link.xstats.avgidle = q->link.avgidle;
return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| thomas graf | 34 | 54.84% | 1 | 33.33% |
| pre-git | 28 | 45.16% | 2 | 66.67% |
| Total | 62 | 100.00% | 3 | 100.00% |
static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct cbq_class *cl = (struct cbq_class *)arg;
struct nlattr *nest;
if (cl->tparent)
tcm->tcm_parent = cl->tparent->common.classid;
else
tcm->tcm_parent = TC_H_ROOT;
tcm->tcm_handle = cl->common.classid;
tcm->tcm_info = cl->q->handle;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
if (cbq_dump_attr(skb, cl) < 0)
goto nla_put_failure;
return nla_nest_end(skb, nest);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 101 | 72.14% | 2 | 25.00% |
| patrick mchardy | 24 | 17.14% | 3 | 37.50% |
| thomas graf | 12 | 8.57% | 1 | 12.50% |
| arnaldo carvalho de melo | 2 | 1.43% | 1 | 12.50% |
| yang yingliang | 1 | 0.71% | 1 | 12.50% |
| Total | 140 | 100.00% | 8 | 100.00% |
static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct gnet_dump *d)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
cl->xstats.avgidle = cl->avgidle;
cl->xstats.undertime = 0;
if (cl->undertime != PSCHED_PASTPERFECT)
cl->xstats.undertime = cl->undertime - q->now;
if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| thomas graf | 78 | 49.06% | 1 | 11.11% |
| pre-git | 61 | 38.36% | 2 | 22.22% |
| john fastabend | 12 | 7.55% | 3 | 33.33% |
| eric dumazet | 5 | 3.14% | 1 | 11.11% |
| patrick mchardy | 3 | 1.89% | 2 | 22.22% |
| Total | 159 | 100.00% | 9 | 100.00% |
static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old)
{
struct cbq_class *cl = (struct cbq_class *)arg;
if (new == NULL) {
new = qdisc_create_dflt(sch->dev_queue,
&pfifo_qdisc_ops, cl->common.classid);
if (new == NULL)
return -ENOBUFS;
} else {
#ifdef CONFIG_NET_CLS_ACT
if (cl->police == TC_POLICE_RECLASSIFY)
new->reshape_fail = cbq_reshape_fail;
#endif
}
*old = qdisc_replace(sch, new, &cl->q);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 87 | 76.32% | 2 | 20.00% |
| patrick mchardy | 17 | 14.91% | 6 | 60.00% |
| americo wang | 6 | 5.26% | 1 | 10.00% |
| david s. miller | 4 | 3.51% | 1 | 10.00% |
| Total | 114 | 100.00% | 10 | 100.00% |
static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
struct cbq_class *cl = (struct cbq_class *)arg;
return cl->q;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 34 | 100.00% | 1 | 100.00% |
| Total | 34 | 100.00% | 1 | 100.00% |
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
struct cbq_class *cl = (struct cbq_class *)arg;
if (cl->q->q.qlen == 0)
cbq_deactivate_class(cl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jarek poplawski | 44 | 100.00% | 1 | 100.00% |
| Total | 44 | 100.00% | 1 | 100.00% |
static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
cl->refcnt++;
return (unsigned long)cl;
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 55 | 94.83% | 1 | 50.00% |
| stephen hemminger | 3 | 5.17% | 1 | 50.00% |
| Total | 58 | 100.00% | 2 | 100.00% |
static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
struct cbq_sched_data *q = qdisc_priv(sch);
WARN_ON(cl->filters);
tcf_destroy_chain(&cl->filter_list);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
gen_kill_estimator(&cl->bstats, &cl->rate_est);
if (cl != &q->link)
kfree(cl);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 40 | 48.78% | 2 | 25.00% |
| patrick mchardy | 28 | 34.15% | 3 | 37.50% |
| thomas graf | 13 | 15.85% | 2 | 25.00% |
| ilpo jarvinen | 1 | 1.22% | 1 | 12.50% |
| Total | 82 | 100.00% | 8 | 100.00% |
static void cbq_destroy(struct Qdisc *sch)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct hlist_node *next;
struct cbq_class *cl;
unsigned int h;
#ifdef CONFIG_NET_CLS_ACT
q->rx_class = NULL;
#endif
/*
* Filters must be destroyed first because we don't destroy the
* classes from root to leaves, which means that filters can still
* be bound to classes which have been destroyed already. --TGR '04
*/
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
tcf_destroy_chain(&cl->filter_list);
}
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
common.hnode)
cbq_destroy_class(sch, cl);
}
qdisc_class_hash_destroy(&q->clhash);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 52 | 41.27% | 7 | 53.85% |
| pre-git | 51 | 40.48% | 2 | 15.38% |
| thomas graf | 17 | 13.49% | 1 | 7.69% |
| stephen hemminger | 3 | 2.38% | 1 | 7.69% |
| linus torvalds | 2 | 1.59% | 1 | 7.69% |
| eric dumazet | 1 | 0.79% | 1 | 7.69% |
| Total | 126 | 100.00% | 13 | 100.00% |
static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
struct cbq_class *cl = (struct cbq_class *)arg;
if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
struct cbq_sched_data *q = qdisc_priv(sch);
spin_lock_bh(root_lock);
if (q->rx_class == cl)
q->rx_class = NULL;
spin_unlock_bh(root_lock);
#endif
cbq_destroy_class(sch, cl);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pre-git | 74 | 79.57% | 4 | 40.00% |
| david s. miller | 10 | 10.75% | 1 | 10.00% |
| patrick mchardy | 5 | 5.38% | 3 | 30.00% |
| stephen hemminger | 3 | 3.23% | 1 | 10.00% |
| jarek poplawski | 1 | 1.08% | 1 | 10.00% |
| Total | 93 | 100.00% | 10 | 100.00% |
static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
unsigned long *arg)
{
int err;
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)*arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_CBQ_MAX + 1];
struct cbq_class *parent;
struct qdisc_rate_table *rtab = NULL;
if (opt == NULL)
return -EINVAL;
err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
if (err < 0)
return err;
if (cl) {
/* Check parent */
if (parentid) {
if (cl->tparent &&
cl->tparent->common.classid != parentid)
return -EINVAL;
if (!cl->tparent && parentid != TC_H_ROOT)
return -EINVAL;
}
if (tb[TCA_CBQ_RATE]) {
rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
tb[TCA_CBQ_RTAB]);
if (rtab == NULL)
return -EINVAL;
}
if (tca[TCA_RATE]) {
err = gen_replace_estimator(&cl->bstats, NULL,
&cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
qdisc_put_rtab(rtab);
return err;
}
}
/* Change class parameters */
sch_tree_lock(sch);
if (cl->next_alive != NULL)
cbq_deactivate_class(cl);
if (rtab) {
qdisc_put_rtab(cl->R_tab);
cl->R_tab = rtab;
}
if (tb[TCA_CBQ_LSSOPT])
cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
if (tb[TCA_CBQ_WRROPT]) {
cbq_rmprio(q, cl);
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
}
if (tb[TCA_CBQ_OVL_STRATEGY])
cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
if (tb[TCA_CBQ_POLICE])
cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
if (tb[TCA_CBQ_FOPT])
cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
if (cl->q->q.qlen)
cbq_activate_class(cl);
sch_tree_unlock(sch);
return 0;
}
if (parentid == TC_H_ROOT)
return -EINVAL;
if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
tb[TCA_CBQ_LSSOPT] == NULL)
return -EINVAL;
rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
if (rtab == NULL)
return -EINVAL;
if (classid) {
err = -EINVAL;
if (TC_H_MAJ(classid ^ sch->handle) ||
cbq_class_lookup(q, classid))
goto failure;
} else {
int i;
classid = TC_H_MAKE(sch->handle, 0x8000);
for (i = 0; i < 0x8000; i++) {
if (++q->hgenerator >= 0x8000)
q->hgenerator = 1;
if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
break;
}
err = -ENOSR;
if (i >= 0x8000)
goto failure;
classid = classid|q->hgenerator;
}
parent = &q->link;
if (parentid) {
parent = cbq_class_lookup(q, parentid);
err = -EINVAL;
if (parent == NULL)
goto failure;
}
err = -ENOBUFS;
cl = kzalloc(sizeof(*cl), GFP_KERNEL);
if (cl == NULL)
goto failure;
if (tca[TCA_RATE]) {
err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
kfree(cl);
goto failure;
}
}
cl->R_tab = rtab;
rtab = NULL;
cl->refcnt = 1;
cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
if (!cl->q)
cl->q = &noop_qdisc;
cl->common.classid = classid;
cl->tparent = parent;
cl->qdisc = sch;
cl->allot = parent->allot;
cl->quantum = cl->allot;
cl->weight = cl->R_tab->rate.rate;
sch_tree_lock(sch);
cbq_link_class(cl);
cl->borrow = cl->tparent;
if (cl->tparent != &q->link)
cl->share = cl->tparent;
cbq_adjust_levels(parent);
cl->minidle = -0x7FFFFFFF;
cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
if (cl->ewma_log == 0)
cl->ewma_log = q->link.ewma_log;
if (cl->maxidle == 0)
cl->maxidle = q->link.maxidle;
if (cl->avpkt == 0)
cl->avpkt = q->link.avpkt;
cl->overlimit = cbq_ovl_classic;
if (tb[TCA_CBQ_OVL_STRATEGY])
cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
if (tb[TCA_CBQ_POLICE])
cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
if (tb[TCA_CBQ_FOPT])
cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
sch_tree_unlock(sch);
qdisc_class_hash_grow(sch, &q->clhash);
*arg = (unsigned long)cl;
return 0;
failure:
qdisc_put_rtab(rtab);
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 878 | 83.30% | 5 | 26.32% |
| stephen hemminger | 99 | 9.39% | 2 | 10.53% |
| patrick mchardy | 61 | 5.79% | 8 | 42.11% |
| changli gao | 7 | 0.66% | 1 | 5.26% |
| john fastabend | 4 | 0.38% | 1 | 5.26% |
| david s. miller | 4 | 0.38% | 1 | 5.26% |
| panagiotis issaris* | 1 | 0.09% | 1 | 5.26% |
| Total | 1054 | 100.00% | 19 | 100.00% |
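When the caller supplies no classid, the function hunts for a free minor: it keeps the qdisc's major handle, sets bit 0x8000 in the minor, and cycles `q->hgenerator` through 1..0x7fff so consecutive allocations resume where the last search stopped rather than rescanning from the start. A userspace sketch of that allocator; the `in_use` predicate is a hypothetical stand-in for cbq_class_lookup():

```c
#include <stdint.h>
#include <stdio.h>

/* Returns a free classid of the form (major | 0x8000 | n), n in
 * 1..0x7fff, or 0 when the whole minor space is taken (the kernel
 * caller maps that to -ENOSR). */
static uint32_t alloc_classid(uint32_t handle, uint32_t *hgenerator,
			      int (*in_use)(uint32_t))
{
	uint32_t base = (handle & 0xffff0000) | 0x8000;
	int i;

	for (i = 0; i < 0x8000; i++) {
		if (++*hgenerator >= 0x8000)
			*hgenerator = 1;	/* wrap around, skipping 0 */
		if (!in_use(base | *hgenerator))
			return base | *hgenerator;
	}
	return 0;
}

static int nothing_in_use(uint32_t classid) { (void)classid; return 0; }

int main(void)
{
	uint32_t gen = 0;

	/* With major handle 1:0, the first auto id comes out as 1:8001. */
	printf("%#x\n", (unsigned int)alloc_classid(0x00010000, &gen,
						    nothing_in_use));
	return 0;
}
```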
static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
unsigned int qlen, backlog;
if (cl->filters || cl->children || cl == &q->link)
return -EBUSY;
sch_tree_lock(sch);
qlen = cl->q->q.qlen;
backlog = cl->q->qstats.backlog;
qdisc_reset(cl->q);
qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
if (cl->next_alive)
cbq_deactivate_class(cl);
if (q->tx_borrowed == cl)
q->tx_borrowed = q->tx_class;
if (q->tx_class == cl) {
q->tx_class = NULL;
q->tx_borrowed = NULL;
}
#ifdef CONFIG_NET_CLS_ACT
if (q->rx_class == cl)
q->rx_class = NULL;
#endif
cbq_unlink_class(cl);
cbq_adjust_levels(cl->tparent);
cl->defmap = 0;
cbq_sync_defmap(cl);
cbq_rmprio(q, cl);
sch_tree_unlock(sch);
BUG_ON(--cl->refcnt == 0);
/*
* This shouldn't happen: we "hold" one cops->get() when called
* from tc_ctl_tclass; the destroy method is done from cops->put().
*/
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 171 | 76.34% | 4 | 40.00% |
| jarek poplawski | 32 | 14.29% | 2 | 20.00% |
| americo wang | 15 | 6.70% | 1 | 10.00% |
| stephen hemminger | 3 | 1.34% | 1 | 10.00% |
| patrick mchardy | 3 | 1.34% | 2 | 20.00% |
| Total | 224 | 100.00% | 10 | 100.00% |
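The qlen/backlog snapshot taken before qdisc_reset() matters: the packets the deleted class was holding must also be subtracted from every ancestor's counters, or the parents would keep accounting for traffic that no longer exists. A simplified userspace sketch of that flush-and-propagate step (illustrative node type; the real qdisc_tree_reduce_backlog() walks the parent qdiscs of `cl->q`):

```c
struct node {
	struct node *parent;
	unsigned int qlen;	/* packets accounted at this node */
	unsigned int backlog;	/* bytes accounted at this node */
};

static void tree_reduce_backlog(struct node *n, unsigned int qlen,
				unsigned int backlog)
{
	for (; n; n = n->parent) {
		n->qlen -= qlen;
		n->backlog -= backlog;
	}
}

static void flush_queue(struct node *cl)
{
	unsigned int qlen = cl->qlen, backlog = cl->backlog;

	cl->qlen = 0;		/* qdisc_reset() analogue */
	cl->backlog = 0;
	tree_reduce_backlog(cl->parent, qlen, backlog);
}
```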
static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
if (cl == NULL)
cl = &q->link;
return &cl->filter_list;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 56 | 93.33% | 2 | 50.00% |
| stephen hemminger | 3 | 5.00% | 1 | 25.00% |
| john fastabend | 1 | 1.67% | 1 | 25.00% |
| Total | 60 | 100.00% | 4 | 100.00% |
static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
u32 classid)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *p = (struct cbq_class *)parent;
struct cbq_class *cl = cbq_class_lookup(q, classid);
if (cl) {
if (p && p->level <= cl->level)
return 0;
cl->filters++;
return (unsigned long)cl;
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 86 | 96.63% | 3 | 75.00% |
| stephen hemminger | 3 | 3.37% | 1 | 25.00% |
| Total | 89 | 100.00% | 4 | 100.00% |
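The `p->level <= cl->level` test is the hierarchy guard: level counts height above the leaves, so a filter attached at class p may only be bound to a class strictly below p. A tiny sketch of that predicate under the same convention (illustrative type, not the kernel's):

```c
struct node { int level; };	/* 0 at the leaves, growing upward */

/* Binding is allowed when there is no parent constraint, or when the
 * target class sits strictly below the class the filter hangs off. */
static int may_bind(const struct node *p, const struct node *cl)
{
	return p == NULL || p->level > cl->level;
}
```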
static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
struct cbq_class *cl = (struct cbq_class *)arg;
cl->filters--;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 32 | 100.00% | 2 | 100.00% |
| Total | 32 | 100.00% | 2 | 100.00% |
static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl;
unsigned int h;
if (arg->stop)
return;
for (h = 0; h < q->clhash.hashsize; h++) {
hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
if (arg->count < arg->skip) {
arg->count++;
continue;
}
if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
arg->stop = 1;
return;
}
arg->count++;
}
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 100 | 81.97% | 3 | 50.00% |
| patrick mchardy | 18 | 14.75% | 1 | 16.67% |
| stephen hemminger | 3 | 2.46% | 1 | 16.67% |
| eric dumazet | 1 | 0.82% | 1 | 16.67% |
| Total | 122 | 100.00% | 6 | 100.00% |
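The skip/count bookkeeping is what makes class dumps resumable: the netlink dumper records how many entries were already emitted, sets `skip` on the next pass, and the walk silently counts past them; a negative return from fn() aborts via `stop`. A self-contained userspace sketch of the same protocol over a flat array:

```c
#include <stdio.h>

struct walker {
	int stop, skip, count;
	int (*fn)(int item, struct walker *w);
};

static int print_item(int item, struct walker *w)
{
	(void)w;
	printf("class %d\n", item);
	return 0;		/* returning < 0 would abort the walk */
}

static void walk(const int *items, int n, struct walker *w)
{
	int i;

	if (w->stop)
		return;
	for (i = 0; i < n; i++) {
		if (w->count < w->skip) {	/* dumped on an earlier pass */
			w->count++;
			continue;
		}
		if (w->fn(items[i], w) < 0) {
			w->stop = 1;
			return;
		}
		w->count++;
	}
}

int main(void)
{
	int items[] = { 1, 2, 3, 4 };
	struct walker w = { .skip = 2, .fn = print_item };

	walk(items, 4, &w);	/* resumes at the third entry */
	return 0;
}
```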
static const struct Qdisc_class_ops cbq_class_ops = {
.graft = cbq_graft,
.leaf = cbq_leaf,
.qlen_notify = cbq_qlen_notify,
.get = cbq_get,
.put = cbq_put,
.change = cbq_change_class,
.delete = cbq_delete,
.walk = cbq_walk,
.tcf_chain = cbq_find_tcf,
.bind_tcf = cbq_bind_filter,
.unbind_tcf = cbq_unbind_filter,
.dump = cbq_dump_class,
.dump_stats = cbq_dump_class_stats,
};
static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
.next = NULL,
.cl_ops = &cbq_class_ops,
.id = "cbq",
.priv_size = sizeof(struct cbq_sched_data),
.enqueue = cbq_enqueue,
.dequeue = cbq_dequeue,
.peek = qdisc_peek_dequeued,
.drop = cbq_drop,
.init = cbq_init,
.reset = cbq_reset,
.destroy = cbq_destroy,
.change = NULL,
.dump = cbq_dump,
.dump_stats = cbq_dump_stats,
.owner = THIS_MODULE,
};
static int __init cbq_module_init(void)
{
return register_qdisc(&cbq_qdisc_ops);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 13 | 81.25% | 2 | 66.67% |
| al viro | 3 | 18.75% | 1 | 33.33% |
| Total | 16 | 100.00% | 3 | 100.00% |
static void __exit cbq_module_exit(void)
{
unregister_qdisc(&cbq_qdisc_ops);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 12 | 80.00% | 2 | 66.67% |
| al viro | 3 | 20.00% | 1 | 33.33% |
| Total | 15 | 100.00% | 3 | 100.00% |
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 7808 | 80.24% | 9 | 8.49% |
| patrick mchardy | 752 | 7.73% | 27 | 25.47% |
| thomas graf | 186 | 1.91% | 5 | 4.72% |
| stephen hemminger | 183 | 1.88% | 2 | 1.89% |
| eric dumazet | 160 | 1.64% | 10 | 9.43% |
| jarek poplawski | 156 | 1.60% | 8 | 7.55% |
| david s. miller | 120 | 1.23% | 10 | 9.43% |
| dave jones | 69 | 0.71% | 1 | 0.94% |
| john fastabend | 53 | 0.54% | 5 | 4.72% |
| jamal hadi salim | 46 | 0.47% | 1 | 0.94% |
| vasily averin | 43 | 0.44% | 3 | 2.83% |
| arnaldo carvalho de melo | 28 | 0.29% | 2 | 1.89% |
| americo wang | 21 | 0.22% | 2 | 1.89% |
| linus torvalds | 19 | 0.20% | 5 | 4.72% |
| jussi kivilinna | 18 | 0.18% | 2 | 1.89% |
| changli gao | 16 | 0.16% | 1 | 0.94% |
| al viro | 15 | 0.15% | 1 | 0.94% |
| dmitry torokhov | 10 | 0.10% | 1 | 0.94% |
| arjan van de ven | 7 | 0.07% | 1 | 0.94% |
| vinay k nallamothu | 3 | 0.03% | 1 | 0.94% |
| daniel borkmann | 3 | 0.03% | 1 | 0.94% |
| satyam sharma | 3 | 0.03% | 1 | 0.94% |
| tejun heo | 3 | 0.03% | 1 | 0.94% |
| yang yingliang | 3 | 0.03% | 2 | 1.89% |
| ilpo jarvinen | 3 | 0.03% | 1 | 0.94% |
| hideaki yoshifuji | 1 | 0.01% | 1 | 0.94% |
| panagiotis issaris* | 1 | 0.01% | 1 | 0.94% |
| steven cole | 1 | 0.01% | 1 | 0.94% |
| Total | 9731 | 100.00% | 106 | 100.00% |