Release 4.11 net/sched/sch_prio.c
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:	19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *		Init -- EINVAL when opt undefined
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct prio_sched_data {
	int bands;
	struct tcf_proto __rcu *filter_list;
	u8 prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
};
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 91 | 43.96% | 3 | 18.75% |
Jamal Hadi Salim | 44 | 21.26% | 3 | 18.75% |
Patrick McHardy | 30 | 14.49% | 2 | 12.50% |
John Fastabend | 16 | 7.73% | 1 | 6.25% |
David S. Miller | 14 | 6.76% | 1 | 6.25% |
Jarek Poplawski | 5 | 2.42% | 2 | 12.50% |
Stephen Hemminger | 3 | 1.45% | 1 | 6.25% |
Daniel Borkmann | 2 | 0.97% | 1 | 6.25% |
Adrian Bunk | 1 | 0.48% | 1 | 6.25% |
Lucas Nussbaum | 1 | 0.48% | 1 | 6.25% |
Total | 207 | 100.00% | 16 | 100.00% |
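For readers tracing prio_classify() above: when no classifier is attached, or classification fails, the band is taken from the prio2band table indexed by the low bits of skb->priority. The following userspace sketch models only that fallback path; TC_PRIO_MAX matches the kernel header, while the example priomap is the 3-band default that tc normally supplies and is restated here as an assumption of the sketch.

/* Illustrative userspace model -- not part of sch_prio.c. */
#include <stdio.h>

#define TC_PRIO_MAX 15

/* Default 3-band priomap as configured by tc (assumed for this sketch). */
static const unsigned char prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* Mirrors the fallback in prio_classify(): mask the priority into the
 * table; band 0 is the highest-priority band. */
static unsigned int pick_band(unsigned int skb_priority)
{
	return prio2band[skb_priority & TC_PRIO_MAX];
}

int main(void)
{
	unsigned int prio;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++)
		printf("priority %2u -> band %u\n", prio, pick_band(prio));
	return 0;
}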
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 50 | 40.00% | 3 | 25.00% |
Patrick McHardy | 23 | 18.40% | 1 | 8.33% |
Jamal Hadi Salim | 17 | 13.60% | 2 | 16.67% |
Jarek Poplawski | 9 | 7.20% | 2 | 16.67% |
Eric Dumazet | 8 | 6.40% | 1 | 8.33% |
Américo Wang | 7 | 5.60% | 1 | 8.33% |
John Fastabend | 6 | 4.80% | 1 | 8.33% |
Jussi Kivilinna | 5 | 4.00% | 1 | 8.33% |
Total | 125 | 100.00% | 12 | 100.00% |
static struct sk_buff *prio_peek(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->ops->peek(qdisc);
		if (skb)
			return skb;
	}
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 79 | 100.00% | 1 | 100.00% |
Total | 79 | 100.00% | 1 | 100.00% |
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 49 | 50.00% | 1 | 14.29% |
Peter P. Waskiewicz Jr | 25 | 25.51% | 1 | 14.29% |
Eric Dumazet | 7 | 7.14% | 1 | 14.29% |
Américo Wang | 7 | 7.14% | 1 | 14.29% |
David S. Miller | 6 | 6.12% | 1 | 14.29% |
Stephen Hemminger | 3 | 3.06% | 1 | 14.29% |
Florian Westphal | 1 | 1.02% | 1 | 14.29% |
Total | 98 | 100.00% | 7 | 100.00% |
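prio_dequeue() above implements strict priority: the bands are scanned in order and the first non-empty child queue wins, so lower-numbered bands can starve higher-numbered ones. The userspace sketch below reproduces just that scan; the fifo type and helpers are hypothetical stand-ins for the child qdiscs, not kernel APIs.

/* Illustrative userspace model -- not part of sch_prio.c. */
#include <stdio.h>

#define BANDS 3
#define SLOTS 8

struct fifo {
	int pkt[SLOTS];
	int head, tail;
};

/* Returns 0 on an empty queue, like a child qdisc returning NULL. */
static int fifo_pop(struct fifo *q, int *pkt)
{
	if (q->head == q->tail)
		return 0;
	*pkt = q->pkt[q->head++ % SLOTS];
	return 1;
}

static int fifo_push(struct fifo *q, int pkt)
{
	if (q->tail - q->head == SLOTS)
		return 0;
	q->pkt[q->tail++ % SLOTS] = pkt;
	return 1;
}

/* Same shape as prio_dequeue(): the first non-empty band wins. */
static int prio_dequeue_model(struct fifo bands[BANDS], int *pkt)
{
	for (int b = 0; b < BANDS; b++)
		if (fifo_pop(&bands[b], pkt))
			return 1;
	return 0;
}

int main(void)
{
	struct fifo bands[BANDS] = { 0 };
	int pkt;

	fifo_push(&bands[2], 30);	/* bulk traffic */
	fifo_push(&bands[0], 10);	/* interactive traffic */
	fifo_push(&bands[2], 31);

	while (prio_dequeue_model(bands, &pkt))
		printf("dequeued %d\n", pkt);	/* prints 10, 30, 31 */
	return 0;
}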
static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 54 | 83.08% | 2 | 50.00% |
Américo Wang | 8 | 12.31% | 1 | 25.00% |
Stephen Hemminger | 3 | 4.62% | 1 | 25.00% |
Total | 65 | 100.00% | 4 | 100.00% |
static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 46 | 80.70% | 1 | 20.00% |
David S. Miller | 5 | 8.77% | 1 | 20.00% |
Stephen Hemminger | 3 | 5.26% | 1 | 20.00% |
Patrick McHardy | 3 | 5.26% | 2 | 40.00% |
Total | 57 | 100.00% | 5 | 100.00% |
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1));
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_destroy(queues[--i]);
			return -ENOMEM;
		}
	}

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < oldbands; i++) {
		struct Qdisc *child = q->queues[i];

		qdisc_tree_reduce_backlog(child, child->q.qlen,
					  child->qstats.backlog);
		qdisc_destroy(child);
	}

	for (i = oldbands; i < q->bands; i++)
		q->queues[i] = queues[i];

	sch_tree_unlock(sch);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 156 | 50.49% | 4 | 26.67% |
Eric Dumazet | 91 | 29.45% | 1 | 6.67% |
David S. Miller | 21 | 6.80% | 2 | 13.33% |
Patrick McHardy | 14 | 4.53% | 4 | 26.67% |
Peter P. Waskiewicz Jr | 11 | 3.56% | 1 | 6.67% |
Américo Wang | 7 | 2.27% | 1 | 6.67% |
Amnon Aaronsohn | 6 | 1.94% | 1 | 6.67% |
Stephen Hemminger | 3 | 0.97% | 1 | 6.67% |
Total | 309 | 100.00% | 15 | 100.00% |
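The validation at the top of prio_tune() is what makes a configuration acceptable: between 2 and TCQ_PRIO_BANDS bands, and every priomap entry must refer to an existing band. The userspace sketch below mirrors that check; the struct restates the layout of struct tc_prio_qopt from the uapi header as an assumption of the sketch.

/* Illustrative userspace check -- not part of sch_prio.c. */
#include <stdio.h>

#define TC_PRIO_MAX    15
#define TCQ_PRIO_BANDS 16

struct tc_prio_qopt_model {
	int           bands;                    /* number of bands */
	unsigned char priomap[TC_PRIO_MAX + 1]; /* priority -> band */
};

/* Returns 0 where prio_tune() would proceed, -1 where it returns -EINVAL. */
static int validate_prio_qopt(const struct tc_prio_qopt_model *qopt)
{
	int i;

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -1;

	for (i = 0; i <= TC_PRIO_MAX; i++)
		if (qopt->priomap[i] >= qopt->bands)
			return -1;

	return 0;
}

int main(void)
{
	struct tc_prio_qopt_model ok = {
		.bands   = 3,
		.priomap = { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
	};
	struct tc_prio_qopt_model bad = ok;

	bad.priomap[0] = 3;	/* band 3 does not exist with bands == 3 */

	printf("ok:  %d\n", validate_prio_qopt(&ok));	/* 0  */
	printf("bad: %d\n", validate_prio_qopt(&bad));	/* -1 */
	return 0;
}

In practice such a payload usually comes from tc; a command along the lines of `tc qdisc add dev eth0 root handle 1: prio bands 3` hands the kernel a TCA_OPTIONS attribute of this shape with the default priomap filled in.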
static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
	if (!opt)
		return -EINVAL;

	return prio_tune(sch, opt);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 30 | 90.91% | 4 | 66.67% |
Eric Dumazet | 2 | 6.06% | 1 | 16.67% |
Patrick McHardy | 1 | 3.03% | 1 | 16.67% |
Total | 33 | 100.00% | 6 | 100.00% |
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 87 | 85.29% | 1 | 16.67% |
David S. Miller | 7 | 6.86% | 1 | 16.67% |
Arnaldo Carvalho de Melo | 4 | 3.92% | 2 | 33.33% |
Stephen Hemminger | 3 | 2.94% | 1 | 16.67% |
Patrick McHardy | 1 | 0.98% | 1 | 16.67% |
Total | 102 | 100.00% | 6 | 100.00% |
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 66 | 86.84% | 1 | 33.33% |
Américo Wang | 7 | 9.21% | 1 | 33.33% |
Stephen Hemminger | 3 | 3.95% | 1 | 33.33% |
Total | 76 | 100.00% | 3 | 100.00% |
static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 40 | 93.02% | 1 | 50.00% |
Stephen Hemminger | 3 | 6.98% | 1 | 50.00% |
Total | 43 | 100.00% | 2 | 100.00% |
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 47 | 94.00% | 1 | 50.00% |
Stephen Hemminger | 3 | 6.00% | 1 | 50.00% |
Total | 50 | 100.00% | 2 | 100.00% |
static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_get(sch, classid);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
static void prio_put(struct Qdisc *q, unsigned long cl)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 13 | 92.86% | 1 | 50.00% |
Joe Perches | 1 | 7.14% | 1 | 50.00% |
Total | 14 | 100.00% | 2 | 100.00% |
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 50 | 80.65% | 2 | 50.00% |
Alexey Kuznetsov | 9 | 14.52% | 1 | 25.00% |
Stephen Hemminger | 3 | 4.84% | 1 | 25.00% |
Total | 62 | 100.00% | 4 | 100.00% |
static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jarek Poplawski | 79 | 84.04% | 1 | 20.00% |
John Fastabend | 10 | 10.64% | 3 | 60.00% |
Eric Dumazet | 5 | 5.32% | 1 | 20.00% |
Total | 94 | 100.00% | 5 | 100.00% |
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 99 | 97.06% | 2 | 66.67% |
Stephen Hemminger | 3 | 2.94% | 1 | 33.33% |
Total | 102 | 100.00% | 3 | 100.00% |
static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
					      unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 38 | 90.48% | 1 | 33.33% |
Stephen Hemminger | 3 | 7.14% | 1 | 33.33% |
John Fastabend | 1 | 2.38% | 1 | 33.33% |
Total | 42 | 100.00% | 3 | 100.00% |
static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.get		=	prio_get,
	.put		=	prio_put,
	.walk		=	prio_walk,
	.tcf_chain	=	prio_find_tcf,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_put,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};
static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 13 | 81.25% | 2 | 66.67% |
Al Viro | 3 | 18.75% | 1 | 33.33% |
Total | 16 | 100.00% | 3 | 100.00% |
static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 12 | 80.00% | 2 | 66.67% |
Al Viro | 3 | 20.00% | 1 | 33.33% |
Total | 15 | 100.00% | 3 | 100.00% |
module_init(prio_module_init)
module_exit(prio_module_exit)
MODULE_LICENSE("GPL");
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 1074 | 58.82% | 6 | 10.17% |
Patrick McHardy | 156 | 8.54% | 9 | 15.25% |
Eric Dumazet | 115 | 6.30% | 5 | 8.47% |
Jarek Poplawski | 98 | 5.37% | 3 | 5.08% |
Jamal Hadi Salim | 61 | 3.34% | 3 | 5.08% |
David S. Miller | 58 | 3.18% | 5 | 8.47% |
Dave Jones | 38 | 2.08% | 1 | 1.69% |
Peter P. Waskiewicz Jr | 36 | 1.97% | 1 | 1.69% |
Américo Wang | 36 | 1.97% | 3 | 5.08% |
Stephen Hemminger | 36 | 1.97% | 1 | 1.69% |
John Fastabend | 34 | 1.86% | 5 | 8.47% |
Art Haas | 22 | 1.20% | 1 | 1.69% |
Al Viro | 15 | 0.82% | 1 | 1.69% |
Alexey Kuznetsov | 9 | 0.49% | 1 | 1.69% |
Linus Torvalds | 7 | 0.38% | 2 | 3.39% |
Arnaldo Carvalho de Melo | 7 | 0.38% | 2 | 3.39% |
Amnon Aaronsohn | 6 | 0.33% | 1 | 1.69% |
Jussi Kivilinna | 5 | 0.27% | 1 | 1.69% |
Tejun Heo | 3 | 0.16% | 1 | 1.69% |
Jiri Pirko | 3 | 0.16% | 1 | 1.69% |
Daniel Borkmann | 2 | 0.11% | 1 | 1.69% |
Lucas Nussbaum | 1 | 0.05% | 1 | 1.69% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.05% | 1 | 1.69% |
Joe Perches | 1 | 0.05% | 1 | 1.69% |
Adrian Bunk | 1 | 0.05% | 1 | 1.69% |
Florian Westphal | 1 | 0.05% | 1 | 1.69% |
Total | 1826 | 100.00% | 59 | 100.00% |