Release 4.11 net/sched/sch_api.c
/*
* net/sched/sch_api.c Packet scheduler API.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Fixes:
*
* Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
* Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
* Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct Qdisc *q,
unsigned long cl, int event);
/*
   Short review.
   -------------
   This file consists of two interrelated parts:
   1. The queueing discipline manager frontend.
   2. The traffic class manager frontend.
   Generally, a queueing discipline ("qdisc") is a black box that can
   enqueue packets and dequeue them (when the device is ready to send
   something) in an order and at times determined by the algorithm
   hidden inside it.
   qdiscs fall into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all packets into "traffic classes"
     using "packet classifiers" (see cls_api.c).
   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them, and so on recursively.
   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles into a form
   more intelligible to the kernel, to perform some sanity checks and
   the part of the work that is common to all qdiscs, and to provide
   rtnetlink notifications.
   All of the real intelligence lives inside the qdisc modules.
   Every discipline has two major routines: enqueue and dequeue.
   ---dequeue
   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it only means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues, q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.
   ---enqueue
   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns a
   non-zero error code:
   NET_XMIT_DROP - this packet was dropped.
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN - this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore.
   Auxiliary routines:
   ---peek
   Like dequeue, but without removing the packet from the queue.
   ---reset
   Returns the qdisc to its initial state: purges all buffers, clears
   all timers, counters (except statistics), etc.
   ---init
   Initializes a newly created qdisc.
   ---destroy
   Destroys the resources allocated by init and during the lifetime of
   the qdisc.
   ---change
   Changes qdisc parameters.
 */
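As an illustration of the contract just described, a minimal FIFO-style discipline might implement the two major routines as sketched below. This is an editor's example only, not part of sch_api.c; the example_fifo_* names are hypothetical, while the helpers (qdisc_enqueue_tail, qdisc_drop, qdisc_dequeue_head) are the generic ones from sch_generic.h.

static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	/* 0 (NET_XMIT_SUCCESS) means "enqueued"; anything else is an error code. */
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);	/* NET_XMIT_DROP */
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	/* Returning NULL only means "nothing to send right now";
	 * the queue is truly empty iff sch->q.qlen == 0.
	 */
	return qdisc_dequeue_head(sch);
}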
/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);
/************************************************
* Queueing disciplines manipulation. *
************************************************/
/* The list of all installed queueing disciplines. */
static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */
int register_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
int rc = -EEXIST;
write_lock(&qdisc_mod_lock);
for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
if (!strcmp(qops->id, q->id))
goto out;
if (qops->enqueue == NULL)
qops->enqueue = noop_qdisc_ops.enqueue;
if (qops->peek == NULL) {
if (qops->dequeue == NULL)
qops->peek = noop_qdisc_ops.peek;
else
goto out_einval;
}
if (qops->dequeue == NULL)
qops->dequeue = noop_qdisc_ops.dequeue;
if (qops->cl_ops) {
const struct Qdisc_class_ops *cops = qops->cl_ops;
if (!(cops->get && cops->put && cops->walk && cops->leaf))
goto out_einval;
if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
goto out_einval;
}
qops->next = NULL;
*qp = qops;
rc = 0;
out:
write_unlock(&qdisc_mod_lock);
return rc;
out_einval:
rc = -EINVAL;
goto out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 113 | 48.92% | 2 | 33.33% |
Jarek Poplawski | 101 | 43.72% | 3 | 50.00% |
Arnaldo Carvalho de Melo | 17 | 7.36% | 1 | 16.67% |
Total | 231 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(register_qdisc);
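A scheduler module then registers such ops once at load time and unregisters them on unload; this is the usual pattern followed by the sch_* modules. The "example" id and handlers below are hypothetical and reuse the sketch above.

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_fifo_enqueue,
	.dequeue	= example_fifo_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};

static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}
module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");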
int unregister_qdisc(struct Qdisc_ops *qops)
{
struct Qdisc_ops *q, **qp;
int err = -ENOENT;
write_lock(&qdisc_mod_lock);
for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
if (q == qops)
break;
if (q) {
*qp = q->next;
q->next = NULL;
err = 0;
}
write_unlock(&qdisc_mod_lock);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 93 | 100.00% | 2 | 100.00% |
Total | 93 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(unregister_qdisc);
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
read_lock(&qdisc_mod_lock);
strlcpy(name, default_qdisc_ops->id, len);
read_unlock(&qdisc_mod_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
struct Qdisc_ops *q = NULL;
for (q = qdisc_base; q; q = q->next) {
if (!strcmp(name, q->id)) {
if (!try_module_get(q->owner))
q = NULL;
break;
}
}
return q;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 68 | 100.00% | 1 | 100.00% |
Total | 68 | 100.00% | 1 | 100.00% |
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
const struct Qdisc_ops *ops;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
if (!ops) {
/* Not found, drop lock and try to load module */
write_unlock(&qdisc_mod_lock);
request_module("sch_%s", name);
write_lock(&qdisc_mod_lock);
ops = qdisc_lookup_default(name);
}
if (ops) {
/* Set new default */
module_put(default_qdisc_ops->owner);
default_qdisc_ops = ops;
}
write_unlock(&qdisc_mod_lock);
return ops ? 0 : -ENOENT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 107 | 100.00% | 1 | 100.00% |
Total | 107 | 100.00% | 1 | 100.00% |
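In-tree, this path is reached through the net.core.default_qdisc sysctl (e.g. sysctl -w net.core.default_qdisc=fq_codel). The fragment below is a simplified, hypothetical caller, not the actual sysctl handler:

static int example_set_default_qdisc(const char *name)
{
	int err;

	/* qdisc_set_default() tries request_module("sch_<name>") if the
	 * discipline is not yet registered.
	 */
	err = qdisc_set_default(name);
	if (err)
		pr_err("default qdisc \"%s\" not found\n", name);
	return err;
}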
/* We know the handle. Find the qdisc among all qdiscs attached to the device
 * (the root qdisc, all its children, children of children, etc.)
 * Note: the caller either holds the rtnl lock or rcu_read_lock()
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
struct Qdisc *q;
if (!qdisc_dev(root))
return (root->handle == handle ? root : NULL);
if (!(root->flags & TCQ_F_BUILTIN) &&
root->handle == handle)
return root;
hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
if (q->handle == handle)
return q;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 61 | 69.32% | 1 | 25.00% |
Jiri Kosina | 26 | 29.55% | 2 | 50.00% |
Hannes Eder | 1 | 1.14% | 1 | 25.00% |
Total | 88 | 100.00% | 4 | 100.00% |
void qdisc_hash_add(struct Qdisc *q)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
struct Qdisc *root = qdisc_dev(q)->qdisc;
WARN_ON_ONCE(root == &noop_qdisc);
ASSERT_RTNL();
hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 44 | 60.27% | 3 | 50.00% |
Jarek Poplawski | 16 | 21.92% | 1 | 16.67% |
Jiri Kosina | 12 | 16.44% | 1 | 16.67% |
Patrick McHardy | 1 | 1.37% | 1 | 16.67% |
Total | 73 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(qdisc_hash_add);
void qdisc_hash_del(struct Qdisc *q)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
ASSERT_RTNL();
hash_del_rcu(&q->hash);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jarek Poplawski | 34 | 80.95% | 1 | 33.33% |
Eric Dumazet | 5 | 11.90% | 1 | 33.33% |
Jiri Kosina | 3 | 7.14% | 1 | 33.33% |
Total | 42 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
q = qdisc_match_from_root(dev->qdisc, handle);
if (q)
goto out;
if (dev_ingress_queue(dev))
q = qdisc_match_from_root(
dev_ingress_queue(dev)->qdisc_sleeping,
handle);
out:
return q;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 16 | 25.00% | 2 | 22.22% |
Jarek Poplawski | 15 | 23.44% | 2 | 22.22% |
Linus Torvalds (pre-git) | 15 | 23.44% | 1 | 11.11% |
Eric Dumazet | 10 | 15.62% | 1 | 11.11% |
Patrick McHardy | 8 | 12.50% | 3 | 33.33% |
Total | 64 | 100.00% | 9 | 100.00% |
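The 32-bit handle encodes tc's familiar "major:minor" notation: the upper 16 bits are the qdisc (major) number and the lower 16 bits the class (minor) number, packed with the TC_H_MAJ/TC_H_MIN/TC_H_MAKE macros from pkt_sched.h. A small hypothetical lookup:

static struct Qdisc *example_find_root_qdisc(struct net_device *dev)
{
	/* tc's "1:" is handle 0x00010000; "1:2" would be 0x00010002. */
	u32 handle = TC_H_MAKE(0x10000, 0);

	return qdisc_lookup(dev, handle);	/* caller holds RTNL or rcu_read_lock() */
}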
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
unsigned long cl;
struct Qdisc *leaf;
const struct Qdisc_class_ops *cops = p->ops->cl_ops;
if (cops == NULL)
return NULL;
cl = cops->get(p, classid);
if (cl == 0)
return NULL;
leaf = cops->leaf(p, cl);
cops->put(p, cl);
return leaf;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 87 | 97.75% | 2 | 50.00% |
Adrian Bunk | 1 | 1.12% | 1 | 25.00% |
Eric Dumazet | 1 | 1.12% | 1 | 25.00% |
Total | 89 | 100.00% | 4 | 100.00% |
/* Find queueing discipline by name */
static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
struct Qdisc_ops *q = NULL;
if (kind) {
read_lock(&qdisc_mod_lock);
for (q = qdisc_base; q; q = q->next) {
if (nla_strcmp(kind, q->id) == 0) {
if (!try_module_get(q->owner))
q = NULL;
break;
}
}
read_unlock(&qdisc_mod_lock);
}
return q;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 68 | 78.16% | 2 | 40.00% |
Patrick McHardy | 18 | 20.69% | 2 | 40.00% |
Adrian Bunk | 1 | 1.15% | 1 | 20.00% |
Total | 87 | 100.00% | 5 | 100.00% |
/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by detecting whether the
 * rate table has been modified.
 *
 * For linklayer ATM table entries, the rate table is aligned to
 * 48-byte cells, so some table entries will contain the same value.
 * The mpu (min packet unit) is also encoded into the old rate table,
 * so starting from the mpu we find the low and high table entries
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding the mpu up to the nearest 48-byte
 * cell/entry, then rounding up to the next cell, computing the table
 * entry one below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
int low = roundup(r->mpu, 48);
int high = roundup(low+1, 48);
int cell_low = low >> r->cell_log;
int cell_high = (high >> r->cell_log) - 1;
/* rtab is too inaccurate at rates > 100Mbit/s */
if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
pr_debug("TC linklayer: Giving up ATM detection\n");
return TC_LINKLAYER_ETHERNET;
}
if ((cell_high > cell_low) && (cell_high < 256)
&& (rtab[cell_low] == rtab[cell_high])) {
pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
cell_low, cell_high, rtab[cell_high]);
return TC_LINKLAYER_ATM;
}
return TC_LINKLAYER_ETHERNET;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 143 | 100.00% | 1 | 100.00% |
Total | 143 | 100.00% | 1 | 100.00% |
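A worked example of the detection (values are hypothetical): with mpu = 0 and cell_log = 3, low = roundup(0, 48) = 0 and high = roundup(1, 48) = 48, so cell_low = 0 and cell_high = (48 >> 3) - 1 = 5. An ATM-adjusted table charges whole 48-byte cells, so rtab[0] and rtab[5] carry the same cost; if they match (and rtab[0] != 0 and the rate is at most 100 Mbit/s), TC_LINKLAYER_ATM is reported, otherwise TC_LINKLAYER_ETHERNET.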
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
struct nlattr *tab)
{
struct qdisc_rate_table *rtab;
if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
nla_len(tab) != TC_RTAB_SIZE)
return NULL;
for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
!memcmp(&rtab->data, nla_data(tab), 1024)) {
rtab->refcnt++;
return rtab;
}
}
rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
if (rtab) {
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
if (r->linklayer == TC_LINKLAYER_UNAWARE)
r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
return rtab;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 123 | 64.40% | 1 | 25.00% |
Eric Dumazet | 45 | 23.56% | 1 | 25.00% |
Jesper Dangaard Brouer | 21 | 10.99% | 1 | 25.00% |
Patrick McHardy | 2 | 1.05% | 1 | 25.00% |
Total | 191 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
struct qdisc_rate_table *rtab, **rtabp;
if (!tab || --tab->refcnt)
return;
for (rtabp = &qdisc_rtab_list;
(rtab = *rtabp) != NULL;
rtabp = &rtab->next) {
if (rtab == tab) {
*rtabp = rtab->next;
kfree(rtab);
return;
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 76 | 100.00% | 1 | 100.00% |
Total | 76 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(qdisc_put_rtab);
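Rate tables are reference counted and shared across qdiscs; a shaper that still consumes the legacy tables typically pairs the two calls as in this hypothetical change() fragment (the helper name is a placeholder):

static int example_change_rate(struct tc_ratespec *rate,
			       struct nlattr *rtab_attr,
			       struct qdisc_rate_table **cur)
{
	struct qdisc_rate_table *rtab;

	/* Look up (or create) a shared table for the new rate first ... */
	rtab = qdisc_get_rtab(rate, rtab_attr);
	if (!rtab)
		return -EINVAL;

	/* ... then release the old one. qdisc_put_rtab(NULL) is a no-op. */
	qdisc_put_rtab(*cur);
	*cur = rtab;
	return 0;
}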
static LIST_HEAD(qdisc_stab_list);
static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
[TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
[TCA_STAB_DATA] = { .type = NLA_BINARY },
};
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
struct nlattr *tb[TCA_STAB_MAX + 1];
struct qdisc_size_table *stab;
struct tc_sizespec *s;
unsigned int tsize = 0;
u16 *tab = NULL;
int err;
err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
if (err < 0)
return ERR_PTR(err);
if (!tb[TCA_STAB_BASE])
return ERR_PTR(-EINVAL);
s = nla_data(tb[TCA_STAB_BASE]);
if (s->tsize > 0) {
if (!tb[TCA_STAB_DATA])
return ERR_PTR(-EINVAL);
tab = nla_data(tb[TCA_STAB_DATA]);
tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
}
if (tsize != s->tsize || (!tab && tsize > 0))
return ERR_PTR(-EINVAL);
list_for_each_entry(stab, &qdisc_stab_list, list) {
if (memcmp(&stab->szopts, s, sizeof(*s)))
continue;
if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
continue;
stab->refcnt++;
return stab;
}
stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
if (!stab)
return ERR_PTR(-ENOMEM);
stab->refcnt = 1;
stab->szopts = *s;
if (tsize > 0)
memcpy(stab->data, tab, tsize * sizeof(u16));
list_add_tail(&stab->list, &qdisc_stab_list);
return stab;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jussi Kivilinna | 313 | 100.00% | 1 | 100.00% |
Total | 313 | 100.00% | 1 | 100.00% |
static void stab_kfree_rcu(struct rcu_head *head)
{
kfree(container_of(head, struct qdisc_size_table, rcu));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
void qdisc_put_stab(struct qdisc_size_table *tab)
{
if (!tab)
return;
if (--tab->refcnt == 0) {
list_del(&tab->list);
call_rcu_bh(&tab->rcu, stab_kfree_rcu);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jussi Kivilinna | 39 | 86.67% | 1 | 50.00% |
Eric Dumazet | 6 | 13.33% | 1 | 50.00% |
Total | 45 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
struct nlattr *nest;
nest = nla_nest_start(skb, TCA_STAB);
if (nest == NULL)
goto nla_put_failure;
if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
goto nla_put_failure;
nla_nest_end(skb, nest);
return skb->len;
nla_put_failure:
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jussi Kivilinna | 65 | 80.25% | 1 | 33.33% |
Patrick McHardy | 9 | 11.11% | 1 | 33.33% |
David S. Miller | 7 | 8.64% | 1 | 33.33% |
Total | 81 | 100.00% | 3 | 100.00% |
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab)
{
int pkt_len, slot;
pkt_len = skb->len + stab->szopts.overhead;
if (unlikely(!stab->szopts.tsize))
goto out;
slot = pkt_len + stab->szopts.cell_align;
if (unlikely(slot < 0))
slot = 0;
slot >>= stab->szopts.cell_log;
if (likely(slot < stab->szopts.tsize))
pkt_len = stab->data[slot];
else
pkt_len = stab->data[stab->szopts.tsize - 1] *
(slot / stab->szopts.tsize) +
stab->data[slot % stab->szopts.tsize];
pkt_len <<= stab->szopts.size_log;
out:
if (unlikely(pkt_len < 1))
pkt_len = 1;
qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jussi Kivilinna | 170 | 98.84% | 1 | 50.00% |
Eric Dumazet | 2 | 1.16% | 1 | 50.00% |
Total | 172 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
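A worked example with a hypothetical size table: overhead = 24, cell_align = 0, cell_log = 6, size_log = 0, tsize = 16. A 100-byte skb gives pkt_len = 124 and slot = 124 >> 6 = 1, so the stored pkt_len becomes stab->data[1]. A slot beyond the table, say 20, is extrapolated as stab->data[15] * (20 / 16) + stab->data[20 % 16] = stab->data[15] + stab->data[4].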
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
txt, qdisc->ops->id, qdisc->handle >> 16);
qdisc->flags |= TCQ_F_WARN_NONWC;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jarek Poplawski | 51 | 96.23% | 1 | 33.33% |
Florian Westphal | 1 | 1.89% | 1 | 33.33% |
Eric Dumazet | 1 | 1.89% | 1 | 33.33% |
Total | 53 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
timer);
rcu_read_lock();
__netif_schedule(qdisc_root(wd->qdisc));
rcu_read_unlock();
return HRTIMER_NORESTART;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 32 | 69.57% | 1 | 20.00% |
David S. Miller | 7 | 15.22% | 2 | 40.00% |
John Fastabend | 6 | 13.04% | 1 | 20.00% |
Stephen Hemminger | 1 | 2.17% | 1 | 20.00% |
Total | 46 | 100.00% | 5 | 100.00% |
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
wd->timer.function = qdisc_watchdog;
wd->qdisc = qdisc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 31 | 75.61% | 1 | 33.33% |
David S. Miller | 9 | 21.95% | 1 | 33.33% |
Eric Dumazet | 1 | 2.44% | 1 | 33.33% |
Total | 41 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
if (test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(wd->qdisc)->state))
return;
if (wd->last_expires == expires)
return;
wd->last_expires = expires;
hrtimer_start(&wd->timer,
ns_to_ktime(expires),
HRTIMER_MODE_ABS_PINNED);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 22 | 36.07% | 4 | 57.14% |
Patrick McHardy | 19 | 31.15% | 1 | 14.29% |
Jarek Poplawski | 18 | 29.51% | 1 | 14.29% |
Jiri Pirko | 2 | 3.28% | 1 | 14.29% |
Total | 61 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
hrtimer_cancel(&wd->timer);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 17 | 94.44% | 1 | 50.00% |
David S. Miller | 1 | 5.56% | 1 | 50.00% |
Total | 18 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(qdisc_watchdog_cancel);
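Taken together, the watchdog helpers give a non-work-conserving qdisc a way to reschedule its device when the next packet becomes eligible. A hypothetical rate-limiter might use them as sketched here (the example_wd_* names and the fixed 1 ms delay are illustrative only):

struct example_wd_sched {
	struct qdisc_watchdog watchdog;
};

static int example_wd_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct example_wd_sched *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	return 0;
}

static struct sk_buff *example_wd_dequeue(struct Qdisc *sch)
{
	struct example_wd_sched *q = qdisc_priv(sch);

	/* Nothing may be sent yet: arm the hrtimer so the root qdisc is
	 * rescheduled at the next transmit time, and return NULL.
	 */
	qdisc_watchdog_schedule_ns(&q->watchdog,
				   ktime_get_ns() + NSEC_PER_MSEC);
	return NULL;
}

static void example_wd_destroy(struct Qdisc *sch)
{
	struct example_wd_sched *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
}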
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
unsigned int size = n * sizeof(struct hlist_head), i;
struct hlist_head *h;
if (size <= PAGE_SIZE)
h = kmalloc(size, GFP_KERNEL);
else
h = (struct hlist_head *)
__get_free_pages(GFP_KERNEL, get_order(size));
if (h != NULL) {
for (i = 0; i < n; i++)
INIT_HLIST_HEAD(&h[i]);
}
return h;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 96 | 98.97% | 1 | 50.00% |
Adrian Bunk | 1 | 1.03% | 1 | 50.00% |
Total | 97 | 100.00% | 2 | 100.00% |
static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
unsigned int size = n * sizeof(struct hlist_head);
if (size <= PAGE_SIZE)
kfree(h);
else
free_pages((unsigned long)h, get_order(size));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 53 | 100.00% | 1 | 100.00% |
Total | 53 | 100.00% | 1 | 100.00% |
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
struct Qdisc_class_common *cl;
struct hlist_node *next;
struct hlist_head *nhash, *ohash;
unsigned int nsize, nmask, osize;
unsigned int i, h;
/* Rehash when load factor exceeds 0.75 */
if (clhash->hashelems * 4 <= clhash->hashsize * 3)
return;
nsize = clhash->hashsize * 2;
nmask = nsize - 1;
nhash = qdisc_class_hash_alloc(nsize);
if (nhash == NULL)
return;
ohash = clhash->hash;
osize = clhash->hashsize;
sch_tree_lock(sch);
for (i = 0; i < osize; i++) {
hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
h = qdisc_class_hash(cl->classid, nmask);
hlist_add_head(&cl->hnode, &nhash[h]);
}
}
clhash->hash = nhash;
clhash->hashsize = nsize;
clhash->hashmask = nmask;
sch_tree_unlock(sch);
qdisc_class_hash_free(ohash, osize);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 190 | 100.00% | 1 | 100.00% |
Total | 190 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(qdisc_class_hash_grow);
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
unsigned int size = 4;
clhash->hash = qdisc_class_hash_alloc(size);
if (clhash->hash == NULL)
return -ENOMEM;
clhash->hashsize = size;
clhash->hashmask = size - 1;
clhash->hashelems = 0;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(qdisc_class_hash_init);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(qdisc_class_hash_destroy);
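A classful qdisc normally embeds a struct Qdisc_class_common in each class, keyed by classid; the hash helpers above then handle insertion and automatic rehashing. A hypothetical sketch (the example_cls_* names are illustrative only):

struct example_class {
	struct Qdisc_class_common common;	/* keyed by common.classid */
};

struct example_cls_sched {
	struct Qdisc_class_hash clhash;
};

static int example_cls_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct example_cls_sched *q = qdisc_priv(sch);

	return qdisc_class_hash_init(&q->clhash);
}

static void example_cls_add(struct Qdisc *sch, struct example_class *cl,
			    u32 classid)
{
	struct example_cls_sched *q = qdisc_priv(sch);

	cl->common.classid = classid;
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	qdisc_class_hash_grow(sch, &q->clhash);	/* rehashes past 0.75 load */
}

static void example_cls_destroy(struct Qdisc *sch)
{
	struct example_cls_sched *q = qdisc_priv(sch);

	qdisc_class_hash_destroy(&q->clhash);
}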
void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
struct Qdisc_class_common *cl)
{
unsigned int h;
INIT_HLIST_NODE(&cl->hnode);
h = qdisc_class_hash(cl->classid, clhash->hashmask);
hlist_add_head(&