cregit-Linux: how code gets into the kernel

Release 4.15: net/sched/sch_sfq.c (directory: net/sched)
/*
 * net/sched/sch_sfq.c  Stochastic Fairness Queueing discipline.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/red.h>


/*      Stochastic Fairness Queuing algorithm.
        =======================================

        Source:
        Paul E. McKenney "Stochastic Fairness Queuing",
        IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

        Paul E. McKenney "Stochastic Fairness Queuing",
        "Interworking: Research and Experience", v.2, 1991, p.113-131.


        See also:
        M. Shreedhar and George Varghese "Efficient Fair
        Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


        This is not the thing that is usually called (W)FQ nowadays.
        It does not use any timestamp mechanism, but instead
        processes queues in round-robin order.

        ADVANTAGE:

        - It is very cheap. Both CPU and memory requirements are minimal.

        DRAWBACKS:

        - "Stochastic" -> It is not 100% fair.
        When hash collisions occur, several flows are considered as one.

        - "Round-robin" -> It introduces larger delays than virtual clock
        based schemes, and should not be used for isolating interactive
        traffic from non-interactive. It means, that this scheduler
        should be used as leaf of CBQ or P3, which put interactive traffic
        to higher priority band.

        We still need true WFQ for top level CSZ, but using WFQ
        for the best effort traffic is absolutely pointless:
        SFQ is superior for this purpose.

        IMPLEMENTATION:
        This implementation limits :
        - maximal queue length per flow to 127 packets.
        - max mtu to 2^18-1;
        - max 65408 flows,
        - number of hash buckets to 65536.

        It is easy to increase these values, but not in flight.  */
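
To make the scheme above concrete, here is a minimal userspace sketch of the
two ideas SFQ combines: hashing flows into a fixed set of buckets, and serving
the non-empty buckets round-robin. It is an illustration only, not kernel
code; the bucket count, the hash, and the flow representation are hypothetical
stand-ins for the ht[]/slots[] structures defined below.

#include <stdint.h>
#include <stdio.h>

#define NBUCKETS 8			/* tiny, for illustration only */

struct toy_flow {			/* stand-in for struct sfq_slot */
	int backlog;			/* packets queued in this bucket */
};

static struct toy_flow buckets[NBUCKETS];

/* Toy perturbed hash: distinct flows that share a bucket are merged,
 * which is exactly the "stochastic" imperfection described above. */
static unsigned toy_hash(uint32_t flow_id, uint32_t perturbation)
{
	return (flow_id ^ perturbation) * 2654435761u % NBUCKETS;
}

int main(void)
{
	uint32_t perturbation = 12345;
	int remaining = 20;

	/* Enqueue: each packet lands in the bucket its flow hashes to. */
	for (uint32_t flow = 0; flow < 20; flow++)
		buckets[toy_hash(flow, perturbation)].backlog++;

	/* Dequeue: visit non-empty buckets round-robin, one packet each,
	 * instead of computing per-packet virtual finish times as WFQ does. */
	while (remaining > 0) {
		for (unsigned i = 0; i < NBUCKETS; i++) {
			if (buckets[i].backlog > 0) {
				buckets[i].backlog--;
				remaining--;
			}
		}
	}
	puts("all packets served round-robin");
	return 0;
}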


#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */

#define SFQ_ALLOT_SHIFT		3

#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
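
A quick standalone check of this arithmetic (the two macros are copied from
above, and DIV_ROUND_UP is re-defined locally so the snippet compiles outside
the kernel): a full-size 1500-byte packet costs 188 scaled units, and even a
64K packet costs only 8192, comfortably inside the positive range of the
16-bit allot field.

#include <assert.h>
#include <limits.h>

#define SFQ_ALLOT_SHIFT		3
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)

int main(void)
{
	/* A 1500-byte packet costs DIV_ROUND_UP(1500, 8) = 188 units... */
	assert(SFQ_ALLOT_SIZE(1500) == 188);
	/* ...and a 64K packet costs 8192 units, well below SHRT_MAX,
	 * so the short 'allot' field in struct sfq_slot cannot overflow. */
	assert(SFQ_ALLOT_SIZE(65536) == 8192);
	assert(SFQ_ALLOT_SIZE(65536) <= SHRT_MAX);
	return 0;
}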

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */

typedef u16 sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array
 */

struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};


struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq RR chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */

	unsigned int	backlog;
	struct red_vars	vars;
};


struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	u32		perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot	*tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
	struct Qdisc	*sch;
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */

static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q,
					    sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        45  100.00%        2     100.00%
Total                               45  100.00%        2     100.00%
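
A standalone illustration of the index convention that sfq_dep_head() decodes
(constants copied from the defines above; the asserts are illustrative, not
kernel code):

#include <assert.h>

#define SFQ_MAX_DEPTH	127
#define SFQ_MAX_FLOWS	(0x10000 - SFQ_MAX_DEPTH - 1)	/* 65408 */

int main(void)
{
	/* val <  SFQ_MAX_FLOWS  ->  &q->slots[val].dep
	 * val >= SFQ_MAX_FLOWS  ->  &q->dep[val - SFQ_MAX_FLOWS]
	 * exactly as sfq_dep_head() above implements. */
	unsigned val = SFQ_MAX_FLOWS + 5;	/* encodes dep[5]: flows with 5 packets */

	assert(val - SFQ_MAX_FLOWS == 5);
	/* The two ranges together need SFQ_MAX_FLOWS + SFQ_MAX_DEPTH + 1
	 * distinct values, which is exactly what a u16 provides, matching
	 * the comment above the sfq_index typedef. */
	assert(SFQ_MAX_FLOWS + SFQ_MAX_DEPTH + 1 == 0x10000);
	return 0;
}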


static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)            19   51.35%        1      20.00%
Eric Dumazet                        16   43.24%        3      60.00%
Tom Herbert                          2    5.41%        1      20.00%
Total                               37  100.00%        5     100.00%
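
Note that masking with q->divisor - 1 only reduces the hash modulo the
divisor when the divisor is a power of two; sfq_change() below enforces this
with is_power_of_2(). A one-line standalone check of that identity:

#include <assert.h>

int main(void)
{
	unsigned divisor = 1024;	/* SFQ_DEFAULT_HASH_DIVISOR */
	unsigned hash = 0x12345678;

	/* For power-of-two divisors, masking equals taking hash % divisor. */
	assert((hash & (divisor - 1)) == hash % divisor);
	return 0;
}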


static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Patrick McHardy                    161   82.56%        1      11.11%
John Fastabend                      16    8.21%        1      11.11%
Eric Dumazet                         6    3.08%        1      11.11%
Jarek Poplawski                      5    2.56%        2      22.22%
Jiri Pirko                           4    2.05%        2      22.22%
Daniel Borkmann                      2    1.03%        1      11.11%
Gustavo A. R. Silva                  1    0.51%        1      11.11%
Total                              195  100.00%        9     100.00%

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;	/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)            63   65.62%        1      25.00%
Eric Dumazet                        31   32.29%        2      50.00%
David S. Miller                      2    2.08%        1      25.00%
Total                               96  100.00%        4     100.00%

#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)

static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)            50   71.43%        1      33.33%
Eric Dumazet                        18   25.71%        1      33.33%
David S. Miller                      2    2.86%        1      33.33%
Total                               70  100.00%        3     100.00%
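
Worked example: suppose slot x is the only flow holding three packets, so it
sits alone on the dep[3] chain and q->cur_depth is 3. sfq_unlink() removes it;
d is the old qlen (3, since qlen is post-decremented); n == p detects that the
chain is now empty (both neighbours were the dep[3] head itself), so cur_depth
drops to 2; finally sfq_link() re-threads the slot onto dep[2]. sfq_inc()
below is the mirror image for an arriving packet.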


static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)            51   76.12%        1      33.33%
Eric Dumazet                        14   20.90%        1      33.33%
David S. Miller                      2    2.99%        1      33.33%
Total                               67  100.00%        3     100.00%

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        57  100.00%        2     100.00%
Total                               57  100.00%        2     100.00%

/* remove one skb from head of slot queue */
/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        57  100.00%        2     100.00%
Total                               57  100.00%        2     100.00%


static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        40  100.00%        2     100.00%
Total                               40  100.00%        2     100.00%

/* add skb to slot queue (tail add) */
/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        50  100.00%        1     100.00%
Total                               50  100.00%        1     100.00%
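
These four helpers hand-roll a circular doubly linked list with the slot
itself as the sentinel node; the casts to (struct sk_buff *) work because
skblist_next/skblist_prev overlay sk_buff's own next/prev fields. As the
comment above anticipates, this could one day use a standard list. A hedged
sketch of what the equivalent would look like with list_head (the struct and
field names here are hypothetical illustrations, not code that exists in the
tree):

#include <linux/list.h>

/* Hypothetical alternative: anchor the per-slot packet list with a
 * standard list_head instead of overlaying sk_buff.next/prev. */
struct sfq_slot_alt {
	struct list_head skblist;	/* replaces skblist_next/skblist_prev */
};

struct pkt {				/* stand-in for the queued object */
	struct list_head node;
};

static inline void alt_slot_queue_init(struct sfq_slot_alt *slot)
{
	INIT_LIST_HEAD(&slot->skblist);
}

static inline void alt_slot_queue_add(struct sfq_slot_alt *slot, struct pkt *p)
{
	list_add_tail(&p->node, &slot->skblist);	/* tail add, as above */
}

static inline struct pkt *alt_slot_dequeue_head(struct sfq_slot_alt *slot)
{
	struct pkt *p = list_first_entry(&slot->skblist, struct pkt, node);

	list_del_init(&p->node);
	return p;
}

static inline struct pkt *alt_slot_dequeue_tail(struct sfq_slot_alt *slot)
{
	struct pkt *p = list_last_entry(&slot->skblist, struct pkt, node);

	list_del_init(&p->node);
	return p;
}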


static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        92   46.23%        3      25.00%
Linus Torvalds (pre-git)            68   34.17%        2      16.67%
Gao Feng                            11    5.53%        1       8.33%
Dmitry Torokhov                     11    5.53%        1       8.33%
John Fastabend                       5    2.51%        1       8.33%
Américo Wang                         4    2.01%        1       8.33%
Jussi Kivilinna                      3    1.51%        1       8.33%
Stephen Hemminger                    3    1.51%        1       8.33%
Patrick McHardy                      2    1.01%        1       8.33%
Total                              199  100.00%       12     100.00%

/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        19  100.00%        1     100.00%
Total                               19  100.00%        1     100.00%

/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        27  100.00%        1     100.00%
Total                               27  100.00%        1     100.00%
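
Taken together, the two predicates encode three policies: with TC_RED_ECN
alone, both probabilistic and hard RED verdicts become ECN marks when the
packet supports it; with TC_RED_ECN and TC_RED_HARDDROP both set, packets over
the probabilistic threshold may still be marked but packets over the max
threshold are always dropped (sfq_hard_mark() returns false); with neither
flag, RED always drops.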


static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        17  100.00%        1     100.00%
Total                               17  100.00%        1     100.00%


static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							     &slot->vars,
							     slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could endup servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch, to_free);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                       404   61.12%        9      34.62%
Linus Torvalds (pre-git)           135   20.42%        3      11.54%
Patrick McHardy                     42    6.35%        2       7.69%
Konstantin Khlebnikov               25    3.78%        1       3.85%
Alexey Kuznetsov                    20    3.03%        3      11.54%
John Fastabend                      12    1.82%        1       3.85%
Américo Wang                         7    1.06%        1       3.85%
Jarek Poplawski                      5    0.76%        2       7.69%
Gao Feng                             5    0.76%        1       3.85%
Stephen Hemminger                    3    0.45%        1       3.85%
Jussi Kivilinna                      2    0.30%        1       3.85%
Ben Greear                           1    0.15%        1       3.85%
Total                              661  100.00%       26     100.00%


static struct sk_buff *sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)           112   52.58%        2      16.67%
Eric Dumazet                        79   37.09%        5      41.67%
Miguel Freitas                       9    4.23%        1       8.33%
Jussi Kivilinna                      5    2.35%        1       8.33%
John Fastabend                       3    1.41%        1       8.33%
Stephen Hemminger                    3    1.41%        1       8.33%
Patrick McHardy                      2    0.94%        1       8.33%
Total                              213  100.00%       12     100.00%


static void sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)            33   91.67%        2      66.67%
Eric Dumazet                         3    8.33%        1      33.33%
Total                               36  100.00%        3     100.00%

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                       401   95.48%        3      60.00%
Américo Wang                        16    3.81%        1      20.00%
John Fastabend                       3    0.71%        1      20.00%
Total                              420  100.00%        5     100.00%


static void sfq_perturbation(struct timer_list *t)
{
	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = prandom_u32();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        38   38.38%        2      28.57%
Linus Torvalds (pre-git)            36   36.36%        1      14.29%
Kees Cook                           18   18.18%        1      14.29%
Alexey Kuznetsov                     4    4.04%        1      14.29%
Stephen Hemminger                    2    2.02%        1      14.29%
Aruna-Hewapathirane                  1    1.01%        1      14.29%
Total                               99  100.00%        7     100.00%


static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;
	struct sk_buff *to_free = NULL;
	struct sk_buff *tail = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
					ctl_v1->Wlog))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit) {
		dropped += sfq_drop(sch, &to_free);
		if (!tail)
			tail = to_free;
	}

	rtnl_kfree_skbs(to_free, tail);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                       266   49.17%        4      22.22%
Linus Torvalds (pre-git)           107   19.78%        2      11.11%
Alexey Kuznetsov                    40    7.39%        3      16.67%
Gao Feng                            35    6.47%        1       5.56%
Stephen Hemminger                   31    5.73%        3      16.67%
Patrick McHardy                     28    5.18%        2      11.11%
Nogah Frankel                       24    4.44%        1       5.56%
Américo Wang                         9    1.66%        1       5.56%
Aruna-Hewapathirane                  1    0.18%        1       5.56%
Total                              541  100.00%       18     100.00%


static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        16   88.89%        1      50.00%
Michal Hocko                         2   11.11%        1      50.00%
Total                               18  100.00%        2     100.00%


static void sfq_free(void *addr)
{
	kvfree(addr);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        14   93.33%        1      50.00%
Américo Wang                         1    6.67%        1      50.00%
Total                               15  100.00%        2     100.00%


static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                        61   96.83%        3      75.00%
Jiri Pirko                           2    3.17%        1      25.00%
Total                               63  100.00%        4     100.00%


static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	q->sch = sch;
	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);

	err = tcf_block_get(&q->block, &q->filter_list, sch);
	if (err)
		return err;

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = prandom_u32();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                       164   45.94%        9      37.50%
Linus Torvalds (pre-git)           129   36.13%        3      12.50%
Jiri Pirko                          27    7.56%        2       8.33%
Nikolay Aleksandrov                 10    2.80%        1       4.17%
Alexey Kuznetsov                     9    2.52%        2       8.33%
Paolo Abeni                          6    1.68%        1       4.17%
Stephen Hemminger                    5    1.40%        2       8.33%
David S. Miller                      3    0.84%        1       4.17%
Kees Cook                            2    0.56%        1       4.17%
Patrick McHardy                      1    0.28%        1       4.17%
Aruna-Hewapathirane                  1    0.28%        1       4.17%
Total                              357  100.00%       24     100.00%


static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum		= q->quantum;
	opt.v0.perturb_period	= q->perturb_period / HZ;
	opt.v0.limit		= q->limit;
	opt.v0.divisor		= q->divisor;
	opt.v0.flows		= q->maxflows;
	opt.depth		= q->maxdepth;
	opt.headdrop		= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags		= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                       143   55.43%        3      30.00%
Linus Torvalds (pre-git)            95   36.82%        1      10.00%
David S. Miller                      7    2.71%        1      10.00%
Alexey Kuznetsov                     5    1.94%        1      10.00%
Arnaldo Carvalho de Melo             4    1.55%        2      20.00%
Stephen Hemminger                    3    1.16%        1      10.00%
Patrick McHardy                      1    0.39%        1      10.00%
Total                              258  100.00%       10     100.00%


static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Jarek Poplawski                     20  100.00%        1     100.00%
Total                               20  100.00%        1     100.00%


static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Patrick McHardy                     17   94.44%        1      50.00%
Américo Wang                         1    5.56%        1      50.00%
Total                               18  100.00%        2     100.00%


static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Jarek Poplawski                     22   73.33%        1      50.00%
Eric Dumazet                         8   26.67%        1      50.00%
Total                               30  100.00%        2     100.00%


static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Jarek Poplawski                     13   92.86%        1      50.00%
Américo Wang                         1    7.14%        1      50.00%
Total                               14  100.00%        2     100.00%


static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Patrick McHardy                     36   92.31%        1      50.00%
Jiri Pirko                           3    7.69%        1      50.00%
Total                               39  100.00%        2     100.00%


static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Patrick McHardy                     37  100.00%        1     100.00%
Total                               37  100.00%        1     100.00%


static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Patrick McHardy                     78   54.93%        1      14.29%
Eric Dumazet                        58   40.85%        4      57.14%
John Fastabend                       6    4.23%        2      28.57%
Total                              142  100.00%        7     100.00%


static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Patrick McHardy                    108   96.43%        2      50.00%
Eric Dumazet                         4    3.57%        2      50.00%
Total                              112  100.00%        4     100.00%

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.find		=	sfq_find,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_unbind,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)            13   81.25%        2      66.67%
Al Viro                              3   18.75%        1      33.33%
Total                               16  100.00%        3     100.00%


static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}

Contributors

Person                          Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)            12   80.00%        2      66.67%
Al Viro                              3   20.00%        1      33.33%
Total                               15  100.00%        3     100.00%

module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");
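
Once the module is registered, the discipline is selected by the .id string
in sfq_qdisc_ops above; for example, "tc qdisc add dev eth0 root sfq perturb
10" attaches an SFQ root qdisc whose hash is re-perturbed every 10 seconds.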

Overall Contributors

Person                          Tokens     Prop  Commits  CommitProp
Eric Dumazet                      2326   51.59%       24      26.97%
Linus Torvalds (pre-git)          1023   22.69%        5       5.62%
Patrick McHardy                    547   12.13%        7       7.87%
Alexey Kuznetsov                    85    1.89%        3       3.37%
Jarek Poplawski                     81    1.80%        6       6.74%
Gao Feng                            51    1.13%        1       1.12%
Stephen Hemminger                   50    1.11%        3       3.37%
John Fastabend                      46    1.02%        4       4.49%
Jiri Pirko                          46    1.02%        5       5.62%
Américo Wang                        42    0.93%        4       4.49%
Dave Jones                          30    0.67%        1       1.12%
Konstantin Khlebnikov               25    0.55%        1       1.12%
Kees Cook                           25    0.55%        1       1.12%
Nogah Frankel                       24    0.53%        1       1.12%
David S. Miller                     21    0.47%        4       4.49%
Al Viro                             15    0.33%        1       1.12%
Dmitry Torokhov                     11    0.24%        1       1.12%
Jussi Kivilinna                     10    0.22%        1       1.12%
Nikolay Aleksandrov                 10    0.22%        1       1.12%
Miguel Freitas                       9    0.20%        1       1.12%
Paolo Abeni                          6    0.13%        1       1.12%
Linus Torvalds                       6    0.13%        2       2.25%
Arnaldo Carvalho de Melo             4    0.09%        2       2.25%
Tejun Heo                            3    0.07%        1       1.12%
Aruna-Hewapathirane                  3    0.07%        1       1.12%
Daniel Borkmann                      2    0.04%        1       1.12%
Tom Herbert                          2    0.04%        1       1.12%
Michal Hocko                         2    0.04%        1       1.12%
Ben Greear                           1    0.02%        1       1.12%
Gustavo A. R. Silva                  1    0.02%        1       1.12%
Rusty Russell                        1    0.02%        1       1.12%
Yang Yingliang                       1    0.02%        1       1.12%
Total                             4509  100.00%       89     100.00%