cregit-Linux: how code gets into the kernel

Release 4.8 net/sched/sch_fq.c

/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic:
 *  fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as a fallback, with a 32-bit-wide hash.
 *  All packets belonging to a socket are considered a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one Round Robin list ('new' or 'old' flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  A transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue():
 *   - Look up one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per-flow list of skbs (a FIFO).
 *   - Use a special FIFO for high-priority packets.
 *
 *  dequeue(): serves flows in Round Robin.
 *  Note: when a flow becomes empty, we do not immediately remove it from
 *  the RB trees, for performance reasons (it's expected to send additional
 *  packets, or the SLAB cache will reuse the socket for another flow).
 */
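
A note on the pacing knob above: TCP maintains sk->sk_pacing_rate on its own;
applications can only cap it. As a minimal user-space sketch (not part of
sch_fq.c; the fallback #define is an assumption for older libc headers), a
sender could bound the rate that fq enforces like this:

/* Illustration only -- caps the sk_pacing_rate that fq sees at dequeue */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* value on most architectures */
#endif

static int cap_pacing_rate(int fd, unsigned int bytes_per_sec)
{
	/* fq paces with min(sk->sk_pacing_rate, flow_max_rate) */
	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &bytes_per_sec, sizeof(bytes_per_sec)) < 0) {
		perror("setsockopt(SO_MAX_PACING_RATE)");
		return -1;
	}
	return 0;
}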

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */

struct fq_flow {
	struct sk_buff	*head;		/* list of skbs for this flow : first skb */
	union {
		struct sk_buff *tail;	/* last skb in the list */
		unsigned long  age;	/* jiffies when flow was emptied, for gc */
	};
	struct rb_node	fq_node;	/* anchor in fq_root[] trees */
	struct sock	*sk;
	int		qlen;		/* number of packets in flow queue */
	int		credit;
	u32		socket_hash;	/* sk_hash */
	struct fq_flow *next;		/* next pointer in RR lists, or &detached */

	struct rb_node  rate_node;	/* anchor in q->delayed tree */
	u64		time_next_packet;
};


struct fq_flow_head {
	struct fq_flow *first;
	struct fq_flow *last;
};


struct fq_sched_data {
	struct fq_flow_head new_flows;

	struct fq_flow_head old_flows;

	struct rb_root	delayed;	/* for rate limited flows */
	u64		time_next_delayed_flow;

	struct fq_flow	internal;	/* for non classified or high prio packets */
	u32		quantum;
	u32		initial_quantum;
	u32		flow_refill_delay;
	u32		flow_max_rate;	/* optional max rate per flow */
	u32		flow_plimit;	/* max packets per flow */
	u32		orphan_mask;	/* mask for orphaned skb */
	struct rb_root	*fq_root;
	u8		rate_enable;
	u8		fq_trees_log;

	u32		flows;
	u32		inactive_flows;
	u32		throttled_flows;

	u64		stat_gc_flows;
	u64		stat_internal_packets;
	u64		stat_tcp_retrans;
	u64		stat_throttled;
	u64		stat_flows_plimit;
	u64		stat_pkts_too_long;
	u64		stat_allocation_errors;
	struct qdisc_watchdog watchdog;
};

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
	f->next = &detached;
	f->age = jiffies;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  24      100.00%  2        100.00%
Total         24      100.00%  2        100.00%


static bool fq_flow_is_detached(const struct fq_flow *f)
{
	return f->next == &detached;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  20      100.00%  1        100.00%
Total         20      100.00%  1        100.00%


static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
	struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

	while (*p) {
		struct fq_flow *aux;

		parent = *p;
		aux = container_of(parent, struct fq_flow, rate_node);
		if (f->time_next_packet >= aux->time_next_packet)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&f->rate_node, parent, p);
	rb_insert_color(&f->rate_node, &q->delayed);
	q->throttled_flows++;
	q->stat_throttled++;

	f->next = &throttled;
	if (q->time_next_delayed_flow > f->time_next_packet)
		q->time_next_delayed_flow = f->time_next_packet;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  148     100.00%  1        100.00%
Total         148     100.00%  1        100.00%

static struct kmem_cache *fq_flow_cachep __read_mostly;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
	if (head->first)
		head->last->next = flow;
	else
		head->first = flow;
	head->last = flow;
	flow->next = NULL;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  49      100.00%  1        100.00%
Total         49      100.00%  1        100.00%

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
	return fq_flow_is_detached(f) &&
	       time_after(jiffies, f->age + FQ_GC_AGE);
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  29      100.00%  1        100.00%
Total         29      100.00%  1        100.00%


static void fq_gc(struct fq_sched_data *q,
		  struct rb_root *root,
		  struct sock *sk)
{
	struct fq_flow *f, *tofree[FQ_GC_MAX];
	struct rb_node **p, *parent;
	int fcnt = 0;

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = container_of(parent, struct fq_flow, fq_node);
		if (f->sk == sk)
			break;

		if (fq_gc_candidate(f)) {
			tofree[fcnt++] = f;
			if (fcnt == FQ_GC_MAX)
				break;
		}

		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
	while (fcnt) {
		struct fq_flow *f = tofree[--fcnt];

		rb_erase(&f->fq_node, root);
		kmem_cache_free(fq_flow_cachep, f);
	}
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  189     100.00%  1        100.00%
Total         189     100.00%  1        100.00%


static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;

	/* warning: no starvation prevention... */
	if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
		return &q->internal;

	/* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
	 * or a listener (SYNCOOKIE mode)
	 * 1) request sockets are not full blown,
	 *    they do not contain sk_pacing_rate
	 * 2) They are not part of a 'flow' yet
	 * 3) We do not want to rate limit them (eg SYNFLOOD attack),
	 *    especially if the listener set SO_MAX_PACING_RATE
	 * 4) We pretend they are orphaned
	 */
	if (!sk || sk_listener(sk)) {
		unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)((hash << 1) | 1UL);
		skb_orphan(skb);
	}

	root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = container_of(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit with
			 * initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
				f->time_next_packet = 0ULL;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}

Contributors

Person               Tokens  Prop     Commits  CommitProp
eric dumazet         379     98.19%   4        80.00%
maciej zenczykowski  7       1.81%    1        20.00%
Total                386     100.00%  5        100.00%
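
Why the forced low-order bit in fq_classify() cannot collide with a real
socket: struct sock pointers are word aligned, so their low bit is always 0,
while the synthetic keys built for orphaned skbs always have it set. A
standalone illustration (the helper name is hypothetical, not from this file):

/* Illustration only: distinguishing synthetic orphan keys from real sockets */
static inline bool fq_key_is_orphan(const struct sock *sk)
{
	return ((unsigned long)sk & 1UL) != 0;
}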

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
	struct sk_buff *skb = flow->head;

	if (skb) {
		flow->head = skb->next;
		skb->next = NULL;
		flow->qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	}
	return skb;
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
eric dumazet    66      95.65%   2        66.67%
john fastabend  3       4.35%    1        33.33%
Total           69      100.00%  3        100.00%

/* We might add in the future detection of retransmits
 * For the time being, just return false
 */
static bool skb_is_retransmit(struct sk_buff *skb)
{
	return false;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  14      100.00%  1        100.00%
Total         14      100.00%  1        100.00%

/* add skb to flow queue
 * flow queue is a linked list, kind of FIFO, except for TCP retransmits
 * We special case tcp retransmits to be transmitted before other packets.
 * We rely on fact that TCP retransmits are unlikely, so we do not waste
 * a separate queue or a pointer.
 * head->  [retrans pkt 1]
 *         [retrans pkt 2]
 *         [ normal pkt 1]
 *         [ normal pkt 2]
 *         [ normal pkt 3]
 * tail->  [ normal pkt 4]
 */
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
	struct sk_buff *prev, *head = flow->head;

	skb->next = NULL;
	if (!head) {
		flow->head = skb;
		flow->tail = skb;
		return;
	}
	if (likely(!skb_is_retransmit(skb))) {
		flow->tail->next = skb;
		flow->tail = skb;
		return;
	}

	/* This skb is a tcp retransmit,
	 * find the last retrans packet in the queue
	 */
	prev = NULL;
	while (skb_is_retransmit(head)) {
		prev = head;
		head = head->next;
		if (!head)
			break;
	}
	if (!prev) { /* no rtx packet in queue, become the new head */
		skb->next = flow->head;
		flow->head = skb;
	} else {
		if (prev == flow->tail)
			flow->tail = skb;
		else
			skb->next = prev->next;
		prev->next = skb;
	}
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  166     100.00%  1        100.00%
Total         166     100.00%  1        100.00%


static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}

Contributors

Person          Tokens  Prop     Commits  CommitProp
eric dumazet    222     98.67%   3        75.00%
john fastabend  3       1.33%    1        25.00%
Total           225     100.00%  4        100.00%


static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
	struct rb_node *p;

	if (q->time_next_delayed_flow > now)
		return;

	q->time_next_delayed_flow = ~0ULL;
	while ((p = rb_first(&q->delayed)) != NULL) {
		struct fq_flow *f = container_of(p, struct fq_flow, rate_node);

		if (f->time_next_packet > now) {
			q->time_next_delayed_flow = f->time_next_packet;
			break;
		}
		rb_erase(p, &q->delayed);
		q->throttled_flows--;
		fq_flow_add_tail(&q->old_flows, f);
	}
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  112     100.00%  1        100.00%
Total         112     100.00%  1        100.00%


static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_ns();
	struct fq_flow_head *head;
	struct sk_buff *skb;
	struct fq_flow *f;
	u32 rate;

	skb = fq_dequeue_head(sch, &q->internal);
	if (skb)
		goto out;
	fq_check_throttled(q, now);
begin:
	head = &q->new_flows;
	if (!head->first) {
		head = &q->old_flows;
		if (!head->first) {
			if (q->time_next_delayed_flow != ~0ULL)
				qdisc_watchdog_schedule_ns(&q->watchdog,
							   q->time_next_delayed_flow);
			return NULL;
		}
	}
	f = head->first;

	if (f->credit <= 0) {
		f->credit += q->quantum;
		head->first = f->next;
		fq_flow_add_tail(&q->old_flows, f);
		goto begin;
	}

	skb = f->head;
	if (unlikely(skb && now < f->time_next_packet &&
		     !skb_is_tcp_pure_ack(skb))) {
		head->first = f->next;
		fq_flow_set_throttled(q, f);
		goto begin;
	}

	skb = fq_dequeue_head(sch, f);
	if (!skb) {
		head->first = f->next;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && q->old_flows.first) {
			fq_flow_add_tail(&q->old_flows, f);
		} else {
			fq_flow_set_detached(f);
			q->inactive_flows++;
		}
		goto begin;
	}
	prefetch(&skb->end);
	f->credit -= qdisc_pkt_len(skb);

	if (f->credit > 0 || !q->rate_enable)
		goto out;

	/* Do not pace locally generated ack packets */
	if (skb_is_tcp_pure_ack(skb))
		goto out;

	rate = q->flow_max_rate;
	if (skb->sk)
		rate = min(skb->sk->sk_pacing_rate, rate);

	if (rate != ~0U) {
		u32 plen = max(qdisc_pkt_len(skb), q->quantum);
		u64 len = (u64)plen * NSEC_PER_SEC;

		if (likely(rate))
			do_div(len, rate);
		/* Since socket rate can change later,
		 * clamp the delay to 1 second.
		 * Really, providers of too big packets should be fixed !
		 */
		if (unlikely(len > NSEC_PER_SEC)) {
			len = NSEC_PER_SEC;
			q->stat_pkts_too_long++;
		}

		f->time_next_packet = now + len;
	}
out:
	qdisc_bstats_update(sch, skb);
	return skb;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  449     100.00%  8        100.00%
Total         449     100.00%  8        100.00%
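
The pacing delay at the end of fq_dequeue() is just packet length over rate,
clamped to one second. In isolation (a hypothetical helper using user-space
types, not from this file), the arithmetic looks like this; for example, a
1514-byte packet at 1,000,000 bytes/sec yields 1,514,000 ns between packets:

#include <stdint.h>

#define NSEC_PER_SEC_ULL 1000000000ULL	/* matches the kernel's NSEC_PER_SEC */

static uint64_t fq_pacing_delay_ns(uint32_t plen, uint32_t rate_bps)
{
	uint64_t len = (uint64_t)plen * NSEC_PER_SEC_ULL;

	if (rate_bps)			/* rate in bytes per second */
		len /= rate_bps;
	return len > NSEC_PER_SEC_ULL ? NSEC_PER_SEC_ULL : len;	/* 1 s clamp */
}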


static void fq_flow_purge(struct fq_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
	flow->qlen = 0;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  34      100.00%  1        100.00%
Total         34      100.00%  1        100.00%


static void fq_reset(struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *root;
	struct rb_node *p;
	struct fq_flow *f;
	unsigned int idx;

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;

	fq_flow_purge(&q->internal);

	if (!q->fq_root)
		return;

	for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
		root = &q->fq_root[idx];
		while ((p = rb_first(root)) != NULL) {
			f = container_of(p, struct fq_flow, fq_node);
			rb_erase(p, root);

			fq_flow_purge(f);

			kmem_cache_free(fq_flow_cachep, f);
		}
	}
	q->new_flows.first = NULL;
	q->old_flows.first = NULL;
	q->delayed = RB_ROOT;
	q->flows = 0;
	q->inactive_flows = 0;
	q->throttled_flows = 0;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  189     100.00%  3        100.00%
Total         189     100.00%  3        100.00%


static void fq_rehash(struct fq_sched_data *q,
		      struct rb_root *old_array, u32 old_log,
		      struct rb_root *new_array, u32 new_log)
{
	struct rb_node *op, **np, *parent;
	struct rb_root *oroot, *nroot;
	struct fq_flow *of, *nf;
	int fcnt = 0;
	u32 idx;

	for (idx = 0; idx < (1U << old_log); idx++) {
		oroot = &old_array[idx];
		while ((op = rb_first(oroot)) != NULL) {
			rb_erase(op, oroot);
			of = container_of(op, struct fq_flow, fq_node);
			if (fq_gc_candidate(of)) {
				fcnt++;
				kmem_cache_free(fq_flow_cachep, of);
				continue;
			}
			nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];

			np = &nroot->rb_node;
			parent = NULL;
			while (*np) {
				parent = *np;

				nf = container_of(parent, struct fq_flow, fq_node);
				BUG_ON(nf->sk == of->sk);

				if (nf->sk > of->sk)
					np = &parent->rb_right;
				else
					np = &parent->rb_left;
			}

			rb_link_node(&of->fq_node, parent, np);
			rb_insert_color(&of->fq_node, nroot);
		}
	}
	q->flows -= fcnt;
	q->inactive_flows -= fcnt;
	q->stat_gc_flows += fcnt;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  276     100.00%  1        100.00%
Total         276     100.00%  1        100.00%


static void *fq_alloc_node(size_t sz, int node)
{
	void *ptr;

	ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
	if (!ptr)
		ptr = vmalloc_node(sz, node);
	return ptr;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  49      100.00%  1        100.00%
Total         49      100.00%  1        100.00%


static void fq_free(void *addr)
{
	kvfree(addr);
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  14      93.33%   1        50.00%
americo wang  1       6.67%    1        50.00%
Total         15      100.00%  2        100.00%


static int fq_resize(struct Qdisc *sch, u32 log)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct rb_root *array;
	void *old_fq_root;
	u32 idx;

	if (q->fq_root && log == q->fq_trees_log)
		return 0;

	/* If XPS was setup, we can allocate memory on right NUMA node */
	array = fq_alloc_node(sizeof(struct rb_root) << log,
			      netdev_queue_numa_node_read(sch->dev_queue));
	if (!array)
		return -ENOMEM;

	for (idx = 0; idx < (1U << log); idx++)
		array[idx] = RB_ROOT;

	sch_tree_lock(sch);

	old_fq_root = q->fq_root;
	if (old_fq_root)
		fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

	q->fq_root = array;
	q->fq_trees_log = log;

	sch_tree_unlock(sch);

	fq_free(old_fq_root);

	return 0;
}

Contributors

Person        Tokens  Prop     Commits  CommitProp
eric dumazet  160     100.00%  3        100.00%
Total         160     100.00%  3        100.00%

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
	[TCA_FQ_BUCKETS_LOG]		= { .type = NLA_U32 },
	[TCA_FQ_FLOW_REFILL_DELAY]