/* Release 4.14 net/sched/cls_api.c */
/*
* net/sched/cls_api.c Packet classifier API.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Changes:
*
* Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);
/* Find classifier type by string name */
static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
const struct tcf_proto_ops *t, *res = NULL;
if (kind) {
read_lock(&cls_mod_lock);
list_for_each_entry(t, &tcf_proto_base, head) {
if (strcmp(kind, t->kind) == 0) {
if (try_module_get(t->owner))
res = t;
break;
}
}
read_unlock(&cls_mod_lock);
}
return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 52 | 61.18% | 2 | 25.00% |
Patrick McHardy | 13 | 15.29% | 1 | 12.50% |
Eric Dumazet | 8 | 9.41% | 2 | 25.00% |
Américo Wang | 8 | 9.41% | 1 | 12.50% |
Jiri Pirko | 3 | 3.53% | 1 | 12.50% |
Chris Wright | 1 | 1.18% | 1 | 12.50% |
Total | 85 | 100.00% | 8 | 100.00% |
/* Register(unregister) new classifier type */
int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t;
int rc = -EEXIST;
write_lock(&cls_mod_lock);
list_for_each_entry(t, &tcf_proto_base, head)
if (!strcmp(ops->kind, t->kind))
goto out;
list_add_tail(&ops->head, &tcf_proto_base);
rc = 0;
out:
write_unlock(&cls_mod_lock);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 45 | 57.69% | 2 | 50.00% |
Arnaldo Carvalho de Melo | 17 | 21.79% | 1 | 25.00% |
Américo Wang | 16 | 20.51% | 1 | 25.00% |
Total | 78 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(register_tcf_proto_ops);
static struct workqueue_struct *tc_filter_wq;
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t;
int rc = -ENOENT;
/* Wait for outstanding call_rcu()s, if any, from a
* tcf_proto_ops's destroy() handler.
*/
rcu_barrier();
flush_workqueue(tc_filter_wq);
write_lock(&cls_mod_lock);
list_for_each_entry(t, &tcf_proto_base, head) {
if (t == ops) {
list_del(&t->head);
rc = 0;
break;
}
}
write_unlock(&cls_mod_lock);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 38 | 50.00% | 2 | 28.57% |
Américo Wang | 18 | 23.68% | 2 | 28.57% |
Arnaldo Carvalho de Melo | 11 | 14.47% | 1 | 14.29% |
Eric Dumazet | 5 | 6.58% | 1 | 14.29% |
Daniel Borkmann | 4 | 5.26% | 1 | 14.29% |
Total | 76 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(unregister_tcf_proto_ops);
bool tcf_queue_work(struct work_struct *work)
{
return queue_work(tc_filter_wq, work);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(tcf_queue_work);
/* Select new prio value from the range, managed by kernel. */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
u32 first = TC_H_MAKE(0xC0000000U, 0U);
if (tp)
first = tp->prio - 1;
return TC_H_MAJ(first);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 36 | 90.00% | 1 | 33.33% |
Jiri Pirko | 3 | 7.50% | 1 | 33.33% |
Stephen Hemminger | 1 | 2.50% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
u32 prio, u32 parent, struct Qdisc *q,
struct tcf_chain *chain)
{
struct tcf_proto *tp;
int err;
tp = kzalloc(sizeof(*tp), GFP_KERNEL);
if (!tp)
return ERR_PTR(-ENOBUFS);
err = -ENOENT;
tp->ops = tcf_proto_lookup_ops(kind);
if (!tp->ops) {
#ifdef CONFIG_MODULES
rtnl_unlock();
request_module("cls_%s", kind);
rtnl_lock();
tp->ops = tcf_proto_lookup_ops(kind);
/* We dropped the RTNL semaphore in order to perform
* the module load. So, even if we succeeded in loading
* the module we have to replay the request. We indicate
* this using -EAGAIN.
*/
if (tp->ops) {
module_put(tp->ops->owner);
err = -EAGAIN;
} else {
err = -ENOENT;
}
goto errout;
#endif
}
tp->classify = tp->ops->classify;
tp->protocol = protocol;
tp->prio = prio;
tp->classid = parent;
tp->q = q;
tp->chain = chain;
err = tp->ops->init(tp);
if (err) {
module_put(tp->ops->owner);
goto errout;
}
return tp;
errout:
kfree(tp);
return ERR_PTR(err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 234 | 100.00% | 3 | 100.00% |
Total | 234 | 100.00% | 3 | 100.00% |
static void tcf_proto_destroy(struct tcf_proto *tp)
{
tp->ops->destroy(tp);
module_put(tp->ops->owner);
kfree_rcu(tp, rcu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 34 | 94.44% | 1 | 50.00% |
Américo Wang | 2 | 5.56% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
u32 chain_index)
{
struct tcf_chain *chain;
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
list_add_tail(&chain->list, &block->chain_list);
chain->block = block;
chain->index = chain_index;
chain->refcnt = 1;
return chain;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 75 | 98.68% | 2 | 66.67% |
Américo Wang | 1 | 1.32% | 1 | 33.33% |
Total | 76 | 100.00% | 3 | 100.00% |
static void tcf_chain_flush(struct tcf_chain *chain)
{
struct tcf_proto *tp;
if (chain->p_filter_chain)
RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
RCU_INIT_POINTER(chain->filter_chain, tp->next);
tcf_chain_put(chain);
tcf_proto_destroy(tp);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 65 | 92.86% | 5 | 83.33% |
Américo Wang | 5 | 7.14% | 1 | 16.67% |
Total | 70 | 100.00% | 6 | 100.00% |
static void tcf_chain_destroy(struct tcf_chain *chain)
{
list_del(&chain->list);
kfree(chain);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 20 | 83.33% | 1 | 50.00% |
Américo Wang | 4 | 16.67% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
static void tcf_chain_hold(struct tcf_chain *chain)
{
++chain->refcnt;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 11 | 68.75% | 1 | 33.33% |
Jiri Pirko | 5 | 31.25% | 2 | 66.67% |
Total | 16 | 100.00% | 3 | 100.00% |
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
bool create)
{
struct tcf_chain *chain;
list_for_each_entry(chain, &block->chain_list, list) {
if (chain->index == chain_index) {
tcf_chain_hold(chain);
return chain;
}
}
return create ? tcf_chain_create(block, chain_index) : NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 46 | 73.02% | 2 | 50.00% |
Américo Wang | 17 | 26.98% | 2 | 50.00% |
Total | 63 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(tcf_chain_get);
void tcf_chain_put(struct tcf_chain *chain)
{
if (--chain->refcnt == 0)
tcf_chain_destroy(chain);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(tcf_chain_put);
static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
struct tcf_proto __rcu **p_filter_chain)
{
chain->p_filter_chain = p_filter_chain;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 24 | 100.00% | 2 | 100.00% |
Total | 24 | 100.00% | 2 | 100.00% |
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain)
{
struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
struct tcf_chain *chain;
int err;
if (!block)
return -ENOMEM;
INIT_LIST_HEAD(&block->chain_list);
/* Create chain 0 by default, it has to be always present. */
chain = tcf_chain_create(block, 0);
if (!chain) {
err = -ENOMEM;
goto err_chain_create;
}
tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
*p_block = block;
return 0;
err_chain_create:
kfree(block);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 109 | 100.00% | 3 | 100.00% |
Total | 109 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(tcf_block_get);
static void tcf_block_put_final(struct work_struct *work)
{
struct tcf_block *block = container_of(work, struct tcf_block, work);
struct tcf_chain *chain, *tmp;
rtnl_lock();
/* Only chain 0 should be still here. */
list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
tcf_chain_put(chain);
rtnl_unlock();
kfree(block);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 46 | 75.41% | 2 | 50.00% |
Jiri Pirko | 15 | 24.59% | 2 | 50.00% |
Total | 61 | 100.00% | 4 | 100.00% |
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
* actions should be all removed after flushing. However, filters are now
* destroyed in tc filter workqueue with RTNL lock, they can not race here.
*/
void tcf_block_put(struct tcf_block *block)
{
struct tcf_chain *chain, *tmp;
if (!block)
return;
list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
tcf_chain_flush(chain);
INIT_WORK(&block->work, tcf_block_put_final);
/* Wait for RCU callbacks to release the reference count and make
* sure their works have been queued before this.
*/
rcu_barrier();
tcf_queue_work(&block->work);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 50 | 81.97% | 4 | 57.14% |
Jiri Pirko | 11 | 18.03% | 3 | 42.86% |
Total | 61 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(tcf_block_put);
/* Main classifier routine: scans classifier chain attached
* to this qdisc, (optionally) tests for protocol and asks
* specific classifiers.
*/
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
const int max_reclassify_loop = 4;
const struct tcf_proto *orig_tp = tp;
const struct tcf_proto *first_tp;
int limit = 0;
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
int err;
if (tp->protocol != protocol &&
tp->protocol != htons(ETH_P_ALL))
continue;
err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
first_tp = orig_tp;
goto reset;
} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
first_tp = res->goto_tp;
goto reset;
}
#endif
if (err >= 0)
return err;
}
return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
if (unlikely(limit++ >= max_reclassify_loop)) {
net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
tp->q->ops->id, tp->prio & 0xffff,
ntohs(tp->protocol));
return TC_ACT_SHOT;
}
tp = first_tp;
protocol = tc_skb_protocol(skb);
goto reclassify;
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 239 | 100.00% | 4 | 100.00% |
Total | 239 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(tcf_classify);
struct tcf_chain_info {
struct tcf_proto __rcu **pprev;
struct tcf_proto __rcu *next;
};
static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
return rtnl_dereference(*chain_info->pprev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
static void tcf_chain_tp_insert(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
struct tcf_proto *tp)
{
if (chain->p_filter_chain &&
*chain_info->pprev == chain->filter_chain)
rcu_assign_pointer(*chain->p_filter_chain, tp);
RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
rcu_assign_pointer(*chain_info->pprev, tp);
tcf_chain_hold(chain);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 68 | 93.15% | 2 | 66.67% |
Américo Wang | 5 | 6.85% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
static void tcf_chain_tp_remove(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
struct tcf_proto *tp)
{
struct tcf_proto *next = rtnl_dereference(chain_info->next);
if (chain->p_filter_chain && tp == chain->filter_chain)
RCU_INIT_POINTER(*chain->p_filter_chain, next);
RCU_INIT_POINTER(*chain_info->pprev, next);
tcf_chain_put(chain);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 65 | 92.86% | 2 | 66.67% |
Américo Wang | 5 | 7.14% | 1 | 33.33% |
Total | 70 | 100.00% | 3 | 100.00% |
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
u32 protocol, u32 prio,
bool prio_allocate)
{
struct tcf_proto **pprev;
struct tcf_proto *tp;
/* Check the chain for existence of proto-tcf with this priority */
for (pprev = &chain->filter_chain;
(tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
if (prio_allocate ||
(tp->protocol != protocol && protocol))
return ERR_PTR(-EINVAL);
} else {
tp = NULL;
}
break;
}
}
chain_info->pprev = pprev;
chain_info->next = tp ? tp->next : NULL;
return tp;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jiri Pirko | 137 | 100.00% | 2 | 100.00% |
Total | 137 | 100.00% | 2 | 100.00% |
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
struct tcf_proto *tp, void *fh, u32 portid,
u32 seq, u16 flags, int event)
{
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
if (!nlh)
goto out_nlmsg_trim;
tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
tcm->tcm_parent = tp->classid;
tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
goto nla_put_failure;
if (!fh) {
tcm->tcm_handle = 0;
} else {
if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
goto nla_put_failure;
}
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 158 | 60.77% | 1 | 5.56% |
Linus Torvalds (pre-git) | 56 | 21.54% | 2 | 11.11% |
Patrick McHardy | 16 | 6.15% | 3 | 16.67% |
Hong Zhi Guo | 11 | 4.23% | 1 | 5.56% |
Stephen Hemminger | 6 | 2.31% | 1 | 5.56% |
Eric W. Biedermann | 4 | 1.54% | 3 | 16.67% |
Jiri Pirko | 3 | 1.15% | 2 | 11.11% |
David Ahern | 2 | 0.77% | 1 | 5.56% |
David S. Miller | 1 | 0.38% | 1 | 5.56% |
Stéphane Graber | 1 | 0.38% | 1 | 5.56% |
John Fastabend | 1 | 0.38% | 1 | 5.56% |
Daniel Borkmann | 1 | 0.38% | 1 | 5.56% |
Total | 260 | 100.00% | 18 | 100.00% |
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct tcf_proto *tp,
void *fh, int event, bool unicast)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
n->nlmsg_flags, event) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
if (unicast)
return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 115 | 78.23% | 2 | 33.33% |
Jiri Pirko | 21 | 14.29% | 2 | 33.33% |
Daniel Borkmann | 10 | 6.80% | 1 | 16.67% |
John Fastabend | 1 | 0.68% | 1 | 16.67% |
Total | 147 | 100.00% | 6 | 100.00% |
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n, struct tcf_proto *tp,
void *fh, bool unicast, bool *last)
{
struct sk_buff *skb;
u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
int err;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
err = tp->ops->delete(tp, fh, last);
if (err) {
kfree_skb(skb);
return err;
}
if (unicast)
return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 144 | 80.00% | 1 | 11.11% |
Linus Torvalds (pre-git) | 17 | 9.44% | 2 | 22.22% |
Jiri Pirko | 16 | 8.89% | 4 | 44.44% |
Patrick McHardy | 2 | 1.11% | 1 | 11.11% |
Stephen Hemminger | 1 | 0.56% | 1 | 11.11% |
Total | 180 | 100.00% | 9 | 100.00% |
static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
struct nlmsghdr *n,
struct tcf_chain *chain, int event)
{
struct tcf_proto *tp;
for (tp = rtnl_dereference(chain->filter_chain);
tp; tp = rtnl_dereference(tp->next))
tfilter_notify(net, oskb, n, tp, 0, event, false);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 63 | 86.30% | 1 | 25.00% |
Linus Torvalds (pre-git) | 9 | 12.33% | 2 | 50.00% |
Jiri Pirko | 1 | 1.37% | 1 | 25.00% |
Total | 73 | 100.00% | 4 | 100.00% |
/* Add/change/delete/get a filter node */
static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
struct tcmsg *t;
u32 protocol;
u32 prio;
bool prio_allocate;
u32 parent;
u32 chain_index;
struct net_device *dev;
struct Qdisc *q;
struct tcf_chain_info chain_info;
struct tcf_chain *chain = NULL;
struct tcf_block *block;
struct tcf_proto *tp;
const struct Qdisc_class_ops *cops;
unsigned long cl;
void *fh;
int err;
int tp_created;
if ((n->nlmsg_type != RTM_GETTFILTER) &&
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
replay:
tp_created = 0;
err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
if (err < 0)
return err;
t = nlmsg_data(n);
protocol = TC_H_MIN(t->tcm_info);
prio = TC_H_MAJ(t->tcm_info);
prio_allocate = false;
parent = t->tcm_parent;
cl = 0;
if (prio == 0) {
switch (n->nlmsg_type) {
case RTM_DELTFILTER:
if (protocol || t->tcm_handle || tca[TCA_KIND])
return -ENOENT;
break;
case RTM_NEWTFILTER:
/* If no priority is provided by the user,
* we allocate one.
*/
if (n->nlmsg_flags & NLM_F_CREATE) {
prio = TC_H_MAKE(0x80000000U, 0U);
prio_allocate = true;
break;
}
/* fall-through */
default:
return -ENOENT;
}
}
/* Find head of filter chain. */
/* Find link */
dev = __dev_get_by_index(net, t->tcm_ifindex);
if (dev == NULL)
return -ENODEV;
/* Find qdisc */
if (!parent) {
q = dev->qdisc;
parent = q->handle;
} else {
q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
if (q == NULL)
return -EINVAL;
}
/* Is it classful? */
cops = q->ops->cl_ops;
if (!cops)
return -EINVAL;
if (!cops->tcf_block)
return -EOPNOTSUPP;
/* Do we search for filter, attached to class? */
if (TC_H_MIN(parent)) {
cl = cops->find(q, parent);
if (cl == 0)
return -ENOENT;
}
/* And the last stroke */
block = cops->tcf_block(q, cl);
if (!block) {
err = -EINVAL;
goto errout;
}
chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
if (chain_index > TC_ACT_EXT_VAL_MASK) {
err = -EINVAL;
goto errout;
}
chain = tcf_chain_get(block, chain_index,
n->nlmsg_type == RTM_NEWTFILTER);
if (!chain) {
err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
goto errout;
}
if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER)