net/netfilter/xt_tcpudp.c (Linux release 4.11)
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_DESCRIPTION("Xtables: TCP, UDP and UDP-Lite match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xt_tcp");
MODULE_ALIAS("xt_udp");
MODULE_ALIAS("ipt_udp");
MODULE_ALIAS("ipt_tcp");
MODULE_ALIAS("ip6t_udp");
MODULE_ALIAS("ip6t_tcp");
/* Returns 1 if the port is matched by the range, 0 otherwise */
static inline bool
port_match(u_int16_t min, u_int16_t max, u_int16_t port, bool invert)
{
	return (port >= min && port <= max) ^ invert;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 29 | 90.62% | 1 | 50.00% |
Jan Engelhardt | 3 | 9.38% | 1 | 50.00% |
Total | 32 | 100.00% | 2 | 100.00% |
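For illustration, the XOR with invert is what implements a negated port range: the plain in-range test is computed first and then flipped when the rule asked for negation. A minimal userspace sketch of the same check follows (not part of this file; the chosen range and the main() driver are made up for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same logic as port_match() above, in plain userspace C. */
static bool in_port_range(uint16_t min, uint16_t max, uint16_t port, bool invert)
{
	return (port >= min && port <= max) ^ invert;
}

int main(void)
{
	/* a "--sport 1024:65535" style range; port 22 falls outside it */
	printf("%d\n", in_port_range(1024, 65535, 22, false));	/* prints 0 */
	/* the same range negated, as with "! --sport 1024:65535" */
	printf("%d\n", in_port_range(1024, 65535, 22, true));	/* prints 1 */
	return 0;
}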
static bool
tcp_find_option(u_int8_t option,
		const struct sk_buff *skb,
		unsigned int protoff,
		unsigned int optlen,
		bool invert,
		bool *hotdrop)
{
	/* tcp.doff is only 4 bits, i.e. max 15 * 4 bytes */
	const u_int8_t *op;
	u_int8_t _opt[60 - sizeof(struct tcphdr)];
	unsigned int i;

	pr_debug("finding option\n");

	if (!optlen)
		return invert;

	/* If we don't have the whole header, drop packet. */
	op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
				optlen, _opt);
	if (op == NULL) {
		*hotdrop = true;
		return false;
	}

	for (i = 0; i < optlen; ) {
		if (op[i] == option)
			return !invert;
		if (op[i] < 2)
			i++;
		else
			i += op[i + 1] ? : 1;
	}

	return invert;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 143 | 92.26% | 1 | 20.00% |
Jan Engelhardt | 12 | 7.74% | 4 | 80.00% |
Total | 155 | 100.00% | 5 | 100.00% |
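The loop above walks the option list as it appears on the wire: kind 0 (end of options) and kind 1 (NOP) are single bytes, every other kind is followed by a length byte that covers kind, length and data, and the "?: 1" fallback keeps a bogus zero length from stalling the scan. A self-contained userspace sketch of the same walk (illustration only; the sample buffer is just an MSS option, two NOPs and SACK-permitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Scan a raw TCP option block for a given option kind. */
static bool find_tcp_option(const uint8_t *opt, unsigned int optlen, uint8_t kind)
{
	unsigned int i = 0;

	while (i < optlen) {
		if (opt[i] == kind)
			return true;
		if (opt[i] < 2)		/* EOL (0) or NOP (1): one byte */
			i++;
		else			/* kind, length, data... */
			i += opt[i + 1] ? opt[i + 1] : 1;
	}
	return false;
}

int main(void)
{
	/* MSS (kind 2, len 4), NOP, NOP, SACK-permitted (kind 4, len 2) */
	const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 1, 4, 2 };

	printf("%d\n", find_tcp_option(opts, sizeof(opts), 4));	/* prints 1 */
	printf("%d\n", find_tcp_option(opts, sizeof(opts), 8));	/* prints 0 */
	return 0;
}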
static bool tcp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const struct xt_tcp *tcpinfo = par->matchinfo;

	if (par->fragoff != 0) {
		/* To quote Alan:

		   Don't allow a fragment of TCP 8 bytes in. Nobody normal
		   causes this. It's a cracker trying to break in by doing a
		   flag overwrite to pass the direction checks.
		*/
		if (par->fragoff == 1) {
			pr_debug("Dropping evil TCP offset=1 frag.\n");
			par->hotdrop = true;
		}
		/* Must not be a fragment. */
		return false;
	}

	th = skb_header_pointer(skb, par->thoff, sizeof(_tcph), &_tcph);
	if (th == NULL) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop. */
		pr_debug("Dropping evil TCP offset=0 tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
			ntohs(th->source),
			!!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
		return false;
	if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
			ntohs(th->dest),
			!!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
		return false;
	if (!NF_INVF(tcpinfo, XT_TCP_INV_FLAGS,
		     (((unsigned char *)th)[13] & tcpinfo->flg_mask) == tcpinfo->flg_cmp))
		return false;
	if (tcpinfo->option) {
		if (th->doff * 4 < sizeof(_tcph)) {
			par->hotdrop = true;
			return false;
		}
		if (!tcp_find_option(tcpinfo->option, skb, par->thoff,
				     th->doff * 4 - sizeof(_tcph),
				     tcpinfo->invflags & XT_TCP_INV_OPTION,
				     &par->hotdrop))
			return false;
	}
	return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 254 | 83.01% | 1 | 10.00% |
Jan Engelhardt | 47 | 15.36% | 8 | 80.00% |
Joe Perches | 5 | 1.63% | 1 | 10.00% |
Total | 306 | 100.00% | 10 | 100.00% |
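The flag test reads byte 13 of the TCP header, which carries the CWR/ECE/URG/ACK/PSH/RST/SYN/FIN bits, masks it with flg_mask and compares the result against flg_cmp; NF_INVF() then flips the verdict when XT_TCP_INV_FLAGS was requested. That is the semantics behind iptables' "--tcp-flags mask comp", sketched below in plain userspace C (illustration only; the TH_* constants mirror the usual on-wire flag values):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TH_FIN	0x01
#define TH_SYN	0x02
#define TH_RST	0x04
#define TH_ACK	0x10

/* "--tcp-flags <mask> <cmp>": inspect only the bits in mask,
 * require them to equal cmp, and ignore every other flag bit. */
static bool tcp_flags_match(uint8_t flags, uint8_t mask, uint8_t cmp)
{
	return (flags & mask) == cmp;
}

int main(void)
{
	/* "--tcp-flags SYN,ACK,FIN,RST SYN": new connection attempts only */
	uint8_t mask = TH_SYN | TH_ACK | TH_FIN | TH_RST;

	printf("%d\n", tcp_flags_match(TH_SYN, mask, TH_SYN));		/* prints 1 */
	printf("%d\n", tcp_flags_match(TH_SYN | TH_ACK, mask, TH_SYN));	/* prints 0 */
	return 0;
}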
static int tcp_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_tcp *tcpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (tcpinfo->invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 25 | 65.79% | 1 | 14.29% |
Jan Engelhardt | 11 | 28.95% | 4 | 57.14% |
Patrick McHardy | 2 | 5.26% | 2 | 28.57% |
Total | 38 | 100.00% | 7 | 100.00% |
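The check refuses any invert bit the kernel does not recognise, so a rule requesting an unsupported negation fails loudly instead of being silently misapplied. A sketch of the same pattern (the XT_TCP_INV_* values below are assumed to follow the uapi header xt_tcpudp.h; treat them as illustrative):

#include <errno.h>
#include <stdint.h>

/* Presumed values of the defined invert bits; XT_TCP_INV_MASK is their OR. */
#define XT_TCP_INV_SRCPT	0x01
#define XT_TCP_INV_DSTPT	0x02
#define XT_TCP_INV_FLAGS	0x04
#define XT_TCP_INV_OPTION	0x08
#define XT_TCP_INV_MASK		0x0F

/* Any bit outside the mask means userspace asked for something this
 * kernel cannot negate, so the whole rule is refused with -EINVAL. */
static int check_invflags(uint8_t invflags)
{
	return (invflags & ~XT_TCP_INV_MASK) ? -EINVAL : 0;
}

int main(void)
{
	return check_invflags(0x10) == -EINVAL ? 0 : 1;	/* 0x10 is an unknown bit */
}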
static bool udp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct udphdr *uh;
	struct udphdr _udph;
	const struct xt_udp *udpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	uh = skb_header_pointer(skb, par->thoff, sizeof(_udph), &_udph);
	if (uh == NULL) {
		/* We've been asked to examine this packet, and we
		   can't.  Hence, no choice but to drop. */
		pr_debug("Dropping evil UDP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return port_match(udpinfo->spts[0], udpinfo->spts[1],
			  ntohs(uh->source),
			  !!(udpinfo->invflags & XT_UDP_INV_SRCPT))
		&& port_match(udpinfo->dpts[0], udpinfo->dpts[1],
			      ntohs(uh->dest),
			      !!(udpinfo->invflags & XT_UDP_INV_DSTPT));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 135 | 84.38% | 1 | 12.50% |
Jan Engelhardt | 25 | 15.62% | 7 | 87.50% |
Total | 160 | 100.00% | 8 | 100.00% |
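Unlike the TCP path there are no flags or options to inspect here: non-first fragments, which carry no UDP header, simply never match, and otherwise the verdict is the conjunction of the two port range tests. A compact userspace sketch of that decision (illustration only; the rule struct and the values in main() are invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct udp_rule {
	uint16_t spts[2];	/* source port range      */
	uint16_t dpts[2];	/* destination port range */
};

static bool in_range(uint16_t min, uint16_t max, uint16_t port)
{
	return port >= min && port <= max;
}

/* Match only the first fragment, and only if both port tests pass. */
static bool udp_ports_match(const struct udp_rule *r, uint16_t fragoff,
			    uint16_t sport, uint16_t dport)
{
	if (fragoff != 0)
		return false;
	return in_range(r->spts[0], r->spts[1], sport) &&
	       in_range(r->dpts[0], r->dpts[1], dport);
}

int main(void)
{
	const struct udp_rule dns = { { 0, 65535 }, { 53, 53 } };

	printf("%d\n", udp_ports_match(&dns, 0, 40000, 53));	/* prints 1 */
	printf("%d\n", udp_ports_match(&dns, 1, 40000, 53));	/* prints 0: fragment */
	return 0;
}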
static int udp_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_udp *udpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	return (udpinfo->invflags & ~XT_UDP_INV_MASK) ? -EINVAL : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 23 | 60.53% | 1 | 12.50% |
Jan Engelhardt | 11 | 28.95% | 4 | 50.00% |
Patrick McHardy | 3 | 7.89% | 2 | 25.00% |
Jesper Bengtsson | 1 | 2.63% | 1 | 12.50% |
Total | 38 | 100.00% | 8 | 100.00% |
static struct xt_match tcpudp_mt_reg[] __read_mostly = {
	{
		.name = "tcp",
		.family = NFPROTO_IPV4,
		.checkentry = tcp_mt_check,
		.match = tcp_mt,
		.matchsize = sizeof(struct xt_tcp),
		.proto = IPPROTO_TCP,
		.me = THIS_MODULE,
	},
	{
		.name = "tcp",
		.family = NFPROTO_IPV6,
		.checkentry = tcp_mt_check,
		.match = tcp_mt,
		.matchsize = sizeof(struct xt_tcp),
		.proto = IPPROTO_TCP,
		.me = THIS_MODULE,
	},
	{
		.name = "udp",
		.family = NFPROTO_IPV4,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDP,
		.me = THIS_MODULE,
	},
	{
		.name = "udp",
		.family = NFPROTO_IPV6,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDP,
		.me = THIS_MODULE,
	},
	{
		.name = "udplite",
		.family = NFPROTO_IPV4,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDPLITE,
		.me = THIS_MODULE,
	},
	{
		.name = "udplite",
		.family = NFPROTO_IPV6,
		.checkentry = udp_mt_check,
		.match = udp_mt,
		.matchsize = sizeof(struct xt_udp),
		.proto = IPPROTO_UDPLITE,
		.me = THIS_MODULE,
	},
};
static int __init tcpudp_mt_init(void)
{
	return xt_register_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 14 | 70.00% | 1 | 33.33% |
Jan Engelhardt | 3 | 15.00% | 1 | 33.33% |
Patrick McHardy | 3 | 15.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
static void __exit tcpudp_mt_exit(void)
{
	xt_unregister_matches(tcpudp_mt_reg, ARRAY_SIZE(tcpudp_mt_reg));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 13 | 68.42% | 1 | 33.33% |
Jan Engelhardt | 3 | 15.79% | 1 | 33.33% |
Patrick McHardy | 3 | 15.79% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
module_init(tcpudp_mt_init);
module_exit(tcpudp_mt_exit);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 770 | 69.12% | 1 | 4.35% |
Jan Engelhardt | 144 | 12.93% | 13 | 56.52% |
Patrick McHardy | 106 | 9.52% | 4 | 17.39% |
Gerrit Renker | 76 | 6.82% | 1 | 4.35% |
Pablo Neira Ayuso | 9 | 0.81% | 1 | 4.35% |
Joe Perches | 5 | 0.45% | 1 | 4.35% |
David S. Miller | 3 | 0.27% | 1 | 4.35% |
Jesper Bengtsson | 1 | 0.09% | 1 | 4.35% |
Total | 1114 | 100.00% | 23 | 100.00% |