Release 4.11 net/netfilter/xt_TCPMSS.c
/*
 * This is a module which is used for setting the MSS option in TCP packets.
 *
 * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
 * Copyright (C) 2007 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <net/tcp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_tcpudp.h>
#include <linux/netfilter/xt_TCPMSS.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("Xtables: TCP Maximum Segment Size (MSS) adjustment");
MODULE_ALIAS("ipt_TCPMSS");
MODULE_ALIAS("ip6t_TCPMSS");
static inline unsigned int
optlen(const u_int8_t *opt, unsigned int offset)
{
        /* Beware zero-length options: make finite progress */
        if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
                return 1;
        else
                return opt[offset+1];
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds | 48 | 100.00% | 1 | 100.00% |
Total | 48 | 100.00% | 1 | 100.00% |
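optlen() is the step function for the option walk performed in tcpmss_mangle_packet() further down: it returns 1 for the single-byte EOL/NOP kinds and otherwise the option's own length byte, and the opt[offset+1] == 0 test guarantees forward progress even on a malformed zero-length option. The following is a standalone userspace sketch of the same step rule, not part of the module; the buffer contents and names (example_optlen, opts) are hypothetical and chosen only to illustrate why the walk cannot loop forever.
#include <stdio.h>
#include <stdint.h>

/* Illustration only: same step rule as optlen() above.  Kinds 0 (EOL) and
 * 1 (NOP) are one byte long, everything else carries a length byte, and a
 * corrupt length of zero is treated as one byte so the walk terminates.
 */
static unsigned int example_optlen(const uint8_t *opt, unsigned int off)
{
        if (opt[off] <= 1 || opt[off + 1] == 0)
                return 1;
        return opt[off + 1];
}

int main(void)
{
        /* Hypothetical option block: NOP, NOP, MSS (kind 2, len 4, 1460), EOL */
        const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4, 0 };
        unsigned int i;

        for (i = 0; i + 1 < sizeof(opts); i += example_optlen(opts, i))
                printf("offset %u: kind %u\n", i, opts[i]);
        return 0;
}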
static u_int32_t tcpmss_reverse_mtu(struct net *net,
                                    const struct sk_buff *skb,
                                    unsigned int family)
{
        struct flowi fl;
        const struct nf_afinfo *ai;
        struct rtable *rt = NULL;
        u_int32_t mtu = ~0U;

        if (family == PF_INET) {
                struct flowi4 *fl4 = &fl.u.ip4;

                memset(fl4, 0, sizeof(*fl4));
                fl4->daddr = ip_hdr(skb)->saddr;
        } else {
                struct flowi6 *fl6 = &fl.u.ip6;

                memset(fl6, 0, sizeof(*fl6));
                fl6->daddr = ipv6_hdr(skb)->saddr;
        }

        rcu_read_lock();
        ai = nf_get_afinfo(family);
        if (ai != NULL)
                ai->route(net, (struct dst_entry **)&rt, &fl, false);
        rcu_read_unlock();

        if (rt != NULL) {
                mtu = dst_mtu(&rt->dst);
                dst_release(&rt->dst);
        }
        return mtu;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Gao Feng | 196 | 100.00% | 2 | 100.00% |
Total | 196 | 100.00% | 2 | 100.00% |
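tcpmss_reverse_mtu() looks up the MTU of the return route toward the packet's source; tcpmss_mangle_packet() below then takes the minimum of that and the forward-route MTU and subtracts minlen, the minimal IP+TCP header length (40 for IPv4, 60 for IPv6), to obtain the clamped MSS. The sketch below reproduces only that arithmetic; the function name example_clamped_mss and the MTU values are hypothetical.
#include <stdio.h>

/* Illustration only: the clamp computed in tcpmss_mangle_packet() when the
 * target is configured with --clamp-mss-to-pmtu.  'minlen' is 40 for IPv4
 * (20-byte IP header + 20-byte TCP header) and 60 for IPv6 (40 + 20).
 */
static unsigned int example_clamped_mss(unsigned int fwd_mtu,
                                        unsigned int rev_mtu,
                                        unsigned int minlen)
{
        unsigned int min_mtu = fwd_mtu < rev_mtu ? fwd_mtu : rev_mtu;

        return min_mtu > minlen ? min_mtu - minlen : 0; /* 0 means invalid */
}

int main(void)
{
        /* e.g. a 1400-byte path MTU on an IPv4 connection yields an MSS of 1360 */
        printf("%u\n", example_clamped_mss(1500, 1400, 40));
        return 0;
}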
static int
tcpmss_mangle_packet(struct sk_buff *skb,
                     const struct xt_action_param *par,
                     unsigned int family,
                     unsigned int tcphoff,
                     unsigned int minlen)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        struct tcphdr *tcph;
        int len, tcp_hdrlen;
        unsigned int i;
        __be16 oldval;
        u16 newmss;
        u8 *opt;

        /* This is a fragment, no TCP header is available */
        if (par->fragoff != 0)
                return 0;

        if (!skb_make_writable(skb, skb->len))
                return -1;

        len = skb->len - tcphoff;
        if (len < (int)sizeof(struct tcphdr))
                return -1;

        tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        tcp_hdrlen = tcph->doff * 4;

        if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
                return -1;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
                struct net *net = xt_net(par);
                unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
                unsigned int min_mtu = min(dst_mtu(skb_dst(skb)), in_mtu);

                if (min_mtu <= minlen) {
                        net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
                                            min_mtu);
                        return -1;
                }
                newmss = min_mtu - minlen;
        } else
                newmss = info->mss;

        opt = (u_int8_t *)tcph;
        for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
                if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
                        u_int16_t oldmss;

                        oldmss = (opt[i+2] << 8) | opt[i+3];

                        /* Never increase MSS, even when setting it, as
                         * doing so results in problems for hosts that rely
                         * on MSS being set correctly.
                         */
                        if (oldmss <= newmss)
                                return 0;

                        opt[i+2] = (newmss & 0xff00) >> 8;
                        opt[i+3] = newmss & 0x00ff;

                        inet_proto_csum_replace2(&tcph->check, skb,
                                                 htons(oldmss), htons(newmss),
                                                 false);
                        return 0;
                }
        }

        /* There is data after the header so the option can't be added
         * without moving it, and doing so may make the SYN packet
         * itself too large. Accept the packet unmodified instead.
         */
        if (len > tcp_hdrlen)
                return 0;

        /* tcph->doff has 4 bits, do not wrap it to 0 */
        if (tcp_hdrlen >= 15 * 4)
                return 0;

        /*
         * MSS Option not found ?! add it..
         */
        if (skb_tailroom(skb) < TCPOLEN_MSS) {
                if (pskb_expand_head(skb, 0,
                                     TCPOLEN_MSS - skb_tailroom(skb),
                                     GFP_ATOMIC))
                        return -1;
                tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
        }

        skb_put(skb, TCPOLEN_MSS);
        /*
         * IPv4: RFC 1122 states "If an MSS option is not received at
         * connection setup, TCP MUST assume a default send MSS of 536".
         * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280; with a
         * 40-byte IPv6 header and a 20-byte TCP header that leaves a
         * default MSS of 1220.
         * Since no MSS was provided, we must use the default values.
         */
        if (xt_family(par) == NFPROTO_IPV4)
                newmss = min(newmss, (u16)536);
        else
                newmss = min(newmss, (u16)1220);

        opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
        memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));

        inet_proto_csum_replace2(&tcph->check, skb,
                                 htons(len), htons(len + TCPOLEN_MSS), true);
        opt[0] = TCPOPT_MSS;
        opt[1] = TCPOLEN_MSS;
        opt[2] = (newmss & 0xff00) >> 8;
        opt[3] = newmss & 0x00ff;

        inet_proto_csum_replace4(&tcph->check, skb, 0, *((__be32 *)opt), false);

        oldval = ((__be16 *)tcph)[6];
        tcph->doff += TCPOLEN_MSS/4;
        inet_proto_csum_replace2(&tcph->check, skb,
                                 oldval, ((__be16 *)tcph)[6], false);
        return TCPOLEN_MSS;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds | 362 | 55.18% | 2 | 6.06% |
Phil Oester | 56 | 8.54% | 4 | 12.12% |
Patrick McHardy | 49 | 7.47% | 5 | 15.15% |
Pablo Neira Ayuso | 47 | 7.16% | 2 | 6.06% |
Gao Feng | 32 | 4.88% | 3 | 9.09% |
Herbert Xu | 24 | 3.66% | 3 | 9.09% |
Eric Dumazet | 23 | 3.51% | 2 | 6.06% |
Jan Engelhardt | 19 | 2.90% | 2 | 6.06% |
Al Viro | 17 | 2.59% | 2 | 6.06% |
Simon Arlott | 7 | 1.07% | 1 | 3.03% |
Arnaldo Carvalho de Melo | 6 | 0.91% | 1 | 3.03% |
Rusty Russell | 5 | 0.76% | 1 | 3.03% |
Tom Herbert | 4 | 0.61% | 1 | 3.03% |
David S. Miller | 2 | 0.30% | 1 | 3.03% |
Harald Welte | 1 | 0.15% | 1 | 3.03% |
Joe Perches | 1 | 0.15% | 1 | 3.03% |
Benjamin LaHaise | 1 | 0.15% | 1 | 3.03% |
Total | 656 | 100.00% | 33 | 100.00% |
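The byte arithmetic in tcpmss_mangle_packet() follows the on-the-wire layout of the TCP MSS option: kind 2, length 4, then the 16-bit MSS in network byte order, which is why the value is read as (opt[i+2] << 8) | opt[i+3] and written back byte by byte. The following is a small standalone sketch of that encode/decode, not part of the module; the names example_read_mss and example_write_mss are hypothetical.
#include <stdio.h>
#include <stdint.h>

/* Illustration only: the 4-byte TCP MSS option (kind 2, length 4, 16-bit
 * value in network byte order), mirroring the opt[i+2]/opt[i+3] accesses
 * in tcpmss_mangle_packet() above.
 */
static uint16_t example_read_mss(const uint8_t *opt)
{
        return (uint16_t)((opt[2] << 8) | opt[3]);
}

static void example_write_mss(uint8_t *opt, uint16_t mss)
{
        opt[0] = 2;                     /* TCPOPT_MSS */
        opt[1] = 4;                     /* TCPOLEN_MSS */
        opt[2] = (mss & 0xff00) >> 8;
        opt[3] = mss & 0x00ff;
}

int main(void)
{
        uint8_t opt[4];

        example_write_mss(opt, 1360);
        printf("mss = %u\n", example_read_mss(opt));
        return 0;
}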
static unsigned int
tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
        struct iphdr *iph = ip_hdr(skb);
        __be16 newlen;
        int ret;

        ret = tcpmss_mangle_packet(skb, par,
                                   PF_INET,
                                   iph->ihl * 4,
                                   sizeof(*iph) + sizeof(struct tcphdr));
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
                iph = ip_hdr(skb);
                newlen = htons(ntohs(iph->tot_len) + ret);
                csum_replace2(&iph->check, iph->tot_len, newlen);
                iph->tot_len = newlen;
        }
        return XT_CONTINUE;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Patrick McHardy | 97 | 78.23% | 2 | 20.00% |
Linus Torvalds | 11 | 8.87% | 1 | 10.00% |
Arnaldo Carvalho de Melo | 6 | 4.84% | 1 | 10.00% |
Jan Engelhardt | 6 | 4.84% | 5 | 50.00% |
Herbert Xu | 4 | 3.23% | 1 | 10.00% |
Total | 124 | 100.00% | 10 | 100.00% |
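When tcpmss_mangle_packet() grew the packet by inserting an option, tcpmss_tg4() patches iph->tot_len and uses csum_replace2() to fix the IPv4 header checksum incrementally rather than recomputing it over the whole header. Below is a standalone userspace sketch of that update rule (RFC 1624, equation 3: HC' = ~(~HC + ~m + m')); the function name and the sample values are hypothetical and do not come from the module.
#include <stdio.h>
#include <stdint.h>

/* Illustration only: incremental 16-bit checksum fixup in the style of
 * csum_replace2(), per RFC 1624 eqn. 3, where old_word/new_word are the
 * old and new values of one 16-bit header word.
 */
static uint16_t example_csum_replace2(uint16_t check, uint16_t old_word,
                                      uint16_t new_word)
{
        uint32_t sum = (uint16_t)~check + (uint16_t)~old_word + new_word;

        sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries ... */
        sum = (sum & 0xffff) + (sum >> 16);     /* ... twice is enough here */
        return (uint16_t)~sum;
}

int main(void)
{
        /* e.g. tot_len grows from 60 to 64 when a 4-byte MSS option is added */
        printf("0x%04x\n", example_csum_replace2(0xb1e6, 60, 64));
        return 0;
}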
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static unsigned int
tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        u8 nexthdr;
        __be16 frag_off, oldlen, newlen;
        int tcphoff;
        int ret;

        nexthdr = ipv6h->nexthdr;
        tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
        if (tcphoff < 0)
                return NF_DROP;
        ret = tcpmss_mangle_packet(skb, par,
                                   PF_INET6,
                                   tcphoff,
                                   sizeof(*ipv6h) + sizeof(struct tcphdr));
        if (ret < 0)
                return NF_DROP;
        if (ret > 0) {
                ipv6h = ipv6_hdr(skb);
                oldlen = ipv6h->payload_len;
                newlen = htons(ntohs(oldlen) + ret);
                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_add(csum_sub(skb->csum, oldlen),
                                             newlen);
                ipv6h->payload_len = newlen;
        }
        return XT_CONTINUE;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Patrick McHardy | 116 | 64.44% | 1 | 10.00% |
Eric Dumazet | 41 | 22.78% | 1 | 10.00% |
Jesse Gross | 6 | 3.33% | 1 | 10.00% |
Jan Engelhardt | 6 | 3.33% | 5 | 50.00% |
Arnaldo Carvalho de Melo | 6 | 3.33% | 1 | 10.00% |
Herbert Xu | 5 | 2.78% | 1 | 10.00% |
Total | 180 | 100.00% | 10 | 100.00% |
#endif
/* Must specify -p tcp --syn */
static inline bool find_syn_match(const struct xt_entry_match *m)
{
        const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;

        if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
            tcpinfo->flg_cmp & TCPHDR_SYN &&
            !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
                return true;

        return false;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds | 38 | 55.07% | 1 | 25.00% |
Patrick McHardy | 27 | 39.13% | 1 | 25.00% |
Jan Engelhardt | 3 | 4.35% | 1 | 25.00% |
Changli Gao | 1 | 1.45% | 1 | 25.00% |
Total | 69 | 100.00% | 4 | 100.00% |
static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ipt_entry *e = par->entryinfo;
        const struct xt_entry_match *ematch;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                                (1 << NF_INET_LOCAL_OUT) |
                                (1 << NF_INET_POST_ROUTING))) != 0) {
                pr_info("path-MTU clamping only supported in "
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
                return 0;
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
        pr_info("Only works on TCP SYN packets\n");
        return -EINVAL;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds | 56 | 46.28% | 1 | 7.69% |
Jan Engelhardt | 34 | 28.10% | 6 | 46.15% |
Patrick McHardy | 15 | 12.40% | 4 | 30.77% |
Pablo Neira Ayuso | 9 | 7.44% | 1 | 7.69% |
Harald Welte | 7 | 5.79% | 1 | 7.69% |
Total | 121 | 100.00% | 13 | 100.00% |
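The check above enforces the documented usage of the target: unless invoked through nft_compat, the rule must carry the tcp match with a SYN flag comparison, and path-MTU clamping is only accepted in hooks where a route is attached to the packet. For reference, a rule satisfying both constraints looks along the lines of the long-standing iptables example:
iptables -t mangle -A FORWARD -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu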
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
{
        const struct xt_tcpmss_info *info = par->targinfo;
        const struct ip6t_entry *e = par->entryinfo;
        const struct xt_entry_match *ematch;

        if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
            (par->hook_mask & ~((1 << NF_INET_FORWARD) |
                                (1 << NF_INET_LOCAL_OUT) |
                                (1 << NF_INET_POST_ROUTING))) != 0) {
                pr_info("path-MTU clamping only supported in "
                        "FORWARD, OUTPUT and POSTROUTING hooks\n");
                return -EINVAL;
        }
        if (par->nft_compat)
                return 0;
        xt_ematch_foreach(ematch, e)
                if (find_syn_match(ematch))
                        return 0;
        pr_info("Only works on TCP SYN packets\n");
        return -EINVAL;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Patrick McHardy | 73 | 60.33% | 2 | 20.00% |
Jan Engelhardt | 34 | 28.10% | 6 | 60.00% |
Pablo Neira Ayuso | 9 | 7.44% | 1 | 10.00% |
Linus Torvalds | 5 | 4.13% | 1 | 10.00% |
Total | 121 | 100.00% | 10 | 100.00% |
#endif
static struct xt_target tcpmss_tg_reg[] __read_mostly = {
        {
                .family         = NFPROTO_IPV4,
                .name           = "TCPMSS",
                .checkentry     = tcpmss_tg4_check,
                .target         = tcpmss_tg4,
                .targetsize     = sizeof(struct xt_tcpmss_info),
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        {
                .family         = NFPROTO_IPV6,
                .name           = "TCPMSS",
                .checkentry     = tcpmss_tg6_check,
                .target         = tcpmss_tg6,
                .targetsize     = sizeof(struct xt_tcpmss_info),
                .proto          = IPPROTO_TCP,
                .me             = THIS_MODULE,
        },
#endif
};
static int __init tcpmss_tg_init(void)
{
        return xt_register_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds | 12 | 60.00% | 1 | 33.33% |
Patrick McHardy | 5 | 25.00% | 1 | 33.33% |
Jan Engelhardt | 3 | 15.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
static void __exit tcpmss_tg_exit(void)
{
        xt_unregister_targets(tcpmss_tg_reg, ARRAY_SIZE(tcpmss_tg_reg));
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds | 11 | 57.89% | 1 | 33.33% |
Patrick McHardy | 5 | 26.32% | 1 | 33.33% |
Jan Engelhardt | 3 | 15.79% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
module_init(tcpmss_tg_init);
module_exit(tcpmss_tg_exit);
Overall Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds | 578 | 32.80% | 2 | 3.28% |
Patrick McHardy | 502 | 28.49% | 10 | 16.39% |
Gao Feng | 228 | 12.94% | 3 | 4.92% |
Jan Engelhardt | 134 | 7.60% | 14 | 22.95% |
Pablo Neira Ayuso | 65 | 3.69% | 3 | 4.92% |
Eric Dumazet | 64 | 3.63% | 3 | 4.92% |
Phil Oester | 56 | 3.18% | 4 | 6.56% |
Herbert Xu | 33 | 1.87% | 3 | 4.92% |
Harald Welte | 22 | 1.25% | 3 | 4.92% |
Arnaldo Carvalho de Melo | 18 | 1.02% | 3 | 4.92% |
Al Viro | 17 | 0.96% | 2 | 3.28% |
Art Haas | 12 | 0.68% | 1 | 1.64% |
Simon Arlott | 7 | 0.40% | 1 | 1.64% |
Jesse Gross | 6 | 0.34% | 1 | 1.64% |
Rusty Russell | 5 | 0.28% | 1 | 1.64% |
Tom Herbert | 4 | 0.23% | 1 | 1.64% |
Tejun Heo | 3 | 0.17% | 1 | 1.64% |
Igor Maravić | 3 | 0.17% | 1 | 1.64% |
David S. Miller | 2 | 0.11% | 1 | 1.64% |
Joe Perches | 1 | 0.06% | 1 | 1.64% |
Benjamin LaHaise | 1 | 0.06% | 1 | 1.64% |
Changli Gao | 1 | 0.06% | 1 | 1.64% |
Total | 1762 | 100.00% | 61 | 100.00% |