Release 4.7 net/netfilter/nf_nat_core.c
/*
* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2011 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/jhash.h>
#include <linux/rtnetlink.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>
static DEFINE_SPINLOCK(nf_nat_lock);
static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
__read_mostly;
static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static unsigned int nf_nat_hash_rnd __read_mostly;
inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
{
return rcu_dereference(nf_nat_l3protos[family]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jozsef kadlecsik | 11 | 52.38% | 1 | 25.00% |
| patrick mchardy | 10 | 47.62% | 3 | 75.00% |
| Total | 21 | 100.00% | 4 | 100.00% |
inline const struct nf_nat_l4proto *
__nf_nat_l4proto_find(u8 family, u8 protonum)
{
return rcu_dereference(nf_nat_l4protos[family][protonum]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 16 | 59.26% | 2 | 66.67% |
| jozsef kadlecsik | 11 | 40.74% | 1 | 33.33% |
| Total | 27 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);
#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
const struct nf_nat_l3proto *l3proto;
const struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
enum ip_conntrack_dir dir;
unsigned long statusbit;
u8 family;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
return;
family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
rcu_read_lock();
l3proto = __nf_nat_l3proto_find(family);
if (l3proto == NULL)
goto out;
dir = CTINFO2DIR(ctinfo);
if (dir == IP_CT_DIR_ORIGINAL)
statusbit = IPS_DST_NAT;
else
statusbit = IPS_SRC_NAT;
l3proto->decode_session(skb, ct, dir, statusbit, fl);
out:
rcu_read_unlock();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 113 | 83.09% | 3 | 75.00% |
| jozsef kadlecsik | 23 | 16.91% | 1 | 25.00% |
| Total | 136 | 100.00% | 4 | 100.00% |
int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
{
struct flowi fl;
unsigned int hh_len;
struct dst_entry *dst;
int err;
err = xfrm_decode_session(skb, &fl, family);
if (err < 0)
return err;
dst = skb_dst(skb);
if (dst->xfrm)
dst = ((struct xfrm_dst *)dst)->route;
dst_hold(dst);
dst = xfrm_lookup(net, dst, &fl, skb->sk, 0);
if (IS_ERR(dst))
return PTR_ERR(dst);
skb_dst_drop(skb);
skb_dst_set(skb, dst);
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
return -ENOMEM;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 142 | 81.61% | 3 | 50.00% |
| jozsef kadlecsik | 20 | 11.49% | 1 | 16.67% |
| eric w. biederman | 6 | 3.45% | 1 | 16.67% |
| dan carpenter | 6 | 3.45% | 1 | 16.67% |
| Total | 174 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */
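The headroom fixup at the end of nf_xfrm_me_harder() deserves a small worked example: after rerouting, the skb may sit on a device with a larger link-layer header, and the head must grow by the difference. A minimal userspace sketch of that arithmetic; the lengths are hypothetical:

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: the new route's device needs 16 bytes of
	 * link-layer header, but the skb only has 10 bytes of headroom.
	 */
	unsigned int hh_len   = 16;	/* dst->dev->hard_header_len */
	unsigned int headroom = 10;	/* skb_headroom(skb) */

	if (headroom < hh_len)
		printf("pskb_expand_head() must grow the head by %u bytes\n",
		       hh_len - headroom);
	return 0;
}
```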
/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
/* Original src, to ensure we map it consistently if possible. */
hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
return reciprocal_scale(hash, nf_nat_htable_size);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 41 | 49.40% | 1 | 14.29% |
| florian westphal | 19 | 22.89% | 3 | 42.86% |
| jozsef kadlecsik | 17 | 20.48% | 1 | 14.29% |
| daniel borkmann | 6 | 7.23% | 2 | 28.57% |
| Total | 83 | 100.00% | 7 | 100.00% |
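hash_by_src() mixes the original source part of the tuple with jhash2() and then maps the 32-bit result into a table bucket with reciprocal_scale(), a multiply-and-shift that avoids a modulo. Below is a runnable sketch of the scaling step; the helper mirrors reciprocal_scale() from include/linux/kernel.h, and the hash value is a stand-in:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's reciprocal_scale(): maps a 32-bit value into
 * [0, ep_ro) without a division.
 */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t htable_size = 16384;	/* stand-in for nf_nat_htable_size */
	uint32_t hash = 0xdeadbeef;	/* stand-in for the jhash2() result */

	/* Always lands in [0, htable_size). */
	printf("bucket = %u\n", reciprocal_scale(hash, htable_size));
	return 0;
}
```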
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack)
{
/* Conntrack doesn't keep track of outgoing tuples; only
* incoming ones. NAT means they don't have a fixed mapping,
* so we invert the tuple and look for the incoming reply.
*
* We could keep a separate hash if this proves too slow.
*/
struct nf_conntrack_tuple reply;
nf_ct_invert_tuplepr(&reply, tuple);
return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 39 | 100.00% | 1 | 100.00% |
| Total | 39 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(nf_nat_used_tuple);
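The inversion trick is easy to miss: conntrack only indexes tuples as they appear on the wire, so a candidate mapping is "used" exactly when its inverted (reply) tuple already exists. A toy sketch of the inversion with hypothetical addresses; the real struct nf_conntrack_tuple is more involved:

```c
#include <stdio.h>
#include <string.h>

/* Toy 4-tuple, only to illustrate what nf_ct_invert_tuplepr() computes. */
struct tuple {
	char src_ip[16], dst_ip[16];
	int  src_port, dst_port;
};

/* The reply to A->B is B->A: swap the source and destination parts. */
static void invert(struct tuple *reply, const struct tuple *t)
{
	strcpy(reply->src_ip, t->dst_ip);
	strcpy(reply->dst_ip, t->src_ip);
	reply->src_port = t->dst_port;
	reply->dst_port = t->src_port;
}

int main(void)
{
	/* Candidate SNAT mapping: 192.0.2.10:40000 -> 198.51.100.7:53 */
	struct tuple cand = { "192.0.2.10", "198.51.100.7", 40000, 53 };
	struct tuple reply;

	invert(&reply, &cand);
	/* The candidate is taken iff this reply tuple is already tracked. */
	printf("look up reply %s:%d -> %s:%d\n",
	       reply.src_ip, reply.src_port, reply.dst_ip, reply.dst_port);
	return 0;
}
```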
/* If we source map this tuple so reply looks like reply_tuple, will
* that meet the constraints of range?
*/
static int in_range(const struct nf_nat_l3proto *l3proto,
const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range)
{
/* If we are supposed to map IPs, then we must be in the
* range specified, otherwise let this drag us onto a new src IP.
*/
if (range->flags & NF_NAT_RANGE_MAP_IPS &&
!l3proto->in_range(tuple, range))
return 0;
if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
&range->min_proto, &range->max_proto))
return 1;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 88 | 100.00% | 1 | 100.00% |
| Total | 88 | 100.00% | 1 | 100.00% |
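For the port-based protocols, the l4proto->in_range() callback boils down to a bounds check on the port. A runnable sketch with hypothetical ports; the kernel helper compares ntohs() values, host order is used here for brevity:

```c
#include <stdio.h>

/* Bounds check in host byte order, analogous to the generic
 * port-range test used by the TCP/UDP NAT protocol helpers.
 */
static int port_in_range(unsigned short port,
			 unsigned short min, unsigned short max)
{
	return port >= min && port <= max;
}

int main(void)
{
	printf("%d\n", port_in_range(10025, 10000, 10050));	/* 1 */
	printf("%d\n", port_in_range(443, 10000, 10050));	/* 0 */
	return 0;
}
```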
static inline int
same_src(const struct nf_conn *ct,
const struct nf_conntrack_tuple *tuple)
{
const struct nf_conntrack_tuple *t;
t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
return (t->dst.protonum == tuple->dst.protonum &&
nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
t->src.u.all == tuple->src.u.all);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 48 | 56.47% | 1 | 50.00% |
| jozsef kadlecsik | 37 | 43.53% | 1 | 50.00% |
| Total | 85 | 100.00% | 2 | 100.00% |
/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
const struct nf_conntrack_zone *zone,
const struct nf_nat_l3proto *l3proto,
const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *result,
const struct nf_nat_range *range)
{
unsigned int h = hash_by_src(net, tuple);
const struct nf_conn_nat *nat;
const struct nf_conn *ct;
hlist_for_each_entry_rcu(nat, &nf_nat_bysource[h], bysource) {
ct = nat->ct;
if (same_src(ct, tuple) &&
net_eq(net, nf_ct_net(ct)) &&
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
/* Copy source part from reply tuple. */
nf_ct_invert_tuplepr(result,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
result->dst = tuple->dst;
if (in_range(l3proto, l4proto, result, range))
return 1;
}
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jozsef kadlecsik | 102 | 64.56% | 1 | 8.33% |
| patrick mchardy | 26 | 16.46% | 4 | 33.33% |
| florian westphal | 11 | 6.96% | 1 | 8.33% |
| daniel borkmann | 9 | 5.70% | 2 | 16.67% |
| alexey dobriyan | 5 | 3.16% | 1 | 8.33% |
| yasuyuki kozakai | 3 | 1.90% | 2 | 16.67% |
| jan engelhardt | 2 | 1.27% | 1 | 8.33% |
| Total | 158 | 100.00% | 12 | 100.00% |
/* For [FUTURE] fragmentation handling, we want the least-used
* src-ip/dst-ip/proto triple. Fairness doesn't come into it. Thus
* if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
* 1-65535, we don't do pro-rata allocation based on ports; we choose
* the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
struct nf_conntrack_tuple *tuple,
const struct nf_nat_range *range,
const struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
union nf_inet_addr *var_ipp;
unsigned int i, max;
/* Host order */
u32 minip, maxip, j, dist;
bool full_range;
/* No IP mapping? Do nothing. */
if (!(range->flags & NF_NAT_RANGE_MAP_IPS))
return;
if (maniptype == NF_NAT_MANIP_SRC)
var_ipp = &tuple->src.u3;
else
var_ipp = &tuple->dst.u3;
/* Fast path: only one choice. */
if (nf_inet_addr_cmp(&range->min_addr, &range->max_addr)) {
*var_ipp = range->min_addr;
return;
}
if (nf_ct_l3num(ct) == NFPROTO_IPV4)
max = sizeof(var_ipp->ip) / sizeof(u32) - 1;
else
max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;
/* Hashing source and destination IPs gives a fairly even
* spread in practice (if there are a small number of IPs
* involved, there usually aren't that many connections
* anyway). The consistency means that servers see the same
* client coming from the same IP (some Internet Banking sites
* like this), even across reboots.
*/
j = jhash2((u32 *)&tuple->src.u3, sizeof(tuple->src.u3) / sizeof(u32),
range->flags & NF_NAT_RANGE_PERSISTENT ?
0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
full_range = false;
for (i = 0; i <= max; i++) {
/* If first bytes of the address are at the maximum, use the
* distance. Otherwise use the full range.
*/
if (!full_range) {
minip = ntohl((__force __be32)range->min_addr.all[i]);
maxip = ntohl((__force __be32)range->max_addr.all[i]);
dist = maxip - minip + 1;
} else {
minip = 0;
dist = ~0;
}
var_ipp->all[i] = (__force __u32)
htonl(minip + reciprocal_scale(j, dist));
if (var_ipp->all[i] != range->max_addr.all[i])
full_range = true;
if (!(range->flags & NF_NAT_RANGE_PERSISTENT))
j ^= (__force u32)tuple->dst.u3.all[i];
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 231 | 61.27% | 5 | 50.00% |
| jozsef kadlecsik | 129 | 34.22% | 1 | 10.00% |
| daniel borkmann | 10 | 2.65% | 2 | 20.00% |
| florian westphal | 5 | 1.33% | 1 | 10.00% |
| maximilian engelhardt | 2 | 0.53% | 1 | 10.00% |
| Total | 377 | 100.00% | 10 | 100.00% |
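A worked example of the per-word selection for IPv4, where only one 32-bit word is hashed: the range bounds are converted to host order, the distance is computed, and reciprocal_scale() picks an offset into it. The sketch below is runnable, with a hypothetical pool and a fixed stand-in for the jhash2() value (the fixed hash per client is what makes the choice persistent):

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	/* Hypothetical SNAT pool: 203.0.113.16 - 203.0.113.31 (16 addresses). */
	uint32_t minip = ntohl(inet_addr("203.0.113.16"));
	uint32_t maxip = ntohl(inet_addr("203.0.113.31"));
	uint32_t dist  = maxip - minip + 1;

	/* Stand-in for jhash2() over the client's source address: the same
	 * client always hashes to the same value, hence the same pool IP.
	 */
	uint32_t j = 0x13572468;

	struct in_addr chosen;
	chosen.s_addr = htonl(minip + reciprocal_scale(j, dist));
	printf("chosen pool address: %s\n", inet_ntoa(chosen));
	return 0;
}
```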
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
* we change the source to map into the range. For NF_INET_PRE_ROUTING
* and NF_INET_LOCAL_OUT, we change the destination to map into the
* range. It might not be possible to get a unique tuple, but we try.
* At worst (or if we race), we will end up with a final duplicate in
* __nf_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig_tuple,
const struct nf_nat_range *range,
struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
const struct nf_conntrack_zone *zone;
const struct nf_nat_l3proto *l3proto;
const struct nf_nat_l4proto *l4proto;
struct net *net = nf_ct_net(ct);
zone = nf_ct_zone(ct);
rcu_read_lock();
l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
orig_tuple->dst.protonum);
/* 1) If this srcip/proto/src-proto-part is currently mapped,
* and that same mapping gives a unique tuple within the given
* range, use that.
*
* This is only required for source (ie. NAT/masq) mappings.
* So far, we don't do local source mappings, so multiple
* manips are not an issue.
*/
if (maniptype == NF_NAT_MANIP_SRC &&
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
if (in_range(l3proto, l4proto, orig_tuple, range)) {
if (!nf_nat_used_tuple(orig_tuple, ct)) {
*tuple = *orig_tuple;
goto out;
}
} else if (find_appropriate_src(net, zone, l3proto, l4proto,
orig_tuple, tuple, range)) {
pr_debug("get_unique_tuple: Found current src map\n");
if (!nf_nat_used_tuple(tuple, ct))
goto out;
}
}
/* 2) Select the least-used IP/proto combination in the given range */
*tuple = *orig_tuple;
find_best_ips_proto(zone, tuple, range, ct, maniptype);
/* 3) The per-protocol part of the manip is made to map into
* the range to make a unique tuple.
*/
/* Only bother mapping if it's not already in range and unique */
if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
if (l4proto->in_range(tuple, maniptype,
&range->min_proto,
&range->max_proto) &&
(range->min_proto.all == range->max_proto.all ||
!nf_nat_used_tuple(tuple, ct)))
goto out;
} else if (!nf_nat_used_tuple(tuple, ct)) {
goto out;
}
}
/* Last chance: get protocol to try to obtain unique tuple. */
l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
out:
rcu_read_unlock();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jozsef kadlecsik | 132 | 41.12% | 1 | 7.14% |
| patrick mchardy | 91 | 28.35% | 5 | 35.71% |
| changli gao | 70 | 21.81% | 4 | 28.57% |
| alexey dobriyan | 12 | 3.74% | 1 | 7.14% |
| daniel borkmann | 10 | 3.12% | 2 | 14.29% |
| eric leblond | 6 | 1.87% | 1 | 7.14% |
| Total | 321 | 100.00% | 14 | 100.00% |
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{
struct nf_conn_nat *nat = nfct_nat(ct);
if (nat)
return nat;
if (!nf_ct_is_confirmed(ct))
nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
return nat;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| florian westphal | 51 | 100.00% | 1 | 100.00% |
| Total | 51 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple curr_tuple, new_tuple;
struct nf_conn_nat *nat;
/* A NAT helper or ctnetlink may also have set up the binding */
nat = nf_ct_nat_ext_add(ct);
if (nat == NULL)
return NF_ACCEPT;
NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
maniptype == NF_NAT_MANIP_DST);
BUG_ON(nf_nat_initialized(ct, maniptype));
/* What we've got will look like inverse of reply. Normally
* this is what is in the conntrack, except for prior
* manipulations (future optimization: if num_manips == 0,
* orig_tp = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
*/
nf_ct_invert_tuplepr(&curr_tuple,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);
if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
struct nf_conntrack_tuple reply;
/* Alter conntrack table so will recognize replies. */
nf_ct_invert_tuplepr(&reply, &new_tuple);
nf_conntrack_alter_reply(ct, &reply);
/* Non-atomic: we own this at the moment. */
if (maniptype == NF_NAT_MANIP_SRC)
ct->status |= IPS_SRC_NAT;
else
ct->status |= IPS_DST_NAT;
if (nfct_help(ct))
nfct_seqadj_ext_add(ct);
}
if (maniptype == NF_NAT_MANIP_SRC) {
unsigned int srchash;
srchash = hash_by_src(net,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_lock);
/* nf_conntrack_alter_reply might re-allocate the extension area */
nat = nfct_nat(ct);
nat->ct = ct;
hlist_add_head_rcu(&nat->bysource,
&nf_nat_bysource[srchash]);
spin_unlock_bh(&nf_nat_lock);
}
/* It's done. */
if (maniptype == NF_NAT_MANIP_DST)
ct->status |= IPS_DST_NAT_DONE;
else
ct->status |= IPS_SRC_NAT_DONE;
return NF_ACCEPT;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jozsef kadlecsik | 194 | 71.32% | 1 | 5.88% |
| patrick mchardy | 30 | 11.03% | 7 | 41.18% |
| yasuyuki kozakai | 30 | 11.03% | 3 | 17.65% |
| alexey dobriyan | 10 | 3.68% | 1 | 5.88% |
| changli gao | 6 | 2.21% | 3 | 17.65% |
| florian westphal | 2 | 0.74% | 2 | 11.76% |
| Total | 272 | 100.00% | 17 | 100.00% |
EXPORT_SYMBOL(nf_nat_setup_info);
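To see nf_nat_setup_info() from the caller's side, here is a sketch in the style of the SNAT and masquerade targets: build an nf_nat_range that pins the source to one address and let the l4proto keep or pick a free port. This is kernel-module context, and the wrapper function and the address 203.0.113.1 are hypothetical:

```c
/* Hedged sketch, not the in-tree implementation: bind a connection's
 * source to a single address, as an SNAT-style target might.
 */
static unsigned int example_snat(struct nf_conn *ct)
{
	struct nf_nat_range range;

	memset(&range, 0, sizeof(range));
	range.flags       = NF_NAT_RANGE_MAP_IPS; /* map IPs only; the l4proto
						   * keeps or picks the port */
	range.min_addr.ip = htonl(0xcb007101);	  /* 203.0.113.1 */
	range.max_addr.ip = range.min_addr.ip;

	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}
```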
static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
/* Force range to this IP; let proto decide mapping for
* per-proto parts (hence not NF_NAT_RANGE_PROTO_SPECIFIED).
* Use reply in case it's already been mangled (eg local packet).
*/
union nf_inet_addr ip =
(manip == NF_NAT_MANIP_SRC ?
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
struct nf_nat_range range = {
.flags = NF_NAT_RANGE_MAP_IPS,
.min_addr = ip,
.max_addr = ip,
};
return nf_nat_setup_info(ct, &range, manip);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pablo neira ayuso | 85 | 100.00% | 2 | 100.00% |
| Total | 85 | 100.00% | 2 | 100.00% |
unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{
return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pablo neira ayuso | 26 | 100.00% | 2 | 100.00% |
| Total | 26 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum,
struct sk_buff *skb)
{
const struct nf_nat_l3proto *l3proto;
const struct nf_nat_l4proto *l4proto;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned long statusbit;
enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
if (mtype == NF_NAT_MANIP_SRC)
statusbit = IPS_SRC_NAT;
else
statusbit = IPS_DST_NAT;
/* Invert if this is reply dir. */
if (dir == IP_CT_DIR_REPLY)
statusbit ^= IPS_NAT_MASK;
/* Non-atomic: these bits don't change. */
if (ct->status & statusbit) {
struct nf_conntrack_tuple target;
/* We are aiming to look like inverse of other direction. */
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
l3proto = __nf_nat_l3proto_find(target.src.l3num);
l4proto = __nf_nat_l4proto_find(target.src.l3num,
target.dst.protonum);
if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
return NF_DROP;
}
return NF_ACCEPT;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| jozsef kadlecsik | 124 | 72.94% | 1 | 25.00% |
| patrick mchardy | 44 | 25.88% | 2 | 50.00% |
| herbert xu | 2 | 1.18% | 1 | 25.00% |
| Total | 170 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(nf_nat_packet);
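The statusbit trick relies on IPS_NAT_MASK being exactly IPS_SRC_NAT | IPS_DST_NAT, so XORing with the mask flips one into the other: in the reply direction, an SNAT binding is applied by rewriting the destination. A runnable demonstration; the constants mirror include/uapi/linux/netfilter/nf_conntrack_common.h:

```c
#include <stdio.h>

/* Mirrors include/uapi/linux/netfilter/nf_conntrack_common.h */
#define IPS_SRC_NAT	(1 << 4)
#define IPS_DST_NAT	(1 << 5)
#define IPS_NAT_MASK	(IPS_SRC_NAT | IPS_DST_NAT)

int main(void)
{
	unsigned long statusbit = IPS_SRC_NAT;

	/* Reply direction: XOR with the mask turns SRC into DST (and back). */
	statusbit ^= IPS_NAT_MASK;
	printf("IPS_SRC_NAT ^ IPS_NAT_MASK == IPS_DST_NAT? %s\n",
	       statusbit == IPS_DST_NAT ? "yes" : "no");
	return 0;
}
```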
struct nf_nat_proto_clean {
u8 l3proto;
u8 l4proto;
};
/* kill conntracks whose NAT bindings are affected */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{
const struct nf_nat_proto_clean *clean = data;
struct nf_conn_nat *nat = nfct_nat(i);
if (!nat)
return 0;
if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
(clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
return 0;
return i->status & IPS_NAT_MASK ? 1 : 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 42 | 48.28% | 1 | 33.33% |
| jozsef kadlecsik | 39 | 44.83% | 1 | 33.33% |
| florian westphal | 6 | 6.90% | 1 | 33.33% |
| Total | 87 | 100.00% | 3 | 100.00% |
static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{
struct nf_conn_nat *nat = nfct_nat(ct);
if (nf_nat_proto_remove(ct, data))
return 1;
if (!nat || !nat->ct)
return 0;
/* This netns is being destroyed, and conntrack has nat null binding.
* Remove it from bysource hash, as the table will be freed soon.
*
* Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
* would delete the entry from the already-freed table.
*/
if (!del_timer(&ct->timeout))
return 1;
spin_lock_bh(&nf_nat_lock);
hlist_del_rcu(&nat->bysource);
ct->status &= ~IPS_NAT_DONE_MASK;
nat->ct = NULL;
spin_unlock_bh(&nf_nat_lock);
add_timer(&ct->timeout);
/* don't delete conntrack. Although that would make things a lot
* simpler, we'd end up flushing all conntracks on nat rmmod.
*/
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| florian westphal | 110 | 100.00% | 1 | 100.00% |
| Total | 110 | 100.00% | 1 | 100.00% |
static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
{
struct nf_nat_proto_clean clean = {
.l3proto = l3proto,
.l4proto = l4proto,
};
struct net *net;
rtnl_lock();
for_each_net(net)
nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 47 | 82.46% | 1 | 20.00% |
| florian westphal | 5 | 8.77% | 2 | 40.00% |
| julian anastasov | 3 | 5.26% | 1 | 20.00% |
| jozsef kadlecsik | 2 | 3.51% | 1 | 20.00% |
| Total | 57 | 100.00% | 5 | 100.00% |
static void nf_nat_l3proto_clean(u8 l3proto)
{
struct nf_nat_proto_clean clean = {
.l3proto = l3proto,
};
struct net *net;
rtnl_lock();
for_each_net(net)
nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 41 | 83.67% | 1 | 25.00% |
| florian westphal | 5 | 10.20% | 2 | 50.00% |
| jozsef kadlecsik | 3 | 6.12% | 1 | 25.00% |
| Total | 49 | 100.00% | 4 | 100.00% |
/* Protocol registration. */
int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
const struct nf_nat_l4proto **l4protos;
unsigned int i;
int ret = 0;
mutex_lock(&nf_nat_proto_mutex);
if (nf_nat_l4protos[l3proto] == NULL) {
l4protos = kmalloc(IPPROTO_MAX * sizeof(struct nf_nat_l4proto *),
GFP_KERNEL);
if (l4protos == NULL) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < IPPROTO_MAX; i++)
RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);
/* Before making proto_array visible to lockless readers,
* we must make sure its content is committed to memory.
*/
smp_wmb();
nf_nat_l4protos[l3proto] = l4protos;
}
if (rcu_dereference_protected(
nf_nat_l4protos[l3proto][l4proto->l4proto],
lockdep_is_held(&nf_nat_proto_mutex)
) != &nf_nat_l4proto_unknown) {
ret = -EBUSY;
goto out;
}
RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
out:
mutex_unlock(&nf_nat_proto_mutex);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 154 | 88.51% | 1 | 50.00% |
| jozsef kadlecsik | 20 | 11.49% | 1 | 50.00% |
| Total | 174 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
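From a protocol module's point of view, registration looks like the sketch below, modelled on the in-tree nf_nat_proto_* modules. The "foo" identifiers and IPPROTO_FOO are hypothetical placeholders; nf_nat_l4proto_in_range is the generic helper exported for port-style protocols:

```c
/* Hedged sketch of a NAT l4proto module; the foo_* callbacks are
 * placeholders that a real module would implement.
 */
static const struct nf_nat_l4proto nf_nat_l4proto_foo = {
	.l4proto      = IPPROTO_FOO,		  /* hypothetical protocol number */
	.manip_pkt    = foo_manip_pkt,		  /* rewrite the transport header */
	.in_range     = nf_nat_l4proto_in_range,  /* generic range check */
	.unique_tuple = foo_unique_tuple,	  /* find a free id/port */
};

static int __init nf_nat_foo_init(void)
{
	return nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_foo);
}

static void __exit nf_nat_foo_exit(void)
{
	nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_foo);
}

module_init(nf_nat_foo_init);
module_exit(nf_nat_foo_exit);
```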
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
{
mutex_lock(&nf_nat_proto_mutex);
RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
&nf_nat_l4proto_unknown);
mutex_unlock(&nf_nat_proto_mutex);
synchronize_rcu();
nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 43 | 79.63% | 1 | 50.00% |
| jozsef kadlecsik | 11 | 20.37% | 1 | 50.00% |
| Total | 54 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);
int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
int err;
err = nf_ct_l3proto_try_module_get(l3proto->l3proto);
if (err < 0)
return err;
mutex_lock(&nf_nat_proto_mutex);
RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
&nf_nat_l4proto_tcp);
RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
&nf_nat_l4proto_udp);
mutex_unlock(&nf_nat_proto_mutex);
RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 58 | 63.74% | 3 | 50.00% |
| jozsef kadlecsik | 28 | 30.77% | 1 | 16.67% |
| eric dumazet | 4 | 4.40% | 1 | 16.67% |
| stephen hemminger | 1 | 1.10% | 1 | 16.67% |
| Total | 91 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(nf_nat_l3proto_register);
void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *l3proto)
{
mutex_lock(&nf_nat_proto_mutex);
RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], NULL);
mutex_unlock(&nf_nat_proto_mutex);
synchronize_rcu();
nf_nat_l3proto_clean(l3proto->l3proto);
nf_ct_l3proto_module_put(l3proto->l3proto);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 30 | 57.69% | 3 | 60.00% |
| jozsef kadlecsik | 21 | 40.38% | 1 | 20.00% |
| stephen hemminger | 1 | 1.92% | 1 | 20.00% |
| Total | 52 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
if (nat == NULL || nat->ct == NULL)
return;
NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
spin_lock_bh(&nf_nat_lock);
hlist_del_rcu(&nat->bysource);
spin_unlock_bh(&nf_nat_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| yasuyuki kozakai | 63 | 94.03% | 1 | 25.00% |
| patrick mchardy | 3 | 4.48% | 2 | 50.00% |
| changli gao | 1 | 1.49% | 1 | 25.00% |
| Total | 67 | 100.00% | 4 | 100.00% |
static void nf_nat_move_storage(void *new, void *old)
{
struct nf_conn_nat *new_nat = new;
struct nf_conn_nat *old_nat = old;
struct nf_conn *ct = old_nat->ct;
if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
return;
spin_lock_bh(&nf_nat_lock);
hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
spin_unlock_bh(&nf_nat_lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| yasuyuki kozakai | 67 | 87.01% | 1 | 16.67% |
| patrick mchardy | 6 | 7.79% | 3 | 50.00% |
| evgeniy polyakov | 3 | 3.90% | 1 | 16.67% |
| changli gao | 1 | 1.30% | 1 | 16.67% |
| Total | 77 | 100.00% | 6 | 100.00% |
static struct nf_ct_ext_type nat_extend __read_mostly = {
.len = sizeof(struct nf_conn_nat),
.align = __alignof__(struct nf_conn_nat),
.destroy = nf_nat_cleanup_conntrack,
.move = nf_nat_move_storage,
.id = NF_CT_EXT_NAT,
.flags = NF_CT_EXT_F_PREALLOC,
};
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
[CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
const struct nf_conn *ct,
struct nf_nat_range *range)
{
struct nlattr *tb[CTA_PROTONAT_MAX+1];
const struct nf_nat_l4proto *l4proto;
int err;
err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
if (err < 0)
return err;
l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
if (l4proto->nlattr_to_range)
err = l4proto->nlattr_to_range(tb, range);
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pablo neira ayuso | 86 | 87.76% | 1 | 50.00% |
| patrick mchardy | 12 | 12.24% | 1 | 50.00% |
| Total | 98 | 100.00% | 2 | 100.00% |
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
[CTA_NAT_V4_MINIP] = { .type = NLA_U32 },
[CTA_NAT_V4_MAXIP] = { .type = NLA_U32 },
[CTA_NAT_V6_MINIP] = { .len = sizeof(struct in6_addr) },
[CTA_NAT_V6_MAXIP] = { .len = sizeof(struct in6_addr) },
[CTA_NAT_PROTO] = { .type = NLA_NESTED },
};
static int
nfnetlink_parse_nat(const struct nlattr *nat,
const struct nf_conn *ct, struct nf_nat_range *range,
const struct nf_nat_l3proto *l3proto)
{
struct nlattr *tb[CTA_NAT_MAX+1];
int err;
memset(range, 0, sizeof(*range));
err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
if (err < 0)
return err;
err = l3proto->nlattr_to_range(tb, range);
if (err < 0)
return err;
if (!tb[CTA_NAT_PROTO])
return 0;
return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pablo neira ayuso | 105 | 86.78% | 2 | 50.00% |
| patrick mchardy | 16 | 13.22% | 2 | 50.00% |
| Total | 121 | 100.00% | 4 | 100.00% |
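The attribute tree this parser consumes is nested: a CTA_NAT_SRC (or CTA_NAT_DST) attribute carries the address bounds, with an optional CTA_NAT_PROTO nest for the port range. Below is a sketch of how a userspace ctnetlink client might build it with libmnl; message-header setup is omitted and the pool values are hypothetical:

```c
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static void put_nat_attrs(struct nlmsghdr *nlh)
{
	struct nlattr *nat, *proto;

	nat = mnl_attr_nest_start(nlh, CTA_NAT_SRC);
	/* Address bounds, network byte order (consumed via nlattr_to_range). */
	mnl_attr_put_u32(nlh, CTA_NAT_V4_MINIP, inet_addr("203.0.113.16"));
	mnl_attr_put_u32(nlh, CTA_NAT_V4_MAXIP, inet_addr("203.0.113.31"));

	/* Optional nested port range, handled by nfnetlink_parse_nat_proto(). */
	proto = mnl_attr_nest_start(nlh, CTA_NAT_PROTO);
	mnl_attr_put_u16(nlh, CTA_PROTONAT_PORT_MIN, htons(10000));
	mnl_attr_put_u16(nlh, CTA_PROTONAT_PORT_MAX, htons(10050));
	mnl_attr_nest_end(nlh, proto);

	mnl_attr_nest_end(nlh, nat);
}
```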
/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
{
struct nf_nat_range range;
const struct nf_nat_l3proto *l3proto;
int err;
/* Should not happen, restricted to creating new conntracks
* via ctnetlink.
*/
if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
return -EEXIST;
/* Make sure that L3 NAT is there by the time we call nf_nat_setup_info to
* attach the null binding, otherwise this may oops.
*/
l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
if (l3proto == NULL)
return -EAGAIN;
/* No NAT information has been passed, allocate the null-binding */
if (attr == NULL)
return __nf_nat_alloc_null_binding(ct, manip);
err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
if (err < 0)
return err;
return nf_nat_setup_info(ct, &range, manip);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pablo neira ayuso | 109 | 90.08% | 2 | 50.00% |
| patrick mchardy | 12 | 9.92% | 2 | 50.00% |
| Total | 121 | 100.00% | 4 | 100.00% |
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
{
return -EOPNOTSUPP;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| pablo neira ayuso | 24 | 96.00% | 1 | 50.00% |
| patrick mchardy | 1 | 4.00% | 1 | 50.00% |
| Total | 25 | 100.00% | 2 | 100.00% |
#endif
static void __net_exit nf_nat_net_exit(struct net *net)
{
struct nf_nat_proto_clean clean = {};
nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| alexey dobriyan | 19 | 59.38% | 1 | 25.00% |
| patrick mchardy | 8 | 25.00% | 1 | 25.00% |
| florian westphal | 5 | 15.62% | 2 | 50.00% |
| Total | 32 | 100.00% | 4 | 100.00% |
static struct pernet_operations nf_nat_net_ops = {
.exit = nf_nat_net_exit,
};
static struct nf_ct_helper_expectfn follow_master_nat = {
.name = "nat-follow-master",
.expectfn = nf_nat_follow_master,
};
static int __init nf_nat_init(void)
{
int ret;
/* Leave them the same for the moment. */
nf_nat_htable_size = nf_conntrack_htable_size;
nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
if (!nf_nat_bysource)
return -ENOMEM;
ret = nf_ct_extend_register(&nat_extend);
if (ret < 0) {
nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
return ret;
}
ret = register_pernet_subsys(&nf_nat_net_ops);
if (ret < 0)
goto cleanup_extend;
nf_ct_helper_expectfn_register(&follow_master_nat);
/* Initialize fake conntrack so that NAT will skip it */
nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
nfnetlink_parse_nat_setup);
#ifdef CONFIG_XFRM
BUG_ON(nf_nat_decode_session_hook != NULL);
RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
#endif
return 0;
cleanup_extend:
nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
nf_ct_extend_unregister(&nat_extend);
return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| yasuyuki kozakai | 43 | 28.48% | 1 | 10.00% |
| florian westphal | 38 | 25.17% | 1 | 10.00% |
| jozsef kadlecsik | 25 | 16.56% | 1 | 10.00% |
| patrick mchardy | 24 | 15.89% | 3 | 30.00% |
| pablo neira ayuso | 11 | 7.28% | 1 | 10.00% |
| alexey dobriyan | 6 | 3.97% | 1 | 10.00% |
| eric dumazet | 3 | 1.99% | 1 | 10.00% |
| stephen hemminger | 1 | 0.66% | 1 | 10.00% |
| Total | 151 | 100.00% | 10 | 100.00% |
static void __exit nf_nat_cleanup(void)
{
unsigned int i;
unregister_pernet_subsys(&nf_nat_net_ops);
nf_ct_extend_unregister(&nat_extend);
nf_ct_helper_expectfn_unregister(&follow_master_nat);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
synchronize_net();
nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 41 | 50.62% | 2 | 22.22% |
| jozsef kadlecsik | 13 | 16.05% | 1 | 11.11% |
| pablo neira ayuso | 11 | 13.58% | 2 | 22.22% |
| florian westphal | 7 | 8.64% | 1 | 11.11% |
| yasuyuki kozakai | 6 | 7.41% | 1 | 11.11% |
| alexey dobriyan | 2 | 2.47% | 1 | 11.11% |
| stephen hemminger | 1 | 1.23% | 1 | 11.11% |
| Total | 81 | 100.00% | 9 | 100.00% |
MODULE_LICENSE("GPL");
module_init(nf_nat_init);
module_exit(nf_nat_cleanup);
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| patrick mchardy | 1583 | 39.79% | 22 | 33.85% |
| jozsef kadlecsik | 1061 | 26.67% | 1 | 1.54% |
| pablo neira ayuso | 563 | 14.15% | 4 | 6.15% |
| florian westphal | 289 | 7.26% | 8 | 12.31% |
| yasuyuki kozakai | 256 | 6.44% | 4 | 6.15% |
| changli gao | 78 | 1.96% | 6 | 9.23% |
| alexey dobriyan | 66 | 1.66% | 1 | 1.54% |
| daniel borkmann | 35 | 0.88% | 4 | 6.15% |
| eric dumazet | 7 | 0.18% | 2 | 3.08% |
| eric w. biederman | 6 | 0.15% | 1 | 1.54% |
| dan carpenter | 6 | 0.15% | 1 | 1.54% |
| eric leblond | 6 | 0.15% | 1 | 1.54% |
| stephen hemminger | 4 | 0.10% | 1 | 1.54% |
| evgeniy polyakov | 3 | 0.08% | 1 | 1.54% |
| julian anastasov | 3 | 0.08% | 1 | 1.54% |
| tejun heo | 3 | 0.08% | 1 | 1.54% |
| jan engelhardt | 2 | 0.05% | 1 | 1.54% |
| herbert xu | 2 | 0.05% | 1 | 1.54% |
| maximilian engelhardt | 2 | 0.05% | 1 | 1.54% |
| duan jiong | 1 | 0.03% | 1 | 1.54% |
| lucas de marchi | 1 | 0.03% | 1 | 1.54% |
| arnd bergmann | 1 | 0.03% | 1 | 1.54% |
| Total | 3978 | 100.00% | 65 | 100.00% |