Release 4.11 net/netfilter/ipvs/ip_vs_xmit.c
/*
* ip_vs_xmit.c: various packet transmitters for IPVS
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
* Description of forwarding methods:
* - all transmitters are called from LOCAL_IN (remote clients) and
* LOCAL_OUT (local clients), but for ICMP they can also be called
* from FORWARD
* - not all connections have a destination server; for example,
* connections on a backup server when fwmark is used
* - bypass connections use the daddr from the packet
* - we can use a dst without a ref while sending inside an RCU section;
* we take a ref when returning NF_ACCEPT for a NAT-ed packet via loopback
* LOCAL_OUT rules:
* - skb->dev is NULL and skb->protocol is not set (both are set in POST_ROUTING)
* - skb->pkt_type is not set yet
* - this is the only place where we can see skb->sk != NULL
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/tcp.h> /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h> /* for csum_tcpudp_magic */
#include <net/udp.h>
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h> /* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
enum {
IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
* local
*/
IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
IP_VS_RT_MODE_KNOWN_NH = 16, /* Route via remote addr */
IP_VS_RT_MODE_TUNNEL = 32, /* Tunnel mode */
};
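The transmitters later in this file pass OR-ed combinations of these flags into the route-lookup helpers. A hedged, userspace-runnable illustration of that composition (the two combinations are exactly the ones ip_vs_nat_xmit() and ip_vs_tunnel_xmit() use below; the RT_MODE_* names here are local stand-ins for the enum values above):
#include <stdio.h>

enum {
	RT_MODE_LOCAL     = 1,
	RT_MODE_NON_LOCAL = 2,
	RT_MODE_RDR       = 4,
	RT_MODE_CONNECT   = 8,
	RT_MODE_KNOWN_NH  = 16,
	RT_MODE_TUNNEL    = 32,
};

int main(void)
{
	/* NAT: deliver to local or non-local, allow remote->local redirect */
	int nat_mode = RT_MODE_LOCAL | RT_MODE_NON_LOCAL | RT_MODE_RDR;
	/* TUN: additionally bind the route to saddr and use tunnel MTU rules */
	int tun_mode = RT_MODE_LOCAL | RT_MODE_NON_LOCAL |
		       RT_MODE_CONNECT | RT_MODE_TUNNEL;

	/* membership tests mirror the "rt_mode & IP_VS_RT_MODE_*" checks below */
	printf("NAT allows redirect to local: %s\n",
	       (nat_mode & RT_MODE_RDR) ? "yes" : "no");
	printf("TUN binds route to saddr: %s\n",
	       (tun_mode & RT_MODE_CONNECT) ? "yes" : "no");
	return 0;
}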
static inline struct ip_vs_dest_dst *ip_vs_dest_dst_alloc(void)
{
return kmalloc(sizeof(struct ip_vs_dest_dst), GFP_ATOMIC);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 23 | 100.00% | 1 | 100.00% |
Total | 23 | 100.00% | 1 | 100.00% |
static inline void ip_vs_dest_dst_free(struct ip_vs_dest_dst *dest_dst)
{
kfree(dest_dst);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
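The call_rcu() in __ip_vs_dst_set() below queues ip_vs_dest_dst_rcu_free(), which this listing omits. A minimal reconstruction, consistent with the alloc/free helpers above (the callback must drop the cached dst reference before freeing its container):
static void ip_vs_dest_dst_rcu_free(struct rcu_head *head)
{
	struct ip_vs_dest_dst *dest_dst = container_of(head,
						       struct ip_vs_dest_dst,
						       rcu_head);

	dst_release(dest_dst->dst_cache);
	kfree(dest_dst);
}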
/*
* Destination cache to speed up outgoing route lookup
*/
static inline void
__ip_vs_dst_set(struct ip_vs_dest *dest, struct ip_vs_dest_dst *dest_dst,
struct dst_entry *dst, u32 dst_cookie)
{
struct ip_vs_dest_dst *old;
old = rcu_dereference_protected(dest->dest_dst,
lockdep_is_held(&dest->dst_lock));
if (dest_dst) {
dest_dst->dst_cache = dst;
dest_dst->dst_cookie = dst_cookie;
}
rcu_assign_pointer(dest->dest_dst, dest_dst);
if (old)
call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 48 | 54.55% | 1 | 33.33% |
Wensong Zhang | 33 | 37.50% | 1 | 33.33% |
Hans Schillstrom | 7 | 7.95% | 1 | 33.33% |
Total | 88 | 100.00% | 3 | 100.00% |
static inline struct ip_vs_dest_dst *
__ip_vs_dst_check(struct ip_vs_dest *dest)
{
struct ip_vs_dest_dst *dest_dst = rcu_dereference(dest->dest_dst);
struct dst_entry *dst;
if (!dest_dst)
return NULL;
dst = dest_dst->dst_cache;
/* A cached dst marked obsolete must be revalidated via its ops->check();
 * a NULL result means the route is stale and a fresh lookup is needed.
 */
if (dst->obsolete &&
dst->ops->check(dst, dest_dst->dst_cookie) == NULL)
return NULL;
return dest_dst;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 49 | 68.06% | 1 | 33.33% |
Julian Anastasov | 21 | 29.17% | 1 | 33.33% |
Hans Schillstrom | 2 | 2.78% | 1 | 33.33% |
Total | 72 | 100.00% | 3 | 100.00% |
static inline bool
__mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
{
if (IP6CB(skb)->frag_max_size) {
/* frag_max_size tells us that this packet has been
 * defragmented by the netfilter IPv6 conntrack module.
 */
if (IP6CB(skb)->frag_max_size > mtu)
return true; /* largest fragment violates MTU */
} else if (skb->len > mtu && !skb_is_gso(skb)) {
return true; /* packet size violates MTU */
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 39 | 58.21% | 1 | 50.00% |
Patrick McHardy | 28 | 41.79% | 1 | 50.00% |
Total | 67 | 100.00% | 2 | 100.00% |
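Restated as a pure function, the check above reads: a defragmented packet is judged by its largest original fragment, while any other oversized, non-GSO packet is simply too big. A hedged, userspace-runnable sketch (struct pkt is a hypothetical stand-in for the skb fields involved):
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pkt {
	uint32_t len;           /* skb->len */
	uint32_t frag_max_size; /* IP6CB(skb)->frag_max_size; 0 if never defragmented */
	bool gso;               /* skb_is_gso(skb) */
};

static bool mtu_check_toobig_v6(const struct pkt *p, uint32_t mtu)
{
	if (p->frag_max_size)                  /* defragmented by conntrack */
		return p->frag_max_size > mtu; /* judge by largest fragment */
	return p->len > mtu && !p->gso;        /* oversized non-GSO packet */
}

int main(void)
{
	struct pkt p = { .len = 1600, .frag_max_size = 1400, .gso = false };
	/* fits: every original fragment was <= 1500 even though len > mtu */
	printf("%s\n", mtu_check_toobig_v6(&p, 1500) ? "too big" : "fits");
	return 0;
}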
/* Get route to daddr, update *saddr, optionally bind route to saddr */
static struct rtable *do_output_route4(struct net *net, __be32 daddr,
int rt_mode, __be32 *saddr)
{
struct flowi4 fl4;
struct rtable *rt;
int loop = 0;
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
FLOWI_FLAG_KNOWN_NH : 0;
retry:
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt)) {
/* Invalid saddr ? */
if (PTR_ERR(rt) == -EINVAL && *saddr &&
rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
*saddr = 0;
flowi4_update_output(&fl4, 0, 0, daddr, 0);
goto retry;
}
IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
return NULL;
} else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
ip_rt_put(rt);
*saddr = fl4.saddr;
flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr);
loop++;
goto retry;
}
*saddr = fl4.saddr;
return rt;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 208 | 100.00% | 3 | 100.00% |
Total | 208 | 100.00% | 3 | 100.00% |
#ifdef CONFIG_IP_VS_IPV6
static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
{
return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Gartrell | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
#endif
static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
int rt_mode,
bool new_rt_is_local)
{
bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
bool source_is_loopback;
bool old_rt_is_local;
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
source_is_loopback =
(!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
(addr_type & IPV6_ADDR_LOOPBACK);
old_rt_is_local = __ip_vs_is_local_route6(
(struct rt6_info *)skb_dst(skb));
} else
#endif
{
source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr);
old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
}
if (unlikely(new_rt_is_local)) {
if (!rt_mode_allow_local)
return true;
if (!rt_mode_allow_redirect && !old_rt_is_local)
return true;
} else {
if (!rt_mode_allow_non_local)
return true;
if (source_is_loopback)
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Gartrell | 200 | 100.00% | 1 | 100.00% |
Total | 200 | 100.00% | 1 | 100.00% |
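The decision table above, restated outside the kernel for illustration: a hedged, userspace-runnable sketch in which the three booleans stand in for the address-family-specific derivations the kernel performs (flag values copied from the enum at the top of the file):
#include <stdbool.h>
#include <stdio.h>

#define RT_MODE_LOCAL     1
#define RT_MODE_NON_LOCAL 2
#define RT_MODE_RDR       4

static bool crosses_boundary(int rt_mode, bool old_rt_is_local,
			     bool source_is_loopback, bool new_rt_is_local)
{
	if (new_rt_is_local) {
		if (!(rt_mode & RT_MODE_LOCAL))
			return true; /* local destination not allowed */
		if (!(rt_mode & RT_MODE_RDR) && !old_rt_is_local)
			return true; /* remote->local redirect not allowed */
	} else {
		if (!(rt_mode & RT_MODE_NON_LOCAL))
			return true; /* non-local destination not allowed */
		if (source_is_loopback)
			return true; /* loopback source must not leave the host */
	}
	return false;
}

int main(void)
{
	int nat = RT_MODE_LOCAL | RT_MODE_NON_LOCAL | RT_MODE_RDR;
	/* NAT mode permits redirecting a remote daddr to a local one */
	printf("%d\n", crosses_boundary(nat, false, false, true)); /* prints 0 */
	return 0;
}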
static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
{
struct sock *sk = skb->sk;
struct rtable *ort = skb_rtable(skb);
if (!skb->dev && sk && sk_fullsock(sk))
ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Gartrell | 68 | 95.77% | 1 | 50.00% |
Eric Dumazet | 3 | 4.23% | 1 | 50.00% |
Total | 71 | 100.00% | 2 | 100.00% |
static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
int rt_mode,
struct ip_vs_iphdr *ipvsh,
struct sk_buff *skb, int mtu)
{
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
struct net *net = ipvs->net;
if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
if (!skb->dev)
skb->dev = net->loopback_dev;
/* only send ICMP too big on first fragment */
if (!ipvsh->fragoffs && !ip_vs_iph_icmp(ipvsh))
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
IP_VS_DBG(1, "frag needed for %pI6c\n",
&ipv6_hdr(skb)->saddr);
return false;
}
} else
#endif
{
/* If we're going to tunnel the packet and pmtu discovery
* is disabled, we'll just fragment it anyway
*/
if ((rt_mode & IP_VS_RT_MODE_TUNNEL) && !sysctl_pmtu_disc(ipvs))
return true;
if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) &&
skb->len > mtu && !skb_is_gso(skb) &&
!ip_vs_iph_icmp(ipvsh))) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
IP_VS_DBG(1, "frag needed for %pI4\n",
&ip_hdr(skb)->saddr);
return false;
}
}
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Gartrell | 211 | 96.79% | 2 | 66.67% |
Eric W. Biedermann | 7 | 3.21% | 1 | 33.33% |
Total | 218 | 100.00% | 3 | 100.00% |
static inline bool decrement_ttl(struct netns_ipvs *ipvs,
int skb_af,
struct sk_buff *skb)
{
struct net *net = ipvs->net;
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
struct dst_entry *dst = skb_dst(skb);
/* check and decrement ttl */
if (ipv6_hdr(skb)->hop_limit <= 1) {
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED,
ICMPV6_EXC_HOPLIMIT, 0);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
return false;
}
/* don't propagate ttl change to cloned packets */
if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
return false;
ipv6_hdr(skb)->hop_limit--;
} else
#endif
{
if (ip_hdr(skb)->ttl <= 1) {
/* Tell the sender its packet died... */
__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
return false;
}
/* don't propagate ttl change to cloned packets */
if (!skb_make_writable(skb, sizeof(struct iphdr)))
return false;
/* Decrease ttl */
ip_decrease_ttl(ip_hdr(skb));
}
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dwip Banerjee | 195 | 100.00% | 1 | 100.00% |
Total | 195 | 100.00% | 1 | 100.00% |
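For IPv4, decrement_ttl() finishes with ip_decrease_ttl(), which patches the header checksum incrementally rather than recomputing it: the checksum is the one's complement of the header sum, and lowering ttl (the high byte of its 16-bit word) by one means adding 0x0100, in network byte order, to the stored checksum with an end-around carry. A userspace restatement that verifies the identity against a full recompute (assuming struct iphdr from <netinet/ip.h>):
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t ip_checksum(const void *buf, size_t len)
{
	const uint16_t *p = buf;
	uint32_t sum = 0;

	for (; len > 1; len -= 2)
		sum += *p++;
	while (sum >> 16)
		sum = (sum & 0xFFFF) + (sum >> 16);
	return (uint16_t)~sum;
}

static void decrease_ttl(struct iphdr *iph)
{
	uint32_t check = iph->check;

	check += htons(0x0100); /* ttl lives in the word's high byte */
	iph->check = (uint16_t)(check + (check >= 0xFFFF)); /* end-around carry */
	iph->ttl--;
}

int main(void)
{
	struct iphdr iph = { .version = 4, .ihl = 5, .ttl = 64,
			     .protocol = IPPROTO_UDP,
			     .saddr = inet_addr("192.0.2.1"),
			     .daddr = inet_addr("192.0.2.2") };
	uint16_t incremental, recomputed;

	iph.check = ip_checksum(&iph, sizeof(iph));
	decrease_ttl(&iph);
	incremental = iph.check;
	iph.check = 0;
	recomputed = ip_checksum(&iph, sizeof(iph));
	/* the two values match, as in the kernel's ip_decrease_ttl() */
	printf("incremental %#x, recomputed %#x\n", incremental, recomputed);
	return 0;
}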
/* Get route to destination or remote server */
static int
__ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
struct ip_vs_dest *dest,
__be32 daddr, int rt_mode, __be32 *ret_saddr,
struct ip_vs_iphdr *ipvsh)
{
struct net *net = ipvs->net;
struct ip_vs_dest_dst *dest_dst;
struct rtable *rt; /* Route to the other host */
int mtu;
int local, noref = 1;
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
rt = (struct rtable *) dest_dst->dst_cache;
else {
dest_dst = ip_vs_dest_dst_alloc();
spin_lock_bh(&dest->dst_lock);
if (!dest_dst) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
goto err_unreach;
}
rt = do_output_route4(net, dest->addr.ip, rt_mode,
&dest_dst->dst_saddr.ip);
if (!rt) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
ip_vs_dest_dst_free(dest_dst);
goto err_unreach;
}
__ip_vs_dst_set(dest, dest_dst, &rt->dst, 0);
spin_unlock_bh(&dest->dst_lock);
IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n",
&dest->addr.ip, &dest_dst->dst_saddr.ip,
atomic_read(&rt->dst.__refcnt));
}
if (ret_saddr)
*ret_saddr = dest_dst->dst_saddr.ip;
} else {
__be32 saddr = htonl(INADDR_ANY);
noref = 0;
/* For such unconfigured boxes avoid many route lookups
* for performance reasons because we do not remember saddr
*/
rt_mode &= ~IP_VS_RT_MODE_CONNECT;
rt = do_output_route4(net, daddr, rt_mode, &saddr);
if (!rt)
goto err_unreach;
if (ret_saddr)
*ret_saddr = saddr;
}
local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
local))) {
IP_VS_DBG_RL("We are crossing local and non-local addresses"
" daddr=%pI4\n", &daddr);
goto err_put;
}
if (unlikely(local)) {
/* skb to local stack, preserve old route */
if (!noref)
ip_rt_put(rt);
return local;
}
if (!decrement_ttl(ipvs, skb_af, skb))
goto err_put;
if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) {
mtu = dst_mtu(&rt->dst);
} else {
mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
goto err_put;
}
maybe_update_pmtu(skb_af, skb, mtu);
}
if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
goto err_put;
skb_dst_drop(skb);
if (noref) {
if (!local)
skb_dst_set_noref(skb, &rt->dst);
else
skb_dst_set(skb, dst_clone(&rt->dst));
} else
skb_dst_set(skb, &rt->dst);
return local;
err_put:
if (!noref)
ip_rt_put(rt);
return -1;
err_unreach:
dst_link_failure(skb);
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 385 | 67.19% | 7 | 30.43% |
Wensong Zhang | 92 | 16.06% | 1 | 4.35% |
Alex Gartrell | 40 | 6.98% | 4 | 17.39% |
Dwip Banerjee | 15 | 2.62% | 1 | 4.35% |
Hans Schillstrom | 14 | 2.44% | 1 | 4.35% |
David S. Miller | 11 | 1.92% | 3 | 13.04% |
Eric W. Biedermann | 9 | 1.57% | 2 | 8.70% |
Julius Volz | 4 | 0.70% | 1 | 4.35% |
Hannes Frederic Sowa | 1 | 0.17% | 1 | 4.35% |
Denis V. Lunev | 1 | 0.17% | 1 | 4.35% |
Harvey Harrison | 1 | 0.17% | 1 | 4.35% |
Total | 573 | 100.00% | 23 | 100.00% |
#ifdef CONFIG_IP_VS_IPV6
static struct dst_entry *
__ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
struct in6_addr *ret_saddr, int do_xfrm, int rt_mode)
{
struct dst_entry *dst;
struct flowi6 fl6 = {
.daddr = *daddr,
};
if (rt_mode & IP_VS_RT_MODE_KNOWN_NH)
fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error)
goto out_err;
if (!ret_saddr)
return dst;
if (ipv6_addr_any(&fl6.saddr) &&
ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
&fl6.daddr, 0, &fl6.saddr) < 0)
goto out_err;
if (do_xfrm) {
dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
if (IS_ERR(dst)) {
dst = NULL;
goto out_err;
}
}
*ret_saddr = fl6.saddr;
return dst;
out_err:
dst_release(dst);
IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hans Schillstrom | 98 | 50.52% | 1 | 12.50% |
Julius Volz | 42 | 21.65% | 1 | 12.50% |
David S. Miller | 34 | 17.53% | 2 | 25.00% |
Martin KaFai Lau | 15 | 7.73% | 1 | 12.50% |
Alexey Dobriyan | 2 | 1.03% | 1 | 12.50% |
Julian Anastasov | 2 | 1.03% | 1 | 12.50% |
Harvey Harrison | 1 | 0.52% | 1 | 12.50% |
Total | 194 | 100.00% | 8 | 100.00% |
/*
* Get route to destination or remote server
*/
static int
__ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
struct ip_vs_dest *dest,
struct in6_addr *daddr, struct in6_addr *ret_saddr,
struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
{
struct net *net = ipvs->net;
struct ip_vs_dest_dst *dest_dst;
struct rt6_info *rt; /* Route to the other host */
struct dst_entry *dst;
int mtu;
int local, noref = 1;
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
rt = (struct rt6_info *) dest_dst->dst_cache;
else {
u32 cookie;
dest_dst = ip_vs_dest_dst_alloc();
spin_lock_bh(&dest->dst_lock);
if (!dest_dst) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
goto err_unreach;
}
dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
&dest_dst->dst_saddr.in6,
do_xfrm, rt_mode);
if (!dst) {
__ip_vs_dst_set(dest, NULL, NULL, 0);
spin_unlock_bh(&dest->dst_lock);
ip_vs_dest_dst_free(dest_dst);
goto err_unreach;
}
rt = (struct rt6_info *) dst;
cookie = rt6_get_cookie(rt);
__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
spin_unlock_bh(&dest->dst_lock);
IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
&dest->addr.in6, &dest_dst->dst_saddr.in6,
atomic_read(&rt->dst.__refcnt));
}
if (ret_saddr)
*ret_saddr = dest_dst->dst_saddr.in6;
} else {
noref = 0;
dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm,
rt_mode);
if (!dst)
goto err_unreach;
rt = (struct rt6_info *) dst;
}
local = __ip_vs_is_local_route6(rt);
if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
local))) {
IP_VS_DBG_RL("We are crossing local and non-local addresses"
" daddr=%pI6\n", daddr);
goto err_put;
}
if (unlikely(local)) {
/* skb to local stack, preserve old route */
if (!noref)
dst_release(&rt->dst);
return local;
}
if (!decrement_ttl(ipvs, skb_af, skb))
goto err_put;
/* MTU checking */
if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL)))
mtu = dst_mtu(&rt->dst);
else {
mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
if (mtu < IPV6_MIN_MTU) {
IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
IPV6_MIN_MTU);
goto err_put;
}
maybe_update_pmtu(skb_af, skb, mtu);
}
if (!ensure_mtu_is_adequate(ipvs, skb_af, rt_mode, ipvsh, skb, mtu))
goto err_put;
skb_dst_drop(skb);
if (noref) {
if (!local)
skb_dst_set_noref(skb, &rt->dst);
else
skb_dst_set(skb, dst_clone(&rt->dst));
} else
skb_dst_set(skb, &rt->dst);
return local;
err_put:
if (!noref)
dst_release(&rt->dst);
return -1;
err_unreach:
/* The ip6_link_failure function requires the dev field to be set
 * in order to get the net (needed later for fwmark reflection).
 */
if (!skb->dev)
skb->dev = skb_dst(skb)->dev;
dst_link_failure(skb);
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 341 | 55.81% | 5 | 25.00% |
Hans Schillstrom | 142 | 23.24% | 1 | 5.00% |
Julius Volz | 49 | 8.02% | 1 | 5.00% |
Alex Gartrell | 44 | 7.20% | 5 | 25.00% |
Dwip Banerjee | 15 | 2.45% | 1 | 5.00% |
Eric W. Biedermann | 9 | 1.47% | 2 | 10.00% |
Martin KaFai Lau | 7 | 1.15% | 2 | 10.00% |
Alexey Dobriyan | 2 | 0.33% | 1 | 5.00% |
Hannes Frederic Sowa | 1 | 0.16% | 1 | 5.00% |
Harvey Harrison | 1 | 0.16% | 1 | 5.00% |
Total | 611 | 100.00% | 20 | 100.00% |
#endif
/* return NF_ACCEPT to allow forwarding or other NF_xxx on error */
static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
struct ip_vs_conn *cp)
{
int ret = NF_ACCEPT;
skb->ipvs_property = 1;
if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
ret = ip_vs_confirm_conntrack(skb);
if (ret == NF_ACCEPT) {
nf_reset(skb);
skb_forward_csum(skb);
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 67 | 100.00% | 2 | 100.00% |
Total | 67 | 100.00% | 2 | 100.00% |
/* In the event of a remote destination, it's possible that we would have
 * matched against an old socket (particularly a TIME-WAIT socket). This
 * causes havoc down the line (ip_local_out et al. expect regular sockets,
 * and invalid memory accesses will happen), so simply drop the association
 * in this case.
 */
static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
{
/* If dev is set, the packet came from the LOCAL_IN callback and
* not from a local TCP socket.
*/
if (skb->dev)
skb_orphan(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Gartrell | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
struct ip_vs_conn *cp, int local)
{
int ret = NF_STOLEN;
skb->ipvs_property = 1;
if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 1);
/* Remove the early_demux association unless it's bound for the
* exact same port and address on this host after translation.
*/
if (!local || cp->vport != cp->dport ||
!ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
ip_vs_drop_early_demux_sk(skb);
if (!local) {
skb_forward_csum(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
ret = NF_ACCEPT;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 102 | 68.92% | 3 | 37.50% |
Alex Gartrell | 37 | 25.00% | 1 | 12.50% |
Eric W. Biedermann | 7 | 4.73% | 3 | 37.50% |
David S. Miller | 2 | 1.35% | 1 | 12.50% |
Total | 148 | 100.00% | 8 | 100.00% |
/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
struct ip_vs_conn *cp, int local)
{
int ret = NF_STOLEN;
skb->ipvs_property = 1;
if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
ip_vs_notrack(skb);
if (!local) {
ip_vs_drop_early_demux_sk(skb);
skb_forward_csum(skb);
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
ret = NF_ACCEPT;
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 86 | 81.13% | 3 | 30.00% |
Eric W. Biedermann | 7 | 6.60% | 3 | 30.00% |
Alex Gartrell | 5 | 4.72% | 1 | 10.00% |
Wensong Zhang | 4 | 3.77% | 1 | 10.00% |
David S. Miller | 2 | 1.89% | 1 | 10.00% |
Julius Volz | 2 | 1.89% | 1 | 10.00% |
Total | 106 | 100.00% | 10 | 100.00% |
/*
* NULL transmitter (do nothing except return NF_ACCEPT)
*/
int
ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
/* we do not touch skb and do not need pskb ptr */
return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 20 | 52.63% | 1 | 20.00% |
Julian Anastasov | 13 | 34.21% | 3 | 60.00% |
Jesper Dangaard Brouer | 5 | 13.16% | 1 | 20.00% |
Total | 38 | 100.00% | 5 | 100.00% |
/*
 * Bypass transmitter
 * Let packets bypass the destination when the destination is not
 * available; it should only be used in a transparent cache cluster.
 */
int
ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct iphdr *iph = ip_hdr(skb);
EnterFunction(10);
rcu_read_lock();
if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr,
IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0)
goto tx_error;
ip_send_check(iph);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wensong Zhang | 62 | 48.82% | 1 | 5.00% |
Julian Anastasov | 42 | 33.07% | 8 | 40.00% |
Alex Gartrell | 6 | 4.72% | 2 | 10.00% |
Jesper Dangaard Brouer | 5 | 3.94% | 1 | 5.00% |
Eric W. Biedermann | 4 | 3.15% | 1 | 5.00% |
Arnaldo Carvalho de Melo | 2 | 1.57% | 1 | 5.00% |
Julius Volz | 1 | 0.79% | 1 | 5.00% |
Eric Dumazet | 1 | 0.79% | 1 | 5.00% |
Changli Gao | 1 | 0.79% | 1 | 5.00% |
Jan Engelhardt | 1 | 0.79% | 1 | 5.00% |
Hannes Eder | 1 | 0.79% | 1 | 5.00% |
Américo Wang | 1 | 0.79% | 1 | 5.00% |
Total | 127 | 100.00% | 20 | 100.00% |
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct ipv6hdr *iph = ipv6_hdr(skb);
EnterFunction(10);
rcu_read_lock();
if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL,
&iph->daddr, NULL,
ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
goto tx_error;
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 54 | 43.20% | 7 | 41.18% |
Wensong Zhang | 38 | 30.40% | 1 | 5.88% |
Alex Gartrell | 15 | 12.00% | 2 | 11.76% |
Julius Volz | 7 | 5.60% | 2 | 11.76% |
Eric W. Biedermann | 4 | 3.20% | 1 | 5.88% |
Eric Dumazet | 3 | 2.40% | 1 | 5.88% |
Jesper Dangaard Brouer | 2 | 1.60% | 1 | 5.88% |
Jan Engelhardt | 1 | 0.80% | 1 | 5.88% |
Américo Wang | 1 | 0.80% | 1 | 5.88% |
Total | 125 | 100.00% | 17 | 100.00% |
#endif
/*
* NAT transmitter (only for outside-to-inside nat forwarding)
* Not used for related ICMP
*/
int
ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct rtable *rt; /* Route to the other host */
int local, rc, was_input;
EnterFunction(10);
rcu_read_lock();
/* check if it is a connection of no-client-port */
if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
__be16 _pt, *p;
p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
if (p == NULL)
goto tx_error;
ip_vs_conn_fill_cport(cp, *p);
IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
}
was_input = rt_is_input_route(skb_rtable(skb));
local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR, NULL, ipvsh);
if (local < 0)
goto tx_error;
rt = skb_rtable(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct && !nf_ct_is_untracked(ct)) {
IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off,
"ip_vs_nat_xmit(): "
"stopping DNAT to local address");
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, ipvsh->off,
"ip_vs_nat_xmit(): stopping DNAT to loopback "
"address");
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (!skb_make_writable(skb, sizeof(struct iphdr)))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
goto tx_error;
ip_hdr(skb)->daddr = cp->daddr.ip;
ip_send_check(ip_hdr(skb));
IP_VS_DBG_PKT(10, AF_INET, pp, skb, ipvsh->off, "After DNAT");
/* FIXME: when an application helper enlarges the packet and the length
 * becomes larger than the MTU of the outgoing device, there will still
 * be an MTU problem.
 */
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
rcu_read_unlock();
LeaveFunction(10);
return rc;
tx_error:
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julius Volz | 169 | 38.94% | 1 | 5.26% |
Julian Anastasov | 169 | 38.94% | 7 | 36.84% |
Wensong Zhang | 60 | 13.82% | 1 | 5.26% |
Alex Gartrell | 17 | 3.92% | 3 | 15.79% |
Jesper Dangaard Brouer | 7 | 1.61% | 1 | 5.26% |
Eric W. Biedermann | 4 | 0.92% | 1 | 5.26% |
Changli Gao | 3 | 0.69% | 1 | 5.26% |
Hans Schillstrom | 2 | 0.46% | 1 | 5.26% |
Jan Engelhardt | 1 | 0.23% | 1 | 5.26% |
Américo Wang | 1 | 0.23% | 1 | 5.26% |
Igor Maravić | 1 | 0.23% | 1 | 5.26% |
Total | 434 | 100.00% | 19 | 100.00% |
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct rt6_info *rt; /* Route to the other host */
int local, rc;
EnterFunction(10);
rcu_read_lock();
/* check if it is a connection of no-client-port */
if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) {
__be16 _pt, *p;
p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
if (p == NULL)
goto tx_error;
ip_vs_conn_fill_cport(cp, *p);
IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
}
local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6,
NULL, ipvsh, 0,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR);
if (local < 0)
goto tx_error;
rt = (struct rt6_info *) skb_dst(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct && !nf_ct_is_untracked(ct)) {
IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off,
"ip_vs_nat_xmit_v6(): "
"stopping DNAT to local address");
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, ipvsh->off,
"ip_vs_nat_xmit_v6(): "
"stopping DNAT to loopback address");
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
/* mangle the packet */
if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
goto tx_error;
ipv6_hdr(skb)->daddr = cp->daddr.in6;
IP_VS_DBG_PKT(10, AF_INET6, pp, skb, ipvsh->off, "After DNAT");
/* FIXME: when an application helper enlarges the packet and the length
 * becomes larger than the MTU of the outgoing device, there will still
 * be an MTU problem.
 */
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
rcu_read_unlock();
LeaveFunction(10);
return rc;
tx_error:
LeaveFunction(10);
kfree_skb(skb);
rcu_read_unlock();
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julius Volz | 224 | 50.56% | 1 | 4.76% |
Julian Anastasov | 174 | 39.28% | 6 | 28.57% |
Alex Gartrell | 13 | 2.93% | 2 | 9.52% |
Jesper Dangaard Brouer | 11 | 2.48% | 3 | 14.29% |
Hans Schillstrom | 6 | 1.35% | 1 | 4.76% |
Eric W. Biedermann | 4 | 0.90% | 1 | 4.76% |
David S. Miller | 3 | 0.68% | 1 | 4.76% |
Martin KaFai Lau | 3 | 0.68% | 1 | 4.76% |
Wensong Zhang | 1 | 0.23% | 1 | 4.76% |
Jan Engelhardt | 1 | 0.23% | 1 | 4.76% |
Américo Wang | 1 | 0.23% | 1 | 4.76% |
Alexey Dobriyan | 1 | 0.23% | 1 | 4.76% |
Igor Maravić | 1 | 0.23% | 1 | 4.76% |
Total | 443 | 100.00% | 21 | 100.00% |
#endif
/* When forwarding a packet, we must ensure that we've got enough headroom
* for the encapsulation packet in the skb. This also gives us an
* opportunity to figure out what the payload_len, dsfield, ttl, and df
* values should be, so that we won't need to look at the old ip header
* again
*/
static struct sk_buff *
ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
unsigned int max_headroom, __u8 *next_protocol,
__u32 *payload_len, __u8 *dsfield, __u8 *ttl,
__be16 *df)
{
struct sk_buff *new_skb = NULL;
struct iphdr *old_iph = NULL;
#ifdef CONFIG_IP_VS_IPV6
struct ipv6hdr *old_ipv6h = NULL;
#endif
ip_vs_drop_early_demux_sk(skb);
if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb)
goto error;
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
consume_skb(skb);
skb = new_skb;
}
#ifdef CONFIG_IP_VS_IPV6
if (skb_af == AF_INET6) {
old_ipv6h = ipv6_hdr(skb);
*next_protocol = IPPROTO_IPV6;
if (payload_len)
*payload_len =
ntohs(old_ipv6h->payload_len) +
sizeof(*old_ipv6h);
*dsfield = ipv6_get_dsfield(old_ipv6h);
*ttl = old_ipv6h->hop_limit;
if (df)
*df = 0;
} else
#endif
{
old_iph = ip_hdr(skb);
/* Copy DF, reset fragment offset and MF */
if (df)
*df = (old_iph->frag_off & htons(IP_DF));
*next_protocol = IPPROTO_IPIP;
/* fix old IP header checksum */
ip_send_check(old_iph);
*dsfield = ipv4_get_dsfield(old_iph);
*ttl = old_iph->ttl;
if (payload_len)
*payload_len = ntohs(old_iph->tot_len);
}
return skb;
error:
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Gartrell | 268 | 94.70% | 2 | 66.67% |
Calvin Owens | 15 | 5.30% | 1 | 33.33% |
Total | 283 | 100.00% | 3 | 100.00% |
static inline int __tun_gso_type_mask(int encaps_af, int orig_af)
{
switch (encaps_af) {
case AF_INET:
return SKB_GSO_IPXIP4;
case AF_INET6:
return SKB_GSO_IPXIP6;
default:
return 0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alex Gartrell | 25 | 71.43% | 1 | 50.00% |
Tom Herbert | 10 | 28.57% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
/*
* IP Tunneling transmitter
*
* This function encapsulates the packet in a new IP packet, its
* destination will be set to cp->daddr. Most code of this function
* is taken from ipip.c.
*
* It is used in a VS/TUN cluster. The load balancer selects a real
* server from the cluster based on a scheduling algorithm,
* encapsulates the request packet and forwards it to the selected
* server. For example, all real servers are configured with
* "ifconfig tunl0 <Virtual IP Address> up". When a server receives
* the encapsulated packet, it decapsulates the packet, processes
* the request and returns the response packets directly to the client
* without passing through the load balancer. This can greatly increase
* the scalability of a virtual server.
*
* Used for ANY protocol
*/
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct netns_ipvs *ipvs = cp->ipvs;
struct net *net = ipvs->net;
struct rtable *rt; /* Route to the other host */
__be32 saddr; /* Source for tunnel */
struct net_device *tdev; /* Device to other host */
__u8 next_protocol = 0;
__u8 dsfield = 0;
__u8 ttl = 0;
__be16 df = 0;
__be16 *dfp = NULL;
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int ret, local;
EnterFunction(10);
rcu_read_lock();
local = __ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_CONNECT |
IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh);
if (local < 0)
goto tx_error;
if (local) {
rcu_read_unlock();
return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
}
rt = skb_rtable(skb);
tdev = rt->dst.dev;
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
/* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
&next_protocol, NULL, &dsfield,
&ttl, dfp);
if (IS_ERR(skb))
goto tx_error;
if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
goto tx_error;
skb->transport_header = skb->network_header;
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
/*
* Push down and install the IPIP header.
*/
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = sizeof(struct iphdr)>>2;
iph->frag_off = df;
iph->protocol = next_protocol;
iph->tos = dsfield;
iph->daddr = cp->daddr.ip;
iph->saddr = saddr;
iph->ttl = ttl;
ip_select_ident(net, skb, NULL);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ret = ip_vs_tunnel_xmit_prepare(skb, cp);
if (ret == NF_ACCEPT)
ip_local_out(net, skb->sk, skb);
else if (ret == NF_DROP)
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
tx_error:
if (!IS_ERR(skb))
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julius Volz | 240 | 50.21% | 1 | 4.00% |
Julian Anastasov | 126 | 26.36% | 9 | 36.00% |
Alex Gartrell | 70 | 14.64% | 3 | 12.00% |
Eric W. Biedermann | 17 | 3.56% | 4 | 16.00% |
Hannes Frederic Sowa | 7 | 1.46% | 1 | 4.00% |
Jesper Dangaard Brouer | 5 | 1.05% | 1 | 4.00% |
Hans Schillstrom | 5 | 1.05% | 1 | 4.00% |
Alexander Duyck | 2 | 0.42% | 1 | 4.00% |
Eric Dumazet | 2 | 0.42% | 1 | 4.00% |
Changli Gao | 2 | 0.42% | 1 | 4.00% |
Américo Wang | 1 | 0.21% | 1 | 4.00% |
Ansis Atteka | 1 | 0.21% | 1 | 4.00% |
Total | 478 | 100.00% | 25 | 100.00% |
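The outer header construction above can be mirrored in ordinary userspace C. A hedged sketch (not kernel code) that fills a struct iphdr from <netinet/ip.h> the way ip_vs_tunnel_xmit() does, leaving tot_len, id and checksum to be filled on output as the kernel does via ip_local_out() and ip_select_ident():
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void fill_outer_ipip_header(struct iphdr *iph, uint8_t dsfield,
				   uint16_t df, uint8_t ttl,
				   uint32_t saddr, uint32_t daddr)
{
	memset(iph, 0, sizeof(*iph));
	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2; /* 5 words, no options */
	iph->frag_off = df;           /* DF copied from inner header if pmtu_disc */
	iph->protocol = IPPROTO_IPIP; /* payload is the inner IPv4 packet */
	iph->tos      = dsfield;      /* dsfield copied from inner header */
	iph->ttl      = ttl;          /* ttl copied from inner header */
	iph->saddr    = saddr;        /* tunnel source bound by the route lookup */
	iph->daddr    = daddr;        /* cp->daddr.ip: the chosen real server */
	/* tot_len, id and check are left for the output path to fill */
}

int main(void)
{
	struct iphdr outer;

	fill_outer_ipip_header(&outer, 0, htons(IP_DF), 64,
			       inet_addr("192.0.2.1"), inet_addr("192.0.2.10"));
	printf("outer: proto=%d ttl=%d df=%#x\n",
	       outer.protocol, outer.ttl, ntohs(outer.frag_off));
	return 0;
}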
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
struct rt6_info *rt; /* Route to the other host */
struct in6_addr saddr; /* Source for tunnel */
struct net_device *tdev; /* Device to other host */
__u8 next_protocol = 0;
__u32 payload_len = 0;
__u8 dsfield = 0;
__u8 ttl = 0;
struct ipv6hdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int ret, local;
EnterFunction(10);
rcu_read_lock();
local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6,
&saddr, ipvsh, 1,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_TUNNEL);
if (local < 0)
goto tx_error;
if (local) {
rcu_read_unlock();
return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
}
rt = (struct rt6_info *) skb_dst(skb);
tdev = rt->dst.dev;
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
&next_protocol, &payload_len,
&dsfield, &ttl, NULL);
if (IS_ERR(skb))
goto tx_error;
if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
goto tx_error;
skb->transport_header = skb->network_header;
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
/*
* Push down and install the IPIP header.
*/
iph = ipv6_hdr(skb);
iph->version = 6;
iph->nexthdr = next_protocol;
iph->payload_len = htons(payload_len);
memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
ipv6_change_dsfield(iph, 0, dsfield);
iph->daddr = cp->daddr.in6;
iph->saddr = saddr;
iph->hop_limit = ttl;
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ret = ip_vs_tunnel_xmit_prepare(skb, cp);
if (ret == NF_ACCEPT)
ip6_local_out(cp->ipvs->net, skb->sk, skb);
else if (ret == NF_DROP)
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
tx_error:
if (!IS_ERR(skb))
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julius Volz | 222 | 48.58% | 1 | 4.55% |
Julian Anastasov | 118 | 25.82% | 7 | 31.82% |
Alex Gartrell | 67 | 14.66% | 3 | 13.64% |
Hans Schillstrom | 19 | 4.16% | 1 | 4.55% |
Eric W. Biedermann | 14 | 3.06% | 3 | 13.64% |
Jesper Dangaard Brouer | 8 | 1.75% | 2 | 9.09% |
Harvey Harrison | 3 | 0.66% | 1 | 4.55% |
Alexander Duyck | 2 | 0.44% | 1 | 4.55% |
Alexey Dobriyan | 2 | 0.44% | 1 | 4.55% |
David S. Miller | 1 | 0.22% | 1 | 4.55% |
Américo Wang | 1 | 0.22% | 1 | 4.55% |
Total | 457 | 100.00% | 22 | 100.00% |
#endif
/*
* Direct Routing transmitter
* Used for ANY protocol
*/
int
ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
int local;
EnterFunction(10);
rcu_read_lock();
local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh);
if (local < 0)
goto tx_error;
if (local) {
rcu_read_unlock();
return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
}
ip_send_check(ip_hdr(skb));
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julius Volz | 68 | 43.59% | 1 | 5.88% |
Julian Anastasov | 56 | 35.90% | 7 | 41.18% |
Wensong Zhang | 11 | 7.05% | 1 | 5.88% |
Alex Gartrell | 6 | 3.85% | 2 | 11.76% |
Jesper Dangaard Brouer | 5 | 3.21% | 1 | 5.88% |
Eric W. Biedermann | 4 | 2.56% | 1 | 5.88% |
Changli Gao | 2 | 1.28% | 1 | 5.88% |
Hans Schillstrom | 2 | 1.28% | 1 | 5.88% |
Jan Engelhardt | 1 | 0.64% | 1 | 5.88% |
Américo Wang | 1 | 0.64% | 1 | 5.88% |
Total | 156 | 100.00% | 17 | 100.00% |
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
int local;
EnterFunction(10);
rcu_read_lock();
local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6,
NULL, ipvsh, 0,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_KNOWN_NH);
if (local < 0)
goto tx_error;
if (local) {
rcu_read_unlock();
return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
}
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
tx_error:
kfree_skb(skb);
rcu_read_unlock();
LeaveFunction(10);
return NF_STOLEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 71 | 47.02% | 7 | 41.18% |
Julius Volz | 30 | 19.87% | 1 | 5.88% |
Wensong Zhang | 26 | 17.22% | 1 | 5.88% |
Hans Schillstrom | 6 | 3.97% | 1 | 5.88% |
Alex Gartrell | 4 | 2.65% | 1 | 5.88% |
Jesper Dangaard Brouer | 4 | 2.65% | 1 | 5.88% |
Eric W. Biedermann | 4 | 2.65% | 1 | 5.88% |
Martin KaFai Lau | 2 | 1.32% | 1 | 5.88% |
David S. Miller | 2 | 1.32% | 1 | 5.88% |
Américo Wang | 1 | 0.66% | 1 | 5.88% |
Jan Engelhardt | 1 | 0.66% | 1 | 5.88% |
Total | 151 | 100.00% | 17 | 100.00% |
#endif
/*
* ICMP packet transmitter
* called by the ip_vs_in_icmp
*/
int
ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
struct ip_vs_iphdr *iph)
{
struct rtable *rt; /* Route to the other host */
int rc;
int local;
int rt_mode, was_input;
EnterFunction(10);
/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
forwarded directly here, because there is no need to
translate address/port back */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
if (cp->packet_xmit)
rc = cp->packet_xmit(skb, cp, pp, iph);
else
rc = NF_ACCEPT;
/* do not touch skb anymore */
atomic_inc(&cp->in_pkts);
goto out;
}
/*
* mangle and send the packet here (only for VS/NAT)
*/
was_input = rt_is_input_route(skb_rtable(skb));
/* LOCALNODE from FORWARD hook is not supported */
rt_mode = (hooknum != NF_INET_FORWARD) ?
IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
rcu_read_lock();
local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
NULL, iph);
if (local < 0)
goto tx_error;
rt = skb_rtable(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct && !nf_ct_is_untracked(ct)) {
IP_VS_DBG(10, "%s(): "
"stopping DNAT to local address %pI4\n",
__func__, &cp->daddr.ip);
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
IP_VS_DBG(1, "%s(): "
"stopping DNAT to loopback %pI4\n",
__func__, &cp->daddr.ip);
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (!skb_make_writable(skb, offset))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
ip_vs_nat_icmp(skb, pp, cp, 0);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
rcu_read_unlock();
goto out;
tx_error:
kfree_skb(skb);
rcu_read_unlock();
rc = NF_STOLEN;
out:
LeaveFunction(10);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 206 | 53.79% | 8 | 36.36% |
Julius Volz | 86 | 22.45% | 2 | 9.09% |
Wensong Zhang | 61 | 15.93% | 1 | 4.55% |
Jesper Dangaard Brouer | 7 | 1.83% | 1 | 4.55% |
Alex Gartrell | 6 | 1.57% | 2 | 9.09% |
Arnaldo Carvalho de Melo | 4 | 1.04% | 1 | 4.55% |
Eric W. Biedermann | 4 | 1.04% | 1 | 4.55% |
Hannes Eder | 2 | 0.52% | 1 | 4.55% |
Hans Schillstrom | 2 | 0.52% | 1 | 4.55% |
Simon Horman | 2 | 0.52% | 1 | 4.55% |
Jan Engelhardt | 1 | 0.26% | 1 | 4.55% |
Igor Maravić | 1 | 0.26% | 1 | 4.55% |
Américo Wang | 1 | 0.26% | 1 | 4.55% |
Total | 383 | 100.00% | 22 | 100.00% |
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
struct ip_vs_iphdr *ipvsh)
{
struct rt6_info *rt; /* Route to the other host */
int rc;
int local;
int rt_mode;
EnterFunction(10);
/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
forwarded directly here, because there is no need to
translate address/port back */
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
if (cp->packet_xmit)
rc = cp->packet_xmit(skb, cp, pp, ipvsh);
else
rc = NF_ACCEPT;
/* do not touch skb anymore */
atomic_inc(&cp->in_pkts);
goto out;
}
/*
* mangle and send the packet here (only for VS/NAT)
*/
/* LOCALNODE from FORWARD hook is not supported */
rt_mode = (hooknum != NF_INET_FORWARD) ?
IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
rcu_read_lock();
local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest,
&cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
if (local < 0)
goto tx_error;
rt = (struct rt6_info *) skb_dst(skb);
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
*/
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
if (cp->flags & IP_VS_CONN_F_SYNC && local) {
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (ct && !nf_ct_is_untracked(ct)) {
IP_VS_DBG(10, "%s(): "
"stopping DNAT to local address %pI6\n",
__func__, &cp->daddr.in6);
goto tx_error;
}
}
#endif
/* From world but DNAT to loopback address? */
if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
ipv6_addr_type(&cp->daddr.in6) & IPV6_ADDR_LOOPBACK) {
IP_VS_DBG(1, "%s(): "
"stopping DNAT to loopback %pI6\n",
__func__, &cp->daddr.in6);
goto tx_error;
}
/* copy-on-write the packet before mangling it */
if (!skb_make_writable(skb, offset))
goto tx_error;
if (skb_cow(skb, rt->dst.dev->hard_header_len))
goto tx_error;
ip_vs_nat_icmp_v6(skb, pp, cp, 0);
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
rcu_read_unlock();
goto out;
tx_error:
kfree_skb(skb);
rcu_read_unlock();
rc = NF_STOLEN;
out:
LeaveFunction(10);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 226 | 57.22% | 8 | 40.00% |
Wensong Zhang | 135 | 34.18% | 1 | 5.00% |
Julius Volz | 8 | 2.03% | 2 | 10.00% |
Hans Schillstrom | 6 | 1.52% | 1 | 5.00% |
Jesper Dangaard Brouer | 5 | 1.27% | 1 | 5.00% |
Alex Gartrell | 4 | 1.01% | 1 | 5.00% |
Eric W. Biedermann | 4 | 1.01% | 1 | 5.00% |
Martin KaFai Lau | 3 | 0.76% | 1 | 5.00% |
Américo Wang | 1 | 0.25% | 1 | 5.00% |
Igor Maravić | 1 | 0.25% | 1 | 5.00% |
Herbert Xu | 1 | 0.25% | 1 | 5.00% |
Jan Engelhardt | 1 | 0.25% | 1 | 5.00% |
Total | 395 | 100.00% | 20 | 100.00% |
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Julian Anastasov | 2581 | 39.32% | 16 | 19.28% |
Julius Volz | 1192 | 18.16% | 3 | 3.61% |
Alex Gartrell | 1166 | 17.76% | 11 | 13.25% |
Wensong Zhang | 626 | 9.54% | 1 | 1.20% |
Hans Schillstrom | 314 | 4.78% | 1 | 1.20% |
Dwip Banerjee | 225 | 3.43% | 1 | 1.20% |
Jesper Dangaard Brouer | 103 | 1.57% | 4 | 4.82% |
Eric W. Biedermann | 102 | 1.55% | 10 | 12.05% |
David S. Miller | 56 | 0.85% | 7 | 8.43% |
Martin KaFai Lau | 30 | 0.46% | 3 | 3.61% |
Changli Gao | 28 | 0.43% | 1 | 1.20% |
Patrick McHardy | 28 | 0.43% | 1 | 1.20% |
Calvin Owens | 15 | 0.23% | 1 | 1.20% |
Hannes Eder | 14 | 0.21% | 2 | 2.41% |
Tom Herbert | 10 | 0.15% | 1 | 1.20% |
Américo Wang | 10 | 0.15% | 1 | 1.20% |
Eric Dumazet | 9 | 0.14% | 2 | 2.41% |
Hannes Frederic Sowa | 9 | 0.14% | 2 | 2.41% |
Jan Engelhardt | 8 | 0.12% | 1 | 1.20% |
Alexey Dobriyan | 7 | 0.11% | 1 | 1.20% |
Harvey Harrison | 6 | 0.09% | 4 | 4.82% |
Arnaldo Carvalho de Melo | 6 | 0.09% | 1 | 1.20% |
Alexander Duyck | 4 | 0.06% | 1 | 1.20% |
Herbert Xu | 4 | 0.06% | 2 | 2.41% |
Igor Maravić | 4 | 0.06% | 1 | 1.20% |
Tejun Heo | 3 | 0.05% | 1 | 1.20% |
Simon Horman | 2 | 0.03% | 1 | 1.20% |
Denis V. Lunev | 1 | 0.02% | 1 | 1.20% |
Ansis Atteka | 1 | 0.02% | 1 | 1.20% |
Total | 6564 | 100.00% | 83 | 100.00% |