Release 4.11 net/ipv6/output_core.c
/*
* IPv6 library code, needed by static components when full IPv6 support is
* not configured or static. These functions are needed by GSO/GRO implementation.
*/
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/netfilter.h>
#include <net/addrconf.h>
#include <net/ip.h>
#include <net/ip6_fib.h>
#include <net/ipv6.h>
#include <net/secure_seq.h>
/* Compute a flow-keyed IPv6 fragment identification.
 *
 * @net:     network namespace, mixed into the hash so identical flows in
 *           different namespaces draw from unrelated ident sequences
 * @hashrnd: caller-supplied hash seed
 * @dst:     destination address
 * @src:     source address
 *
 * Returns a non-zero 32-bit identifier.
 */
static u32 __ipv6_select_ident(struct net *net, u32 hashrnd,
			       const struct in6_addr *dst,
			       const struct in6_addr *src)
{
	u32 hash, id;

	hash = __ipv6_addr_jhash(dst, hashrnd);
	hash = __ipv6_addr_jhash(src, hash);
	hash ^= net_hash_mix(net);

	/* Treat an id of 0 as unset and if we get 0 back from
	 * ip_idents_reserve(), set the high order bit instead, thus
	 * minimizing possible future collisions.
	 */
	id = ip_idents_reserve(hash, 1);
	if (unlikely(!id))
		id = 1U << 31;	/* 1 << 31 would shift into the sign bit: UB */

	return id;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 69 | 83.13% | 2 | 50.00% |
Hannes Frederic Sowa | 12 | 14.46% | 1 | 25.00% |
Martin KaFai Lau | 2 | 2.41% | 1 | 25.00% |
Total | 83 | 100.00% | 4 | 100.00% |
/* This function exists only for tap drivers that must support broken
 * clients requesting UFO without specifying an IPv6 fragment ID.
 *
 * Similar to ipv6_select_ident(), but an independent hash seed is used
 * to limit information leakage.
 *
 * The network header must be set before calling this.
 */
void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
{
	static u32 ip6_proxy_idents_hashrnd __read_mostly;
	struct in6_addr addr_buf[2];
	struct in6_addr *addr_pair;
	u32 ident;

	/* saddr and daddr are adjacent in the IPv6 header, so a single
	 * two-element copy starting at saddr covers both.
	 */
	addr_pair = skb_header_pointer(skb,
				       skb_network_offset(skb) +
				       offsetof(struct ipv6hdr, saddr),
				       sizeof(addr_buf), addr_buf);
	if (!addr_pair)
		return;

	net_get_random_once(&ip6_proxy_idents_hashrnd,
			    sizeof(ip6_proxy_idents_hashrnd));

	ident = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
				    &addr_pair[1], &addr_pair[0]);
	skb_shinfo(skb)->ip6_frag_id = htonl(ident);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ben Hutchings | 89 | 79.46% | 1 | 25.00% |
Vlad Yasevich | 16 | 14.29% | 2 | 50.00% |
Hannes Frederic Sowa | 7 | 6.25% | 1 | 25.00% |
Total | 112 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
/* Pick a fragment identification for the flow (daddr, saddr), seeded by
 * a lazily-initialized per-boot random value.  Returned in network byte
 * order, ready to be placed in a fragment header.
 */
__be32 ipv6_select_ident(struct net *net,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	static u32 ip6_idents_hashrnd __read_mostly;

	net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));

	return htonl(__ipv6_select_ident(net, ip6_idents_hashrnd,
					 daddr, saddr));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 29 | 48.33% | 1 | 16.67% |
Martin KaFai Lau | 13 | 21.67% | 2 | 33.33% |
Ben Hutchings | 10 | 16.67% | 1 | 16.67% |
Hannes Frederic Sowa | 7 | 11.67% | 1 | 16.67% |
Eric Dumazet | 1 | 1.67% | 1 | 16.67% |
Total | 60 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(ipv6_select_ident);
/* Find the offset (from the network header) at which a fragment header
 * must be inserted, i.e. just past any hop-by-hop, routing, and
 * HAO-bearing destination options headers.  On success the offset is
 * returned and *nexthdr points at the nexthdr byte to be rewritten; on
 * a malformed or truncated header chain -EINVAL is returned.
 *
 * Fixes two defects in the previous version (CVE-2017-7542):
 *  - offset was u16 and could wrap via "offset += ipv6_optlen()",
 *    producing a bogus small offset (and potential infinite loop);
 *  - exthdr was dereferenced without checking that the option header
 *    actually lies within the skb's linear data.
 */
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	unsigned int offset = sizeof(struct ipv6hdr);
	unsigned int packet_len = skb_tail_pointer(skb) -
				  skb_network_header(skb);
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset <= packet_len) {
		struct ipv6_opt_hdr *exthdr;

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			/* A Home Address option pins this destination
			 * options header before the fragment header, so
			 * keep walking past it.
			 */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		/* Make sure the whole option header is inside the linear
		 * data before dereferencing it.
		 */
		if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
			return -EINVAL;

		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
	}

	return -EINVAL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 167 | 95.98% | 1 | 33.33% |
Simon Horman | 6 | 3.45% | 1 | 33.33% |
Ian Morris | 1 | 0.57% | 1 | 33.33% |
Total | 174 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(ip6_find_1stfragopt);
#if IS_ENABLED(CONFIG_IPV6)
/* Return the hop limit to use for packets leaving via @dst: the route's
 * RTAX_HOPLIMIT metric when set, otherwise the egress device's
 * configured hop limit, falling back to the namespace-wide default when
 * the device has no inet6_dev attached.
 */
int ip6_dst_hoplimit(struct dst_entry *dst)
{
	struct inet6_dev *idev;
	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);

	if (hoplimit != 0)
		return hoplimit;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	hoplimit = idev ? idev->cnf.hop_limit
			: dev_net(dst->dev)->ipv6.devconf_all->hop_limit;
	rcu_read_unlock();

	return hoplimit;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 84 | 100.00% | 1 | 100.00% |
Total | 84 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(ip6_dst_hoplimit);
#endif
/* Finalize the IPv6 header of a locally generated packet and run it
 * through the NF_INET_LOCAL_OUT netfilter hook with dst_output() as the
 * continuation.  Returns the hook chain's verdict (1 means the caller
 * may proceed to dst_output()); returns 0 if the L3 master device
 * handler consumed the skb.
 */
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int len;
	/* payload_len covers everything after the fixed 40-byte header;
	 * lengths above IPV6_MAXPLEN are clamped to 0 — presumably the
	 * jumbogram convention (RFC 2675), TODO confirm callers attach
	 * the jumbo option in that case.
	 */
	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);
	/* Record where nexthdr lives for later header-chain walking. */
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_out(sk, skb);
	if (unlikely(!skb))
		return 0;	/* consumed by the l3mdev handler */
	skb->protocol = htons(ETH_P_IPV6);
	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 65 | 51.18% | 1 | 10.00% |
David Ahern | 21 | 16.54% | 1 | 10.00% |
huizhang | 15 | 11.81% | 1 | 10.00% |
Eli Cooper | 9 | 7.09% | 1 | 10.00% |
Eric W. Biedermann | 9 | 7.09% | 4 | 40.00% |
David S. Miller | 8 | 6.30% | 2 | 20.00% |
Total | 127 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL_GPL(__ip6_local_out);
/* Send a locally generated IPv6 packet: run the LOCAL_OUT hook via
 * __ip6_local_out() and, when it okays the packet (returns 1), hand it
 * to dst_output().  Any other value is passed straight back.
 */
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret = __ip6_local_out(net, sk, skb);

	if (likely(ret == 1))
		ret = dst_output(net, sk, skb);

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Américo Wang | 36 | 63.16% | 1 | 12.50% |
Eric W. Biedermann | 12 | 21.05% | 6 | 75.00% |
David S. Miller | 9 | 15.79% | 1 | 12.50% |
Total | 57 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(ip6_local_out);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Vlad Yasevich | 302 | 39.84% | 4 | 14.81% |
Américo Wang | 211 | 27.84% | 2 | 7.41% |
Ben Hutchings | 107 | 14.12% | 1 | 3.70% |
Hannes Frederic Sowa | 29 | 3.83% | 2 | 7.41% |
Eric W. Biedermann | 21 | 2.77% | 7 | 25.93% |
David Ahern | 21 | 2.77% | 1 | 3.70% |
David S. Miller | 17 | 2.24% | 2 | 7.41% |
Martin KaFai Lau | 15 | 1.98% | 2 | 7.41% |
huizhang | 15 | 1.98% | 1 | 3.70% |
Eli Cooper | 9 | 1.19% | 1 | 3.70% |
Simon Horman | 6 | 0.79% | 1 | 3.70% |
Pablo Neira Ayuso | 3 | 0.40% | 1 | 3.70% |
Ian Morris | 1 | 0.13% | 1 | 3.70% |
Eric Dumazet | 1 | 0.13% | 1 | 3.70% |
Total | 758 | 100.00% | 27 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.