cregit (Linux): how code gets into the kernel

Release 4.14, net/ipv6/af_inet6.c (directory: net/ipv6)
/*
 *      PF_INET6 socket protocol family
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      Adapted from linux/net/ipv4/af_inet.c
 *
 *      Fixes:
 *      piggy, Karl Knutson     :       Socket protocol table
 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 *      Arnaldo Melo            :       check proc_net_create return, cleanups
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */


#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/slab.h>

#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/icmpv6.h>
#include <linux/netfilter_ipv6.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/tcp.h>
#include <net/ping.h>
#include <net/protocol.h>
#include <net/inet_common.h>
#include <net/route.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#ifdef CONFIG_IPV6_TUNNEL
#include <net/ip6_tunnel.h>
#endif
#include <net/calipso.h>
#include <net/seg6.h>

#include <linux/uaccess.h>
#include <linux/mroute6.h>

#include "ip6_offload.h"

MODULE_AUTHOR("Cast of dozens");
MODULE_DESCRIPTION("IPv6 protocol stack for Linux");
MODULE_LICENSE("GPL");

/* The inetsw6 table contains everything that inet6_create needs to
 * build a new socket.
 */

static struct list_head inetsw6[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw6_lock);


struct ipv6_params ipv6_defaults = {
	.disable_ipv6 = 0,
	.autoconf = 1,
};


static int disable_ipv6_mod;

module_param_named(disable, disable_ipv6_mod, int, 0444);
MODULE_PARM_DESC(disable, "Disable IPv6 module such that it is non-functional");

module_param_named(disable_ipv6, ipv6_defaults.disable_ipv6, int, 0444);
MODULE_PARM_DESC(disable_ipv6, "Disable IPv6 on all interfaces");

module_param_named(autoconf, ipv6_defaults.autoconf, int, 0444);
MODULE_PARM_DESC(autoconf, "Enable IPv6 address autoconfiguration on all interfaces");
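
Because these parameters are registered with mode 0444, their current values can be read back from sysfs once the module is loaded (and they can be set at boot, e.g. ipv6.disable=1 on the kernel command line). The following user-space sketch, which is illustrative and not part of this file, simply reads one of them:

#include <stdio.h>

int main(void)
{
	char v[4] = "";
	FILE *f = fopen("/sys/module/ipv6/parameters/disable", "r");

	if (f && fgets(v, sizeof(v), f))
		printf("ipv6.disable = %s", v);	/* "0" unless booted with ipv6.disable=1 */
	if (f)
		fclose(f);
	return 0;
}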


bool ipv6_mod_enabled(void)
{
	return disable_ipv6_mod == 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
David Ahern | 12 | 100.00% | 1 | 100.00%
Total | 12 | 100.00% | 1 | 100.00%
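
ipv6_mod_enabled() is exported so that other kernel code can check whether IPv6 was administratively disabled at load time. A minimal kernel-side sketch of such a caller (hypothetical, not from this file; it assumes the declaration is visible via net/ipv6.h):

#include <linux/errno.h>
#include <net/ipv6.h>

static int example_ipv6_setup(void)
{
	if (!ipv6_mod_enabled())
		return -EAFNOSUPPORT;	/* module loaded with ipv6.disable=1 */

	/* ... safe to set up IPv6-specific state here ... */
	return 0;
}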

EXPORT_SYMBOL_GPL(ipv6_mod_enabled);

static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
{
	const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
static int inet6_create(struct net *net, struct socket *sock, int protocol, int kern) { struct inet_sock *inet; struct ipv6_pinfo *np; struct sock *sk; struct inet_protosw *answer; struct proto *answer_prot; unsigned char answer_flags; int try_loading_module = 0; int err; if (protocol < 0 || protocol >= IPPROTO_MAX) return -EINVAL; /* Look for the requested type/protocol pair. */ lookup_protocol: err = -ESOCKTNOSUPPORT; rcu_read_lock(); list_for_each_entry_rcu(answer, &inetsw6[sock->type], list) { err = 0; /* Check the non-wild match. */ if (protocol == answer->protocol) { if (protocol != IPPROTO_IP) break; } else { /* Check for the two wild cases. */ if (IPPROTO_IP == protocol) { protocol = answer->protocol; break; } if (IPPROTO_IP == answer->protocol) break; } err = -EPROTONOSUPPORT; } if (err) { if (try_loading_module < 2) { rcu_read_unlock(); /* * Be more specific, e.g. net-pf-10-proto-132-type-1 * (net-pf-PF_INET6-proto-IPPROTO_SCTP-type-SOCK_STREAM) */ if (++try_loading_module == 1) request_module("net-pf-%d-proto-%d-type-%d", PF_INET6, protocol, sock->type); /* * Fall back to generic, e.g. net-pf-10-proto-132 * (net-pf-PF_INET6-proto-IPPROTO_SCTP) */ else request_module("net-pf-%d-proto-%d", PF_INET6, protocol); goto lookup_protocol; } else goto out_rcu_unlock; } err = -EPERM; if (sock->type == SOCK_RAW && !kern && !ns_capable(net->user_ns, CAP_NET_RAW)) goto out_rcu_unlock; sock->ops = answer->ops; answer_prot = answer->prot; answer_flags = answer->flags; rcu_read_unlock(); WARN_ON(!answer_prot->slab); err = -ENOBUFS; sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern); if (!sk) goto out; sock_init_data(sock, sk); err = 0; if (INET_PROTOSW_REUSE & answer_flags) sk->sk_reuse = SK_CAN_REUSE; inet = inet_sk(sk); inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; if (SOCK_RAW == sock->type) { inet->inet_num = protocol; if (IPPROTO_RAW == protocol) inet->hdrincl = 1; } sk->sk_destruct = inet_sock_destruct; sk->sk_family = PF_INET6; sk->sk_protocol = protocol; sk->sk_backlog_rcv = answer->prot->backlog_rcv; inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk); np->hop_limit = -1; np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; np->mc_loop = 1; np->pmtudisc = IPV6_PMTUDISC_WANT; np->autoflowlabel = ip6_default_np_autolabel(net); np->repflow = net->ipv6.sysctl.flowlabel_reflect; sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; /* Init the ipv4 part of the socket since we can have sockets * using v6 API for ipv4. */ inet->uc_ttl = -1; inet->mc_loop = 1; inet->mc_ttl = 1; inet->mc_index = 0; inet->mc_list = NULL; inet->rcv_tos = 0; if (net->ipv4.sysctl_ip_no_pmtu_disc) inet->pmtudisc = IP_PMTUDISC_DONT; else inet->pmtudisc = IP_PMTUDISC_WANT; /* * Increment only the relevant sk_prot->socks debug field, this changes * the previous behaviour of incrementing both the equivalent to * answer->prot->socks (inet6_sock_nr) and inet_sock_nr. * * This allows better debug granularity as we'll know exactly how many * UDPv6, TCPv6, etc socks were allocated, not the sum of all IPv6 * transport protocol socks. -acme */ sk_refcnt_debug_inc(sk); if (inet->inet_num) { /* It assumes that any protocol which allows * the user to assign a number at socket * creation time automatically shares. 
*/ inet->inet_sport = htons(inet->inet_num); err = sk->sk_prot->hash(sk); if (err) { sk_common_release(sk); goto out; } } if (sk->sk_prot->init) { err = sk->sk_prot->init(sk); if (err) { sk_common_release(sk); goto out; } } if (!kern) { err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk); if (err) { sk_common_release(sk); goto out; } } out: return err; out_rcu_unlock: rcu_read_unlock(); goto out; }

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 196 | 30.20% | 10 | 19.61%
Arnaldo Carvalho de Melo | 99 | 15.25% | 6 | 11.76%
Linus Torvalds | 81 | 12.48% | 1 | 1.96%
Hideaki Yoshifuji / 吉藤英明 | 79 | 12.17% | 4 | 7.84%
David S. Miller | 46 | 7.09% | 4 | 7.84%
David Ahern | 28 | 4.31% | 1 | 1.96%
Hannes Frederic Sowa | 18 | 2.77% | 2 | 3.92%
Craig Gallek | 16 | 2.47% | 1 | 1.96%
Eric W. Biedermann | 14 | 2.16% | 3 | 5.88%
Jakub Sitnicki | 13 | 2.00% | 2 | 3.92%
Eric Paris | 11 | 1.69% | 3 | 5.88%
Tom Herbert | 8 | 1.23% | 1 | 1.96%
Paul E. McKenney | 7 | 1.08% | 1 | 1.96%
Jiri Benc | 6 | 0.92% | 1 | 1.96%
Eric Dumazet | 6 | 0.92% | 2 | 3.92%
Stephen Hemminger | 5 | 0.77% | 1 | 1.96%
Daniel Lezcano | 5 | 0.77% | 1 | 1.96%
Paul Moore | 4 | 0.62% | 2 | 3.92%
Pavel Emelyanov | 3 | 0.46% | 2 | 3.92%
Ian Morris | 2 | 0.31% | 1 | 1.96%
Al Viro | 1 | 0.15% | 1 | 1.96%
Ilpo Järvinen | 1 | 0.15% | 1 | 1.96%
Total | 649 | 100.00% | 51 | 100.00%
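
For orientation, inet6_create() above is what a user-space socket(AF_INET6, ...) call lands in: protocol 0 (IPPROTO_IP) is the wild-card that the lookup loop resolves to the registered default for the socket type, and an unknown protocol triggers the request_module() calls shown in the code. A small illustrative user-space sketch (not part of this file):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	/* wild-card protocol 0: resolved to IPPROTO_TCP for SOCK_STREAM */
	int tcp6 = socket(AF_INET6, SOCK_STREAM, 0);

	/* may trigger request_module("net-pf-10-proto-132-type-1") for SCTP */
	int sctp6 = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);

	if (tcp6 < 0)
		perror("socket AF_INET6/SOCK_STREAM");
	if (sctp6 < 0)
		perror("socket AF_INET6/IPPROTO_SCTP");	/* EPROTONOSUPPORT if SCTP is unavailable */
	return 0;
}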

/* bind for INET6 API */
int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *addr = (struct sockaddr_in6 *)uaddr; struct sock *sk = sock->sk; struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); __be32 v4addr = 0; unsigned short snum; int addr_type = 0; int err = 0; /* If the socket has its own bind function then use it. */ if (sk->sk_prot->bind) return sk->sk_prot->bind(sk, uaddr, addr_len); if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (addr->sin6_family != AF_INET6) return -EAFNOSUPPORT; addr_type = ipv6_addr_type(&addr->sin6_addr); if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM) return -EINVAL; snum = ntohs(addr->sin6_port); if (snum && snum < inet_prot_sock(net) && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) return -EACCES; lock_sock(sk); /* Check these errors (active socket, double bind). */ if (sk->sk_state != TCP_CLOSE || inet->inet_num) { err = -EINVAL; goto out; } /* Check if the address belongs to the host. */ if (addr_type == IPV6_ADDR_MAPPED) { int chk_addr_ret; /* Binding to v4-mapped address on a v6-only socket * makes no sense */ if (sk->sk_ipv6only) { err = -EINVAL; goto out; } /* Reproduce AF_INET checks to make the bindings consistent */ v4addr = addr->sin6_addr.s6_addr32[3]; chk_addr_ret = inet_addr_type(net, v4addr); if (!net->ipv4.sysctl_ip_nonlocal_bind && !(inet->freebind || inet->transparent) && v4addr != htonl(INADDR_ANY) && chk_addr_ret != RTN_LOCAL && chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) { err = -EADDRNOTAVAIL; goto out; } } else { if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; rcu_read_lock(); if (__ipv6_addr_needs_scope_id(addr_type)) { if (addr_len >= sizeof(struct sockaddr_in6) && addr->sin6_scope_id) { /* Override any existing binding, if another one * is supplied by user. */ sk->sk_bound_dev_if = addr->sin6_scope_id; } /* Binding to link-local address requires an interface */ if (!sk->sk_bound_dev_if) { err = -EINVAL; goto out_unlock; } dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if); if (!dev) { err = -ENODEV; goto out_unlock; } } /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. */ v4addr = LOOPBACK4_IPV6; if (!(addr_type & IPV6_ADDR_MULTICAST)) { if (!net->ipv6.sysctl.ip_nonlocal_bind && !(inet->freebind || inet->transparent) && !ipv6_chk_addr(net, &addr->sin6_addr, dev, 0)) { err = -EADDRNOTAVAIL; goto out_unlock; } } rcu_read_unlock(); } } inet->inet_rcv_saddr = v4addr; inet->inet_saddr = v4addr; sk->sk_v6_rcv_saddr = addr->sin6_addr; if (!(addr_type & IPV6_ADDR_MULTICAST)) np->saddr = addr->sin6_addr; /* Make sure we are allowed to bind here. */ if ((snum || !inet->bind_address_no_port) && sk->sk_prot->get_port(sk, snum)) { inet_reset_saddr(sk); err = -EADDRINUSE; goto out; } if (addr_type != IPV6_ADDR_ANY) { sk->sk_userlocks |= SOCK_BINDADDR_LOCK; if (addr_type != IPV6_ADDR_MAPPED) sk->sk_ipv6only = 1; } if (snum) sk->sk_userlocks |= SOCK_BINDPORT_LOCK; inet->inet_sport = htons(inet->inet_num); inet->inet_dport = 0; inet->inet_daddr = 0; out: release_sock(sk); return err; out_unlock: rcu_read_unlock(); goto out; }

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 285 | 44.60% | 9 | 20.93%
Ville Nuorvala | 134 | 20.97% | 2 | 4.65%
Vladislav Yasevich | 65 | 10.17% | 3 | 6.98%
Eric Dumazet | 39 | 6.10% | 5 | 11.63%
David S. Miller | 27 | 4.23% | 2 | 4.65%
Arnaldo Carvalho de Melo | 12 | 1.88% | 3 | 6.98%
Marcus Meissner | 12 | 1.88% | 2 | 4.65%
Benjamin Thery | 10 | 1.56% | 1 | 2.33%
Tom Herbert | 9 | 1.41% | 1 | 2.33%
Eric W. Biedermann | 7 | 1.10% | 3 | 6.98%
Bruno Prémont | 7 | 1.10% | 1 | 2.33%
Maciej Żenczykowski | 6 | 0.94% | 1 | 2.33%
Balazs Scheidler | 5 | 0.78% | 1 | 2.33%
Vincent Bernat | 4 | 0.63% | 1 | 2.33%
Krister Johansen | 4 | 0.63% | 1 | 2.33%
Hannes Frederic Sowa | 3 | 0.47% | 1 | 2.33%
Hideaki Yoshifuji / 吉藤英明 | 3 | 0.47% | 1 | 2.33%
Alexey Dobriyan | 2 | 0.31% | 1 | 2.33%
Al Viro | 2 | 0.31% | 2 | 4.65%
Daniel Lezcano | 2 | 0.31% | 1 | 2.33%
Uwe Kleine-König | 1 | 0.16% | 1 | 2.33%
Total | 639 | 100.00% | 43 | 100.00%

EXPORT_SYMBOL(inet6_bind);
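
One behaviour worth calling out in inet6_bind() above: binding to a link-local address requires a scope, so without a bound interface the call fails with -EINVAL. A user-space sketch (illustrative only; "eth0" and the address are placeholders):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 a;

	if (fd < 0)
		return 1;

	memset(&a, 0, sizeof(a));
	a.sin6_family = AF_INET6;
	a.sin6_port = htons(5000);
	inet_pton(AF_INET6, "fe80::1", &a.sin6_addr);	/* placeholder link-local address */
	a.sin6_scope_id = if_nametoindex("eth0");	/* without this, inet6_bind() returns -EINVAL */

	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		perror("bind");
	return 0;
}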
int inet6_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return -EINVAL;

	/* Free mc lists */
	ipv6_sock_mc_close(sk);

	/* Free ac lists */
	ipv6_sock_ac_close(sk);

	return inet_release(sock);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 39 | 84.78% | 2 | 50.00%
David L Stevens | 6 | 13.04% | 1 | 25.00%
Ian Morris | 1 | 2.17% | 1 | 25.00%
Total | 46 | 100.00% | 4 | 100.00%

EXPORT_SYMBOL(inet6_release);
void inet6_destroy_sock(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt;

	/* Release rx options */

	skb = xchg(&np->pktoptions, NULL);
	if (skb)
		kfree_skb(skb);

	skb = xchg(&np->rxpmtu, NULL);
	if (skb)
		kfree_skb(skb);

	/* Free flowlabels */
	fl6_free_socklist(sk);

	/* Free tx options */

	opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
	if (opt) {
		atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
		txopt_put(opt);
	}
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 60 | 49.18% | 2 | 28.57%
Eric Dumazet | 20 | 16.39% | 1 | 14.29%
Brian Haley | 18 | 14.75% | 2 | 28.57%
Eldad Zack | 12 | 9.84% | 1 | 14.29%
David S. Miller | 12 | 9.84% | 1 | 14.29%
Total | 122 | 100.00% | 7 | 100.00%

EXPORT_SYMBOL_GPL(inet6_destroy_sock);

/*
 *	This does both peername and sockname.
 */
int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
		  int *uaddr_len, int peer)
{
	struct sockaddr_in6 *sin = (struct sockaddr_in6 *)uaddr;
	struct sock *sk = sock->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);

	sin->sin6_family = AF_INET6;
	sin->sin6_flowinfo = 0;
	sin->sin6_scope_id = 0;
	if (peer) {
		if (!inet->inet_dport)
			return -ENOTCONN;
		if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
		    peer == 1)
			return -ENOTCONN;
		sin->sin6_port = inet->inet_dport;
		sin->sin6_addr = sk->sk_v6_daddr;
		if (np->sndflow)
			sin->sin6_flowinfo = np->flow_label;
	} else {
		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
			sin->sin6_addr = np->saddr;
		else
			sin->sin6_addr = sk->sk_v6_rcv_saddr;

		sin->sin6_port = inet->inet_sport;
	}
	sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr,
						 sk->sk_bound_dev_if);
	*uaddr_len = sizeof(*sin);
	return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 172 | 78.18% | 6 | 40.00%
David S. Miller | 25 | 11.36% | 2 | 13.33%
Eric Dumazet | 9 | 4.09% | 2 | 13.33%
Hannes Frederic Sowa | 7 | 3.18% | 1 | 6.67%
Arnaldo Carvalho de Melo | 3 | 1.36% | 2 | 13.33%
Alexey Dobriyan | 3 | 1.36% | 1 | 6.67%
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.45% | 1 | 6.67%
Total | 220 | 100.00% | 15 | 100.00%

EXPORT_SYMBOL(inet6_getname);
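
Both getsockname() and getpeername() end up in inet6_getname() above; the peer argument selects which half runs. A user-space sketch (illustrative, not part of this file) that lets the kernel pick a port and reads it back:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 any = { .sin6_family = AF_INET6 };	/* port 0: kernel chooses */
	struct sockaddr_in6 local;
	socklen_t len = sizeof(local);
	char buf[INET6_ADDRSTRLEN];

	if (fd < 0 || bind(fd, (struct sockaddr *)&any, sizeof(any)) < 0)
		return 1;

	if (getsockname(fd, (struct sockaddr *)&local, &len) == 0)
		printf("bound to [%s]:%u\n",
		       inet_ntop(AF_INET6, &local.sin6_addr, buf, sizeof(buf)),
		       ntohs(local.sin6_port));
	return 0;
}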
int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);

	switch (cmd) {
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);

	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

	case SIOCADDRT:
	case SIOCDELRT:

		return ipv6_route_ioctl(net, cmd, (void __user *)arg);

	case SIOCSIFADDR:
		return addrconf_add_ifaddr(net, (void __user *) arg);
	case SIOCDIFADDR:
		return addrconf_del_ifaddr(net, (void __user *) arg);
	case SIOCSIFDSTADDR:
		return addrconf_set_dstaddr(net, (void __user *) arg);
	default:
		if (!sk->sk_prot->ioctl)
			return -ENOIOCTLCMD;
		return sk->sk_prot->ioctl(sk, cmd, arg);
	}
	/*NOTREACHED*/
	return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 124 | 69.66% | 3 | 25.00%
Eric Dumazet | 17 | 9.55% | 1 | 8.33%
Daniel Lezcano | 15 | 8.43% | 2 | 16.67%
Christoph Hellwig | 6 | 3.37% | 1 | 8.33%
Andi Kleen | 5 | 2.81% | 1 | 8.33%
Al Viro | 4 | 2.25% | 1 | 8.33%
Hideaki Yoshifuji / 吉藤英明 | 3 | 1.69% | 1 | 8.33%
Arnaldo Carvalho de Melo | 3 | 1.69% | 1 | 8.33%
Linus Torvalds | 1 | 0.56% | 1 | 8.33%
Total | 178 | 100.00% | 12 | 100.00%
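
Of the commands handled in inet6_ioctl() above, SIOCGSTAMP is the one most often used directly from applications: it fetches the receive timestamp of the last packet delivered to the socket. A user-space helper sketch (illustrative; it assumes SIOCGSTAMP is visible via linux/sockios.h):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>

static void print_last_rx_stamp(int fd)
{
	struct timeval tv;

	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
		printf("last packet received at %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
	else
		perror("SIOCGSTAMP");	/* fails until a packet has been received */
}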

EXPORT_SYMBOL(inet6_ioctl);

const struct proto_ops inet6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,	/* ok		*/
	.socketpair	   = sock_no_socketpair,	/* a do nothing	*/
	.accept		   = inet_accept,		/* ok		*/
	.getname	   = inet6_getname,
	.poll		   = tcp_poll,			/* ok		*/
	.ioctl		   = inet6_ioctl,		/* must change	*/
	.listen		   = inet_listen,		/* ok		*/
	.shutdown	   = inet_shutdown,		/* ok		*/
	.setsockopt	   = sock_common_setsockopt,	/* ok		*/
	.getsockopt	   = sock_common_getsockopt,	/* ok		*/
	.sendmsg	   = inet_sendmsg,		/* ok		*/
	.recvmsg	   = inet_recvmsg,		/* ok		*/
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
	.sendmsg_locked	   = tcp_sendmsg_locked,
	.sendpage_locked   = tcp_sendpage_locked,
	.splice_read	   = tcp_splice_read,
	.read_sock	   = tcp_read_sock,
	.peek_len	   = tcp_peek_len,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

const struct proto_ops inet6_dgram_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_dgram_connect,	/* ok		*/
	.socketpair	   = sock_no_socketpair,	/* a do nothing	*/
	.accept		   = sock_no_accept,		/* a do nothing	*/
	.getname	   = inet6_getname,
	.poll		   = udp_poll,			/* ok		*/
	.ioctl		   = inet6_ioctl,		/* must change	*/
	.listen		   = sock_no_listen,		/* ok		*/
	.shutdown	   = inet_shutdown,		/* ok		*/
	.setsockopt	   = sock_common_setsockopt,	/* ok		*/
	.getsockopt	   = sock_common_getsockopt,	/* ok		*/
	.sendmsg	   = inet_sendmsg,		/* ok		*/
	.recvmsg	   = inet_recvmsg,		/* ok		*/
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
	.set_peek_off	   = sk_set_peek_off,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static const struct net_proto_family inet6_family_ops = {
	.family = PF_INET6,
	.create = inet6_create,
	.owner	= THIS_MODULE,
};
int inet6_register_protosw(struct inet_protosw *p)
{
	struct list_head *lh;
	struct inet_protosw *answer;
	struct list_head *last_perm;
	int protocol = p->protocol;
	int ret;

	spin_lock_bh(&inetsw6_lock);

	ret = -EINVAL;
	if (p->type >= SOCK_MAX)
		goto out_illegal;

	/* If we are trying to override a permanent protocol, bail. */
	answer = NULL;
	ret = -EPERM;
	last_perm = &inetsw6[p->type];
	list_for_each(lh, &inetsw6[p->type]) {
		answer = list_entry(lh, struct inet_protosw, list);

		/* Check only the non-wild match. */
		if (INET_PROTOSW_PERMANENT & answer->flags) {
			if (protocol == answer->protocol)
				break;
			last_perm = lh;
		}

		answer = NULL;
	}
	if (answer)
		goto out_permanent;

	/* Add the new entry after the last permanent entry if any, so that
	 * the new entry does not override a permanent entry when matched with
	 * a wild-card protocol. But it is allowed to override any existing
	 * non-permanent entry.  This means that when we remove this entry, the
	 * system automatically returns to the old behavior.
	 */
	list_add_rcu(&p->list, last_perm);
	ret = 0;
out:
	spin_unlock_bh(&inetsw6_lock);
	return ret;

out_permanent:
	pr_err("Attempt to override permanent protocol %d\n", protocol);
	goto out;

out_illegal:
	pr_err("Ignoring attempt to register invalid socket type %d\n",
	       p->type);
	goto out;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds | 120 | 65.22% | 1 | 14.29%
Sridhar Samudrala | 26 | 14.13% | 1 | 14.29%
Daniel Lezcano | 25 | 13.59% | 1 | 14.29%
Stephen Hemminger | 7 | 3.80% | 1 | 14.29%
Joe Perches | 4 | 2.17% | 1 | 14.29%
Patrick McHardy | 1 | 0.54% | 1 | 14.29%
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.54% | 1 | 14.29%
Total | 184 | 100.00% | 7 | 100.00%

EXPORT_SYMBOL(inet6_register_protosw);
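
A kernel-side sketch of how a transport protocol module would typically plug into inetsw6[] through the API above. Everything here is hypothetical (myproto6_prot, myproto6_ops, and protocol number 253, which is reserved for experimentation by RFC 3692); the return-value semantics follow the function body above:

#include <net/protocol.h>

extern struct proto myproto6_prot;		/* hypothetical transport proto */
extern const struct proto_ops myproto6_ops;	/* hypothetical socket ops */

static struct inet_protosw myproto6_protosw = {
	.type     = SOCK_DGRAM,
	.protocol = 253,			/* placeholder protocol number */
	.prot     = &myproto6_prot,
	.ops      = &myproto6_ops,
	.flags    = INET_PROTOSW_PERMANENT,
};

static int __init myproto6_register(void)
{
	/* -EPERM if this would shadow an existing permanent entry */
	return inet6_register_protosw(&myproto6_protosw);
}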
void inet6_unregister_protosw(struct inet_protosw *p)
{
	if (INET_PROTOSW_PERMANENT & p->flags) {
		pr_err("Attempt to unregister permanent protocol %d\n",
		       p->protocol);
	} else {
		spin_lock_bh(&inetsw6_lock);
		list_del_rcu(&p->list);
		spin_unlock_bh(&inetsw6_lock);

		synchronize_net();
	}
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Stephen Hemminger | 39 | 70.91% | 2 | 50.00%
Linus Torvalds | 14 | 25.45% | 1 | 25.00%
Joe Perches | 2 | 3.64% | 1 | 25.00%
Total | 55 | 100.00% | 4 | 100.00%

EXPORT_SYMBOL(inet6_unregister_protosw);
int inet6_sk_rebuild_header(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct dst_entry *dst; dst = __sk_dst_check(sk, np->dst_cookie); if (!dst) { struct inet_sock *inet = inet_sk(sk); struct in6_addr *final_p, final; struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = sk->sk_protocol; fl6.daddr = sk->sk_v6_daddr; fl6.saddr = np->saddr; fl6.flowlabel = np->flow_label; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.flowi6_mark = sk->sk_mark; fl6.fl6_dport = inet->inet_dport; fl6.fl6_sport = inet->inet_sport; fl6.flowi6_uid = sk->sk_uid; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); rcu_read_lock(); final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); dst = ip6_dst_lookup_flow(sk, &fl6, final_p); if (IS_ERR(dst)) { sk->sk_route_caps = 0; sk->sk_err_soft = -PTR_ERR(dst); return PTR_ERR(dst); } ip6_dst_store(sk, dst, NULL, NULL); } return 0; }

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Arnaldo Carvalho de Melo | 147 | 61.00% | 1 | 6.67%
David S. Miller | 48 | 19.92% | 3 | 20.00%
Eric Dumazet | 14 | 5.81% | 4 | 26.67%
Lorenzo Colitti | 8 | 3.32% | 1 | 6.67%
Arnaud Ebalard | 7 | 2.90% | 1 | 6.67%
Venkat Yekkirala | 7 | 2.90% | 1 | 6.67%
Brian Haley | 5 | 2.07% | 1 | 6.67%
Hideaki Yoshifuji / 吉藤英明 | 2 | 0.83% | 1 | 6.67%
Alexey Dobriyan | 2 | 0.83% | 1 | 6.67%
Ian Morris | 1 | 0.41% | 1 | 6.67%
Total | 241 | 100.00% | 15 | 100.00%

EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header);
bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
		       const struct inet6_skb_parm *opt)
{
	const struct ipv6_pinfo *np = inet6_sk(sk);

	if (np->rxopt.all) {
		if (((opt->flags & IP6SKB_HOPBYHOP) &&
		     (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
		    (ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
		     np->rxopt.bits.rxflow) ||
		    (opt->srcrt && (np->rxopt.bits.srcrt ||
		     np->rxopt.bits.osrcrt)) ||
		    ((opt->dst1 || opt->dst0) &&
		     (np->rxopt.bits.dstopts || np->rxopt.bits.odstopts)))
			return true;
	}
	return false;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Arnaldo Carvalho de Melo | 135 | 85.99% | 2 | 33.33%
Eric Dumazet | 12 | 7.64% | 2 | 33.33%
Florian Westphal | 5 | 3.18% | 1 | 16.67%
Florent Fourcot | 5 | 3.18% | 1 | 16.67%
Total | 157 | 100.00% | 6 | 100.00%
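
The np->rxopt bits tested in ipv6_opt_accepted() above are set from user space with the RFC 3542 IPV6_RECV* socket options; when set, the matching extension headers are delivered as ancillary data. A user-space sketch (illustrative only) enabling two of them:

#include <netinet/in.h>
#include <sys/socket.h>

static int enable_ext_header_delivery(int fd)
{
	int on = 1;

	if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPOPTS, &on, sizeof(on)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_IPV6, IPV6_RECVRTHDR, &on, sizeof(on));
}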

EXPORT_SYMBOL_GPL(ipv6_opt_accepted);

static struct packet_type ipv6_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.func = ipv6_rcv,
};
static int __init ipv6_packet_init(void)
{
	dev_add_pack(&ipv6_packet_type);
	return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Hideaki Yoshifuji / 吉藤英明 | 11 | 61.11% | 1 | 50.00%
Vlad Yasevich | 7 | 38.89% | 1 | 50.00%
Total | 18 | 100.00% | 2 | 100.00%


static void ipv6_packet_cleanup(void)
{
	dev_remove_pack(&ipv6_packet_type);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Hideaki Yoshifuji / 吉藤英明 | 10 | 71.43% | 1 | 50.00%
Vlad Yasevich | 4 | 28.57% | 1 | 50.00%
Total | 14 | 100.00% | 2 | 100.00%


static int __net_init ipv6_init_mibs(struct net *net) { int i; net->mib.udp_stats_in6 = alloc_percpu(struct udp_mib); if (!net->mib.udp_stats_in6) return -ENOMEM; net->mib.udplite_stats_in6 = alloc_percpu(struct udp_mib); if (!net->mib.udplite_stats_in6) goto err_udplite_mib; net->mib.ipv6_statistics = alloc_percpu(struct ipstats_mib); if (!net->mib.ipv6_statistics) goto err_ip_mib; for_each_possible_cpu(i) { struct ipstats_mib *af_inet6_stats; af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i); u64_stats_init(&af_inet6_stats->syncp); } net->mib.icmpv6_statistics = alloc_percpu(struct icmpv6_mib); if (!net->mib.icmpv6_statistics) goto err_icmp_mib; net->mib.icmpv6msg_statistics = kzalloc(sizeof(struct icmpv6msg_mib), GFP_KERNEL); if (!net->mib.icmpv6msg_statistics) goto err_icmpmsg_mib; return 0; err_icmpmsg_mib: free_percpu(net->mib.icmpv6_statistics); err_icmp_mib: free_percpu(net->mib.ipv6_statistics); err_ip_mib: free_percpu(net->mib.udplite_stats_in6); err_udplite_mib: free_percpu(net->mib.udp_stats_in6); return -ENOMEM; }

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Denis V. Lunev | 66 | 29.46% | 1 | 11.11%
Américo Wang | 44 | 19.64% | 1 | 11.11%
John Stultz | 41 | 18.30% | 1 | 11.11%
Eric Dumazet | 26 | 11.61% | 2 | 22.22%
Ravikiran G. Thirumalai | 17 | 7.59% | 1 | 11.11%
Hideaki Yoshifuji / 吉藤英明 | 14 | 6.25% | 1 | 11.11%
David L Stevens | 14 | 6.25% | 1 | 11.11%
Pavel Emelyanov | 2 | 0.89% | 1 | 11.11%
Total | 224 | 100.00% | 9 | 100.00%
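
The per-namespace MIBs allocated in ipv6_init_mibs() above (ipv6_statistics, icmpv6_statistics, udp_stats_in6, udplite_stats_in6) are what surface as the Ip6*, Icmp6*, Udp6* and UdpLite6* counters in /proc/net/snmp6. A trivial user-space sketch (illustrative) that dumps them:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/snmp6", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}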


static void ipv6_cleanup_mibs(struct net *net)
{
	free_percpu(net->mib.udp_stats_in6);
	free_percpu(net->mib.udplite_stats_in6);
	free_percpu(net->mib.ipv6_statistics);
	free_percpu(net->mib.icmpv6_statistics);
	kfree(net->mib.icmpv6msg_statistics);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Denis V. Lunev | 51 | 91.07% | 4 | 66.67%
Américo Wang | 4 | 7.14% | 1 | 16.67%
Eric Dumazet | 1 | 1.79% | 1 | 16.67%
Total | 56 | 100.00% | 6 | 100.00%


static int __net_init inet6_net_init(struct net *net) { int err = 0; net->ipv6.sysctl.bindv6only = 0; net->ipv6.sysctl.icmpv6_time = 1*HZ; net->ipv6.sysctl.flowlabel_consistency = 1; net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS; net->ipv6.sysctl.idgen_retries = 3; net->ipv6.sysctl.idgen_delay = 1 * HZ; net->ipv6.sysctl.flowlabel_state_ranges = 0; atomic_set(&net->ipv6.fib6_sernum, 1); err = ipv6_init_mibs(net); if (err) return err; #ifdef CONFIG_PROC_FS err = udp6_proc_init(net); if (err) goto out; err = tcp6_proc_init(net); if (err) goto proc_tcp6_fail; err = ac6_proc_init(net); if (err) goto proc_ac6_fail; #endif return err; #ifdef CONFIG_PROC_FS proc_ac6_fail: tcp6_proc_exit(net); proc_tcp6_fail: udp6_proc_exit(net); out: ipv6_cleanup_mibs(net); return err; #endif }

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Daniel Lezcano | 108 | 55.10% | 7 | 41.18%
Hannes Frederic Sowa | 24 | 12.24% | 2 | 11.76%
Denis V. Lunev | 23 | 11.73% | 1 | 5.88%
Tom Herbert | 20 | 10.20% | 4 | 23.53%
Fan Du | 10 | 5.10% | 1 | 5.88%
Florent Fourcot | 10 | 5.10% | 1 | 5.88%
Alexey Dobriyan | 1 | 0.51% | 1 | 5.88%
Total | 196 | 100.00% | 17 | 100.00%
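
Note that the bindv6only value initialised in inet6_net_init() above (the net.ipv6.bindv6only sysctl) only sets the per-namespace default copied into sk_ipv6only at socket creation; a socket can always override it with IPV6_V6ONLY. A user-space sketch (illustrative only):

#include <netinet/in.h>
#include <sys/socket.h>

static int make_v6only(int fd)
{
	int on = 1;

	/* restrict this socket to IPv6 traffic regardless of bindv6only */
	return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));
}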


static void __net_exit inet6_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	udp6_proc_exit(net);
	tcp6_proc_exit(net);
	ac6_proc_exit(net);
#endif
	ipv6_cleanup_mibs(net);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Daniel Lezcano | 31 | 83.78% | 4 | 66.67%
Denis V. Lunev | 5 | 13.51% | 1 | 16.67%
Alexey Dobriyan | 1 | 2.70% | 1 | 16.67%
Total | 37 | 100.00% | 6 | 100.00%

static struct pernet_operations inet6_net_ops = {
	.init = inet6_net_init,
	.exit = inet6_net_exit,
};

static const struct ipv6_stub ipv6_stub_impl = {
	.ipv6_sock_mc_join = ipv6_sock_mc_join,
	.ipv6_sock_mc_drop = ipv6_sock_mc_drop,
	.ipv6_dst_lookup = ip6_dst_lookup,
	.udpv6_encap_enable = udpv6_encap_enable,
	.ndisc_send_na = ndisc_send_na,
	.nd_tbl = &nd_tbl,
};
static int __init inet6_init(void) { struct list_head *r; int err = 0; sock_skb_cb_check_size(sizeof(struct inet6_skb_parm)); /* Register the socket-side information for inet6_create. */ for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r) INIT_LIST_HEAD(r); if (disable_ipv6_mod) { pr_info("Loaded, but administratively disabled, reboot required to enable\n"); goto out; } err = proto_register(&tcpv6_prot, 1); if (err) goto out; err = proto_register(&udpv6_prot, 1); if (err) goto out_unregister_tcp_proto; err = proto_register(&udplitev6_prot, 1); if (err) goto out_unregister_udp_proto; err = proto_register(&rawv6_prot, 1); if (err) goto out_unregister_udplite_proto; err = proto_register(&pingv6_prot, 1); if (err) goto out_unregister_ping_proto; /* We MUST register RAW sockets before we create the ICMP6, * IGMP6, or NDISC control sockets. */ err = rawv6_init(); if (err) goto out_unregister_raw_proto; /* Register the family here so that the init calls below will * be able to create sockets. (?? is this dangerous ??) */ err = sock_register(&inet6_family_ops); if (err) goto out_sock_register_fail; /* * ipngwg API draft makes clear that the correct semantics * for TCP and UDP is to consider one TCP and UDP instance * in a host available by both INET and INET6 APIs and * able to communicate via both network protocols. */ err = register_pernet_subsys(&inet6_net_ops); if (err) goto register_pernet_fail; err = ip6_mr_init(); if (err) goto ipmr_fail; err = icmpv6_init(); if (err) goto icmp_fail; err = ndisc_init(); if (err) goto ndisc_fail; err = igmp6_init(); if (err) goto igmp_fail; err = ipv6_netfilter_init(); if (err) goto netfilter_fail; /* Create /proc/foo6 entries. */ #ifdef CONFIG_PROC_FS err = -ENOMEM; if (raw6_proc_init()) goto proc_raw6_fail; if (udplite6_proc_init()) goto proc_udplite6_fail; if (ipv6_misc_proc_init()) goto proc_misc6_fail; if (if6_proc_init()) goto proc_if6_fail; #endif err = ip6_route_init(); if (err) goto ip6_route_fail; err = ndisc_late_init(); if (err) goto ndisc_late_fail; err = ip6_flowlabel_init(); if (err) goto ip6_flowlabel_fail; err = addrconf_init(); if (err) goto addrconf_fail; /* Init v6 extension headers. */ err = ipv6_exthdrs_init(); if (err) goto ipv6_exthdrs_fail; err = ipv6_frag_init(); if (err) goto ipv6_frag_fail; /* Init v6 transport protocols. 
*/ err = udpv6_init(); if (err) goto udpv6_fail; err = udplitev6_init(); if (err) goto udplitev6_fail; err = udpv6_offload_init(); if (err) goto udpv6_offload_fail; err = tcpv6_init(); if (err) goto tcpv6_fail; err = ipv6_packet_init(); if (err) goto ipv6_packet_fail; err = pingv6_init(); if (err) goto pingv6_fail; err = calipso_init(); if (err) goto calipso_fail; err = seg6_init(); if (err) goto seg6_fail; err = igmp6_late_init(); if (err) goto igmp6_late_err; #ifdef CONFIG_SYSCTL err = ipv6_sysctl_register(); if (err) goto sysctl_fail; #endif /* ensure that ipv6 stubs are visible only after ipv6 is ready */ wmb(); ipv6_stub = &ipv6_stub_impl; out: return err; #ifdef CONFIG_SYSCTL sysctl_fail: igmp6_late_cleanup(); #endif igmp6_late_err: seg6_exit(); seg6_fail: calipso_exit(); calipso_fail: pingv6_exit(); pingv6_fail: ipv6_packet_cleanup(); ipv6_packet_fail: tcpv6_exit(); tcpv6_fail: udpv6_offload_exit(); udpv6_offload_fail: udplitev6_exit(); udplitev6_fail: udpv6_exit(); udpv6_fail: ipv6_frag_exit(); ipv6_frag_fail: ipv6_exthdrs_exit(); ipv6_exthdrs_fail: addrconf_cleanup(); addrconf_fail: ip6_flowlabel_cleanup(); ip6_flowlabel_fail: ndisc_late_cleanup(); ndisc_late_fail: ip6_route_cleanup(); ip6_route_fail: #ifdef CONFIG_PROC_FS if6_proc_exit(); proc_if6_fail: ipv6_misc_proc_exit(); proc_misc6_fail: udplite6_proc_exit(); proc_udplite6_fail: raw6_proc_exit(); proc_raw6_fail: #endif ipv6_netfilter_fini(); netfilter_fail: igmp6_cleanup(); igmp_fail: ndisc_cleanup(); ndisc_fail: ip6_mr_cleanup(); icmp_fail: unregister_pernet_subsys(&inet6_net_ops); ipmr_fail: icmpv6_cleanup(); register_pernet_fail: sock_unregister(PF_INET6); rtnl_unregister_all(PF_INET6); out_sock_register_fail: rawv6_exit(); out_unregister_ping_proto: proto_unregister(&pingv6_prot); out_unregister_raw_proto: proto_unregister(&rawv6_prot); out_unregister_udplite_proto: proto_unregister(&udplitev6_prot); out_unregister_udp_proto: proto_unregister(&udpv6_prot); out_unregister_tcp_proto: proto_unregister(&tcpv6_prot); goto out; }

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Daniel Lezcano | 151 | 21.18% | 6 | 12.00%
Linus Torvalds (pre-git) | 110 | 15.43% | 8 | 16.00%
Arnaldo Carvalho de Melo | 65 | 9.12% | 2 | 4.00%
Gerrit Renker | 42 | 5.89% | 1 | 2.00%
Brian Haley | 41 | 5.75% | 2 | 4.00%
Lorenzo Colitti | 41 | 5.75% | 1 | 2.00%
David S. Miller | 36 | 5.05% | 3 | 6.00%
Benjamin Thery | 26 | 3.65% | 1 | 2.00%
Herbert Xu | 25 | 3.51% | 2 | 4.00%
Vlad Yasevich | 19 | 2.66% | 2 | 4.00%
Hideaki Yoshifuji / 吉藤英明 | 17 | 2.38% | 5 | 10.00%
Harald Welte | 17 | 2.38% | 1 | 2.00%
David Lebrun | 17 | 2.38% | 1 | 2.00%
Tom Herbert | 17 | 2.38% | 1 | 2.00%
Huw Davies | 17 | 2.38% | 1 | 2.00%
Michal Kubeček | 17 | 2.38% | 1 | 2.00%
Wang Chen | 11 | 1.54% | 1 | 2.00%
Américo Wang | 9 | 1.26% | 1 | 2.00%
Paolo Abeni | 9 | 1.26% | 1 | 2.00%
Linus Torvalds | 6 | 0.84% | 1 | 2.00%
Randy Dunlap | 5 | 0.70% | 1 | 2.00%
Mitsuru Kanda | 5 | 0.70% | 1 | 2.00%
Denis V. Lunev | 3 | 0.42% | 1 | 2.00%
David L Stevens | 2 | 0.28% | 1 | 2.00%
Joe Perches | 2 | 0.28% | 1 | 2.00%
Eyal Birger | 1 | 0.14% | 1 | 2.00%
Ravikiran G. Thirumalai | 1 | 0.14% | 1 | 2.00%
Lucas De Marchi | 1 | 0.14% | 1 | 2.00%
Total | 713 | 100.00% | 50 | 100.00%

module_init(inet6_init);

MODULE_ALIAS_NETPROTO(PF_INET6);

Overall Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 1223 | 27.24% | 29 | 12.78%
Arnaldo Carvalho de Melo | 546 | 12.16% | 15 | 6.61%
Daniel Lezcano | 354 | 7.88% | 16 | 7.05%
Linus Torvalds | 236 | 5.26% | 4 | 1.76%
David S. Miller | 227 | 5.06% | 10 | 4.41%
Hideaki Yoshifuji / 吉藤英明 | 180 | 4.01% | 15 | 6.61%
Denis V. Lunev | 148 | 3.30% | 5 | 2.20%
Eric Dumazet | 146 | 3.25% | 13 | 5.73%
Brian Haley | 142 | 3.16% | 5 | 2.20%
Ville Nuorvala | 140 | 3.12% | 3 | 1.32%
Américo Wang | 99 | 2.20% | 5 | 2.20%
Rusty Russell | 77 | 1.71% | 2 | 0.88%
Tom Herbert | 67 | 1.49% | 7 | 3.08%
Vladislav Yasevich | 65 | 1.45% | 3 | 1.32%
Stephen Hemminger | 57 | 1.27% | 5 | 2.20%
Hannes Frederic Sowa | 52 | 1.16% | 5 | 2.20%
Lorenzo Colitti | 52 | 1.16% | 2 | 0.88%
Gerrit Renker | 45 | 1.00% | 1 | 0.44%
David Ahern | 45 | 1.00% | 2 | 0.88%
Vlad Yasevich | 43 | 0.96% | 3 | 1.32%
John Stultz | 41 | 0.91% | 1 | 0.44%
Benjamin Thery | 36 | 0.80% | 2 | 0.88%
Sridhar Samudrala | 26 | 0.58% | 1 | 0.44%
Herbert Xu | 25 | 0.56% | 2 | 0.88%
David L Stevens | 22 | 0.49% | 2 | 0.88%
Eric W. Biedermann | 21 | 0.47% | 6 | 2.64%
Huw Davies | 20 | 0.45% | 1 | 0.44%
David Lebrun | 20 | 0.45% | 1 | 0.44%
Harald Welte | 20 | 0.45% | 1 | 0.44%
Ravikiran G. Thirumalai | 18 | 0.40% | 1 | 0.44%
Michal Kubeček | 17 | 0.38% | 1 | 0.44%
Craig Gallek | 16 | 0.36% | 1 | 0.44%
Joe Perches | 15 | 0.33% | 1 | 0.44%
Florent Fourcot | 15 | 0.33% | 2 | 0.88%
Jakub Sitnicki | 13 | 0.29% | 2 | 0.88%
Marcus Meissner | 12 | 0.27% | 2 | 0.88%
Eldad Zack | 12 | 0.27% | 1 | 0.44%
Eric Paris | 11 | 0.24% | 3 | 1.32%
Wang Chen | 11 | 0.24% | 1 | 0.44%
John Fastabend | 10 | 0.22% | 1 | 0.44%
Fan Du | 10 | 0.22% | 1 | 0.44%
Paolo Abeni | 9 | 0.20% | 1 | 0.44%
Alexey Dobriyan | 9 | 0.20% | 3 | 1.32%
Christoph Hellwig | 8 | 0.18% | 2 | 0.88%
Randy Dunlap | 8 | 0.18% | 2 | 0.88%
Paul E. McKenney | 7 | 0.16% | 1 | 0.44%
Al Viro | 7 | 0.16% | 3 | 1.32%
Arnaud Ebalard | 7 | 0.16% | 1 | 0.44%
Bruno Prémont | 7 | 0.16% | 1 | 0.44%
Venkat Yekkirala | 7 | 0.16% | 1 | 0.44%
Jiri Benc | 6 | 0.13% | 1 | 0.44%
Maciej Żenczykowski | 6 | 0.13% | 1 | 0.44%
Pavel Emelyanov | 6 | 0.13% | 4 | 1.76%
Jens Axboe | 5 | 0.11% | 1 | 0.44%
Ian Morris | 5 | 0.11% | 2 | 0.88%
Andi Kleen | 5 | 0.11% | 1 | 0.44%
Mitsuru Kanda | 5 | 0.11% | 1 | 0.44%
samanthakumar | 5 | 0.11% | 1 | 0.44%
Florian Westphal | 5 | 0.11% | 1 | 0.44%
Balazs Scheidler | 5 | 0.11% | 1 | 0.44%
Thomas Gleixner | 4 | 0.09% | 1 | 0.44%
Krister Johansen | 4 | 0.09% | 1 | 0.44%
Paul Moore | 4 | 0.09% | 2 | 0.88%
Vincent Bernat | 4 | 0.09% | 1 | 0.44%
Changli Gao | 4 | 0.09% | 1 | 0.44%
KOVACS Krisztian | 3 | 0.07% | 1 | 0.44%
Tejun Heo | 3 | 0.07% | 1 | 0.44%
Andrew Morton | 2 | 0.04% | 1 | 0.44%
Ilpo Järvinen | 1 | 0.02% | 1 | 0.44%
Lucas De Marchi | 1 | 0.02% | 1 | 0.44%
Uwe Kleine-König | 1 | 0.02% | 1 | 0.44%
Eyal Birger | 1 | 0.02% | 1 | 0.44%
Patrick McHardy | 1 | 0.02% | 1 | 0.44%
Total | 4490 | 100.00% | 227 | 100.00%