Release 4.14 net/ipv6/tcp_ipv6.c
/*
* TCP over IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on:
* linux/net/ipv4/tcp.c
* linux/net/ipv4/tcp_input.c
* linux/net/ipv4/tcp_output.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI Hideaki @USAGI and Alexey Kuznetsov: Support the
* IPV6_V6ONLY socket option, which allows both IPv4 and IPv6
* sockets to bind a single port at the same time.
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
const struct in6_addr *addr)
{
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 21 | 91.30% | 1 | 33.33% |
Eric Dumazet | 2 | 8.70% | 2 | 66.67% |
Total | 23 | 100.00% | 3 | 100.00% |
#endif
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
if (dst && dst_hold_safe(dst)) {
const struct rt6_info *rt = (const struct rt6_info *)dst;
sk->sk_rx_dst = dst;
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Neal Cardwell | 67 | 82.72% | 1 | 25.00% |
Eric Dumazet | 11 | 13.58% | 2 | 50.00% |
Martin KaFai Lau | 3 | 3.70% | 1 | 25.00% |
Total | 81 | 100.00% | 4 | 100.00% |
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32,
tcp_hdr(skb)->dest,
tcp_hdr(skb)->source);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 29 | 60.42% | 3 | 37.50% |
Arnaldo Carvalho de Melo | 12 | 25.00% | 2 | 25.00% |
Eric Dumazet | 6 | 12.50% | 2 | 25.00% |
Florian Westphal | 1 | 2.08% | 1 | 12.50% |
Total | 48 | 100.00% | 8 | 100.00% |
static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
ipv6_hdr(skb)->saddr.s6_addr32);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 38 | 90.48% | 2 | 40.00% |
Linus Torvalds (pre-git) | 3 | 7.14% | 2 | 40.00% |
Florian Westphal | 1 | 2.38% | 1 | 20.00% |
Total | 42 | 100.00% | 5 | 100.00% |
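Both helpers above delegate to the kernel's keyed-hash secure-sequence code. As a rough illustration of the RFC 6528 scheme they implement (ISN = F(4-tuple, secret) + clock), here is a hedged userspace sketch, NOT the kernel's exact code: toy_hash() is a hypothetical FNV-1a stand-in for the kernel's keyed siphash, and the 64 ns tick is the RFC's suggested rate, not necessarily the kernel's exact constant.

#include <stdint.h>
#include <string.h>

static uint32_t toy_hash(const uint8_t *data, size_t len, uint64_t secret)
{
	uint64_t h = 14695981039346656037ULL ^ secret;	/* FNV-1a offset basis */
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= data[i];
		h *= 1099511628211ULL;			/* FNV-1a prime */
	}
	return (uint32_t)(h ^ (h >> 32));
}

static uint32_t toy_tcpv6_init_seq(const uint8_t saddr[16],
				   const uint8_t daddr[16],
				   uint16_t sport, uint16_t dport,
				   uint64_t boot_secret, uint64_t now_ns)
{
	uint8_t tuple[36];

	/* Hash the full 4-tuple with a boot-time secret so ISNs are
	 * unpredictable across connections. */
	memcpy(tuple, saddr, 16);
	memcpy(tuple + 16, daddr, 16);
	tuple[32] = sport >> 8;  tuple[33] = sport & 0xff;
	tuple[34] = dport >> 8;  tuple[35] = dport & 0xff;

	/* Clock term keeps the sequence space advancing when a
	 * 4-tuple is reused (RFC 6528: ~one tick per 64 ns). */
	return toy_hash(tuple, sizeof(tuple), boot_secret) +
	       (uint32_t)(now_ns >> 6);
}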
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct in6_addr *saddr = NULL, *final_p, final;
struct ipv6_txoptions *opt;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_type;
int err;
struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (usin->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
memset(&fl6, 0, sizeof(fl6));
if (np->sndflow) {
fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
IP6_ECN_flow_init(fl6.flowlabel);
if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
struct ip6_flowlabel *flowlabel;
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
fl6_sock_release(flowlabel);
}
}
/*
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr)) {
if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
&usin->sin6_addr);
else
usin->sin6_addr = in6addr_loopback;
}
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type & IPV6_ADDR_MULTICAST)
return -ENETUNREACH;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
usin->sin6_scope_id) {
/* If interface is set while binding, indices
* must coincide.
*/
if (sk->sk_bound_dev_if &&
sk->sk_bound_dev_if != usin->sin6_scope_id)
return -EINVAL;
sk->sk_bound_dev_if = usin->sin6_scope_id;
}
/* Connecting to a link-local address requires an interface */
if (!sk->sk_bound_dev_if)
return -EINVAL;
}
if (tp->rx_opt.ts_recent_stamp &&
!ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
tp->write_seq = 0;
}
sk->sk_v6_daddr = usin->sin6_addr;
np->flow_label = fl6.flowlabel;
/*
* TCP over IPv4
*/
if (addr_type & IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
sin.sin_family = AF_INET;
sin.sin_port = usin->sin6_port;
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
icsk->icsk_af_ops = &ipv6_mapped;
sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
icsk->icsk_ext_hdr_len = exthdrlen;
icsk->icsk_af_ops = &ipv6_specific;
sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
tp->af_specific = &tcp_sock_ipv6_specific;
#endif
goto failure;
}
np->saddr = sk->sk_v6_rcv_saddr;
return err;
}
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
saddr = &sk->sk_v6_rcv_saddr;
fl6.flowi6_proto = IPPROTO_TCP;
fl6.daddr = sk->sk_v6_daddr;
fl6.saddr = saddr ? *saddr : np->saddr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_dport = usin->sin6_port;
fl6.fl6_sport = inet->inet_sport;
fl6.flowi6_uid = sk->sk_uid;
opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
final_p = fl6_update_dst(&fl6, opt, &final);
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto failure;
}
if (!saddr) {
saddr = &fl6.saddr;
sk->sk_v6_rcv_saddr = *saddr;
}
/* set the source address */
np->saddr = *saddr;
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
sk->sk_gso_type = SKB_GSO_TCPV6;
ip6_dst_store(sk, dst, NULL, NULL);
icsk->icsk_ext_hdr_len = 0;
if (opt)
icsk->icsk_ext_hdr_len = opt->opt_flen +
opt->opt_nflen;
tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
inet->inet_dport = usin->sin6_port;
tcp_set_state(sk, TCP_SYN_SENT);
err = inet6_hash_connect(tcp_death_row, sk);
if (err)
goto late_failure;
sk_set_txhash(sk);
if (likely(!tp->repair)) {
if (!tp->write_seq)
tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32,
inet->inet_sport,
inet->inet_dport);
tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
np->saddr.s6_addr32,
sk->sk_v6_daddr.s6_addr32);
}
if (tcp_fastopen_defer_connect(sk, &err))
return err;
if (err)
goto late_failure;
err = tcp_connect(sk);
if (err)
goto late_failure;
return 0;
late_failure:
tcp_set_state(sk, TCP_CLOSE);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 524 | 53.14% | 14 | 21.88% |
David S. Miller | 89 | 9.03% | 7 | 10.94% |
Eric Dumazet | 73 | 7.40% | 7 | 10.94% |
Arnaldo Carvalho de Melo | 44 | 4.46% | 7 | 10.94% |
Hideaki Yoshifuji / 吉藤英明 | 40 | 4.06% | 5 | 7.81% |
Jonathan T. Leighton | 28 | 2.84% | 1 | 1.56% |
Alexey Kuznetsov | 27 | 2.74% | 1 | 1.56% |
Linus Torvalds | 24 | 2.43% | 3 | 4.69% |
Kazunori Miyazawa | 21 | 2.13% | 1 | 1.56% |
Wei Wang | 18 | 1.83% | 1 | 1.56% |
Haishuang Yan | 15 | 1.52% | 1 | 1.56% |
Ville Nuorvala | 14 | 1.42% | 2 | 3.12% |
Alexey Dobriyan | 13 | 1.32% | 2 | 3.12% |
Alexey Kodanev | 8 | 0.81% | 1 | 1.56% |
Lorenzo Colitti | 8 | 0.81% | 1 | 1.56% |
Andrey Vagin | 7 | 0.71% | 1 | 1.56% |
Venkat Yekkirala | 7 | 0.71% | 1 | 1.56% |
Brian Haley | 6 | 0.61% | 2 | 3.12% |
Arnaud Ebalard | 6 | 0.61% | 1 | 1.56% |
Herbert Xu | 6 | 0.61% | 1 | 1.56% |
Sathya Perla | 4 | 0.41% | 1 | 1.56% |
Ian Morris | 2 | 0.20% | 1 | 1.56% |
Tom Herbert | 1 | 0.10% | 1 | 1.56% |
Hannes Frederic Sowa | 1 | 0.10% | 1 | 1.56% |
Total | 986 | 100.00% | 64 | 100.00% |
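For reference, the userspace view of this function is an ordinary connect() on an AF_INET6 TCP socket. A minimal sketch (the address and port are illustrative): a struct shorter than SIN6_LEN_RFC2133 yields EINVAL, a multicast destination ENETUNREACH, and a link-local destination without a scope id EINVAL, matching the checks above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	struct sockaddr_in6 dst;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	dst.sin6_port = htons(80);
	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);
	/* For fe80::/10 destinations, tcp_v6_connect() requires an
	 * interface: dst.sin6_scope_id = if_nametoindex("eth0"); */

	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");

	close(fd);
	return 0;
}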
static void tcp_v6_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
return;
dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
if (!dst)
return;
if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
tcp_sync_mss(sk, dst_mtu(dst));
tcp_simple_retransmit(sk);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 84 | 100.00% | 1 | 100.00% |
Total | 84 | 100.00% | 1 | 100.00% |
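tcp_v6_err() below invokes tcp_v6_mtu_reduced() only when ip6_sk_accept_pmtu() allows it, which depends on the socket's IPV6_MTU_DISCOVER mode. A hedged userspace sketch of opting in (where these constants are declared can vary by libc; on Linux glibc they come from bits/in.h):

#include <netinet/in.h>
#include <sys/socket.h>

/* Hedged sketch: request full path-MTU discovery so Packet Too Big
 * messages propagate into tcp_v6_mtu_reduced(). With
 * IPV6_PMTUDISC_INTERFACE or IPV6_PMTUDISC_OMIT, ip6_sk_accept_pmtu()
 * would reject them instead. */
static int enable_pmtu_discovery(int fd)
{
	int mode = IPV6_PMTUDISC_DO;

	return setsockopt(fd, IPPROTO_IPV6, IPV6_MTU_DISCOVER,
			  &mode, sizeof(mode));
}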
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
struct net *net = dev_net(skb->dev);
struct request_sock *fastopen;
struct ipv6_pinfo *np;
struct tcp_sock *tp;
__u32 seq, snd_una;
struct sock *sk;
bool fatal;
int err;
sk = __inet6_lookup_established(net, &tcp_hashinfo,
&hdr->daddr, th->dest,
&hdr->saddr, ntohs(th->source),
skb->dev->ifindex, inet6_sdif(skb));
if (!sk) {
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
seq = ntohl(th->seq);
fatal = icmpv6_err_convert(type, code, &err);
if (sk->sk_state == TCP_NEW_SYN_RECV)
return tcp_req_err(sk, seq, fatal);
bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
goto out;
if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto out;
}
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
fastopen = tp->fastopen_rsk;
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
np = inet6_sk(sk);
if (type == NDISC_REDIRECT) {
if (!sock_owned_by_user(sk)) {
struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
if (dst)
dst->ops->redirect(dst, sk, skb);
}
goto out;
}
if (type == ICMPV6_PKT_TOOBIG) {
/* We are not interested in TCP_LISTEN and open_requests
* (SYN-ACKs sent out by Linux are always < 576 bytes, so
* they should go through unfragmented).
*/
if (sk->sk_state == TCP_LISTEN)
goto out;
if (!ip6_sk_accept_pmtu(sk))
goto out;
tp->mtu_info = ntohl(info);
if (!sock_owned_by_user(sk))
tcp_v6_mtu_reduced(sk);
else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
&sk->sk_tsq_flags))
sock_hold(sk);
goto out;
}
/* Might be for a request_sock */
switch (sk->sk_state) {
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
* already accepted it is treated as a connected one below.
*/
if (fastopen && !fastopen->sk)
break;
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
tcp_done(sk);
} else
sk->sk_err_soft = err;
goto out;
}
if (!sock_owned_by_user(sk) && np->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else
sk->sk_err_soft = err;
out:
bh_unlock_sock(sk);
sock_put(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 251 | 42.11% | 10 | 19.61% |
Eric Dumazet | 99 | 16.61% | 8 | 15.69% |
David S. Miller | 49 | 8.22% | 5 | 9.80% |
Yuchung Cheng | 37 | 6.21% | 1 | 1.96% |
Stephen Hemminger | 27 | 4.53% | 1 | 1.96% |
Linus Torvalds | 23 | 3.86% | 1 | 1.96% |
Arnaldo Carvalho de Melo | 19 | 3.19% | 5 | 9.80% |
Pavel Emelyanov | 12 | 2.01% | 3 | 5.88% |
Hannes Frederic Sowa | 11 | 1.85% | 1 | 1.96% |
Benjamin LaHaise | 11 | 1.85% | 1 | 1.96% |
Julian Anastasov | 10 | 1.68% | 1 | 1.96% |
Jon Maxwell | 10 | 1.68% | 1 | 1.96% |
Randy Dunlap | 7 | 1.17% | 1 | 1.96% |
Alexey Kuznetsov | 6 | 1.01% | 1 | 1.96% |
Hideaki Yoshifuji / 吉藤英明 | 6 | 1.01% | 2 | 3.92% |
David Ahern | 5 | 0.84% | 1 | 1.96% |
Christoph Paasch | 3 | 0.50% | 1 | 1.96% |
Brian Haley | 2 | 0.34% | 1 | 1.96% |
Denis V. Lunev | 2 | 0.34% | 1 | 1.96% |
Kazunori Miyazawa | 2 | 0.34% | 1 | 1.96% |
Venkat Yekkirala | 1 | 0.17% | 1 | 1.96% |
Al Viro | 1 | 0.17% | 1 | 1.96% |
Ian Morris | 1 | 0.17% | 1 | 1.96% |
Stephen Rothwell | 1 | 0.17% | 1 | 1.96% |
Total | 596 | 100.00% | 51 | 100.00% |
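From userspace, the sk->sk_err set here surfaces as a failed connect()/recv() or via SO_ERROR; with the IPV6_RECVERR socket option (np->recverr above), asynchronous errors are reported rather than only recorded in sk_err_soft. A small sketch of draining the pending error:

#include <errno.h>
#include <sys/socket.h>

/* Sketch: fetch and clear the pending socket error that tcp_v6_err()
 * stored in sk->sk_err (e.g. ECONNREFUSED, translated from an ICMPv6
 * "port unreachable" by icmpv6_err_convert()). */
static int pending_sock_error(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return -errno;
	return err;
}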
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type)
{
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6_txoptions *opt;
struct flowi6 *fl6 = &fl->u.ip6;
struct sk_buff *skb;
int err = -ENOMEM;
/* First, grab a route. */
if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
IPPROTO_TCP)) == NULL)
goto done;
skb = tcp_make_synack(sk, dst, req, foc, synack_type);
if (skb) {
__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
&ireq->ir_v6_rmt_addr);
fl6->daddr = ireq->ir_v6_rmt_addr;
if (np->repflow && ireq->pktopts)
fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
rcu_read_lock();
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
done:
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 75 | 31.65% | 2 | 7.41% |
Eric Dumazet | 28 | 11.81% | 7 | 25.93% |
Huw Davies | 25 | 10.55% | 1 | 3.70% |
Florent Fourcot | 24 | 10.13% | 1 | 3.70% |
Neal Cardwell | 24 | 10.13% | 2 | 7.41% |
David S. Miller | 16 | 6.75% | 3 | 11.11% |
Octavian Purdila | 14 | 5.91% | 1 | 3.70% |
Arnaldo Carvalho de Melo | 9 | 3.80% | 3 | 11.11% |
Daniel Lee | 5 | 2.11% | 1 | 3.70% |
Kazunori Miyazawa | 5 | 2.11% | 1 | 3.70% |
Pablo Neira Ayuso | 4 | 1.69% | 1 | 3.70% |
Gerrit Renker | 4 | 1.69% | 1 | 3.70% |
William Allen Simpson | 2 | 0.84% | 1 | 3.70% |
Herbert Xu | 1 | 0.42% | 1 | 3.70% |
Alexey Dobriyan | 1 | 0.42% | 1 | 3.70% |
Total | 237 | 100.00% | 27 | 100.00% |
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
kfree(inet_rsk(req)->ipv6_opt);
kfree_skb(inet_rsk(req)->pktopts);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 16 | 51.61% | 1 | 20.00% |
Huw Davies | 10 | 32.26% | 1 | 20.00% |
Arnaldo Carvalho de Melo | 4 | 12.90% | 2 | 40.00% |
Eric Dumazet | 1 | 3.23% | 1 | 20.00% |
Total | 31 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
const struct in6_addr *addr)
{
return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 13 | 37.14% | 2 | 33.33% |
Eric Dumazet | 11 | 31.43% | 3 | 50.00% |
Hideaki Yoshifuji / 吉藤英明 | 11 | 31.43% | 1 | 16.67% |
Total | 35 | 100.00% | 6 | 100.00% |
static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
const struct sock *addr_sk)
{
return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 16 | 51.61% | 1 | 20.00% |
Eric Dumazet | 15 | 48.39% | 4 | 80.00% |
Total | 31 | 100.00% | 5 | 100.00% |
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
char __user *optval, int optlen)
{
struct tcp_md5sig cmd;
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
u8 prefixlen;
if (optlen < sizeof(cmd))
return -EINVAL;
if (copy_from_user(&cmd, optval, sizeof(cmd)))
return -EFAULT;
if (sin6->sin6_family != AF_INET6)
return -EINVAL;
if (optname == TCP_MD5SIG_EXT &&
cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
prefixlen = cmd.tcpm_prefixlen;
if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
prefixlen > 32))
return -EINVAL;
} else {
prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
}
if (!cmd.tcpm_keylen) {
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
AF_INET, prefixlen);
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
AF_INET6, prefixlen);
}
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
if (ipv6_addr_v4mapped(&sin6->sin6_addr))
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
AF_INET, prefixlen, cmd.tcpm_key,
cmd.tcpm_keylen, GFP_KERNEL);
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
AF_INET6, prefixlen, cmd.tcpm_key,
cmd.tcpm_keylen, GFP_KERNEL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 179 | 59.67% | 1 | 20.00% |
Ivan Delalande | 75 | 25.00% | 2 | 40.00% |
Eric Dumazet | 44 | 14.67% | 1 | 20.00% |
Brian Haley | 2 | 0.67% | 1 | 20.00% |
Total | 300 | 100.00% | 5 | 100.00% |
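The userspace counterpart of this parser is setsockopt(TCP_MD5SIG), or TCP_MD5SIG_EXT with TCP_MD5SIG_FLAG_PREFIX for the prefixlen branch above. A hedged sketch: the peer address and key are illustrative, and struct tcp_md5sig is the uapi definition from <linux/tcp.h> (mixing it with <netinet/tcp.h> can clash, so only the former is included here).

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/tcp.h>

/* Sketch: install an RFC 2385 MD5 key for a single IPv6 peer. */
static int add_md5_key(int fd, const char *peer, const char *key)
{
	struct tcp_md5sig md5;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&md5.tcpm_addr;
	size_t keylen = strlen(key);

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;

	memset(&md5, 0, sizeof(md5));
	sin6->sin6_family = AF_INET6;
	if (inet_pton(AF_INET6, peer, &sin6->sin6_addr) != 1)
		return -1;
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	/* tcpm_keylen == 0 would instead delete the key, as in the
	 * !cmd.tcpm_keylen branch of the parser above. */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}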
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
const struct in6_addr *daddr,
const struct in6_addr *saddr,
const struct tcphdr *th, int nbytes)
{
struct tcp6_pseudohdr *bp;
struct scatterlist sg;
struct tcphdr *_th;
bp = hp->scratch;
/* 1. TCP pseudo-header (RFC2460) */
bp->saddr = *saddr;
bp->daddr = *daddr;
bp->protocol = cpu_to_be32(IPPROTO_TCP);
bp->len = cpu_to_be32(nbytes);
_th = (struct tcphdr *)(bp + 1);
memcpy(_th, th, sizeof(*th));
_th->check = 0;
sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
ahash_request_set_crypt(hp->md5_req, &sg, NULL,
sizeof(*bp) + sizeof(*th));
return crypto_ahash_update(hp->md5_req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 59 | 34.91% | 2 | 25.00% |
Hideaki Yoshifuji / 吉藤英明 | 58 | 34.32% | 1 | 12.50% |
Adam Langley | 34 | 20.12% | 2 | 25.00% |
Herbert Xu | 12 | 7.10% | 1 | 12.50% |
Alexey Dobriyan | 4 | 2.37% | 1 | 12.50% |
David S. Miller | 2 | 1.18% | 1 | 12.50% |
Total | 169 | 100.00% | 8 | 100.00% |
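For reference, the bytes hashed first are the RFC 2460 §8.1 pseudo-header followed by the TCP header with its checksum zeroed. A standalone sketch of that pseudo-header layout (intended to mirror the kernel's struct tcp6_pseudohdr, shown here with userspace types):

#include <stdint.h>
#include <netinet/in.h>

/* IPv6 pseudo-header covered by the MD5 digest (and by the regular
 * TCP checksum): addresses, upper-layer length, then three zero
 * padding bytes plus the Next Header value (6 for TCP). */
struct tcp6_pseudohdr_sketch {
	struct in6_addr saddr;
	struct in6_addr daddr;
	uint32_t        len;		/* TCP length, network byte order */
	uint32_t        protocol;	/* zero padding + IPPROTO_TCP */
};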
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
const struct in6_addr *daddr, struct in6_addr *saddr,
const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct ahash_request *req;
hp = tcp_get_md5sig_pool();
if (!hp)
goto clear_hash_noput;
req = hp->md5_req;
if (crypto_ahash_init(req))
goto clear_hash;
if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
goto clear_hash;
if (tcp_md5_hash_key(hp, key))
goto clear_hash;
ahash_request_set_crypt(req, NULL, md5_hash, 0)