Linux kernel release 4.11 — net/core/secure_seq.c
/*
* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cryptohash.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/random.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/siphash.h>
#include <net/secure_seq.h>
#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
#include <linux/in6.h>
#include <net/tcp.h>
/* SipHash keys: net_secret keys sequence-number/ephemeral-port hashing,
 * ts_secret keys the per-connection TCP timestamp offsets. */
static siphash_key_t net_secret __read_mostly;
static siphash_key_t ts_secret __read_mostly;
/*
 * Lazily seed both hash keys with random bytes on first use.
 * net_get_random_once() initializes each key exactly once, so calling
 * this on every fast-path invocation is cheap after the first time.
 */
static __always_inline void net_secret_init(void)
{
net_get_random_once(&ts_secret, sizeof(ts_secret));
net_get_random_once(&net_secret, sizeof(net_secret));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 11 | 35.48% | 1 | 14.29% |
David S. Miller | 10 | 32.26% | 1 | 14.29% |
Eric Dumazet | 6 | 19.35% | 2 | 28.57% |
Hannes Frederic Sowa | 3 | 9.68% | 2 | 28.57% |
Jason A. Donenfeld | 1 | 3.23% | 1 | 14.29% |
Total | 31 | 100.00% | 7 | 100.00% |
#endif
#ifdef CONFIG_INET
/*
 * Add an RFC 793 style clock component to a hashed sequence value.
 *
 * RFC 793 suggests a 250 kHz clock, which assumed 2 Mb/s networks;
 * 10 Mb/s Ethernet wants ~1 MHz and 10 Gb/s wants ~1 GHz.  At the same
 * time the u32 result must not wrap more than once per MSL (2 minutes).
 * A 64 ns tick satisfies both: the u32 wraps only every ~274 seconds.
 */
static u32 seq_scale(u32 seq)
{
	u64 now_ns = ktime_get_real_ns();

	return seq + (u32)(now_ns >> 6);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 19 | 95.00% | 1 | 50.00% |
Eric Dumazet | 1 | 5.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
#endif
#if IS_ENABLED(CONFIG_IPV6)
static u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr)
{
const struct {
struct in6_addr saddr;
struct in6_addr daddr;
} __aligned(SIPHASH_ALIGNMENT) combined = {
.saddr = *(struct in6_addr *)saddr,
.daddr = *(struct in6_addr *)daddr,
};
if (sysctl_tcp_timestamps != 1)
return 0;
return siphash(&combined, offsetofend(typeof(combined), daddr),
&ts_secret);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 87 | 100.00% | 1 | 100.00% |
Total | 87 | 100.00% | 1 | 100.00% |
/*
 * secure_tcpv6_sequence_number - TCP initial sequence number for IPv6
 * @saddr: source IPv6 address
 * @daddr: destination IPv6 address
 * @sport: source port
 * @dport: destination port
 * @tsoff: out parameter; receives the per-connection timestamp offset
 *
 * Hashes the 4-tuple with the keyed SipHash PRF (net_secret) so ISNs are
 * unpredictable without the key, then adds the clock component via
 * seq_scale(), which truncates the 64-bit hash to u32.
 */
u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
__be16 sport, __be16 dport, u32 *tsoff)
{
/* Pack the tuple into one SipHash-aligned buffer; hashing stops at
 * offsetofend(..., dport) so trailing struct padding is excluded. */
const struct {
struct in6_addr saddr;
struct in6_addr daddr;
__be16 sport;
__be16 dport;
} __aligned(SIPHASH_ALIGNMENT) combined = {
.saddr = *(struct in6_addr *)saddr,
.daddr = *(struct in6_addr *)daddr,
.sport = sport,
.dport = dport
};
u64 hash;
net_secret_init(); /* lazy one-time seeding of the hash keys */
hash = siphash(&combined, offsetofend(typeof(combined), dport),
&net_secret);
*tsoff = secure_tcpv6_ts_off(saddr, daddr);
return seq_scale(hash);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jason A. Donenfeld | 65 | 51.59% | 1 | 16.67% |
David S. Miller | 42 | 33.33% | 1 | 16.67% |
Florian Westphal | 15 | 11.90% | 2 | 33.33% |
Eric Dumazet | 4 | 3.17% | 2 | 33.33% |
Total | 126 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(secure_tcpv6_sequence_number);
/*
 * Keyed hash used to pick ephemeral source ports for IPv6: the same
 * (saddr, daddr, dport) triple always yields the same starting point,
 * but the value is unpredictable without net_secret.
 */
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
			       __be16 dport)
{
	u64 hash;
	const struct {
		struct in6_addr saddr;
		struct in6_addr daddr;
		__be16 dport;
	} __aligned(SIPHASH_ALIGNMENT) input = {
		.saddr = *(struct in6_addr *)saddr,
		.daddr = *(struct in6_addr *)daddr,
		.dport = dport,
	};

	net_secret_init();
	/* Hash up to the end of dport so struct padding never leaks in. */
	hash = siphash(&input, offsetofend(typeof(input), dport), &net_secret);
	return (u32)hash;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jason A. Donenfeld | 54 | 59.34% | 1 | 50.00% |
David S. Miller | 37 | 40.66% | 1 | 50.00% |
Total | 91 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
#endif
#ifdef CONFIG_INET
/*
 * Per-connection TCP timestamp offset for IPv4, keyed by ts_secret.
 * Only generated in the default timestamp mode; otherwise no offset.
 */
static u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr)
{
	if (sysctl_tcp_timestamps == 1)
		return siphash_2u32((__force u32)saddr, (__force u32)daddr,
				    &ts_secret);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
* but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
* it would be easy enough to have the former function use siphash_4u32, passing
* the arguments as separate u32.
*/
/*
 * secure_tcp_sequence_number - TCP initial sequence number for IPv4
 * @saddr/@daddr: connection endpoints
 * @sport/@dport: connection ports
 * @tsoff: out parameter; receives the per-connection timestamp offset
 *
 * SipHash over the 4-tuple (both ports packed into one u32) plus the
 * RFC 793 clock component from seq_scale().
 */
u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
			       __be16 sport, __be16 dport, u32 *tsoff)
{
	u32 ports = (__force u32)sport << 16 | (__force u32)dport;
	u64 hash;

	net_secret_init();
	hash = siphash_3u32((__force u32)saddr, (__force u32)daddr, ports,
			    &net_secret);
	*tsoff = secure_tcp_ts_off(saddr, daddr);
	return seq_scale(hash);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 50 | 64.94% | 1 | 20.00% |
Florian Westphal | 15 | 19.48% | 2 | 40.00% |
Jason A. Donenfeld | 9 | 11.69% | 1 | 20.00% |
Eric Dumazet | 3 | 3.90% | 1 | 20.00% |
Total | 77 | 100.00% | 5 | 100.00% |
/*
 * Keyed hash used to pick ephemeral source ports for IPv4.  Note the
 * dport is passed as a bare u16 (not shifted), which keeps this
 * distinct from secure_tcp_sequence_number() — see the comment above it.
 */
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
{
	u64 hash;

	net_secret_init();
	hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
			    (__force u16)dport, &net_secret);
	return (u32)hash;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 32 | 76.19% | 1 | 33.33% |
Jason A. Donenfeld | 7 | 16.67% | 1 | 33.33% |
Eric Dumazet | 3 | 7.14% | 1 | 33.33% |
Total | 42 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
#endif
#if IS_ENABLED(CONFIG_IP_DCCP)
/*
 * secure_dccp_sequence_number - initial DCCP sequence number for IPv4
 * @saddr/@daddr: connection endpoints
 * @sport/@dport: connection ports
 *
 * Keyed SipHash over the 4-tuple plus a wall-clock component, reduced
 * to DCCP's 48-bit sequence number space.
 */
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport)
{
u64 seq;
net_secret_init(); /* lazy one-time seeding of net_secret */
seq = siphash_3u32((__force u32)saddr, (__force u32)daddr,
(__force u32)sport << 16 | (__force u32)dport,
&net_secret);
seq += ktime_get_real_ns(); /* clock component, as for TCP ISNs */
seq &= (1ull << 48) - 1; /* DCCP sequence numbers are 48 bits wide */
return seq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 50 | 66.67% | 1 | 20.00% |
Eric Dumazet | 17 | 22.67% | 3 | 60.00% |
Jason A. Donenfeld | 8 | 10.67% | 1 | 20.00% |
Total | 75 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(secure_dccp_sequence_number);
#if IS_ENABLED(CONFIG_IPV6)
/*
 * secure_dccpv6_sequence_number - initial DCCP sequence number for IPv6
 * @saddr: source IPv6 address
 * @daddr: destination IPv6 address
 * @sport/@dport: connection ports
 *
 * Same construction as the IPv4 variant: keyed SipHash over the 4-tuple
 * plus a wall-clock component, masked to DCCP's 48-bit sequence space.
 */
u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
__be16 sport, __be16 dport)
{
/* Pack the tuple into one SipHash-aligned buffer; hashing stops at
 * offsetofend(..., dport) so trailing struct padding is excluded. */
const struct {
struct in6_addr saddr;
struct in6_addr daddr;
__be16 sport;
__be16 dport;
} __aligned(SIPHASH_ALIGNMENT) combined = {
.saddr = *(struct in6_addr *)saddr,
.daddr = *(struct in6_addr *)daddr,
.sport = sport,
.dport = dport
};
u64 seq;
net_secret_init(); /* lazy one-time seeding of net_secret */
seq = siphash(&combined, offsetofend(typeof(combined), dport),
&net_secret);
seq += ktime_get_real_ns(); /* clock component, as for TCP ISNs */
seq &= (1ull << 48) - 1; /* DCCP sequence numbers are 48 bits wide */
return seq;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jason A. Donenfeld | 64 | 52.46% | 1 | 25.00% |
David S. Miller | 55 | 45.08% | 1 | 25.00% |
Eric Dumazet | 3 | 2.46% | 2 | 50.00% |
Total | 122 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(secure_dccpv6_sequence_number);
#endif
#endif
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 374 | 44.68% | 1 | 5.26% |
Jason A. Donenfeld | 218 | 26.05% | 1 | 5.26% |
Florian Westphal | 176 | 21.03% | 3 | 15.79% |
Eric Dumazet | 39 | 4.66% | 8 | 42.11% |
Fabio Estevam | 13 | 1.55% | 1 | 5.26% |
Hannes Frederic Sowa | 6 | 0.72% | 2 | 10.53% |
Patrick McHardy | 5 | 0.60% | 1 | 5.26% |
Stephen Boyd | 5 | 0.60% | 1 | 5.26% |
Igor Maravić | 1 | 0.12% | 1 | 5.26% |
Total | 837 | 100.00% | 19 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.