Release 4.11 net/ipv4/udp.c
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The User Datagram Protocol (UDP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Hirokazu Takahashi, <taka@valinux.co.jp>
*
* Fixes:
* Alan Cox : verify_area() calls
* Alan Cox : stopped close while in use off icmp
* messages. Not a fix but a botch that
* for udp at least is 'valid'.
* Alan Cox : Fixed icmp handling properly
* Alan Cox : Correct error for oversized datagrams
* Alan Cox : Tidied select() semantics.
* Alan Cox : udp_err() fixed properly, also now
* select and read wake correctly on errors
* Alan Cox : udp_send verify_area moved to avoid mem leak
* Alan Cox : UDP can count its memory
* Alan Cox : send to an unknown connection causes
* an ECONNREFUSED off the icmp, but
* does NOT close.
* Alan Cox : Switched to new sk_buff handlers. No more backlog!
* Alan Cox : Using generic datagram code. Even smaller and the PEEK
* bug no longer crashes it.
* Fred Van Kempen : Net2e support for sk->broadcast.
* Alan Cox : Uses skb_free_datagram
* Alan Cox : Added get/set sockopt support.
* Alan Cox : Broadcasting without option set returns EACCES.
* Alan Cox : No wakeup calls. Instead we now use the callbacks.
* Alan Cox : Use ip_tos and ip_ttl
* Alan Cox : SNMP Mibs
* Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support.
* Matt Dillon : UDP length checks.
* Alan Cox : Smarter af_inet used properly.
* Alan Cox : Use new kernel side addressing.
* Alan Cox : Incorrect return on truncated datagram receive.
* Arnt Gulbrandsen : New udp_send and stuff
* Alan Cox : Cache last socket
* Alan Cox : Route cache
* Jon Peatfield : Minor efficiency fix to sendto().
* Mike Shaver : RFC1122 checks.
* Alan Cox : Nonblocking error fix.
* Willy Konynenberg : Transparent proxying support.
* Mike McLagan : Routing by source
* David S. Miller : New socket lookup architecture.
* Last socket cache retained as it
* does have a high hit rate.
* Olaf Kirch : Don't linearise iovec on sendmsg.
* Andi Kleen : Some cleanups, cache destination entry
* for connect.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Melvin Smith : Check msg_name not msg_namelen in sendto(),
* return ENOTCONN for unconnected sockets (POSIX)
* Janos Farkas : don't deliver multi/broadcasts to a different
* bound-to-device socket
* Hirokazu Takahashi : HW checksumming for outgoing UDP
* datagrams.
* Hirokazu Takahashi : sendfile() on UDP works now.
* Arnaldo C. Melo : convert /proc/net/udp to seq_file
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
* James Chapman : Add L2TP encapsulation type.
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "UDP: " fmt
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);
int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);
int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
/* IPCB reference means this can not be used from early demux */
static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
if (!net->ipv4.sysctl_udp_l3mdev_accept &&
skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
return true;
#endif
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Shearman | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
unsigned long *bitmap,
struct sock *sk, unsigned int log)
{
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
sk_for_each(sk2, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(bitmap || udp_sk(sk2)->udp_port_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(uid, sock_i_uid(sk2))) {
if (!bitmap)
return 0;
} else {
if (!bitmap)
return 1;
__set_bit(udp_sk(sk2)->udp_port_hash >> log,
bitmap);
}
}
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 88 | 45.60% | 6 | 28.57% |
Eric Garver | 27 | 13.99% | 1 | 4.76% |
Tom Herbert | 23 | 11.92% | 1 | 4.76% |
Gerrit Renker | 19 | 9.84% | 2 | 9.52% |
Pavel Emelyanov | 9 | 4.66% | 1 | 4.76% |
Hideaki Yoshifuji / 吉藤英明 | 7 | 3.63% | 2 | 9.52% |
Craig Gallek | 6 | 3.11% | 1 | 4.76% |
Joe Perches | 6 | 3.11% | 1 | 4.76% |
David S. Miller | 3 | 1.55% | 2 | 9.52% |
Linus Torvalds (pre-git) | 2 | 1.04% | 1 | 4.76% |
Al Viro | 1 | 0.52% | 1 | 4.76% |
Stephen Hemminger | 1 | 0.52% | 1 | 4.76% |
Josef Bacik | 1 | 0.52% | 1 | 4.76% |
Total | 193 | 100.00% | 21 | 100.00% |
/*
* Note: we still hold spinlock of primary hash chain, so no other writer
* can insert/delete a socket with local_port == num
*/
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
struct udp_hslot *hslot2,
struct sock *sk)
{
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
int res = 0;
spin_lock(&hslot2->lock);
udp_portaddr_for_each_entry(sk2, &hslot2->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
(udp_sk(sk2)->udp_port_hash == num) &&
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(uid, sock_i_uid(sk2))) {
res = 0;
} else {
res = 1;
}
break;
}
}
spin_unlock(&hslot2->lock);
return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 126 | 69.61% | 1 | 16.67% |
Eric Garver | 23 | 12.71% | 1 | 16.67% |
Tom Herbert | 23 | 12.71% | 1 | 16.67% |
Craig Gallek | 6 | 3.31% | 1 | 16.67% |
Joe Perches | 2 | 1.10% | 1 | 16.67% |
Josef Bacik | 1 | 0.55% | 1 | 16.67% |
Total | 181 | 100.00% | 6 | 100.00% |
static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
struct net *net = sock_net(sk);
kuid_t uid = sock_i_uid(sk);
struct sock *sk2;
sk_for_each(sk2, &hslot->head) {
if (net_eq(sock_net(sk2), net) &&
sk2 != sk &&
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
(udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
(sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
inet_rcv_saddr_equal(sk, sk2, false)) {
return reuseport_add_sock(sk, sk2);
}
}
/* Initial allocation may have already happened via setsockopt */
if (!rcu_access_pointer(sk->sk_reuseport_cb))
return reuseport_alloc(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Craig Gallek | 158 | 98.75% | 1 | 33.33% |
Josef Bacik | 1 | 0.62% | 1 | 33.33% |
Eric Dumazet | 1 | 0.62% | 1 | 33.33% |
Total | 160 | 100.00% | 3 | 100.00% |
/**
* udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6
*
* @sk: socket struct in question
* @snum: port number to look up
* @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
* with NULL address
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
unsigned int hash2_nulladdr)
{
struct udp_hslot *hslot, *hslot2;
struct udp_table *udptable = sk->sk_prot->h.udp_table;
int error = 1;
struct net *net = sock_net(sk);
if (!snum) {
int low, high, remaining;
unsigned int rand;
unsigned short first, last;
DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rand = prandom_u32();
first = reciprocal_scale(rand, remaining) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
rand = (rand | 1) * (udptable->mask + 1);
last = first + udptable->mask + 1;
do {
hslot = udp_hashslot(udptable, net, first);
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
udptable->log);
snum = first;
/*
* Iterate on all possible values of snum for this hash.
* Using steps of an odd multiple of UDP_HTABLE_SIZE
* give us randomization and full range coverage.
*/
do {
if (low <= snum && snum <= high &&
!test_bit(snum >> udptable->log, bitmap) &&
!inet_is_local_reserved_port(net, snum))
goto found;
snum += rand;
} while (snum != first);
spin_unlock_bh(&hslot->lock);
cond_resched();
} while (++first != last);
goto fail;
} else {
hslot = udp_hashslot(udptable, net, snum);
spin_lock_bh(&hslot->lock);
if (hslot->count > 10) {
int exist;
unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
slot2 &= udptable->mask;
hash2_nulladdr &= udptable->mask;
hslot2 = udp_hashslot2(udptable, slot2);
if (hslot->count < hslot2->count)
goto scan_primary_hash;
exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
if (!exist && (hash2_nulladdr != slot2)) {
hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
exist = udp_lib_lport_inuse2(net, snum, hslot2,
sk);
}
if (exist)
goto fail_unlock;
else
goto found;
}
scan_primary_hash:
if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
goto fail_unlock;
}
found:
inet_sk(sk)->inet_num = snum;
udp_sk(sk)->udp_port_hash = snum;
udp_sk(sk)->udp_portaddr_hash ^= snum;
if (sk_unhashed(sk)) {
if (sk->sk_reuseport &&
udp_reuseport_add_sock(sk, hslot)) {
inet_sk(sk)->inet_num = 0;
udp_sk(sk)->udp_port_hash = 0;
udp_sk(sk)->udp_portaddr_hash ^= snum;
goto fail_unlock;
}
sk_add_node_rcu(sk, &hslot->head);
hslot->count++;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock(&hslot2->lock);
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk->sk_family == AF_INET6)
hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
&hslot2->head);
else
hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&hslot2->head);
hslot2->count++;
spin_unlock(&hslot2->lock);
}
sock_set_flag(sk, SOCK_RCU_FREE);
error = 0;
fail_unlock:
spin_unlock_bh(&hslot->lock);
fail:
return error;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 406 | 61.98% | 15 | 30.61% |
Craig Gallek | 78 | 11.91% | 2 | 4.08% |
Linus Torvalds (pre-git) | 50 | 7.63% | 8 | 16.33% |
Pavel Emelyanov | 28 | 4.27% | 5 | 10.20% |
Stephen Hemminger | 25 | 3.82% | 2 | 4.08% |
Gerrit Renker | 17 | 2.60% | 1 | 2.04% |
Anton Arapov | 13 | 1.98% | 1 | 2.04% |
Américo Wang | 8 | 1.22% | 2 | 4.08% |
Arnaldo Carvalho de Melo | 7 | 1.07% | 2 | 4.08% |
David S. Miller | 6 | 0.92% | 4 | 8.16% |
Daniel Borkmann | 4 | 0.61% | 1 | 2.04% |
Hideaki Yoshifuji / 吉藤英明 | 4 | 0.61% | 2 | 4.08% |
Linus Torvalds | 3 | 0.46% | 1 | 2.04% |
Eric Garver | 3 | 0.46% | 1 | 2.04% |
Eric W. Biedermann | 2 | 0.31% | 1 | 2.04% |
Aruna-Hewapathirane | 1 | 0.15% | 1 | 2.04% |
Total | 655 | 100.00% | 49 | 100.00% |
EXPORT_SYMBOL(udp_lib_get_port);
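When no port is given, udp_lib_get_port() above picks a random starting port with reciprocal_scale() and then probes further candidates in steps of an odd multiple of the hash-table size: every probe stays in the hash chain whose lock is already held, yet the odd multiple guarantees the walk visits every port of that chain before wrapping back to the start. A minimal user-space sketch of that arithmetic (not part of udp.c), assuming a 256-slot table and a fixed stand-in for prandom_u32():

#include <stdint.h>
#include <stdio.h>

/* same mapping the kernel uses to pick a start port in [0, n) */
static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
{
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

int main(void)
{
	const uint32_t table_size = 256;                /* udptable->mask + 1 */
	const uint32_t low = 32768, high = 60999;       /* typical ip_local_port_range */
	uint32_t rand = 0x12345678;                     /* stand-in for prandom_u32() */

	uint16_t first = reciprocal_scale(rand, high - low + 1) + low;
	uint32_t step = (rand | 1) * table_size;        /* odd multiple of the table size */

	uint32_t visited = 0;
	uint16_t snum = first;
	do {
		/* every candidate hashes to the chain we already locked ... */
		if ((snum & (table_size - 1)) != (first & (table_size - 1)))
			return 1;
		visited++;
		snum += step;           /* wraps mod 65536, like the kernel's u16 */
	} while (snum != first);

	/* ... and the odd multiple makes the walk cover the whole chain */
	printf("visited %u of %u ports in the chain\n", visited, 65536 / table_size);
	return 0;
}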
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
unsigned int port)
{
return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 36 | 100.00% | 3 | 100.00% |
Total | 36 | 100.00% | 3 | 100.00% |
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
unsigned int hash2_partial =
udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 51 | 68.92% | 3 | 42.86% |
David S. Miller | 19 | 25.68% | 1 | 14.29% |
Linus Torvalds (pre-git) | 2 | 2.70% | 1 | 14.29% |
Pavel Emelyanov | 1 | 1.35% | 1 | 14.29% |
Gerrit Renker | 1 | 1.35% | 1 | 14.29% |
Total | 74 | 100.00% | 7 | 100.00% |
static int compute_score(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned short hnum, int dif,
bool exact_dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
ipv6_only_sock(sk))
return -1;
score = (sk->sk_family == PF_INET) ? 2 : 1;
inet = inet_sk(sk);
if (inet->inet_rcv_saddr) {
if (inet->inet_rcv_saddr != daddr)
return -1;
score += 4;
}
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
score += 4;
}
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score += 4;
}
if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
return score;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 107 | 51.44% | 1 | 6.67% |
Eric Dumazet | 50 | 24.04% | 4 | 26.67% |
Joe Perches | 21 | 10.10% | 1 | 6.67% |
Hideaki Yoshifuji / 吉藤英明 | 8 | 3.85% | 3 | 20.00% |
Tom Herbert | 6 | 2.88% | 1 | 6.67% |
Linus Torvalds (pre-git) | 6 | 2.88% | 1 | 6.67% |
Robert Shearman | 5 | 2.40% | 1 | 6.67% |
Xuemin Su | 3 | 1.44% | 1 | 6.67% |
Gerrit Renker | 1 | 0.48% | 1 | 6.67% |
Pavel Emelyanov | 1 | 0.48% | 1 | 6.67% |
Total | 208 | 100.00% | 15 | 100.00% |
static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
const __u16 lport, const __be32 faddr,
const __be16 fport)
{
static u32 udp_ehash_secret __read_mostly;
net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
return __inet_ehashfn(laddr, lport, faddr, fport,
udp_ehash_secret + net_hash_mix(net));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hannes Frederic Sowa | 61 | 96.83% | 2 | 66.67% |
Eric Dumazet | 2 | 3.17% | 1 | 33.33% |
Total | 63 | 100.00% | 3 | 100.00% |
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
__be32 daddr, unsigned int hnum, int dif, bool exact_dif,
struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
if (result)
return result;
matches = 1;
}
badness = score;
result = sk;
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 110 | 51.16% | 3 | 27.27% |
Tom Herbert | 68 | 31.63% | 1 | 9.09% |
Craig Gallek | 26 | 12.09% | 3 | 27.27% |
Robert Shearman | 5 | 2.33% | 1 | 9.09% |
Daniel Borkmann | 4 | 1.86% | 1 | 9.09% |
Hannes Frederic Sowa | 1 | 0.47% | 1 | 9.09% |
Xuemin Su | 1 | 0.47% | 1 | 9.09% |
Total | 215 | 100.00% | 11 | 100.00% |
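When several sockets tie on compute_score() and reuseport_select_sock() has nothing attached to return, the matches/reciprocal_scale() arithmetic above is single-slot reservoir sampling: each of N equally scored SO_REUSEPORT sockets is kept with probability roughly 1/N. A user-space sketch of that property (not part of udp.c), using a plain xorshift in place of the flow hash and next_pseudo_random32():

#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
{
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

/* plain xorshift stand-in for the flow hash and next_pseudo_random32() */
static uint32_t xorshift32(uint32_t x)
{
	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return x;
}

int main(void)
{
	enum { NSOCKS = 4, TRIALS = 1000000 };
	unsigned long hits[NSOCKS] = { 0 };
	uint32_t state = 0x9e3779b9u;

	for (int t = 0; t < TRIALS; t++) {
		uint32_t hash = state = xorshift32(state);  /* per-packet hash */
		int result = -1, matches = 0;

		for (int sk = 0; sk < NSOCKS; sk++) {
			if (!matches) {                 /* first socket with the best score */
				result = sk;
				matches = 1;
			} else {                        /* later sockets tie on score */
				matches++;
				if (reciprocal_scale(hash, matches) == 0)
					result = sk;
				hash = xorshift32(hash);
			}
		}
		hits[result]++;
	}

	for (int sk = 0; sk < NSOCKS; sk++)
		printf("socket %d selected %.1f%% of the time\n",
		       sk, 100.0 * hits[sk] / TRIALS);
	return 0;
}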
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
* harder than this. -DaveM
*/
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport,
int dif, struct udp_table *udptable, struct sk_buff *skb)
{
struct sock *sk, *result;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
bool exact_dif = udp_lib_exact_dif_match(net, skb);
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
if (hslot->count > 10) {
hash2 = udp4_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
exact_dif, hslot2, skb);
if (!result) {
unsigned int old_slot2 = slot2;
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
/* avoid searching the same slot again. */
if (unlikely(slot2 == old_slot2))
return result;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
exact_dif, hslot2, skb);
}
return result;
}
begin:
result = NULL;
badness = 0;
sk_for_each_rcu(sk, &hslot->head) {
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
result = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
if (result)
return result;
matches = 1;
}
result = sk;
badness = score;
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 278 | 63.47% | 7 | 41.18% |
Tom Herbert | 73 | 16.67% | 1 | 5.88% |
Craig Gallek | 29 | 6.62% | 3 | 17.65% |
Xuemin Su | 21 | 4.79% | 1 | 5.88% |
Robert Shearman | 16 | 3.65% | 1 | 5.88% |
David S. Miller | 15 | 3.42% | 1 | 5.88% |
Daniel Borkmann | 4 | 0.91% | 1 | 5.88% |
Hannes Frederic Sowa | 1 | 0.23% | 1 | 5.88% |
Jorge Boncompte | 1 | 0.23% | 1 | 5.88% |
Total | 438 | 100.00% | 17 | 100.00% |
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
const struct iphdr *iph = ip_hdr(skb);
return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
iph->daddr, dport, inet_iif(skb),
udptable, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
KOVACS Krisztian | 64 | 94.12% | 1 | 33.33% |
Craig Gallek | 2 | 2.94% | 1 | 33.33% |
Eric Dumazet | 2 | 2.94% | 1 | 33.33% |
Total | 68 | 100.00% | 3 | 100.00% |
struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport)
{
return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Herbert | 29 | 93.55% | 1 | 50.00% |
Alexander Duyck | 2 | 6.45% | 1 | 50.00% |
Total | 31 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
/* Must be called under rcu_read_lock().
* Does increment socket refcount.
*/
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
__be32 daddr, __be16 dport, int dif)
{
struct sock *sk;
sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
dif, &udp_table, NULL);
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
KOVACS Krisztian | 43 | 58.11% | 1 | 25.00% |
Eric Dumazet | 29 | 39.19% | 2 | 50.00% |
Craig Gallek | 2 | 2.70% | 1 | 25.00% |
Total | 74 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif, unsigned short hnum)
{
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
(inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
(inet->inet_dport != rmt_port && inet->inet_dport) ||
(inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
ipv6_only_sock(sk) ||
(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
return false;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
return false;
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 90 | 62.50% | 1 | 20.00% |
Shawn Bohrer | 29 | 20.14% | 1 | 20.00% |
Eric Dumazet | 25 | 17.36% | 3 | 60.00% |
Total | 144 | 100.00% | 5 | 100.00% |
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
* be closed and the error returned to the user. If err > 0
* it's just the icmp type << 8 | icmp code.
* Header points to the ip header of the error packet. We move
* on past this. Then (as it used to claim before adjustment)
* header points to the first 8 bytes of the udp header. We need
* to find the appropriate port.
*/
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
struct inet_sock *inet;
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
int harderr;
int err;
struct net *net = dev_net(skb->dev);
sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
iph->saddr, uh->source, skb->dev->ifindex, udptable,
NULL);
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return; /* No socket for error */
}
err = 0;
harderr = 0;
inet = inet_sk(sk);
switch (type) {
default:
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
case ICMP_SOURCE_QUENCH:
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
harderr = 1;
break;
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
ipv4_sk_update_pmtu(skb, sk, info);
if (inet->pmtudisc != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
break;
}
goto out;
}
err = EHOSTUNREACH;
if (code <= NR_ICMP_UNREACH) {
harderr = icmp_err_convert[code].fatal;
err = icmp_err_convert[code].errno;
}
break;
case ICMP_REDIRECT:
ipv4_sk_redirect(skb, sk);
goto out;
}
/*
* RFC1122: OK. Passes ICMP errors back to application, as per
* 4.1.3.3.
*/
if (!inet->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else
ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
return;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 330 | 92.18% | 3 | 23.08% |
Pavel Emelyanov | 13 | 3.63% | 2 | 15.38% |
Eric Dumazet | 6 | 1.68% | 4 | 30.77% |
Duan Jiong | 3 | 0.84% | 1 | 7.69% |
Hideaki Yoshifuji / 吉藤英明 | 3 | 0.84% | 1 | 7.69% |
Craig Gallek | 2 | 0.56% | 1 | 7.69% |
Ian Morris | 1 | 0.28% | 1 | 7.69% |
Total | 358 | 100.00% | 13 | 100.00% |
void udp_err(struct sk_buff *skb, u32 info)
{
__udp4_lib_err(skb, info, &udp_table);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 21 | 91.30% | 1 | 50.00% |
Eric Dumazet | 2 | 8.70% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
void udp_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending) {
up->len = 0;
up->pending = 0;
ip_flush_pending_frames(sk);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 45 | 100.00% | 1 | 100.00% |
Total | 45 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
* udp4_hwcsum - handle outgoing HW checksumming
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
* @src: source IP address
* @dst: destination IP address
*/
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
struct udphdr *uh = udp_hdr(skb);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
int hlen = len;
__wsum csum = 0;
if (!skb_has_frag_list(skb)) {
/*
* Only one fragment on the socket.
*/
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_tcpudp_magic(src, dst, len,
IPPROTO_UDP, 0);
} else {
struct sk_buff *frags;
/*
* HW-checksum won't work as there are two or more
* fragments on the socket so that all csums of sk_buffs
* should be together
*/
skb_walk_frags(skb, frags) {
csum = csum_add(csum, frags->csum);
hlen -= frags->len;
}
csum = skb_checksum(skb, offset, hlen, csum);
skb->ip_summed = CHECKSUM_NONE;
uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 136 | 71.20% | 1 | 33.33% |
Herbert Xu | 40 | 20.94% | 1 | 33.33% |
Américo Wang | 15 | 7.85% | 1 | 33.33% |
Total | 191 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(udp4_hwcsum);
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
* for the simple case like when setting the checksum for a UDP tunnel.
*/
void udp_set_csum(bool nocheck, struct sk_buff *skb,
__be32 saddr, __be32 daddr, int len)
{
struct udphdr *uh = udp_hdr(skb);
if (nocheck) {
uh->check = 0;
} else if (skb_is_gso(skb)) {
uh->check = ~udp_v4_check(len, saddr, daddr, 0);
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
uh->check = 0;
uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
} else {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~udp_v4_check(len, saddr, daddr, 0);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Herbert | 115 | 68.05% | 1 | 33.33% |
Edward Cree | 54 | 31.95% | 2 | 66.67% |
Total | 169 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(udp_set_csum);
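udp4_hwcsum() and udp_set_csum() both end up producing the RFC 768 checksum: a one's-complement sum over a pseudo-header (source and destination address, protocol, UDP length) plus the UDP header and payload, with a result of 0 rewritten to 0xFFFF (CSUM_MANGLED_0) because 0 on the wire means "checksum not computed". A standalone user-space sketch of the software (non-offloaded) case, not part of udp.c:

#include <arpa/inet.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* accumulate 16-bit big-endian words, as the one's-complement sum does */
static uint32_t csum_add(uint32_t sum, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)                        /* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* saddr/daddr in network byte order; udp = header (check field zeroed) + payload */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
			      const void *udp, uint16_t len)
{
	uint32_t sum = 0;
	uint16_t csum;

	/* RFC 768 pseudo-header: source, destination, protocol, UDP length */
	sum = csum_add(sum, &saddr, 4);
	sum = csum_add(sum, &daddr, 4);
	sum += 17;                      /* IPPROTO_UDP */
	sum += len;
	sum = csum_add(sum, udp, len);

	while (sum >> 16)               /* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);

	csum = ~sum & 0xffff;
	return csum ? csum : 0xffff;    /* CSUM_MANGLED_0: 0 on the wire means "no checksum" */
}

int main(void)
{
	/* 8-byte UDP header (ports 12345 -> 53, length 10, check 0) + "hi" */
	uint8_t dgram[10] = { 0x30, 0x39, 0x00, 0x35, 0x00, 0x0a, 0x00, 0x00, 'h', 'i' };
	uint32_t src = inet_addr("192.0.2.1"), dst = inet_addr("192.0.2.2");

	printf("udp checksum: 0x%04x\n", udp4_checksum(src, dst, dgram, sizeof(dgram)));
	return 0;
}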
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct udphdr *uh;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
__wsum csum = 0;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = inet->inet_sport;
uh->dest = fl4->fl4_dport;
uh->len = htons(len);
uh->check = 0;
if (is_udplite) /* UDP-Lite */
csum = udplite_csum(skb);
else if (sk->sk_no_check_tx) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
goto send;
} else
csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
sk->sk_protocol, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip_send_skb(sock_net(sk), skb);
if (err) {
if (err == -ENOBUFS && !inet->recverr) {
UDP_INC_STATS(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP_INC_STATS(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 201 | 70.53% | 2 | 25.00% |
Herbert Xu | 40 | 14.04% | 1 | 12.50% |
Eric Dumazet | 38 | 13.33% | 3 | 37.50% |
Pavel Emelyanov | 5 | 1.75% | 1 | 12.50% |
Tom Herbert | 1 | 0.35% | 1 | 12.50% |
Total | 285 | 100.00% | 8 | 100.00% |
/*
* Push out all pending data as one UDP datagram. Socket is locked.
*/
int udp_push_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
struct sk_buff *skb;
int err = 0;
skb = ip_finish_skb(sk, fl4);
if (!skb)
goto out;
err = udp_send_skb(skb, fl4);
out:
up->len = 0;
up->pending = 0;
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 72 | 72.73% | 1 | 20.00% |
Eric Dumazet | 14 | 14.14% | 1 | 20.00% |
David S. Miller | 13 | 13.13% | 3 | 60.00% |
Total | 99 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(udp_push_pending_frames);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
struct flowi4 fl4_stack;
struct flowi4 *fl4;
int ulen = len;
struct ipcm_cookie ipc;
struct rtable *rt = NULL;
int free = 0;
int connected = 0;
__be32 daddr, faddr, saddr;
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
if (len > 0xFFFF)
return -EMSGSIZE;
/*
* Check the flags.
*/
if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
fl4 = &inet->cork.fl.u.ip4;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET)) {
release_sock(sk);
return -EINVAL;
}
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
/*
* Get and verify the address.
*/
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
if (usin->sin_family != AF_UNSPEC)
return -EAFNOSUPPORT;
}
daddr = usin->sin_addr.s_addr;
dport = usin->sin_port;
if (dport == 0)
return -EINVAL;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = inet->inet_daddr;
dport = inet->inet_dport;
/* Open fast path for connected socket.
Route will not be used, if at least one option is set.
*/
connected = 1;
}
ipc.sockc.tsflags = sk->sk_tsflags;
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
if (unlikely(err)) {
kfree(ipc.opt);
return err;
}
if (ipc.opt)
free = 1;
connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt) {
memcpy(&opt_copy, inet_opt,
sizeof(*inet_opt) + inet_opt->opt.optlen);
ipc.opt = &opt_copy.opt;
}
rcu_read_unlock();
}
saddr = ipc.addr;
ipc.addr = faddr = daddr;
sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
if (ipc.opt && ipc.opt->opt.srr) {
if (!daddr)
return -EINVAL;
faddr = ipc.opt->opt.faddr;
connected = 0;
}
tos = get_rttos(&ipc, inet);
if (sock_flag(sk, SOCK_LOCALROUTE) ||
(msg->msg_flags & MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->opt.is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
}
if (ipv4_is_multicast(daddr)) {
if (!ipc.oif)
ipc.oif = inet->mc_index;
if (!saddr)
saddr = inet->mc_addr;
connected = 0;
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
if (connected)
rt = (struct rtable *)sk_dst_check(sk, 0);
if (!rt) {
struct net *net = sock_net(sk);
__u8 flow_flags = inet_sk_flowi_flags(sk);
fl4 = &fl4_stack;
flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
flow_flags,
faddr, saddr, dport, inet->inet_sport,
sk->sk_uid);
security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
if (err == -ENETUNREACH)
IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
goto out;
}
err = -EACCES;
if ((rt->rt_flags & RTCF_BROADCAST) &&
!sock_flag(sk, SOCK_BROADCAST))
goto out;
if (connected)
sk_dst_set(sk, dst_clone(&rt->dst));
}
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
saddr = fl4->saddr;
if (!ipc.addr)
daddr = ipc.addr = fl4->daddr;
/* Lockless fast path for the non-corking case. */
if (!corkreq) {
skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
msg->msg_flags);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_send_skb(skb, fl4);
goto out;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("cork app bug 2\n");
err = -EINVAL;
goto out;
}
/*
* Now cork the socket to pend data.
*/
fl4 = &inet->cork.fl.u.ip4;
fl4->daddr = daddr;
fl4->saddr = saddr;
fl4->fl4_dport = dport;
fl4->fl4_sport = inet->inet_sport;
up->pending = AF_INET;
do_append_data:
up->len += ulen;
err = ip_append_data(sk, fl4, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc, &rt,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_flush_pending_frames(sk);
else if (!corkreq)
err = udp_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
release_sock(sk);
out:
ip_rt_put(rt);
if (free)
kfree(ipc.opt);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP_INC_STATS(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
if (msg->msg_flags & MSG_PROBE)
dst_confirm_neigh(&rt->dst, &fl4->daddr);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 995 | 77.31% | 11 | 28.95% |
Eric Dumazet | 80 | 6.22% | 6 | 15.79% |
Herbert Xu | 75 | 5.83% | 1 | 2.63% |
Soheil Hassas Yeganeh | 26 | 2.02% | 1 | 2.63% |
David Ahern | 21 | 1.63% | 1 | 2.63% |
Francesco Fusco | 17 | 1.32% | 1 | 2.63% |
Erich E. Hoover | 16 | 1.24% | 1 | 2.63% |
Julian Anastasov | 14 | 1.09% | 1 | 2.63% |
Pavel Emelyanov | 8 | 0.62% | 3 | 7.89% |
Steffen Hurrle | 7 | 0.54% | 1 | 2.63% |
Patrick Ohly | 7 | 0.54% | 1 | 2.63% |
Hannes Frederic Sowa | 6 | 0.47% | 1 | 2.63% |
Lorenzo Colitti | 4 | 0.31% | 1 | 2.63% |
Joe Perches | 3 | 0.23% | 2 | 5.26% |
Atis Elsts | 3 | 0.23% | 1 | 2.63% |
Denis V. Lunev | 2 | 0.16% | 2 | 5.26% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.08% | 1 | 2.63% |
Oliver Hartkopp | 1 | 0.08% | 1 | 2.63% |
Ian Morris | 1 | 0.08% | 1 | 2.63% |
Total | 1287 | 100.00% | 38 | 100.00% |
EXPORT_SYMBOL(udp_sendmsg);
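The up->corkflag / MSG_MORE handling in udp_sendmsg() is what user space drives with the MSG_MORE flag or the UDP_CORK socket option: both make successive sends accumulate in one pending datagram instead of being pushed out individually. A minimal usage sketch (error handling omitted; port and address are arbitrary), not part of udp.c:

#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_CORK
#define UDP_CORK 1      /* kernel UAPI value, in case the libc header lacks it */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9999),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int on = 1, off = 0;

	connect(fd, (struct sockaddr *)&dst, sizeof(dst));

	/* MSG_MORE: the first send() only corks; the second pushes one datagram */
	send(fd, "hello ", 6, MSG_MORE);
	send(fd, "world", 5, 0);

	/* UDP_CORK: stays corked until cleared, then a single datagram goes out */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "part1", 5, 0);
	send(fd, "part2", 5, 0);
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));

	close(fd);
	return 0;
}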
int udp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct udp_sock *up = udp_sk(sk);
int ret;
if (flags & MSG_SENDPAGE_NOTLAST)
flags |= MSG_MORE;
if (!up->pending) {
struct msghdr msg = { .msg_flags = flags|MSG_MORE };
/* Call udp_sendmsg to specify destination address which
* sendpage interface can't pass.
* This will succeed only when the socket is connected.
*/
ret = udp_sendmsg(sk, &msg, 0);
if (ret < 0)
return ret;
}
lock_sock(sk);
if (unlikely(!up->pending)) {
release_sock(sk);
net_dbg_ratelimited("udp cork app bug 3\n");
return -EINVAL;
}
ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
page, offset, size, flags);
if (ret == -EOPNOTSUPP) {
release_sock(sk);
return sock_no_sendpage(sk->sk_socket, page, offset,
size, flags);
}
if (ret < 0) {
udp_flush_pending_frames(sk);
goto out;
}
up->len += size;
if (!(up->corkflag || (flags&MSG_MORE)))
ret = udp_push_pending_frames(sk);
if (!ret)
ret = size;
out:
release_sock(sk);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 239 | 95.22% | 2 | 50.00% |
Shawn Landden | 10 | 3.98% | 1 | 25.00% |
Joe Perches | 2 | 0.80% | 1 | 25.00% |
Total | 251 | 100.00% | 4 | 100.00% |
/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial)
{
struct udp_sock *up = udp_sk(sk);
int amt;
if (likely(partial)) {
up->forward_deficit += size;
size = up->forward_deficit;
if (size < (sk->sk_rcvbuf >> 2) &&
!skb_queue_empty(&sk->sk_receive_queue))
return;
} else {
size += up->forward_deficit;
}
up->forward_deficit = 0;
sk->sk_forward_alloc += size;
amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
sk->sk_forward_alloc -= amt;
if (amt)
__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
atomic_sub(size, &sk->sk_rmem_alloc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 78 | 55.71% | 2 | 66.67% |
Paolo Abeni | 62 | 44.29% | 1 | 33.33% |
Total | 140 | 100.00% | 3 | 100.00% |
/* Note: called with sk_receive_queue.lock held.
* Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
* This avoids a cache line miss while receive_queue lock is held.
* Look at __udp_enqueue_schedule_skb() to find where this copy is done.
*/
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
udp_rmem_release(sk, skb->dev_scratch, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paolo Abeni | 25 | 96.15% | 2 | 66.67% |
Eric Dumazet | 1 | 3.85% | 1 | 33.33% |
Total | 26 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(udp_skb_destructor);
/* Idea of busylocks is to let producers grab an extra spinlock
* to relieve pressure on the receive_queue spinlock shared by consumer.
* Under flood, this means that only one producer can be in line
* trying to acquire the receive_queue spinlock.
* These busylock can be allocated on a per cpu manner, instead of a
* per socket one (that would consume a cache line per socket)
*/
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;
static spinlock_t *busylock_acquire(void *ptr)
{
spinlock_t *busy;
busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
spin_lock(busy);
return busy;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
static void busylock_release(spinlock_t *busy)
{
if (busy)
spin_unlock(busy);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
struct sk_buff_head *list = &sk->sk_receive_queue;
int rmem, delta, amt, err = -ENOMEM;
spinlock_t *busy = NULL;
int size;
/* try to avoid the costly atomic add/sub pair when the receive
* queue is full; always allow at least a packet
*/
rmem = atomic_read(&sk->sk_rmem_alloc);
if (rmem > sk->sk_rcvbuf)
goto drop;
/* Under mem pressure, it might be helpful to help udp_recvmsg()
* having linear skbs :
* - Reduce memory overhead and thus increase receive queue capacity
* - Less cache line misses at copyout() time
* - Less work at consume_skb() (less alien page frag freeing)
*/
if (rmem > (sk->sk_rcvbuf >> 1)) {
skb_condense(skb);
busy = busylock_acquire(sk);
}
size = skb->truesize;
/* Copy skb->truesize into skb->dev_scratch to avoid a cache line miss
* in udp_skb_destructor()
*/
skb->dev_scratch = size;
/* we drop only if the receive buf is full and the receive
* queue contains some other skb
*/
rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
if (rmem > (size + sk->sk_rcvbuf))
goto uncharge_drop;
spin_lock(&list->lock);
if (size >= sk->sk_forward_alloc) {
amt = sk_mem_pages(size);
delta = amt << SK_MEM_QUANTUM_SHIFT;
if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
err = -ENOBUFS;
spin_unlock(&list->lock);
goto uncharge_drop;
}
sk->sk_forward_alloc += delta;
}
sk->sk_forward_alloc -= size;
/* no need to setup a destructor, we will explicitly release the
* forward allocated memory on dequeue
*/
sock_skb_set_dropcount(sk, skb);
__skb_queue_tail(list, skb);
spin_unlock(&list->lock);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
busylock_release(busy);
return 0;
uncharge_drop:
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
drop:
atomic_inc(&sk->sk_drops);
busylock_release(busy);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paolo Abeni | 235 | 80.76% | 3 | 50.00% |
Eric Dumazet | 56 | 19.24% | 3 | 50.00% |
Total | 291 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
void udp_destruct_sock(struct sock *sk)
{
/* reclaim completely the forward allocated memory */
unsigned int total = 0;
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
total += skb->truesize;
kfree_skb(skb);
}
udp_rmem_release(sk, total, 0);
inet_sock_destruct(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paolo Abeni | 65 | 100.00% | 2 | 100.00% |
Total | 65 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(udp_destruct_sock);
int udp_init_sock(struct sock *sk)
{
sk->sk_destruct = udp_destruct_sock;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paolo Abeni | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(udp_init_sock);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
bool slow = lock_sock_fast(sk);
sk_peek_offset_bwd(sk, len);
unlock_sock_fast(sk, slow);
}
consume_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Paolo Abeni | 61 | 100.00% | 1 | 100.00% |
Total | 61 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(skb_consume_udp);
/**
* first_packet_length - return length of first packet in receive queue
* @sk: socket
*
* Drops all bad checksum frames, until a valid one is found.
* Returns the length of found skb, or -1 if none is found.
*/
static int first_packet_length(struct sock *sk)
{
struct sk_buff_head *rcvq = &sk->sk_receive_queue;
struct sk_buff *skb;
int total = 0;
int res;
spin_lock_bh(&rcvq->lock);
while ((skb = skb_peek(rcvq)) != NULL &&
udp_lib_checksum_complete(skb)) {
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
atomic_inc(&sk->sk_drops);
__skb_unlink(skb, rcvq);
total += skb->truesize;
kfree_skb(skb);
}
res = skb ? skb->len : -1;
if (total)
udp_rmem_release(sk, total, 1);
spin_unlock_bh(&rcvq->lock);
return res;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 128 | 83.66% | 5 | 83.33% |
Paolo Abeni | 25 | 16.34% | 1 | 16.67% |
Total | 153 | 100.00% | 6 | 100.00% |
/*
* IOCTL requests applicable to the UDP protocol
*/
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
{
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ:
{
int amount = max_t(int, 0, first_packet_length(sk));
return put_user(amount, (int __user *)arg);
}
default:
return -ENOIOCTLCMD;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 78 | 86.67% | 1 | 25.00% |
Eric Dumazet | 12 | 13.33% | 3 | 75.00% |
Total | 90 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(udp_ioctl);
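From user space the two cases above are reached via ioctl(): SIOCINQ reports the payload length of the first queued datagram (via first_packet_length()) and SIOCOUTQ the bytes still charged to the send buffer. A small sketch (not part of udp.c), assuming <linux/sockios.h> for the constants:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int inq = 0, outq = 0;

	/* SIOCINQ: payload length of the first queued datagram */
	if (ioctl(fd, SIOCINQ, &inq) == 0)
		printf("next datagram payload: %d bytes\n", inq);

	/* SIOCOUTQ: bytes still accounted to the send buffer */
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("unsent bytes: %d\n", outq);
	return 0;
}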
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
*/
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len)
{
struct inet_sock *inet = inet_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, peeking, off;
int err;
int is_udplite = IS_UDPLITE(sk);
bool checksum_valid = false;
if (flags & MSG_ERRQUEUE)
return ip_recv_error(sk, msg, len, addr_len);
try_again:
peeking = off = sk_peek_offset(sk, flags);
skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
if (!skb)
return err;
ulen = skb->len;
copied = len;
if (copied > ulen - off)
copied = ulen - off;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || peeking ||
(is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
checksum_valid = !udp_lib_checksum_complete(skb);
if (!checksum_valid)
goto csum_copy_err;
}
if (checksum_valid || skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, off, msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, off, msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
if (!peeked) {
atomic_inc(&sk->sk_drops);
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
kfree_skb(skb);
return err;
}
if (!peeked)
UDP_INC_STATS(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = udp_hdr(skb)->source;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
if (inet->cmsg_flags)
ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
skb_consume_udp(sk, skb, peeking ? -err : err);
return err;
csum_copy_err:
if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
}
kfree_skb(skb);
/* starting over for a new packet, but check if we need to yield */
cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 341 | 66.60% | 3 | 11.11% |
Eric Dumazet | 76 | 14.84% | 10 | 37.04% |
samanthakumar | 36 | 7.03% | 1 | 3.70% |
Pavel Emelyanov | 15 | 2.93% | 2 | 7.41% |
Hannes Frederic Sowa | 11 | 2.15% | 2 | 7.41% |
Paolo Abeni | 9 | 1.76% | 3 | 11.11% |
Xufeng Zhang | 7 | 1.37% | 1 | 3.70% |
Tom Herbert | 7 | 1.37% | 1 | 3.70% |
Steffen Hurrle | 7 | 1.37% | 1 | 3.70% |
Willem de Bruijn | 1 | 0.20% | 1 | 3.70% |
Neil Horman | 1 | 0.20% | 1 | 3.70% |
Al Viro | 1 | 0.20% | 1 | 3.70% |
Total | 512 | 100.00% | 27 | 100.00% |
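The MSG_TRUNC branch at the end of udp_recvmsg() (err = ulen) is why recv() can report the real datagram length even when the buffer was too small; combined with MSG_PEEK, user space can size a buffer before the actual read. A minimal helper sketch, not part of udp.c:

#include <sys/types.h>
#include <sys/socket.h>

/* Peek without consuming; the return value is the full payload length of the
 * next datagram even though only one byte fits in the buffer.
 */
static ssize_t next_datagram_len(int udp_fd)
{
	char tiny;

	return recv(udp_fd, &tiny, sizeof(tiny), MSG_PEEK | MSG_TRUNC);
}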
int __udp_disconnect(struct sock *sk, int flags)
{
struct inet_sock *inet = inet_sk(sk);
/*
* 1003.1g - break association.
*/
sk->sk_state = TCP_CLOSE;
inet->inet_daddr = 0;
inet->inet_dport = 0;
sock_rps_reset_rxhash(sk);
sk->sk_bound_dev_if = 0;
if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
inet_reset_saddr(sk);
if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
sk->sk_prot->unhash(sk);
inet->inet_sport = 0;
}
sk_dst_reset(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 62 | 59.05% | 1 | 16.67% |
Eric Dumazet | 38 | 36.19% | 3 | 50.00% |
Tom Herbert | 5 | 4.76% | 2 | 33.33% |
Total | 105 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(__udp_disconnect);
int udp_disconnect(struct sock *sk, int flags)
{
lock_sock(sk);
__udp_disconnect(sk, flags);
release_sock(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = sk->sk_prot->h.udp_table;
struct udp_hslot *hslot, *hslot2;
hslot = udp_hashslot(udptable, sock_net(sk),
udp_sk(sk)->udp_port_hash);
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
spin_lock_bh(&hslot->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
if (sk_del_node_init_rcu(sk)) {
hslot->count--;
inet_sk(sk)->inet_num = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_lock(&hslot2->lock);
hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
}
spin_unlock_bh(&hslot->lock);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 144 | 83.24% | 9 | 81.82% |
David S. Miller | 15 | 8.67% | 1 | 9.09% |
Craig Gallek | 14 | 8.09% | 1 | 9.09% |
Total | 173 | 100.00% | 11 | 100.00% |
EXPORT_SYMBOL(udp_lib_unhash);
/*
* inet_rcv_saddr was changed, we must rehash secondary hash
*/
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
if (sk_hashed(sk)) {
struct udp_table *udptable = sk->sk_prot->h.udp_table;
struct udp_hslot *hslot, *hslot2, *nhslot2;
hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
nhslot2 = udp_hashslot2(udptable, newhash);
udp_sk(sk)->udp_portaddr_hash = newhash;
if (hslot2 != nhslot2 ||
rcu_access_pointer(sk->sk_reuseport_cb)) {
hslot = udp_hashslot(udptable, sock_net(sk),
udp_sk(sk)->udp_port_hash);
/* we must lock primary chain too */
spin_lock_bh(&hslot->lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
if (hslot2 != nhslot2) {
spin_lock(&hslot2->lock);
hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
hslot2->count--;
spin_unlock(&hslot2->lock);
spin_lock(&nhslot2->lock);
hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
&nhslot2->head);
nhslot2->count++;
spin_unlock(&nhslot2->lock);
}
spin_unlock_bh(&hslot->lock);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 191 | 86.82% | 2 | 66.67% |
Craig Gallek | 29 | 13.18% | 1 | 33.33% |
Total | 220 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(udp_lib_rehash);
static void udp_v4_rehash(struct sock *sk)
{
u16 new_hash = udp4_portaddr_hash(sock_net(sk),
inet_sk(sk)->inet_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (inet_sk(sk)->inet_daddr) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
sk_incoming_cpu_update(sk);
} else {
sk_mark_napi_id_once(sk, skb);
}
rc = __udp_enqueue_schedule_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
is_udplite);
UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
trace_udp_fail_queue_rcv_skb(rc, sk);
return -1;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 65 | 48.51% | 1 | 10.00% |
Eric Dumazet | 32 | 23.88% | 4 | 40.00% |
Tom Herbert | 19 | 14.18% | 1 | 10.00% |
Shawn Bohrer | 9 | 6.72% | 1 | 10.00% |
Satoru Moriya | 7 | 5.22% | 1 | 10.00% |
Paolo Abeni | 1 | 0.75% | 1 | 10.00% |
David S. Miller | 1 | 0.75% | 1 | 10.00% |
Total | 134 | 100.00% | 10 | 100.00% |
static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
if (!static_key_enabled(&udp_encap_needed))
static_key_slow_inc(&udp_encap_needed);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(udp_encap_enable);
/* returns:
* -1: error
* 0: success
* >0: "udp encap" protocol resubmission
*
* Note that in the success and error cases, the skb is assumed to
* have either been requeued or freed.
*/
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk);
/*
* Charge it to the socket, dropping if the queue is full.
*/
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
if (udp_lib_checksum_complete(skb))
goto csum_error;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
__UDP_INC_STATS(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
/*
* MIB statistics other than incrementing the error count are
* disabled for the following two types of errors: these depend
* on the application settings, not on the functioning of the
* protocol stack as such.
*
* RFC 3828 here recommends (sec 3.3): "There should also be a
* way ... to ... at least let the receiving application block
* delivery of packets with coverage values less than a value
* provided by the application."
*/
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
/* The next case involves violating the min. coverage requested
* by the receiver. This is subtle: if receiver wants x and x is
* greater than the buffersize/MTU then receiver will complain
* that it wants x while sender emits packets of smaller size y.
* Therefore the above ...()->partial_cov statement is essential.
*/
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter) &&
udp_lib_checksum_complete(skb))
goto csum_error;
if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
goto drop;
udp_csum_pull_header(skb);
ipv4_pktinfo_prepare(sk, skb);
return __udp_queue_rcv_skb(sk, skb);
csum_error:
__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 211 | 63.17% | 1 | 4.76% |
Eric Dumazet | 68 | 20.36% | 10 | 47.62% |
Tom Herbert | 11 | 3.29% | 1 | 4.76% |
Pavel Emelyanov | 11 | 3.29% | 1 | 4.76% |
Michal Kubeček | 10 | 2.99% | 1 | 4.76% |
samanthakumar | 9 | 2.69% | 1 | 4.76% |
Herbert Xu | 5 | 1.50% | 1 | 4.76% |
Joe Perches | 4 | 1.20% | 2 | 9.52% |
Daniel Borkmann | 2 | 0.60% | 1 | 4.76% |
Shawn Bohrer | 2 | 0.60% | 1 | 4.76% |
Paolo Abeni | 1 | 0.30% | 1 | 4.76% |
Total | 334 | 100.00% | 21 | 100.00% |
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
struct dst_entry *old;
dst_hold(dst);
old = xchg(&sk->sk_rx_dst, dst);
dst_release(old);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shawn Bohrer | 24 | 55.81% | 1 | 33.33% |
Eric Dumazet | 19 | 44.19% | 2 | 66.67% |
Total | 43 | 100.00% | 3 | 100.00% |
/*
* Multicasts and broadcasts go to each listener.
*
* Note: called only from the BH handler context.
*/
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
struct udphdr *uh,
__be32 saddr, __be32 daddr,
struct udp_table *udptable,
int proto)
{
struct sock *sk, *first = NULL;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
unsigned int offset = offsetof(typeof(*sk), sk_node);
int dif = skb->dev->ifindex;
struct hlist_node *node;
struct sk_buff *nskb;
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
udptable->mask;
hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
hslot = &udptable->hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
uh->source, saddr, dif, hnum))
continue;
if (!first) {
first = sk;
continue;
}
nskb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!nskb)) {
atomic_inc(&sk->sk_drops);
__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
__UDP_INC_STATS(net, UDP_MIB_INERRORS,
IS_UDPLITE(sk));
continue;
}
if (udp_queue_rcv_skb(sk, nskb) > 0)
consume_skb(nskb);
}
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
hash2 = hash2_any;
goto start_lookup;
}
if (first) {
if (udp_queue_rcv_skb(first, skb) > 0)
consume_skb(skb);
} else {
kfree_skb(skb);
__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 148 | 39.89% | 6 | 42.86% |
David Held | 125 | 33.69% | 2 | 14.29% |
David S. Miller | 69 | 18.60% | 1 | 7.14% |
Rick Jones | 14 | 3.77% | 1 | 7.14% |
Pavel Emelyanov | 9 | 2.43% | 3 | 21.43% |
Pablo Neira Ayuso | 6 | 1.62% | 1 | 7.14% |
Total | 371 | 100.00% | 14 | 100.00% |
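A socket only matches __udp_is_mcast_sock() above if the application has joined the group and bound the UDP port in the usual way. A minimal sketch (IPv4, error handling omitted; the group address and port are arbitrary examples):
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Join 239.1.1.1:5000 so that multicast delivery clones each matching
 * datagram to this socket alongside any other listeners.
 */
int join_group(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(5000),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	struct ip_mreq mreq;

	bind(s, (struct sockaddr *)&addr, sizeof(addr));
	mreq.imr_multiaddr.s_addr = inet_addr("239.1.1.1");
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	setsockopt(s, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
	return s;
}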
/* Initialize the UDP checksum. If this exits with zero (success),
 * CHECKSUM_UNNECESSARY means that no more checks are required.
 * Otherwise, csum completion requires checksumming the packet body,
 * including the UDP header, and folding the result into skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
int proto)
{
int err;
UDP_SKB_CB(skb)->partial_cov = 0;
UDP_SKB_CB(skb)->cscov = skb->len;
if (proto == IPPROTO_UDPLITE) {
err = udplite_checksum_init(skb, uh);
if (err)
return err;
}
/* Note, we are only interested in != 0 or == 0, thus the
* force to int.
*/
return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
inet_compute_pseudo);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 76 | 88.37% | 1 | 33.33% |
Tom Herbert | 5 | 5.81% | 1 | 33.33% |
Hannes Frederic Sowa | 5 | 5.81% | 1 | 33.33% |
Total | 86 | 100.00% | 3 | 100.00% |
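The helpers called above ultimately rely on the classic 16-bit one's-complement Internet checksum; inet_compute_pseudo() adds the IPv4 pseudo-header on top of it. A minimal user-space sketch of the base algorithm (RFC 1071 style), for illustration only:
#include <stddef.h>
#include <stdint.h>

/* One's-complement sum over a byte buffer, carries folded back in,
 * result complemented -- the same arithmetic UDP and UDP-Lite use.
 */
static uint16_t inet_checksum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte, pad with zero */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)			/* fold carries into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}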
/*
* All we need to do is get the socket, and then do a checksum.
*/
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct sock *sk;
struct udphdr *uh;
unsigned short ulen;
struct rtable *rt = skb_rtable(skb);
__be32 saddr, daddr;
struct net *net = dev_net(skb->dev);
/*
* Validate the packet.
*/
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto drop; /* No space for header. */
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
saddr = ip_hdr(skb)->saddr;
daddr = ip_hdr(skb)->daddr;
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
goto short_packet;
uh = udp_hdr(skb);
}
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
sk = skb_steal_sock(skb);
if (sk) {
struct dst_entry *dst = skb_dst(skb);
int ret;
if (unlikely(sk->sk_rx_dst != dst))
udp_sk_rx_dst_set(sk, dst);
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
/* a return value > 0 means we must resubmit the input;
 * the caller expects either -protocol or 0
 */
if (ret > 0)
return -ret;
return 0;
}
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
return __udp4_lib_mcast_deliver(net, skb, uh,
saddr, daddr, udptable, proto);
sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
int ret;
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
inet_compute_pseudo);
ret = udp_queue_rcv_skb(sk, skb);
/* a return value > 0 means we must resubmit the input;
 * the caller expects either -protocol or 0
 */
if (ret > 0)
return -ret;
return 0;
}
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
/* No socket. Drop packet silently, if checksum is wrong */
if (udp_lib_checksum_complete(skb))
goto csum_error;
__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
/*
 * We got a UDP packet to a port nobody is listening on.
 * Ignore it.
 */
kfree_skb(skb);
return 0;
short_packet:
net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source),
ulen, skb->len,
&daddr, ntohs(uh->dest));
goto drop;
csum_error:
/*
* RFC1122: OK. Discards the bad packet silently (as far as
* the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
ulen);
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 340 | 64.15% | 1 | 4.17% |
Shawn Bohrer | 47 | 8.87% | 1 | 4.17% |
Eric Dumazet | 43 | 8.11% | 6 | 25.00% |
Tom Herbert | 30 | 5.66% | 2 | 8.33% |
Pavel Emelyanov | 18 | 3.40% | 2 | 8.33% |
Björn Mork | 18 | 3.40% | 1 | 4.17% |
Jesper Dangaard Brouer | 8 | 1.51% | 2 | 8.33% |
Harvey Harrison | 6 | 1.13% | 1 | 4.17% |
Eliezer Tamir | 4 | 0.75% | 1 | 4.17% |
Joe Perches | 4 | 0.75% | 2 | 8.33% |
Gerrit Renker | 3 | 0.57% | 1 | 4.17% |
Hideaki Yoshifuji / 吉藤英明 | 3 | 0.57% | 1 | 4.17% |
KOVACS Krisztian | 2 | 0.38% | 1 | 4.17% |
Alexey Kuznetsov | 2 | 0.38% | 1 | 4.17% |
Rick Jones | 2 | 0.38% | 1 | 4.17% |
Total | 530 | 100.00% | 24 | 100.00% |
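When no socket matches, the code above replies with an ICMP port-unreachable. On a connect()ed UDP socket that error is normally surfaced to the sender as ECONNREFUSED on a later call. A rough illustration (behaviour depends on the peer actually generating the ICMP error, and the recv() may block if it never arrives; port 9 is simply assumed to be unused):
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* Send to a port nobody listens on; the resulting ICMP port-unreachable
 * is typically reported as ECONNREFUSED on a subsequent call.
 */
void probe_closed_port(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	char buf[16];

	connect(s, (struct sockaddr *)&dst, sizeof(dst));
	send(s, "ping", 4, 0);
	if (recv(s, buf, sizeof(buf), 0) < 0 && errno == ECONNREFUSED)
		printf("port unreachable reported as ECONNREFUSED\n");
}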
/* We can only early-demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
struct sock *sk, *result;
unsigned short hnum = ntohs(loc_port);
unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
struct udp_hslot *hslot = &udp_table.hash[slot];
/* Do not bother scanning a too big list */
if (hslot->count > 10)
return NULL;
result = NULL;
sk_for_each_rcu(sk, &hslot->head) {
if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
rmt_port, rmt_addr, dif, hnum)) {
if (result)
return NULL;
result = sk;
}
}
return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shawn Bohrer | 118 | 88.06% | 1 | 33.33% |
Eric Dumazet | 16 | 11.94% | 2 | 66.67% |
Total | 134 | 100.00% | 3 | 100.00% |
/* For unicast we should only early-demux connected sockets, or we can
 * break forwarding setups. The chains here can be long, so only check
 * whether the first socket is an exact match and, if not, move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
__be16 loc_port, __be32 loc_addr,
__be16 rmt_port, __be32 rmt_addr,
int dif)
{
unsigned short hnum = ntohs(loc_port);
unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
unsigned int slot2 = hash2 & udp_table.mask;
struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
struct sock *sk;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
if (INET_MATCH(sk, net, acookie, rmt_addr,
loc_addr, ports, dif))
return sk;
/* Only check first socket in chain */
break;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shawn Bohrer | 121 | 90.98% | 1 | 33.33% |
Eric Dumazet | 8 | 6.02% | 1 | 33.33% |
Joe Perches | 4 | 3.01% | 1 | 33.33% |
Total | 133 | 100.00% | 3 | 100.00% |
void udp_v4_early_demux(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct udphdr *uh;
struct sock *sk = NULL;
struct dst_entry *dst;
int dif = skb->dev->ifindex;
int ours;
/* validate the packet */
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
return;
iph = ip_hdr(skb);
uh = udp_hdr(skb);
if (skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) {
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
if (!in_dev)
return;
/* we are supposed to accept bcast packets */
if (skb->pkt_type == PACKET_MULTICAST) {
ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
iph->protocol);
if (!ours)
return;
}
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
} else if (skb->pkt_type == PACKET_HOST) {
sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
uh->source, iph->saddr, dif);
}
if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
return;
skb->sk = sk;
skb->destructor = sock_efree;
dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
if (dst) {
/* DST_NOCACHE can not be used without taking a reference */
if (dst->flags & DST_NOCACHE) {
if (likely(atomic_inc_not_zero(&dst->__refcnt)))
skb_dst_set(skb, dst);
} else {
skb_dst_set_noref(skb, dst);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Shawn Bohrer | 233 | 72.36% | 2 | 28.57% |
Eric Dumazet | 77 | 23.91% | 3 | 42.86% |
Paolo Abeni | 11 | 3.42% | 1 | 14.29% |
Alexander Duyck | 1 | 0.31% | 1 | 14.29% |
Total | 322 | 100.00% | 7 | 100.00% |
int udp_rcv(struct sk_buff *skb)
{
return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 16 | 76.19% | 1 | 33.33% |
Hideaki Yoshifuji / 吉藤英明 | 3 | 14.29% | 1 | 33.33% |
Eric Dumazet | 2 | 9.52% | 1 | 33.33% |
Total | 21 | 100.00% | 3 | 100.00% |
void udp_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
bool slow = lock_sock_fast(sk);
udp_flush_pending_frames(sk);
unlock_sock_fast(sk, slow);
if (static_key_false(&udp_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Parkin | 54 | 64.29% | 1 | 20.00% |
Alexey Kuznetsov | 19 | 22.62% | 1 | 20.00% |
Eric Dumazet | 8 | 9.52% | 1 | 20.00% |
David S. Miller | 2 | 2.38% | 1 | 20.00% |
Brian Haley | 1 | 1.19% | 1 | 20.00% |
Total | 84 | 100.00% | 5 | 100.00% |
/*
* Socket option code for UDP
*/
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen,
int (*push_pending_frames)(struct sock *))
{
struct udp_sock *up = udp_sk(sk);
int val, valbool;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
valbool = val ? 1 : 0;
switch (optname) {
case UDP_CORK:
if (val != 0) {
up->corkflag = 1;
} else {
up->corkflag = 0;
lock_sock(sk);
push_pending_frames(sk);
release_sock(sk);
}
break;
case UDP_ENCAP:
switch (val) {
case 0:
case UDP_ENCAP_ESPINUDP:
case UDP_ENCAP_ESPINUDP_NON_IKE:
up->encap_rcv = xfrm4_udp_encap_rcv;
/* FALLTHROUGH */
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
udp_encap_enable();
break;
default:
err = -ENOPROTOOPT;
break;
}
break;
case UDP_NO_CHECK6_TX:
up->no_check6_tx = valbool;
break;
case UDP_NO_CHECK6_RX:
up->no_check6_rx = valbool;
break;
/*
* UDP-Lite's partial checksum coverage (RFC 3828).
*/
/* The sender sets actual checksum coverage length via this option.
* The case coverage > packet length is handled by send module. */
case UDPLITE_SEND_CSCOV:
if (!is_udplite) /* Disable the option on UDP sockets */
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
up->pcslen = val;
up->pcflag |= UDPLITE_SEND_CC;
break;
/* The receiver specifies a minimum checksum coverage value. To make
* sense, this should be set to at least 8 (as done below). If zero is
* used, this again means full checksum coverage. */
case UDPLITE_RECV_CSCOV:
if (!is_udplite) /* Disable the option on UDP sockets */
return -ENOPROTOOPT;
if (val != 0 && val < 8) /* Avoid silly minimal values. */
val = 8;
else if (val > USHRT_MAX)
val = USHRT_MAX;
up->pcrlen = val;
up->pcflag |= UDPLITE_RECV_CC;
break;
default:
err = -ENOPROTOOPT;
break;
}
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Kuznetsov | 106 | 31.74% | 1 | 4.55% |
Gerrit Renker | 95 | 28.44% | 3 | 13.64% |
David S. Miller | 31 | 9.28% | 2 | 9.09% |
Tom Herbert | 30 | 8.98% | 1 | 4.55% |
Linus Torvalds (pre-git) | 16 | 4.79% | 5 | 22.73% |
Herbert Xu | 15 | 4.49% | 1 | 4.55% |
Derek Atkins | 10 | 2.99% | 1 | 4.55% |
James Chapman | 10 | 2.99% | 2 | 9.09% |
Wang Chen | 10 | 2.99% | 1 | 4.55% |
Alexey Dobriyan | 4 | 1.20% | 1 | 4.55% |
Eric Dumazet | 3 | 0.90% | 1 | 4.55% |
Al Viro | 2 | 0.60% | 1 | 4.55% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.30% | 1 | 4.55% |
Arnaldo Carvalho de Melo | 1 | 0.30% | 1 | 4.55% |
Total | 334 | 100.00% | 22 | 100.00% |
EXPORT_SYMBOL(udp_lib_setsockopt);
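The UDP_CORK branch above is why an application can build one datagram out of several writes: while the cork flag is set the frames stay pending, and clearing it pushes them via push_pending_frames() under the socket lock. A minimal user-space sketch (UDP_CORK is assumed to come from <netinet/udp.h>; on some libcs it lives in <linux/udp.h>):
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

/* Cork the socket, emit two payload pieces, then uncork so the kernel
 * transmits them as a single UDP datagram.
 */
void send_corked(int s)
{
	int on = 1, off = 0;

	setsockopt(s, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(s, "header", 6, 0);
	send(s, "payload", 7, 0);
	setsockopt(s, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));	/* pushes the pending frames */
}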
int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_push_pending_frames);
return ip_setsockopt(sk, level, optname, optval, optlen);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 65 | 100.00% | 2 | 100.00% |
Total | 65 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_push_pending_frames);
return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 62 | 95.38% | 2 | 50.00% |
Dmitry Mishin | 2 | 3.08% | 1 | 25.00% |
Alexey Kuznetsov | 1 | 1.54% | 1 | 25.00% |
Total | 65 | 100.00% | 4 | 100.00% |
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct udp_sock *up = udp_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
switch (optname) {
case UDP_CORK:
val = up->corkflag;
break;
case UDP_ENCAP:
val = up->encap_type;
break;
case UDP_NO_CHECK6_TX:
val = up->no_check6_tx;
break;
case UDP_NO_CHECK6_RX:
val = up->no_check6_rx;
break;
/* The following two cannot be changed on UDP sockets; the return is
 * always 0 (which corresponds to the full checksum coverage of UDP). */
case UDPLITE_SEND_CSCOV:
val = up->pcslen;
break;
case UDPLITE_RECV_CSCOV:
val = up->pcrlen;
break;
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Kuznetsov | 70 | 39.11% | 1 | 10.00% |
Dmitry Mishin | 39 | 21.79% | 1 | 10.00% |
Gerrit Renker | 22 | 12.29% | 2 | 20.00% |
Tom Herbert | 20 | 11.17% | 1 | 10.00% |
Linus Torvalds (pre-git) | 11 | 6.15% | 3 | 30.00% |
Derek Atkins | 10 | 5.59% | 1 | 10.00% |
Linus Torvalds | 7 | 3.91% | 1 | 10.00% |
Total | 179 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ip_getsockopt(sk, level, optname, optval, optlen);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
#endif
/**
 * udp_poll - wait for a UDP event.
 * @file: file struct
 * @sock: socket
 * @wait: poll table
 *
 * This is the same as datagram poll, except for the special case of
 * blocking sockets. If an application is using a blocking fd and a
 * packet with a checksum error is in the queue, select/poll could
 * indicate data available, but the subsequent read would then block.
 * Add special-case code to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
unsigned int mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
sock_rps_record_flow(sk);
/* Check for false positives due to checksum errors */
if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
!(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
mask &= ~(POLLIN | POLLRDNORM);
return mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 67 | 71.28% | 1 | 14.29% |
Hideaki Yoshifuji / 吉藤英明 | 14 | 14.89% | 1 | 14.29% |
David Majnemer | 5 | 5.32% | 1 | 14.29% |
Eric Dumazet | 4 | 4.26% | 2 | 28.57% |
Pavel Emelyanov | 3 | 3.19% | 1 | 14.29% |
Herbert Xu | 1 | 1.06% | 1 | 14.29% |
Total | 94 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(udp_poll);
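Given the false-positive case documented above, a defensive application pairs poll() with a non-blocking read so a queued bad-checksum datagram cannot make it block. A minimal sketch (the one-second timeout is arbitrary):
#include <errno.h>
#include <poll.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Wait for readability, then read with MSG_DONTWAIT; if the only queued
 * datagram fails checksum verification and is discarded on receive, the
 * call returns EAGAIN instead of blocking.
 */
ssize_t read_one(int s, void *buf, size_t len)
{
	struct pollfd pfd = { .fd = s, .events = POLLIN };
	ssize_t n;

	if (poll(&pfd, 1, 1000) <= 0)
		return -1;			/* timeout or error */
	n = recv(s, buf, len, MSG_DONTWAIT);
	if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		return 0;			/* spurious readability, nothing usable */
	return n;
}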
int udp_abort(struct sock *sk, int err)
{
lock_sock(sk);
sk->sk_err = err;
sk->sk_error_report(sk);
__udp_disconnect(sk, 0);
release_sock(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David Ahern | 45 | 97.83% | 1 | 50.00% |
Eric Dumazet | 1 | 2.17% | 1 | 50.00% |
Total | 46 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(udp_abort);
struct proto udp_prot = {
.name = "UDP",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip4_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.init = udp_init_sock,
.destroy = udp_destroy_sock,
.setsockopt = udp_setsockopt,
.getsockopt = udp_getsockopt,
.sendmsg = udp_sendmsg,
.recvmsg = udp_recvmsg,
.sendpage = udp_sendpage,
.release_cb = ip4_datagram_release_cb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp_sock),
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
#endif
.diag_destroy = udp_abort,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
static struct sock *udp_get_first(struct seq_file *seq, int start)
{
struct sock *sk;
struct udp_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
for (state->bucket = start; state->bucket <= state->udp_table->mask;
++state->bucket) {
struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
if (hlist_empty(&hslot->head))
continue;
spin_lock_bh(&hslot->lock);
sk_for_each(sk, &hslot->head) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == state->family)
goto found;
}
spin_unlock_bh(&hslot->lock);
}
sk = NULL;
found:
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 55 | 35.95% | 3 | 25.00% |
Hideaki Yoshifuji / 吉藤英明 | 48 | 31.37% | 4 | 33.33% |
Arnaldo Carvalho de Melo | 33 | 21.57% | 3 | 25.00% |
Daniel Lezcano | 13 | 8.50% | 1 | 8.33% |
Denis V. Lunev | 4 | 2.61% | 1 | 8.33% |
Total | 153 | 100.00% | 12 | 100.00% |
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
struct udp_iter_state *state = seq->private;
struct net *net = seq_file_net(seq);
do {
sk = sk_next(sk);
} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));
if (!sk) {
if (state->bucket <= state->udp_table->mask)
spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
return udp_get_first(seq, state->bucket + 1);
}
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 64 | 51.61% | 3 | 21.43% |
Eric Dumazet | 28 | 22.58% | 4 | 28.57% |
Daniel Lezcano | 9 | 7.26% | 1 | 7.14% |
Arnaldo Carvalho de Melo | 8 | 6.45% | 2 | 14.29% |
Vitaly Mayatskikh | 6 | 4.84% | 1 | 7.14% |
Denis V. Lunev | 4 | 3.23% | 1 | 7.14% |
Pavel Emelyanov | 3 | 2.42% | 1 | 7.14% |
Gerrit Renker | 2 | 1.61% | 1 | 7.14% |
Total | 124 | 100.00% | 14 | 100.00% |
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
struct sock *sk = udp_get_first(seq, 0);
if (sk)
while (pos && (sk = udp_get_next(seq, sk)) != NULL)
--pos;
return pos ? NULL : sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 50 | 84.75% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 7 | 11.86% | 2 | 50.00% |
Eric Dumazet | 2 | 3.39% | 1 | 25.00% |
Total | 59 | 100.00% | 4 | 100.00% |
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct udp_iter_state *state = seq->private;
state->bucket = MAX_UDP_PORTS;
return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnaldo Carvalho de Melo | 26 | 55.32% | 1 | 20.00% |
Vitaly Mayatskikh | 14 | 29.79% | 1 | 20.00% |
Hideaki Yoshifuji / 吉藤英明 | 6 | 12.77% | 2 | 40.00% |
Eric Dumazet | 1 | 2.13% | 1 | 20.00% |
Total | 47 | 100.00% | 5 | 100.00% |
static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock *sk;
if (v == SEQ_START_TOKEN)
sk = udp_get_idx(seq, 0);
else
sk = udp_get_next(seq, v);
++*pos;
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Arnaldo Carvalho de Melo | 51 | 89.47% | 1 | 33.33% |
Hideaki Yoshifuji / 吉藤英明 | 6 | 10.53% | 2 | 66.67% |
Total | 57 | 100.00% | 3 | 100.00% |
static void udp_seq_stop(struct seq_file *seq, void *v)
{
struct udp_iter_state *state = seq->private;
if (state->bucket <= state->udp_table->mask)
spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 34 | 64.15% | 2 | 66.67% |
Arnaldo Carvalho de Melo | 19 | 35.85% | 1 | 33.33% |
Total | 53 | 100.00% | 3 | 100.00% |
int udp_seq_open(struct inode *inode, struct file *file)
{
struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
struct udp_iter_state *s;
int err;
err = seq_open_net(inode, file, &afinfo->seq_ops,
sizeof(struct udp_iter_state));
if (err < 0)
return err;
s = ((struct seq_file *)file->private_data)->private;
s->family = afinfo->family;
s->udp_table = afinfo->udp_table;
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denis V. Lunev | 36 | 37.50% | 2 | 28.57% |
Hideaki Yoshifuji / 吉藤英明 | 26 | 27.08% | 1 | 14.29% |
Daniel Lezcano | 16 | 16.67% | 1 | 14.29% |
Arnaldo Carvalho de Melo | 15 | 15.62% | 1 | 14.29% |
Eric Dumazet | 2 | 2.08% | 1 | 14.29% |
Al Viro | 1 | 1.04% | 1 | 14.29% |
Total | 96 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(udp_seq_open);
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
struct proc_dir_entry *p;
int rc = 0;
afinfo->seq_ops.start = udp_seq_start;
afinfo->seq_ops.next = udp_seq_next;
afinfo->seq_ops.stop = udp_seq_stop;
p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 37 | 44.58% | 2 | 33.33% |
Denis V. Lunev | 30 | 36.14% | 2 | 33.33% |
Arnaldo Carvalho de Melo | 11 | 13.25% | 1 | 16.67% |
Daniel Lezcano | 5 | 6.02% | 1 | 16.67% |
Total | 83 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(udp_proc_register);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
remove_proc_entry(afinfo->name, net->proc_net);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 9 | 34.62% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 7 | 26.92% | 1 | 25.00% |
Gao Feng | 5 | 19.23% | 1 | 25.00% |
Daniel Lezcano | 5 | 19.23% | 1 | 25.00% |
Total | 26 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(udp_proc_unregister);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
int bucket)
{
struct inet_sock *inet = inet_sk(sp);
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 110 | 79.14% | 1 | 12.50% |
Eric Dumazet | 14 | 10.07% | 4 | 50.00% |
Eric W. Biedermann | 8 | 5.76% | 1 | 12.50% |
Pavel Emelyanov | 6 | 4.32% | 1 | 12.50% |
Tetsuo Handa | 1 | 0.72% | 1 | 12.50% |
Total | 139 | 100.00% | 8 | 100.00% |
int udp4_seq_show(struct seq_file *seq, void *v)
{
seq_setwidth(seq, 127);
if (v == SEQ_START_TOKEN)
seq_puts(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode ref pointer drops");
else {
struct udp_iter_state *state = seq->private;
udp4_format_sock(v, seq, state->bucket);
}
seq_pad(seq, '\n');
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 56 | 81.16% | 1 | 25.00% |
Tetsuo Handa | 11 | 15.94% | 1 | 25.00% |
Eric Dumazet | 1 | 1.45% | 1 | 25.00% |
Pavel Emelyanov | 1 | 1.45% | 1 | 25.00% |
Total | 69 | 100.00% | 4 | 100.00% |
static const struct file_operations udp_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
.name = "udp",
.family = AF_INET,
.udp_table = &udp_table,
.seq_fops = &udp_afinfo_seq_fops,
.seq_ops = {
.show = udp4_seq_show,
},
};
static int __net_init udp4_proc_init_net(struct net *net)
{
return udp_proc_register(net, &udp4_seq_afinfo);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 12 | 57.14% | 1 | 33.33% |
David S. Miller | 8 | 38.10% | 1 | 33.33% |
Alexey Dobriyan | 1 | 4.76% | 1 | 33.33% |
Total | 21 | 100.00% | 3 | 100.00% |
static void __net_exit udp4_proc_exit_net(struct net *net)
{
udp_proc_unregister(net, &udp4_seq_afinfo);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 15 | 75.00% | 1 | 25.00% |
David S. Miller | 2 | 10.00% | 1 | 25.00% |
Daniel Lezcano | 2 | 10.00% | 1 | 25.00% |
Alexey Dobriyan | 1 | 5.00% | 1 | 25.00% |
Total | 20 | 100.00% | 4 | 100.00% |
static struct pernet_operations udp4_net_ops = {
.init = udp4_proc_init_net,
.exit = udp4_proc_exit_net,
};
int __init udp4_proc_init(void)
{
return register_pernet_subsys(&udp4_net_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 13 | 86.67% | 1 | 50.00% |
David S. Miller | 2 | 13.33% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
void udp4_proc_exit(void)
{
unregister_pernet_subsys(&udp4_net_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 11 | 84.62% | 1 | 50.00% |
Pavel Emelyanov | 2 | 15.38% | 1 | 50.00% |
Total | 13 | 100.00% | 2 | 100.00% |
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
ssize_t ret;
if (!str)
return 0;
ret = kstrtoul(str, 0, &uhash_entries);
if (ret)
return 0;
if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
uhash_entries = UDP_HTABLE_SIZE_MIN;
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 41 | 73.21% | 1 | 50.00% |
Eldad Zack | 15 | 26.79% | 1 | 50.00% |
Total | 56 | 100.00% | 2 | 100.00% |
__setup("uhash_entries=", set_uhash_entries);
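For example, booting with uhash_entries=8192 on the kernel command line requests 8192 primary hash slots; values below UDP_HTABLE_SIZE_MIN are raised to that floor by the parser above, and alloc_large_system_hash() (used below) then clamps and rounds the final size within its own limits.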
void __init udp_table_init(struct udp_table *table, const char *name)
{
unsigned int i;
table->hash = alloc_large_system_hash(name,
2 * sizeof(struct udp_hslot),
uhash_entries,
21, /* one slot per 2 MB */
0,
&table->log,
&table->mask,
UDP_HTABLE_SIZE_MIN,
64 * 1024);
table->hash2 = table->hash + (table->mask + 1);
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_HEAD(&table->hash[i].head);
table->hash[i].count = 0;
spin_lock_init(&table->hash[i].lock);
}
for (i = 0; i <= table->mask; i++) {
INIT_HLIST_HEAD(&table->hash2[i].head);
table->hash2[i].count = 0;
spin_lock_init(&table->hash2[i].lock);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 182 | 98.91% | 5 | 83.33% |
Tim Bird | 2 | 1.09% | 1 | 16.67% |
Total | 184 | 100.00% | 6 | 100.00% |
u32 udp_flow_hashrnd(void)
{
static u32 hashrnd __read_mostly;
net_get_random_once(&hashrnd, sizeof(hashrnd));
return hashrnd;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Tom Herbert | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(udp_flow_hashrnd);
void __init udp_init(void)
{
unsigned long limit;
unsigned int i;
udp_table_init(&udp_table, "UDP");
limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
sysctl_udp_mem[0] = limit / 4 * 3;
sysctl_udp_mem[1] = limit;
sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;
sysctl_udp_rmem_min = SK_MEM_QUANTUM;
sysctl_udp_wmem_min = SK_MEM_QUANTUM;
/* 16 spinlocks per cpu */
udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
GFP_KERNEL);
if (!udp_busylocks)
panic("UDP: failed to alloc udp_busylocks\n");
for (i = 0; i < (1U << udp_busylocks_log); i++)
spin_lock_init(udp_busylocks + i);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 74 | 54.41% | 4 | 80.00% |
Hideo Aoki | 62 | 45.59% | 1 | 20.00% |
Total | 136 | 100.00% | 5 | 100.00% |
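As a worked example of the sizing above (assuming 4 KiB pages and roughly 4 GiB of freeable buffer pages): nr_free_buffer_pages()/8 gives limit = 131072 pages, so sysctl_udp_mem ends up as { 98304, 131072, 196608 } pages, i.e. about 384 MiB, 512 MiB and 768 MiB for the minimum, pressure and maximum thresholds respectively.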
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 4075 | 32.75% | 22 | 7.31% |
Eric Dumazet | 3257 | 26.17% | 80 | 26.58% |
Shawn Bohrer | 591 | 4.75% | 4 | 1.33% |
Paolo Abeni | 545 | 4.38% | 7 | 2.33% |
Tom Herbert | 507 | 4.07% | 13 | 4.32% |
Craig Gallek | 355 | 2.85% | 4 | 1.33% |
Herbert Xu | 315 | 2.53% | 5 | 1.66% |
Hideaki Yoshifuji / 吉藤英明 | 292 | 2.35% | 11 | 3.65% |
Arnaldo Carvalho de Melo | 205 | 1.65% | 7 | 2.33% |
Alexey Kuznetsov | 201 | 1.62% | 2 | 0.66% |
Pavel Emelyanov | 197 | 1.58% | 17 | 5.65% |
Gerrit Renker | 163 | 1.31% | 4 | 1.33% |
Linus Torvalds (pre-git) | 136 | 1.09% | 17 | 5.65% |
David Held | 125 | 1.00% | 2 | 0.66% |
KOVACS Krisztian | 114 | 0.92% | 2 | 0.66% |
Stephen Hemminger | 93 | 0.75% | 3 | 1.00% |
Denis V. Lunev | 93 | 0.75% | 8 | 2.66% |
Hideo Aoki | 93 | 0.75% | 1 | 0.33% |
Hannes Frederic Sowa | 90 | 0.72% | 7 | 2.33% |
Robert Shearman | 78 | 0.63% | 1 | 0.33% |
David Ahern | 74 | 0.59% | 2 | 0.66% |
Tom Parkin | 54 | 0.43% | 1 | 0.33% |
Edward Cree | 54 | 0.43% | 2 | 0.66% |
Eric Garver | 53 | 0.43% | 1 | 0.33% |
Joe Perches | 53 | 0.43% | 5 | 1.66% |
Daniel Lezcano | 50 | 0.40% | 2 | 0.66% |
samanthakumar | 45 | 0.36% | 2 | 0.66% |
Dmitry Mishin | 41 | 0.33% | 1 | 0.33% |
Arjan van de Ven | 40 | 0.32% | 1 | 0.33% |
Xuemin Su | 26 | 0.21% | 1 | 0.33% |
Soheil Hassas Yeganeh | 26 | 0.21% | 1 | 0.33% |
Américo Wang | 23 | 0.18% | 3 | 1.00% |
Vitaly Mayatskikh | 20 | 0.16% | 1 | 0.33% |
Derek Atkins | 20 | 0.16% | 1 | 0.33% |
Björn Mork | 18 | 0.14% | 1 | 0.33% |
Francesco Fusco | 17 | 0.14% | 1 | 0.33% |
Rick Jones | 16 | 0.13% | 1 | 0.33% |
Erich E. Hoover | 16 | 0.13% | 1 | 0.33% |
Eldad Zack | 15 | 0.12% | 1 | 0.33% |
Julian Anastasov | 14 | 0.11% | 1 | 0.33% |
Linus Torvalds | 14 | 0.11% | 3 | 1.00% |
Daniel Borkmann | 14 | 0.11% | 2 | 0.66% |
Steffen Hurrle | 14 | 0.11% | 1 | 0.33% |
Eric W. Biedermann | 13 | 0.10% | 3 | 1.00% |
Anton Arapov | 13 | 0.10% | 1 | 0.33% |
Tetsuo Handa | 12 | 0.10% | 1 | 0.33% |
Michal Kubeček | 10 | 0.08% | 1 | 0.33% |
Wang Chen | 10 | 0.08% | 1 | 0.33% |
James Chapman | 10 | 0.08% | 2 | 0.66% |
Shawn Landden | 10 | 0.08% | 1 | 0.33% |
Satoru Moriya | 10 | 0.08% | 1 | 0.33% |
Jesper Dangaard Brouer | 8 | 0.06% | 2 | 0.66% |
Eliezer Tamir | 7 | 0.06% | 2 | 0.66% |
Xufeng Zhang | 7 | 0.06% | 1 | 0.33% |
Patrick Ohly | 7 | 0.06% | 1 | 0.33% |
Alexey Dobriyan | 6 | 0.05% | 2 | 0.66% |
Pablo Neira Ayuso | 6 | 0.05% | 1 | 0.33% |
Arnd Bergmann | 6 | 0.05% | 1 | 0.33% |
Harvey Harrison | 6 | 0.05% | 1 | 0.33% |
Steffen Klassert | 5 | 0.04% | 1 | 0.33% |
David Majnemer | 5 | 0.04% | 1 | 0.33% |
Gao Feng | 5 | 0.04% | 1 | 0.33% |
Al Viro | 5 | 0.04% | 4 | 1.33% |
Thomas Graf | 5 | 0.04% | 1 | 0.33% |
Josef Bacik | 4 | 0.03% | 1 | 0.33% |
Lorenzo Colitti | 4 | 0.03% | 1 | 0.33% |
Thomas Gleixner | 3 | 0.02% | 1 | 0.33% |
Tejun Heo | 3 | 0.02% | 1 | 0.33% |
Alexander Duyck | 3 | 0.02% | 2 | 0.66% |
Duan Jiong | 3 | 0.02% | 1 | 0.33% |
Atis Elsts | 3 | 0.02% | 1 | 0.33% |
Tim Bird | 2 | 0.02% | 1 | 0.33% |
Ian Morris | 2 | 0.02% | 1 | 0.33% |
Octavian Purdila | 2 | 0.02% | 1 | 0.33% |
Willem de Bruijn | 1 | 0.01% | 1 | 0.33% |
Oliver Hartkopp | 1 | 0.01% | 1 | 0.33% |
Alan Cox | 1 | 0.01% | 1 | 0.33% |
Brian Haley | 1 | 0.01% | 1 | 0.33% |
Aruna-Hewapathirane | 1 | 0.01% | 1 | 0.33% |
Neil Horman | 1 | 0.01% | 1 | 0.33% |
Jorge Boncompte | 1 | 0.01% | 1 | 0.33% |
Total | 12444 | 100.00% | 301 | 100.00% |