cregit-Linux: how code gets into the kernel

Release 4.11 net/ipv4/udp.c

Directory: net/ipv4
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The User Datagram Protocol (UDP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() calls
 *              Alan Cox        :       stopped close while in use off icmp
 *                                      messages. Not a fix but a botch that
 *                                      for udp at least is 'valid'.
 *              Alan Cox        :       Fixed icmp handling properly
 *              Alan Cox        :       Correct error for oversized datagrams
 *              Alan Cox        :       Tidied select() semantics.
 *              Alan Cox        :       udp_err() fixed properly, also now
 *                                      select and read wake correctly on errors
 *              Alan Cox        :       udp_send verify_area moved to avoid mem leak
 *              Alan Cox        :       UDP can count its memory
 *              Alan Cox        :       send to an unknown connection causes
 *                                      an ECONNREFUSED off the icmp, but
 *                                      does NOT close.
 *              Alan Cox        :       Switched to new sk_buff handlers. No more backlog!
 *              Alan Cox        :       Using generic datagram code. Even smaller and the PEEK
 *                                      bug no longer crashes it.
 *              Fred Van Kempen :       Net2e support for sk->broadcast.
 *              Alan Cox        :       Uses skb_free_datagram
 *              Alan Cox        :       Added get/set sockopt support.
 *              Alan Cox        :       Broadcasting without option set returns EACCES.
 *              Alan Cox        :       No wakeup calls. Instead we now use the callbacks.
 *              Alan Cox        :       Use ip_tos and ip_ttl
 *              Alan Cox        :       SNMP Mibs
 *              Alan Cox        :       MSG_DONTROUTE, and 0.0.0.0 support.
 *              Matt Dillon     :       UDP length checks.
 *              Alan Cox        :       Smarter af_inet used properly.
 *              Alan Cox        :       Use new kernel side addressing.
 *              Alan Cox        :       Incorrect return on truncated datagram receive.
 *      Arnt Gulbrandsen        :       New udp_send and stuff
 *              Alan Cox        :       Cache last socket
 *              Alan Cox        :       Route cache
 *              Jon Peatfield   :       Minor efficiency fix to sendto().
 *              Mike Shaver     :       RFC1122 checks.
 *              Alan Cox        :       Nonblocking error fix.
 *      Willy Konynenberg       :       Transparent proxying support.
 *              Mike McLagan    :       Routing by source
 *              David S. Miller :       New socket lookup architecture.
 *                                      Last socket cache retained as it
 *                                      does have a high hit rate.
 *              Olaf Kirch      :       Don't linearise iovec on sendmsg.
 *              Andi Kleen      :       Some cleanups, cache destination entry
 *                                      for connect.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *              Melvin Smith    :       Check msg_name not msg_namelen in sendto(),
 *                                      return ENOTCONN for unconnected sockets (POSIX)
 *              Janos Farkas    :       don't deliver multi/broadcasts to a different
 *                                      bound-to-device socket
 *      Hirokazu Takahashi      :       HW checksumming for outgoing UDP
 *                                      datagrams.
 *      Hirokazu Takahashi      :       sendfile() on UDP works now.
 *              Arnaldo C. Melo :       convert /proc/net/udp to seq_file
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov:               allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 *      Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *      James Chapman           :       Add L2TP encapsulation type.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */


#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>


struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

/* IPCB reference means this can not be used from early demux */

static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
		return true;
#endif
	return false;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Robert Shearman                      51  100.00%        1     100.00%
Total                                51  100.00%        1     100.00%


static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk, unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				if (!bitmap)
					return 0;
			} else {
				if (!bitmap)
					return 1;
				__set_bit(udp_sk(sk2)->udp_port_hash >> log,
					  bitmap);
			}
		}
	}
	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         88  45.60%         6      28.57%
Eric Garver                          27  13.99%         1       4.76%
Tom Herbert                          23  11.92%         1       4.76%
Gerrit Renker                        19   9.84%         2       9.52%
Pavel Emelyanov                       9   4.66%         1       4.76%
Hideaki Yoshifuji / 吉藤英明           7   3.63%         2       9.52%
Craig Gallek                          6   3.11%         1       4.76%
Joe Perches                           6   3.11%         1       4.76%
David S. Miller                       3   1.55%         2       9.52%
Linus Torvalds (pre-git)              2   1.04%         1       4.76%
Al Viro                               1   0.52%         1       4.76%
Stephen Hemminger                     1   0.52%         1       4.76%
Josef Bacik                           1   0.52%         1       4.76%
Total                               193  100.00%       21     100.00%

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    inet_rcv_saddr_equal(sk, sk2, true)) {
			if (sk2->sk_reuseport && sk->sk_reuseport &&
			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
			    uid_eq(uid, sock_i_uid(sk2))) {
				res = 0;
			} else {
				res = 1;
			}
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        126  69.61%         1      16.67%
Eric Garver                          23  12.71%         1      16.67%
Tom Herbert                          23  12.71%         1      16.67%
Craig Gallek                          6   3.31%         1      16.67%
Joe Perches                           2   1.10%         1      16.67%
Josef Bacik                           1   0.55%         1      16.67%
Total                               181  100.00%        6     100.00%


static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    inet_rcv_saddr_equal(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Craig Gallek                        158  98.75%         1      33.33%
Josef Bacik                           1   0.62%         1      33.33%
Eric Dumazet                          1   0.62%         1      33.33%
Total                               160  100.00%        3     100.00%

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:             socket struct in question
 *  @snum:           port number to look up
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
			cond_resched();
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2          &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        406  61.98%        15      30.61%
Craig Gallek                         78  11.91%         2       4.08%
Linus Torvalds (pre-git)             50   7.63%         8      16.33%
Pavel Emelyanov                      28   4.27%         5      10.20%
Stephen Hemminger                    25   3.82%         2       4.08%
Gerrit Renker                        17   2.60%         1       2.04%
Anton Arapov                         13   1.98%         1       2.04%
Américo Wang                          8   1.22%         2       4.08%
Arnaldo Carvalho de Melo              7   1.07%         2       4.08%
David S. Miller                       6   0.92%         4       8.16%
Daniel Borkmann                       4   0.61%         1       2.04%
Hideaki Yoshifuji / 吉藤英明           4   0.61%         2       4.08%
Linus Torvalds                        3   0.46%         1       2.04%
Eric Garver                           3   0.46%         1       2.04%
Eric W. Biedermann                    2   0.31%         1       2.04%
Aruna-Hewapathirane                   1   0.15%         1       2.04%
Total                               655  100.00%       49     100.00%

EXPORT_SYMBOL(udp_lib_get_port);
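
The reuseport branches above are what let several UDP sockets share one port, with the kernel balancing datagrams between them. A minimal userspace sketch of the semantics this enforces (standard POSIX/Linux API; the helper name and port are illustrative, and error handling is omitted for brevity): both sockets must set SO_REUSEPORT before bind() and run under the same UID, matching the uid_eq()/sk_reuseport checks in udp_lib_lport_inuse().

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int bind_reuseport(unsigned short port)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;

	/* must precede bind(), or udp_lib_lport_inuse() reports a conflict */
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	return fd;
}

Calling bind_reuseport() twice in the same process (or UID) yields two sockets in one reuseport group; udp_reuseport_add_sock() links the second to the first.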
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         36  100.00%        3     100.00%
Total                                36  100.00%        3     100.00%


int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         51  68.92%         3      42.86%
David S. Miller                      19  25.68%         1      14.29%
Linus Torvalds (pre-git)              2   2.70%         1      14.29%
Pavel Emelyanov                       1   1.35%         1      14.29%
Gerrit Renker                         1   1.35%         1      14.29%
Total                                74  100.00%        7     100.00%


static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif,
			 bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     107  51.44%         1       6.67%
Eric Dumazet                         50  24.04%         4      26.67%
Joe Perches                          21  10.10%         1       6.67%
Hideaki Yoshifuji / 吉藤英明           8   3.85%         3      20.00%
Tom Herbert                           6   2.88%         1       6.67%
Linus Torvalds (pre-git)              6   2.88%         1       6.67%
Robert Shearman                       5   2.40%         1       6.67%
Xuemin Su                             3   1.44%         1       6.67%
Gerrit Renker                         1   0.48%         1       6.67%
Pavel Emelyanov                       1   0.48%         1       6.67%
Total                               208  100.00%       15     100.00%


static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Hannes Frederic Sowa                 61  96.83%         2      66.67%
Eric Dumazet                          2   3.17%         1      33.33%
Total                                63  100.00%        3     100.00%

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     bool exact_dif, struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        110  51.16%         3      27.27%
Tom Herbert                          68  31.63%         1       9.09%
Craig Gallek                         26  12.09%         3      27.27%
Robert Shearman                       5   2.33%         1       9.09%
Daniel Borkmann                       4   1.86%         1       9.09%
Hannes Frederic Sowa                  1   0.47%         1       9.09%
Xuemin Su                             1   0.47%         1       9.09%
Total                               215  100.00%       11     100.00%

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp_lib_exact_dif_match(net, skb);
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  exact_dif, hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;

			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  exact_dif, hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        278  63.47%         7      41.18%
Tom Herbert                          73  16.67%         1       5.88%
Craig Gallek                         29   6.62%         3      17.65%
Xuemin Su                            21   4.79%         1       5.88%
Robert Shearman                      16   3.65%         1       5.88%
David S. Miller                      15   3.42%         1       5.88%
Daniel Borkmann                       4   0.91%         1       5.88%
Hannes Frederic Sowa                  1   0.23%         1       5.88%
Jorge Boncompte                       1   0.23%         1       5.88%
Total                               438  100.00%       17     100.00%

EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
KOVACS Krisztian                     64  94.12%         1      33.33%
Craig Gallek                          2   2.94%         1      33.33%
Eric Dumazet                          2   2.94%         1      33.33%
Total                                68  100.00%        3     100.00%


struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Tom Herbert                          29  93.55%         1      50.00%
Alexander Duyck                       2   6.45%         1      50.00%
Total                                31  100.00%        2     100.00%

EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
    IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
KOVACS Krisztian                     43  58.11%         1      25.00%
Eric Dumazet                         29  39.19%         2      50.00%
Craig Gallek                          2   2.70%         1      25.00%
Total                                74  100.00%        4     100.00%

EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                      90  62.50%         1      20.00%
Shawn Bohrer                         29  20.14%         1      20.00%
Eric Dumazet                         25  17.36%         3      60.00%
Total                               144  100.00%        5     100.00%

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data + (iph->ihl << 2));
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
			       iph->saddr, uh->source, skb->dev->ifindex,
			       udptable, NULL);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;	/* No socket for error */
	}

	err = 0;
	harderr = 0;
	inet = inet_sk(sk);

	switch (type) {
	default:
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	case ICMP_SOURCE_QUENCH:
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		harderr = 1;
		break;
	case ICMP_DEST_UNREACH:
		if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
			ipv4_sk_update_pmtu(skb, sk, info);
			if (inet->pmtudisc != IP_PMTUDISC_DONT) {
				err = EMSGSIZE;
				harderr = 1;
				break;
			}
			goto out;
		}
		err = EHOSTUNREACH;
		if (code <= NR_ICMP_UNREACH) {
			harderr = icmp_err_convert[code].fatal;
			err = icmp_err_convert[code].errno;
		}
		break;
	case ICMP_REDIRECT:
		ipv4_sk_redirect(skb, sk);
		goto out;
	}

	/*
	 * RFC1122: OK.  Passes ICMP errors back to application, as per
	 * 4.1.3.3.
	 */
	if (!inet->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else
		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh + 1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     330  92.18%         3      23.08%
Pavel Emelyanov                      13   3.63%         2      15.38%
Eric Dumazet                          6   1.68%         4      30.77%
Duan Jiong                            3   0.84%         1       7.69%
Hideaki Yoshifuji / 吉藤英明           3   0.84%         1       7.69%
Craig Gallek                          2   0.56%         1       7.69%
Ian Morris                            1   0.28%         1       7.69%
Total                               358  100.00%       13     100.00%
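
As the comment above notes, an unconnected socket only sees these ICMP errors if IP_RECVERR is set; the error is then queued and read back with MSG_ERRQUEUE via the standard sock_extended_err layout. A hedged userspace sketch (helper name is illustrative, error handling trimmed):

#include <netinet/in.h>
#include <linux/errqueue.h>
#include <sys/socket.h>

static void drain_udp_errqueue(int fd)
{
	char cbuf[512], dbuf[512];
	struct msghdr msg = {0};
	struct iovec iov = { dbuf, sizeof(dbuf) };
	struct cmsghdr *cm;
	int one = 1;

	/* opt-in, matching the inet->recverr test in __udp4_lib_err() */
	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one));

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);
			/* ee->ee_errno carries e.g. ECONNREFUSED or EMSGSIZE */
			(void)ee;
		}
}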


void udp_err(struct sk_buff *skb, u32 info)
{
	__udp4_lib_err(skb, info, &udp_table);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                      21  91.30%         1      50.00%
Eric Dumazet                          2   8.70%         1      50.00%
Total                                23  100.00%        2     100.00%

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip_flush_pending_frames(sk);
	}
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                      45  100.00%        1     100.00%
Total                                45  100.00%        1     100.00%

EXPORT_SYMBOL(udp_flush_pending_frames);

/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
	struct udphdr *uh = udp_hdr(skb);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int hlen = len;
	__wsum csum = 0;

	if (!skb_has_frag_list(skb)) {
		/*
		 * Only one fragment on the socket.
		 */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(src, dst, len,
					       IPPROTO_UDP, 0);
	} else {
		struct sk_buff *frags;

		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		skb_walk_frags(skb, frags) {
			csum = csum_add(csum, frags->csum);
			hlen -= frags->len;
		}

		csum = skb_checksum(skb, offset, hlen, csum);
		skb->ip_summed = CHECKSUM_NONE;

		uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     136  71.20%         1      33.33%
Herbert Xu                           40  20.94%         1      33.33%
Américo Wang                         15   7.85%         1      33.33%
Total                               191  100.00%        3     100.00%

EXPORT_SYMBOL_GPL(udp4_hwcsum);

/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void udp_set_csum(bool nocheck, struct sk_buff *skb,
		  __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck) {
		uh->check = 0;
	} else if (skb_is_gso(skb)) {
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		uh->check = 0;
		uh->check = udp_v4_check(len, saddr, daddr, lco_csum(skb));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Tom Herbert                         115  68.05%         1      33.33%
Edward Cree                          54  31.95%         2      66.67%
Total                               169  100.00%        3     100.00%

EXPORT_SYMBOL(udp_set_csum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	__wsum csum = 0;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = fl4->fl4_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)				 /*     UDP-Lite      */
		csum = udplite_csum(skb);

	else if (sk->sk_no_check_tx) {		 /* UDP csum disabled */

		skb->ip_summed = CHECKSUM_NONE;
		goto send;

	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

		udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
		goto send;

	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
				      sk->sk_protocol, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip_send_skb(sock_net(sk), skb);
	if (err) {
		if (err == -ENOBUFS && !inet->recverr) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     201  70.53%         2      25.00%
Herbert Xu                           40  14.04%         1      12.50%
Eric Dumazet                         38  13.33%         3      37.50%
Pavel Emelyanov                       5   1.75%         1      12.50%
Tom Herbert                           1   0.35%         1      12.50%
Total                               285  100.00%        8     100.00%

/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
int udp_push_pending_frames(struct sock *sk)
{
	struct udp_sock  *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct sk_buff *skb;
	int err = 0;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		goto out;

	err = udp_send_skb(skb, fl4);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Herbert Xu                           72  72.73%         1      20.00%
Eric Dumazet                         14  14.14%         1      20.00%
David S. Miller                      13  13.13%         3      60.00%
Total                                99  100.00%        5     100.00%

EXPORT_SYMBOL(udp_push_pending_frames);
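
The up->pending machinery above is driven from userspace by UDP corking: while UDP_CORK is set, consecutive sends are appended to a single pending datagram (udp_sendmsg() -> ip_append_data()), and clearing the option pushes it out through udp_push_pending_frames(). A small sketch, assuming fd is a connected UDP socket:

#include <netinet/in.h>
#include <netinet/udp.h>	/* UDP_CORK */
#include <sys/socket.h>

static void send_corked(int fd)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "hello ", 6, 0);	/* appended to the corked datagram */
	send(fd, "world", 5, 0);	/* ditto */
	/* uncorking emits the two writes as ONE datagram */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}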
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	struct flowi4 fl4_stack;
	struct flowi4 *fl4;
	int ulen = len;
	struct ipcm_cookie ipc;
	struct rtable *rt = NULL;
	int free = 0;
	int connected = 0;
	__be32 daddr, faddr, saddr;
	__be16 dport;
	u8  tos;
	int err, is_udplite = IS_UDPLITE(sk);
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sk_buff *skb;
	struct ip_options_data opt_copy;

	if (len > 0xFFFF)
		return -EMSGSIZE;

	/*
	 *	Check the flags.
	 */

	if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
		return -EOPNOTSUPP;

	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

	fl4 = &inet->cork.fl.u.ip4;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET)) {
				release_sock(sk);
				return -EINVAL;
			}
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	/*
	 *	Get and verify the address.
	 */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
		if (msg->msg_namelen < sizeof(*usin))
			return -EINVAL;
		if (usin->sin_family != AF_INET) {
			if (usin->sin_family != AF_UNSPEC)
				return -EAFNOSUPPORT;
		}

		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
		if (dport == 0)
			return -EINVAL;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = inet->inet_daddr;
		dport = inet->inet_dport;
		/* Open fast path for connected socket.
		   Route will not be used, if at least one option is set.
		 */
		connected = 1;
	}

	ipc.sockc.tsflags = sk->sk_tsflags;
	ipc.addr = inet->inet_saddr;
	ipc.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6);
		if (unlikely(err)) {
			kfree(ipc.opt);
			return err;
		}
		if (ipc.opt)
			free = 1;
		connected = 0;
	}
	if (!ipc.opt) {
		struct ip_options_rcu *inet_opt;

		rcu_read_lock();
		inet_opt = rcu_dereference(inet->inet_opt);
		if (inet_opt) {
			memcpy(&opt_copy, inet_opt,
			       sizeof(*inet_opt) + inet_opt->opt.optlen);
			ipc.opt = &opt_copy.opt;
		}
		rcu_read_unlock();
	}

	saddr = ipc.addr;
	ipc.addr = faddr = daddr;

	sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

	if (ipc.opt && ipc.opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		faddr = ipc.opt->opt.faddr;
		connected = 0;
	}
	tos = get_rttos(&ipc, inet);
	if (sock_flag(sk, SOCK_LOCALROUTE) ||
	    (msg->msg_flags & MSG_DONTROUTE) ||
	    (ipc.opt && ipc.opt->opt.is_strictroute)) {
		tos |= RTO_ONLINK;
		connected = 0;
	}

	if (ipv4_is_multicast(daddr)) {
		if (!ipc.oif)
			ipc.oif = inet->mc_index;
		if (!saddr)
			saddr = inet->mc_addr;
		connected = 0;
	} else if (!ipc.oif)
		ipc.oif = inet->uc_index;

	if (connected)
		rt = (struct rtable *)sk_dst_check(sk, 0);

	if (!rt) {
		struct net *net = sock_net(sk);
		__u8 flow_flags = inet_sk_flowi_flags(sk);

		fl4 = &fl4_stack;

		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
				   flow_flags,
				   faddr, saddr, dport, inet->inet_sport,
				   sk->sk_uid);

		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
		rt = ip_route_output_flow(net, fl4, sk);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			if (err == -ENETUNREACH)
				IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
			goto out;
		}

		err = -EACCES;
		if ((rt->rt_flags & RTCF_BROADCAST) &&
		    !sock_flag(sk, SOCK_BROADCAST))
			goto out;
		if (connected)
			sk_dst_set(sk, dst_clone(&rt->dst));
	}

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	saddr = fl4->saddr;
	if (!ipc.addr)
		daddr = ipc.addr = fl4->daddr;

	/* Lockless fast path for the non-corking case. */
	if (!corkreq) {
		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
				  sizeof(struct udphdr), &ipc, &rt,
				  msg->msg_flags);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_send_skb(skb, fl4);
		goto out;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}
	/*
	 *	Now cork the socket to pend data.
	 */
	fl4 = &inet->cork.fl.u.ip4;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->fl4_dport = dport;
	fl4->fl4_sport = inet->inet_sport;
	up->pending = AF_INET;

do_append_data:
	up->len += ulen;
	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
			     sizeof(struct udphdr), &ipc, &rt,
			     corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;
	release_sock(sk);

out:
	ip_rt_put(rt);
	if (free)
		kfree(ipc.opt);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(&rt->dst, &fl4->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     995  77.31%        11      28.95%
Eric Dumazet                         80   6.22%         6      15.79%
Herbert Xu                           75   5.83%         1       2.63%
Soheil Hassas Yeganeh                26   2.02%         1       2.63%
David Ahern                          21   1.63%         1       2.63%
Francesco Fusco                      17   1.32%         1       2.63%
Erich E. Hoover                      16   1.24%         1       2.63%
Julian Anastasov                     14   1.09%         1       2.63%
Pavel Emelyanov                       8   0.62%         3       7.89%
Steffen Hurrle                        7   0.54%         1       2.63%
Patrick Ohly                          7   0.54%         1       2.63%
Hannes Frederic Sowa                  6   0.47%         1       2.63%
Lorenzo Colitti                       4   0.31%         1       2.63%
Joe Perches                           3   0.23%         2       5.26%
Atis Elsts                            3   0.23%         1       2.63%
Denis V. Lunev                        2   0.16%         2       5.26%
Hideaki Yoshifuji / 吉藤英明           1   0.08%         1       2.63%
Oliver Hartkopp                       1   0.08%         1       2.63%
Ian Morris                            1   0.08%         1       2.63%
Total                              1287  100.00%       38     100.00%

EXPORT_SYMBOL(udp_sendmsg);
int udp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct udp_sock *up = udp_sk(sk);
	int ret;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	if (!up->pending) {
		struct msghdr msg = {	.msg_flags = flags|MSG_MORE };

		/* Call udp_sendmsg to specify destination address which
		 * sendpage interface can't pass.
		 * This will succeed only when the socket is connected.
		 */
		ret = udp_sendmsg(sk, &msg, 0);
		if (ret < 0)
			return ret;
	}

	lock_sock(sk);

	if (unlikely(!up->pending)) {
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 3\n");
		return -EINVAL;
	}

	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
			     page, offset, size, flags);
	if (ret == -EOPNOTSUPP) {
		release_sock(sk);
		return sock_no_sendpage(sk->sk_socket, page, offset,
					size, flags);
	}
	if (ret < 0) {
		udp_flush_pending_frames(sk);
		goto out;
	}

	up->len += size;
	if (!(up->corkflag || (flags&MSG_MORE)))
		ret = udp_push_pending_frames(sk);
	if (!ret)
		ret = size;
out:
	release_sock(sk);
	return ret;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     239  95.22%         2      50.00%
Shawn Landden                        10   3.98%         1      25.00%
Joe Perches                           2   0.80%         1      25.00%
Total                               251  100.00%        4     100.00%
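
udp_sendpage() is what backs sendfile(2) on a UDP socket (see the "sendfile() on UDP works now" entry in the header changelog). A hedged sketch; as the in-function comment says, the socket must already be connected, since sendpage cannot carry a destination address:

#include <sys/sendfile.h>

static long udp_sendfile(int udp_fd, int file_fd, size_t len)
{
	off_t off = 0;

	/* ends up in udp_sendpage() -> ip_append_page() for the payload */
	return sendfile(udp_fd, file_fd, &off, len);
}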

/* fully reclaim rmem/fwd memory allocated for skb */
static void udp_rmem_release(struct sock *sk, int size, int partial)
{
	struct udp_sock *up = udp_sk(sk);
	int amt;

	if (likely(partial)) {
		up->forward_deficit += size;
		size = up->forward_deficit;
		if (size < (sk->sk_rcvbuf >> 2) &&
		    !skb_queue_empty(&sk->sk_receive_queue))
			return;
	} else {
		size += up->forward_deficit;
	}
	up->forward_deficit = 0;

	sk->sk_forward_alloc += size;
	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
	sk->sk_forward_alloc -= amt;

	if (amt)
		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);

	atomic_sub(size, &sk->sk_rmem_alloc);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         78  55.71%         2      66.67%
Paolo Abeni                          62  44.29%         1      33.33%
Total                               140  100.00%        3     100.00%

/* Note: called with sk_receive_queue.lock held.
 * Instead of using skb->truesize here, find a copy of it in skb->dev_scratch
 * This avoids a cache line miss while receive_queue lock is held.
 * Look at __udp_enqueue_schedule_skb() to find where this copy is done.
 */
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
{
	udp_rmem_release(sk, skb->dev_scratch, 1);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Paolo Abeni                          25  96.15%         2      66.67%
Eric Dumazet                          1   3.85%         1      33.33%
Total                                26  100.00%        3     100.00%

EXPORT_SYMBOL(udp_skb_destructor);

/* Idea of busylocks is to let producers grab an extra spinlock
 * to relieve pressure on the receive_queue spinlock shared by consumer.
 * Under flood, this means that only one producer can be in line
 * trying to acquire the receive_queue spinlock.
 * These busylock can be allocated on a per cpu manner, instead of a
 * per socket one (that would consume a cache line per socket)
 */
static int udp_busylocks_log __read_mostly;
static spinlock_t *udp_busylocks __read_mostly;
static spinlock_t *busylock_acquire(void *ptr)
{
	spinlock_t *busy;

	busy = udp_busylocks + hash_ptr(ptr, udp_busylocks_log);
	spin_lock(busy);
	return busy;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         34  100.00%        1     100.00%
Total                                34  100.00%        1     100.00%


static void busylock_release(spinlock_t *busy)
{
	if (busy)
		spin_unlock(busy);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         19  100.00%        1     100.00%
Total                                19  100.00%        1     100.00%


int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;
	int rmem, delta, amt, err = -ENOMEM;
	spinlock_t *busy = NULL;
	int size;

	/* try to avoid the costly atomic add/sub pair when the receive
	 * queue is full; always allow at least a packet
	 */
	rmem = atomic_read(&sk->sk_rmem_alloc);
	if (rmem > sk->sk_rcvbuf)
		goto drop;

	/* Under mem pressure, it might be helpful to help udp_recvmsg()
	 * having linear skbs :
	 * - Reduce memory overhead and thus increase receive queue capacity
	 * - Less cache line misses at copyout() time
	 * - Less work at consume_skb() (less alien page frag freeing)
	 */
	if (rmem > (sk->sk_rcvbuf >> 1)) {
		skb_condense(skb);

		busy = busylock_acquire(sk);
	}
	size = skb->truesize;
	/* Copy skb->truesize into skb->dev_scratch to avoid a cache line miss
	 * in udp_skb_destructor()
	 */
	skb->dev_scratch = size;

	/* we drop only if the receive buf is full and the receive
	 * queue contains some other skb
	 */
	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
	if (rmem > (size + sk->sk_rcvbuf))
		goto uncharge_drop;

	spin_lock(&list->lock);
	if (size >= sk->sk_forward_alloc) {
		amt = sk_mem_pages(size);
		delta = amt << SK_MEM_QUANTUM_SHIFT;
		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
			err = -ENOBUFS;
			spin_unlock(&list->lock);
			goto uncharge_drop;
		}

		sk->sk_forward_alloc += delta;
	}

	sk->sk_forward_alloc -= size;

	/* no need to setup a destructor, we will explicitly release the
	 * forward allocated memory on dequeue
	 */
	sock_skb_set_dropcount(sk, skb);

	__skb_queue_tail(list, skb);
	spin_unlock(&list->lock);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	busylock_release(busy);
	return 0;

uncharge_drop:
	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);

drop:
	atomic_inc(&sk->sk_drops);
	busylock_release(busy);
	return err;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Paolo Abeni                         235  80.76%         3      50.00%
Eric Dumazet                         56  19.24%         3      50.00%
Total                               291  100.00%        6     100.00%

EXPORT_SYMBOL_GPL(__udp_enqueue_schedule_skb);
void udp_destruct_sock(struct sock *sk)
{
	/* reclaim completely the forward allocated memory */
	unsigned int total = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		total += skb->truesize;
		kfree_skb(skb);
	}
	udp_rmem_release(sk, total, 0);

	inet_sock_destruct(sk);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Paolo Abeni                          65  100.00%        2     100.00%
Total                                65  100.00%        2     100.00%

EXPORT_SYMBOL_GPL(udp_destruct_sock);
int udp_init_sock(struct sock *sk)
{
	sk->sk_destruct = udp_destruct_sock;
	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Paolo Abeni                          19  100.00%        1     100.00%
Total                                19  100.00%        1     100.00%

EXPORT_SYMBOL_GPL(udp_init_sock);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
{
	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
		bool slow = lock_sock_fast(sk);

		sk_peek_offset_bwd(sk, len);
		unlock_sock_fast(sk, slow);
	}
	consume_skb(skb);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Paolo Abeni                          61  100.00%        1     100.00%
Total                                61  100.00%        1     100.00%

EXPORT_SYMBOL_GPL(skb_consume_udp);

/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or -1 if none is found.
 */
static int first_packet_length(struct sock *sk)
{
	struct sk_buff_head *rcvq = &sk->sk_receive_queue;
	struct sk_buff *skb;
	int total = 0;
	int res;

	spin_lock_bh(&rcvq->lock);
	while ((skb = skb_peek(rcvq)) != NULL &&
	       udp_lib_checksum_complete(skb)) {
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
				IS_UDPLITE(sk));
		__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
				IS_UDPLITE(sk));
		atomic_inc(&sk->sk_drops);
		__skb_unlink(skb, rcvq);
		total += skb->truesize;
		kfree_skb(skb);
	}
	res = skb ? skb->len : -1;
	if (total)
		udp_rmem_release(sk, total, 1);
	spin_unlock_bh(&rcvq->lock);
	return res;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        128  83.66%         5      83.33%
Paolo Abeni                          25  16.34%         1      16.67%
Total                               153  100.00%        6     100.00%

/*
 *	IOCTL requests applicable to the UDP protocol
 */
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}

	case SIOCINQ:
	{
		int amount = max_t(int, 0, first_packet_length(sk));

		return put_user(amount, (int __user *)arg);
	}

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                      78  86.67%         1      25.00%
Eric Dumazet                         12  13.33%         3      75.00%
Total                                90  100.00%        4     100.00%
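
first_packet_length() is what backs SIOCINQ above: for UDP it reports the payload length of the next pending datagram, not the total number of queued bytes. For example (helper name illustrative):

#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static int next_datagram_len(int fd)
{
	int n = 0;

	/* 0 when the queue is empty: first_packet_length() returns -1,
	 * which udp_ioctl() clamps with max_t(int, 0, ...) */
	ioctl(fd, SIOCINQ, &n);
	return n;
}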

EXPORT_SYMBOL(udp_ioctl);

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */
int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
		int flags, int *addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;

	if (flags & MSG_ERRQUEUE)
		return ip_recv_error(sk, msg, len, addr_len);

try_again:
	peeking = off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = skb->len;
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = !udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || skb_csum_unnecessary(skb))
		err = skb_copy_datagram_msg(skb, off, msg, copied);
	else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);

		if (err == -EINVAL)
			goto csum_copy_err;
	}

	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
		return err;
	}

	if (!peeked)
		UDP_INC_STATS(sock_net(sk),
			      UDP_MIB_INDATAGRAMS, is_udplite);

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (sin) {
		sin->sin_family = AF_INET;
		sin->sin_port = udp_hdr(skb)->source;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}
	if (inet->cmsg_flags)
		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	}
	kfree_skb(skb);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     341  66.60%         3      11.11%
Eric Dumazet                         76  14.84%        10      37.04%
samanthakumar                        36   7.03%         1       3.70%
Pavel Emelyanov                      15   2.93%         2       7.41%
Hannes Frederic Sowa                 11   2.15%         2       7.41%
Paolo Abeni                           9   1.76%         3      11.11%
Xufeng Zhang                          7   1.37%         1       3.70%
Tom Herbert                           7   1.37%         1       3.70%
Steffen Hurrle                        7   1.37%         1       3.70%
Willem de Bruijn                      1   0.20%         1       3.70%
Neil Horman                           1   0.20%         1       3.70%
Al Viro                               1   0.20%         1       3.70%
Total                               512  100.00%       27     100.00%
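
The copied/ulen bookkeeping above is visible from userspace through MSG_TRUNC: with that flag, recv() returns the real datagram length even when the buffer was too small (the "err = ulen" branch). Combined with MSG_PEEK, that gives a cheap way to size a buffer without consuming the datagram:

#include <sys/socket.h>

static long peek_datagram_len(int fd)
{
	char tiny;

	/* MSG_PEEK leaves the skb queued; MSG_TRUNC makes udp_recvmsg()
	 * return the full length rather than the one byte copied */
	return recv(fd, &tiny, 1, MSG_PEEK | MSG_TRUNC);
}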


int __udp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	/*
	 *	1003.1g - break association.
	 */

	sk->sk_state = TCP_CLOSE;
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sock_rps_reset_rxhash(sk);
	sk->sk_bound_dev_if = 0;
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
		sk->sk_prot->unhash(sk);
		inet->inet_sport = 0;
	}
	sk_dst_reset(sk);
	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                      62  59.05%         1      16.67%
Eric Dumazet                         38  36.19%         3      50.00%
Tom Herbert                           5   4.76%         2      33.33%
Total                               105  100.00%        6     100.00%

EXPORT_SYMBOL(__udp_disconnect);
int udp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__udp_disconnect(sk, flags);
	release_sock(sk);
	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         33  100.00%        1     100.00%
Total                                33  100.00%        1     100.00%

EXPORT_SYMBOL(udp_disconnect);
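
From userspace, this "break association" path (the 1003.1g behavior noted in __udp_disconnect) is reached by connecting the socket to an AF_UNSPEC address, which the socket layer routes to the protocol's disconnect hook. A small sketch:

#include <string.h>
#include <sys/socket.h>

static int udp_dissolve(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;	/* dissolves the association */
	return connect(fd, &sa, sizeof(sa));
}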
void udp_lib_unhash(struct sock *sk)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2;

		hslot  = udp_hashslot(udptable, sock_net(sk),
				      udp_sk(sk)->udp_port_hash);
		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

		spin_lock_bh(&hslot->lock);
		if (rcu_access_pointer(sk->sk_reuseport_cb))
			reuseport_detach_sock(sk);
		if (sk_del_node_init_rcu(sk)) {
			hslot->count--;
			inet_sk(sk)->inet_num = 0;
			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

			spin_lock(&hslot2->lock);
			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
			hslot2->count--;
			spin_unlock(&hslot2->lock);
		}
		spin_unlock_bh(&hslot->lock);
	}
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        144  83.24%         9      81.82%
David S. Miller                      15   8.67%         1       9.09%
Craig Gallek                         14   8.09%         1       9.09%
Total                               173  100.00%       11     100.00%

EXPORT_SYMBOL(udp_lib_unhash);

/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
	if (sk_hashed(sk)) {
		struct udp_table *udptable = sk->sk_prot->h.udp_table;
		struct udp_hslot *hslot, *hslot2, *nhslot2;

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		nhslot2 = udp_hashslot2(udptable, newhash);
		udp_sk(sk)->udp_portaddr_hash = newhash;

		if (hslot2 != nhslot2 ||
		    rcu_access_pointer(sk->sk_reuseport_cb)) {
			hslot = udp_hashslot(udptable, sock_net(sk),
					     udp_sk(sk)->udp_port_hash);
			/* we must lock primary chain too */
			spin_lock_bh(&hslot->lock);
			if (rcu_access_pointer(sk->sk_reuseport_cb))
				reuseport_detach_sock(sk);

			if (hslot2 != nhslot2) {
				spin_lock(&hslot2->lock);
				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
				hslot2->count--;
				spin_unlock(&hslot2->lock);

				spin_lock(&nhslot2->lock);
				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
						   &nhslot2->head);
				nhslot2->count++;
				spin_unlock(&nhslot2->lock);
			}

			spin_unlock_bh(&hslot->lock);
		}
	}
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        191  86.82%         2      66.67%
Craig Gallek                         29  13.18%         1      33.33%
Total                               220  100.00%        3     100.00%

EXPORT_SYMBOL(udp_lib_rehash);
static void udp_v4_rehash(struct sock *sk)
{
	u16 new_hash = udp4_portaddr_hash(sock_net(sk),
					  inet_sk(sk)->inet_rcv_saddr,
					  inet_sk(sk)->inet_num);
	udp_lib_rehash(sk, new_hash);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         43  100.00%        1     100.00%
Total                                43  100.00%        1     100.00%


int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (inet_sk(sk)->inet_daddr) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					is_udplite);
		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		trace_udp_fail_queue_rcv_skb(rc, sk);
		return -1;
	}

	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Herbert Xu                           65  48.51%         1      10.00%
Eric Dumazet                         32  23.88%         4      40.00%
Tom Herbert                          19  14.18%         1      10.00%
Shawn Bohrer                          9   6.72%         1      10.00%
Satoru Moriya                         7   5.22%         1      10.00%
Paolo Abeni                           1   0.75%         1      10.00%
David S. Miller                       1   0.75%         1      10.00%
Total                               134  100.00%       10     100.00%

static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
	if (!static_key_enabled(&udp_encap_needed))
		static_key_slow_inc(&udp_encap_needed);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                         22  100.00%        1     100.00%
Total                                22  100.00%        1     100.00%
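
The static key is flipped the first time a socket enables an encapsulation type, typically an IKE daemon setting up ESP-in-UDP (RFC 3948) NAT traversal. A hedged sketch using the UDP_ENCAP socket option (helper name illustrative):

#include <netinet/in.h>
#include <linux/udp.h>	/* UDP_ENCAP, UDP_ENCAP_ESPINUDP */
#include <sys/socket.h>

static int enable_espinudp(int fd)
{
	int type = UDP_ENCAP_ESPINUDP;

	/* udp_lib_setsockopt() stores this in up->encap_type and calls
	 * udp_encap_enable(), arming the static key that
	 * udp_queue_rcv_skb() tests below */
	return setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
}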

EXPORT_SYMBOL(udp_encap_enable);

/* returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (static_key_false(&udp_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 *	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	ipv4_pktinfo_prepare(sk, skb);
	return __udp_queue_rcv_skb(sk, skb);

csum_error:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                     211  63.17%         1       4.76%
Eric Dumazet                         68  20.36%        10      47.62%
Tom Herbert                          11   3.29%         1       4.76%
Pavel Emelyanov                      11   3.29%         1       4.76%
Michal Kubeček                       10   2.99%         1       4.76%
samanthakumar                         9   2.69%         1       4.76%
Herbert Xu                            5   1.50%         1       4.76%
Joe Perches                           4   1.20%         2       9.52%
Daniel Borkmann                       2   0.60%         1       4.76%
Shawn Bohrer                          2   0.60%         1       4.76%
Paolo Abeni                           1   0.30%         1       4.76%
Total                               334  100.00%       21     100.00%
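
The UDPLITE_RECV_CC branch enforces the receiver's minimum checksum coverage from RFC 3828; up->pcrlen is set from userspace with the UDP-Lite coverage options. A sketch (coverage values count from the start of the UDP-Lite header, minimum 8; helper name illustrative):

#include <netinet/in.h>
#include <netinet/udplite.h>	/* IPPROTO_UDPLITE, UDPLITE_*_CSCOV */
#include <sys/socket.h>

static int udplite_set_coverage(int fd)
{
	int snd = 20;	/* checksum the first 20 bytes of each datagram sent */
	int rcv = 20;	/* demand at least that coverage on receive */

	if (setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
		       &snd, sizeof(snd)))
		return -1;
	/* packets with cscov < 20 now hit the pcrlen check above and drop */
	return setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
			  &rcv, sizeof(rcv));
}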

/* For TCP sockets, sk_rx_dst is protected by socket lock
 * For UDP, we use xchg() to guard against concurrent changes.
 */
static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old;

	dst_hold(dst);
	old = xchg(&sk->sk_rx_dst, dst);
	dst_release(old);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Shawn Bohrer                         24  55.81%         1      33.33%
Eric Dumazet                         19  44.19%         2      66.67%
Total                                43  100.00%        3     100.00%

/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
				    struct udphdr  *uh,
				    __be32 saddr, __be32 daddr,
				    struct udp_table *udptable,
				    int proto)
{
	struct sock *sk, *first = NULL;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	int dif = skb->dev->ifindex;
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
			    udptable->mask;
		hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
					 uh->source, saddr, dif, hnum))
			continue;

		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					IS_UDPLITE(sk));
			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
					IS_UDPLITE(sk));
			continue;
		}
		if (udp_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udp_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				proto == IPPROTO_UDPLITE);
	}
	return 0;
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
Eric Dumazet                        148  39.89%         6      42.86%
David Held                          125  33.69%         2      14.29%
David S. Miller                      69  18.60%         1       7.14%
Rick Jones                           14   3.77%         1       7.14%
Pavel Emelyanov                       9   2.43%         3      21.43%
Pablo Neira Ayuso                     6   1.62%         1       7.14%
Total                               371  100.00%       14     100.00%
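
Clone-and-deliver to every matching listener is what plain multicast receivers rely on: each socket joins the group and binds the shared port, and ip_mc_sf_allow() (called from __udp_is_mcast_sock() above) checks that membership before delivery. A minimal sketch of the join, with an illustrative example group address:

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int join_group(int fd)
{
	struct ip_mreqn mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = htonl(0xe0000101);	/* 224.0.1.1, example */
	mreq.imr_ifindex = 0;				/* let routing choose */
	return setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}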

/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
				 int proto)
{
	int err;

	UDP_SKB_CB(skb)->partial_cov = 0;
	UDP_SKB_CB(skb)->cscov = skb->len;

	if (proto == IPPROTO_UDPLITE) {
		err = udplite_checksum_init(skb, uh);
		if (err)
			return err;
	}

	/* Note, we are only interested in != 0 or == 0, thus the
	 * force to int.
	 */
	return (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
							 inet_compute_pseudo);
}

Contributors

Person                           Tokens  Prop     Commits  CommitProp
David S. Miller                      76  88.37%         1      33.33%
Tom Herbert                           5   5.81%         1      33.33%
Hannes Frederic Sowa                  5   5.81%         1      33.33%
Total                                86  100.00%        3     100.00%

/*
 *	All we need to do is get the socket, and then do a checksum.
 */
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	struct sock *sk;
	struct udphdr *uh;
	unsigned short ulen;
	struct rtable *rt = skb_rtable(skb);
	__be32 saddr, daddr;
	struct net *net = dev_net(skb->dev);

	/*
	 *  Validate the packet.
	 */
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto drop;		/* No space for header. */

	uh   = udp_hdr(skb);
	ulen = ntohs(uh->len);
	saddr = ip_hdr(skb)->saddr;
	daddr = ip_hdr(skb)->daddr;

	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
			goto short_packet;
		uh = udp_hdr(skb);
	}

	if (udp4_csum_init(skb, uh, proto))
		goto csum_error;

	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udp_queue_rcv_skb(sk, skb);
		sock_put(sk);
		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
		return __udp4_lib_mcast_deliver(net, skb, uh,
						saddr, daddr, udptable, proto);

	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 inet_compute_pseudo);

		ret = udp_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input, but
		 * it wants the return to be -protocol, or 0
		 */
		if (ret > 0)
			return -ret;
		return 0;
	}

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	/* No socket. Drop packet silently, if checksum is wrong */
	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

	/*
	 * Hmm.  We got an UDP packet to a port to which we
	 * don't wanna listen.  Ignore it.
	 */
	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source),
			    ulen, skb->len,
			    &daddr, ntohs(uh->dest));
	goto drop;

csum_error:
	/*
	 * RFC1122: OK.  Discards the bad packet silently (as far as
	 * the network is concerned, anyway) as per 4.1.3.4 (MUST).
	 */
	net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
			    proto == IPPROTO_UDPLITE ? "Lite" : "",
			    &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
			    ulen);
	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     340      64.15%    1         4.17%
Shawn Bohrer                        47       8.87%     1         4.17%
Eric Dumazet                        43       8.11%     6         25.00%
Tom Herbert                         30       5.66%     2         8.33%
Pavel Emelyanov                     18       3.40%     2         8.33%
Björn Mork                          18       3.40%     1         4.17%
Jesper Dangaard Brouer              8        1.51%     2         8.33%
Harvey Harrison                     6        1.13%     1         4.17%
Eliezer Tamir                       4        0.75%     1         4.17%
Joe Perches                         4        0.75%     2         8.33%
Gerrit Renker                       3        0.57%     1         4.17%
Hideaki Yoshifuji / 吉藤英明        3        0.57%     1         4.17%
KOVACS Krisztian                    2        0.38%     1         4.17%
Alexey Kuznetsov                    2        0.38%     1         4.17%
Rick Jones                          2        0.38%     1         4.17%
Total                               530      100.00%   24        100.00%

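The icmp_send(... ICMP_PORT_UNREACH ...) branch above is directly observable from userspace: on a connected UDP socket, the resulting ICMP error surfaces as ECONNREFUSED on a later system call. A small demo, assuming nothing listens on 127.0.0.1:9:

#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port   = htons(9),   /* assumption: this port is closed */
        };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
        /* connect() a UDP socket so ICMP errors are reported back to us */
        connect(fd, (struct sockaddr *)&dst, sizeof(dst));

        send(fd, "x", 1, 0);              /* elicits ICMP port unreachable */
        usleep(100 * 1000);               /* give the error time to arrive */
        if (send(fd, "x", 1, 0) < 0)      /* error surfaces on the next call */
                perror("send");           /* typically: Connection refused */

        close(fd);
        return 0;
}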
/* We can only early demux multicast if there is a single matching socket.
 * If more than one socket is found, return NULL.
 */
static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
                                                  __be16 loc_port, __be32 loc_addr,
                                                  __be16 rmt_port, __be32 rmt_addr,
                                                  int dif)
{
        struct sock *sk, *result;
        unsigned short hnum = ntohs(loc_port);
        unsigned int slot = udp_hashfn(net, hnum, udp_table.mask);
        struct udp_hslot *hslot = &udp_table.hash[slot];

        /* Do not bother scanning a too big list */
        if (hslot->count > 10)
                return NULL;

        result = NULL;
        sk_for_each_rcu(sk, &hslot->head) {
                if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
                                        rmt_port, rmt_addr, dif, hnum)) {
                        if (result)
                                return NULL;
                        result = sk;
                }
        }

        return result;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Shawn Bohrer                        118      88.06%    1         33.33%
Eric Dumazet                        16       11.94%    2         66.67%
Total                               134      100.00%   3         100.00%

/* For unicast we should only early demux connected sockets or we can
 * break forwarding setups.  The chains here can be long so only check
 * if the first socket is an exact match and if not move on.
 */
static struct sock *__udp4_lib_demux_lookup(struct net *net,
                                            __be16 loc_port, __be32 loc_addr,
                                            __be16 rmt_port, __be32 rmt_addr,
                                            int dif)
{
        unsigned short hnum = ntohs(loc_port);
        unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
        unsigned int slot2 = hash2 & udp_table.mask;
        struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
        INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr);
        const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
        struct sock *sk;

        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
                if (INET_MATCH(sk, net, acookie, rmt_addr,
                               loc_addr, ports, dif))
                        return sk;
                /* Only check first socket in chain */
                break;
        }
        return NULL;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Shawn Bohrer                        121      90.98%    1         33.33%
Eric Dumazet                        8        6.02%     1         33.33%
Joe Perches                         4        3.01%     1         33.33%
Total                               133      100.00%   3         100.00%


void udp_v4_early_demux(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);
        const struct iphdr *iph;
        const struct udphdr *uh;
        struct sock *sk = NULL;
        struct dst_entry *dst;
        int dif = skb->dev->ifindex;
        int ours;

        /* validate the packet */
        if (!pskb_may_pull(skb, skb_transport_offset(skb) +
                                sizeof(struct udphdr)))
                return;

        iph = ip_hdr(skb);
        uh = udp_hdr(skb);

        if (skb->pkt_type == PACKET_BROADCAST ||
            skb->pkt_type == PACKET_MULTICAST) {
                struct in_device *in_dev = __in_dev_get_rcu(skb->dev);

                if (!in_dev)
                        return;

                /* we are supposed to accept bcast packets */
                if (skb->pkt_type == PACKET_MULTICAST) {
                        ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
                                               iph->protocol);
                        if (!ours)
                                return;
                }

                sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
                                                   uh->source, iph->saddr, dif);
        } else if (skb->pkt_type == PACKET_HOST) {
                sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
                                             uh->source, iph->saddr, dif);
        }

        if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
                return;

        skb->sk = sk;
        skb->destructor = sock_efree;
        dst = READ_ONCE(sk->sk_rx_dst);

        if (dst)
                dst = dst_check(dst, 0);
        if (dst) {
                /* DST_NOCACHE can not be used without taking a reference */
                if (dst->flags & DST_NOCACHE) {
                        if (likely(atomic_inc_not_zero(&dst->__refcnt)))
                                skb_dst_set(skb, dst);
                } else {
                        skb_dst_set_noref(skb, dst);
                }
        }
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Shawn Bohrer                        233      72.36%    2         28.57%
Eric Dumazet                        77       23.91%    3         42.86%
Paolo Abeni                         11       3.42%     1         14.29%
Alexander Duyck                     1        0.31%     1         14.29%
Total                               322      100.00%   7         100.00%

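Early demux trades a per-packet lookup at ingress for skipping the full socket lookup later, which is why the unicast path above deliberately restricts itself to connected sockets: on a pure forwarding box the extra lookup is wasted work. If memory serves, the whole mechanism can also be switched off with the net.ipv4.ip_early_demux sysctl, which has existed since around 3.6.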

int udp_rcv(struct sk_buff *skb)
{
        return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     16       76.19%    1         33.33%
Hideaki Yoshifuji / 吉藤英明        3        14.29%    1         33.33%
Eric Dumazet                        2        9.52%     1         33.33%
Total                               21       100.00%   3         100.00%


void udp_destroy_sock(struct sock *sk)
{
        struct udp_sock *up = udp_sk(sk);
        bool slow = lock_sock_fast(sk);

        udp_flush_pending_frames(sk);
        unlock_sock_fast(sk, slow);
        if (static_key_false(&udp_encap_needed) && up->encap_type) {
                void (*encap_destroy)(struct sock *sk);

                encap_destroy = ACCESS_ONCE(up->encap_destroy);
                if (encap_destroy)
                        encap_destroy(sk);
        }
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Tom Parkin                          54       64.29%    1         20.00%
Alexey Kuznetsov                    19       22.62%    1         20.00%
Eric Dumazet                        8        9.52%     1         20.00%
David S. Miller                     2        2.38%     1         20.00%
Brian Haley                         1        1.19%     1         20.00%
Total                               84       100.00%   5         100.00%

/*
 *      Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, unsigned int optlen,
                       int (*push_pending_frames)(struct sock *))
{
        struct udp_sock *up = udp_sk(sk);
        int val, valbool;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        valbool = val ? 1 : 0;

        switch (optname) {
        case UDP_CORK:
                if (val != 0) {
                        up->corkflag = 1;
                } else {
                        up->corkflag = 0;
                        lock_sock(sk);
                        push_pending_frames(sk);
                        release_sock(sk);
                }
                break;

        case UDP_ENCAP:
                switch (val) {
                case 0:
                case UDP_ENCAP_ESPINUDP:
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        up->encap_rcv = xfrm4_udp_encap_rcv;
                        /* FALLTHROUGH */
                case UDP_ENCAP_L2TPINUDP:
                        up->encap_type = val;
                        udp_encap_enable();
                        break;
                default:
                        err = -ENOPROTOOPT;
                        break;
                }
                break;

        case UDP_NO_CHECK6_TX:
                up->no_check6_tx = valbool;
                break;

        case UDP_NO_CHECK6_RX:
                up->no_check6_rx = valbool;
                break;

        /*
         *      UDP-Lite's partial checksum coverage (RFC 3828).
         */
        /* The sender sets actual checksum coverage length via this option.
         * The case coverage > packet length is handled by send module. */
        case UDPLITE_SEND_CSCOV:
                if (!is_udplite)         /* Disable the option on UDP sockets */
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
                        val = 8;
                else if (val > USHRT_MAX)
                        val = USHRT_MAX;
                up->pcslen = val;
                up->pcflag |= UDPLITE_SEND_CC;
                break;

        /* The receiver specifies a minimum checksum coverage value. To make
         * sense, this should be set to at least 8 (as done below). If zero is
         * used, this again means full checksum coverage.                     */
        case UDPLITE_RECV_CSCOV:
                if (!is_udplite)         /* Disable the option on UDP sockets */
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Avoid silly minimal values.       */
                        val = 8;
                else if (val > USHRT_MAX)
                        val = USHRT_MAX;
                up->pcrlen = val;
                up->pcflag |= UDPLITE_RECV_CC;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

        return err;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Alexey Kuznetsov                    106      31.74%    1         4.55%
Gerrit Renker                       95       28.44%    3         13.64%
David S. Miller                     31       9.28%     2         9.09%
Tom Herbert                         30       8.98%     1         4.55%
Linus Torvalds (pre-git)            16       4.79%     5         22.73%
Herbert Xu                          15       4.49%     1         4.55%
Derek Atkins                        10       2.99%     1         4.55%
James Chapman                       10       2.99%     2         9.09%
Wang Chen                           10       2.99%     1         4.55%
Alexey Dobriyan                     4        1.20%     1         4.55%
Eric Dumazet                        3        0.90%     1         4.55%
Al Viro                             2        0.60%     1         4.55%
Hideaki Yoshifuji / 吉藤英明        1        0.30%     1         4.55%
Arnaldo Carvalho de Melo            1        0.30%     1         4.55%
Total                               334      100.00%   22        100.00%

EXPORT_SYMBOL(udp_lib_setsockopt);
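The UDP_CORK case above is what makes scatter-style sending work: while the cork is set, every send() appends to a single pending datagram, and clearing it runs the push_pending_frames callback. A minimal userspace sketch, assuming the standard Linux UDP_CORK option at the SOL_UDP/IPPROTO_UDP level (send_corked() is an illustrative name):

#include <string.h>
#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

#ifndef UDP_CORK
#define UDP_CORK 1                      /* from linux/udp.h */
#endif

/* Accumulate two buffers into one datagram, then flush it. */
static int send_corked(int fd, const char *a, const char *b)
{
        int on = 1, off = 0;

        if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
                return -1;
        send(fd, a, strlen(a), 0);      /* appended to the pending datagram */
        send(fd, b, strlen(b), 0);
        /* clearing the cork runs push_pending_frames() in the kernel */
        return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}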
int udp_setsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, unsigned int optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
                                          udp_push_pending_frames);
        return ip_setsockopt(sk, level, optname, optval, optlen);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     65       100.00%   2         100.00%
Total                               65       100.00%   2         100.00%

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
                                          udp_push_pending_frames);
        return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     62       95.38%    2         50.00%
Dmitry Mishin                       2        3.08%     1         25.00%
Alexey Kuznetsov                    1        1.54%     1         25.00%
Total                               65       100.00%   4         100.00%

#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        struct udp_sock *up = udp_sk(sk);
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        len = min_t(unsigned int, len, sizeof(int));

        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case UDP_CORK:
                val = up->corkflag;
                break;

        case UDP_ENCAP:
                val = up->encap_type;
                break;

        case UDP_NO_CHECK6_TX:
                val = up->no_check6_tx;
                break;

        case UDP_NO_CHECK6_RX:
                val = up->no_check6_rx;
                break;

        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
                val = up->pcslen;
                break;

        case UDPLITE_RECV_CSCOV:
                val = up->pcrlen;
                break;

        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Alexey Kuznetsov                    70       39.11%    1         10.00%
Dmitry Mishin                       39       21.79%    1         10.00%
Gerrit Renker                       22       12.29%    2         20.00%
Tom Herbert                         20       11.17%    1         10.00%
Linus Torvalds (pre-git)            11       6.15%     3         30.00%
Derek Atkins                        10       5.59%     1         10.00%
Linus Torvalds                      7        3.91%     1         10.00%
Total                               179      100.00%   10        100.00%

EXPORT_SYMBOL(udp_lib_getsockopt);
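The UDP-Lite coverage options round-trip through the set/get pair above. A hedged sketch of setting and reading back send coverage on a UDP-Lite socket; the numeric fallback defines mirror the usual Linux uapi values and are only needed where libc headers lack them:

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE    136          /* from linux/in.h */
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV 10           /* from linux/udp.h */
#endif

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
        int cov = 20;                   /* checksum only the first 20 bytes */
        socklen_t len = sizeof(cov);

        if (fd < 0)
                return 1;
        /* values 1..7 are rounded up to 8 by udp_lib_setsockopt() */
        setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
        getsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, &len);
        printf("send coverage: %d\n", cov);
        return 0;
}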
int udp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
        return ip_getsockopt(sk, level, optname, optval, optlen);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     64       100.00%   1         100.00%
Total                               64       100.00%   1         100.00%

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
        return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     64       100.00%   1         100.00%
Total                               64       100.00%   1         100.00%

#endif

/**
 *      udp_poll - wait for a UDP event.
 *      @file:  file struct
 *      @sock:  socket
 *      @wait:  poll table
 *
 *      This is the same as datagram poll, except for the special case of
 *      blocking sockets. If an application is using a blocking fd and a
 *      packet with a checksum error is in the queue, select() could return
 *      indicating data is available while the subsequent read blocks.
 *      Add special-case code to work around these arguably broken
 *      applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;

        sock_rps_record_flow(sk);

        /* Check for false positives due to checksum errors */
        if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                mask &= ~(POLLIN | POLLRDNORM);

        return mask;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Stephen Hemminger                   67       71.28%    1         14.29%
Hideaki Yoshifuji / 吉藤英明        14       14.89%    1         14.29%
David Majnemer                      5        5.32%     1         14.29%
Eric Dumazet                        4        4.26%     2         28.57%
Pavel Emelyanov                     3        3.19%     1         14.29%
Herbert Xu                          1        1.06%     1         14.29%
Total                               94       100.00%   7         100.00%

EXPORT_SYMBOL(udp_poll);
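Even with the first_packet_length() workaround above, a robust application should treat poll() readiness as a hint rather than a guarantee, since a queued datagram can still vanish (for example, to a checksum error) between the wakeup and the read. A minimal sketch of that defensive pattern; recv_one() is an illustrative name:

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Returns bytes read, 0 on a spurious wakeup, -1 on a real error. */
static ssize_t recv_one(int fd, void *buf, size_t len)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        ssize_t n;

        if (poll(&pfd, 1, -1) < 0)
                return -1;
        n = recv(fd, buf, len, MSG_DONTWAIT);  /* never block on a stale hint */
        if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
                return 0;               /* datagram vanished, e.g. bad checksum */
        return n;
}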
int udp_abort(struct sock *sk, int err)
{
        lock_sock(sk);

        sk->sk_err = err;
        sk->sk_error_report(sk);
        __udp_disconnect(sk, 0);

        release_sock(sk);

        return 0;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David Ahern                         45       97.83%    1         50.00%
Eric Dumazet                        1        2.17%     1         50.00%
Total                               46       100.00%   2         100.00%

EXPORT_SYMBOL_GPL(udp_abort);

struct proto udp_prot = {
        .name              = "UDP",
        .owner             = THIS_MODULE,
        .close             = udp_lib_close,
        .connect           = ip4_datagram_connect,
        .disconnect        = udp_disconnect,
        .ioctl             = udp_ioctl,
        .init              = udp_init_sock,
        .destroy           = udp_destroy_sock,
        .setsockopt        = udp_setsockopt,
        .getsockopt        = udp_getsockopt,
        .sendmsg           = udp_sendmsg,
        .recvmsg           = udp_recvmsg,
        .sendpage          = udp_sendpage,
        .release_cb        = ip4_datagram_release_cb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .rehash            = udp_v4_rehash,
        .get_port          = udp_v4_get_port,
        .memory_allocated  = &udp_memory_allocated,
        .sysctl_mem        = sysctl_udp_mem,
        .sysctl_wmem       = &sysctl_udp_wmem_min,
        .sysctl_rmem       = &sysctl_udp_rmem_min,
        .obj_size          = sizeof(struct udp_sock),
        .h.udp_table       = &udp_table,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
        .compat_getsockopt = compat_udp_getsockopt,
#endif
        .diag_destroy      = udp_abort,
};
EXPORT_SYMBOL(udp_prot);

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
static struct sock *udp_get_first(struct seq_file *seq, int start)
{
        struct sock *sk;
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);

        for (state->bucket = start; state->bucket <= state->udp_table->mask;
             ++state->bucket) {
                struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

                if (hlist_empty(&hslot->head))
                        continue;

                spin_lock_bh(&hslot->lock);
                sk_for_each(sk, &hslot->head) {
                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (sk->sk_family == state->family)
                                goto found;
                }
                spin_unlock_bh(&hslot->lock);
        }
        sk = NULL;
found:
        return sk;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Eric Dumazet                        55       35.95%    3         25.00%
Hideaki Yoshifuji / 吉藤英明        48       31.37%    4         33.33%
Arnaldo Carvalho de Melo            33       21.57%    3         25.00%
Daniel Lezcano                      13       8.50%     1         8.33%
Denis V. Lunev                      4        2.61%     1         8.33%
Total                               153      100.00%   12        100.00%


static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);

        do {
                sk = sk_next(sk);
        } while (sk && (!net_eq(sock_net(sk), net) ||
                        sk->sk_family != state->family));

        if (!sk) {
                if (state->bucket <= state->udp_table->mask)
                        spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
                return udp_get_first(seq, state->bucket + 1);
        }
        return sk;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Hideaki Yoshifuji / 吉藤英明        64       51.61%    3         21.43%
Eric Dumazet                        28       22.58%    4         28.57%
Daniel Lezcano                      9        7.26%     1         7.14%
Arnaldo Carvalho de Melo            8        6.45%     2         14.29%
Vitaly Mayatskikh                   6        4.84%     1         7.14%
Denis V. Lunev                      4        3.23%     1         7.14%
Pavel Emelyanov                     3        2.42%     1         7.14%
Gerrit Renker                       2        1.61%     1         7.14%
Total                               124      100.00%   14        100.00%


static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
        struct sock *sk = udp_get_first(seq, 0);

        if (sk)
                while (pos && (sk = udp_get_next(seq, sk)) != NULL)
                        --pos;
        return pos ? NULL : sk;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Hideaki Yoshifuji / 吉藤英明        50       84.75%    1         25.00%
Arnaldo Carvalho de Melo            7        11.86%    2         50.00%
Eric Dumazet                        2        3.39%     1         25.00%
Total                               59       100.00%   4         100.00%


static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct udp_iter_state *state = seq->private;

        state->bucket = MAX_UDP_PORTS;

        return *pos ? udp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Arnaldo Carvalho de Melo            26       55.32%    1         20.00%
Vitaly Mayatskikh                   14       29.79%    1         20.00%
Hideaki Yoshifuji / 吉藤英明        6        12.77%    2         40.00%
Eric Dumazet                        1        2.13%     1         20.00%
Total                               47       100.00%   5         100.00%


static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *sk;

        if (v == SEQ_START_TOKEN)
                sk = udp_get_idx(seq, 0);
        else
                sk = udp_get_next(seq, v);

        ++*pos;
        return sk;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Arnaldo Carvalho de Melo            51       89.47%    1         33.33%
Hideaki Yoshifuji / 吉藤英明        6        10.53%    2         66.67%
Total                               57       100.00%   3         100.00%


static void udp_seq_stop(struct seq_file *seq, void *v)
{
        struct udp_iter_state *state = seq->private;

        if (state->bucket <= state->udp_table->mask)
                spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Eric Dumazet                        34       64.15%    2         66.67%
Arnaldo Carvalho de Melo            19       35.85%    1         33.33%
Total                               53       100.00%   3         100.00%


int udp_seq_open(struct inode *inode, struct file *file)
{
        struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
        struct udp_iter_state *s;
        int err;

        err = seq_open_net(inode, file, &afinfo->seq_ops,
                           sizeof(struct udp_iter_state));
        if (err < 0)
                return err;

        s = ((struct seq_file *)file->private_data)->private;
        s->family    = afinfo->family;
        s->udp_table = afinfo->udp_table;
        return err;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Denis V. Lunev                      36       37.50%    2         28.57%
Hideaki Yoshifuji / 吉藤英明        26       27.08%    1         14.29%
Daniel Lezcano                      16       16.67%    1         14.29%
Arnaldo Carvalho de Melo            15       15.62%    1         14.29%
Eric Dumazet                        2        2.08%     1         14.29%
Al Viro                             1        1.04%     1         14.29%
Total                               96       100.00%   7         100.00%

EXPORT_SYMBOL(udp_seq_open);

/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
        struct proc_dir_entry *p;
        int rc = 0;

        afinfo->seq_ops.start = udp_seq_start;
        afinfo->seq_ops.next  = udp_seq_next;
        afinfo->seq_ops.stop  = udp_seq_stop;

        p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
                             afinfo->seq_fops, afinfo);
        if (!p)
                rc = -ENOMEM;
        return rc;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Hideaki Yoshifuji / 吉藤英明        37       44.58%    2         33.33%
Denis V. Lunev                      30       36.14%    2         33.33%
Arnaldo Carvalho de Melo            11       13.25%    1         16.67%
Daniel Lezcano                      5        6.02%     1         16.67%
Total                               83       100.00%   6         100.00%

EXPORT_SYMBOL(udp_proc_register);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
        remove_proc_entry(afinfo->name, net->proc_net);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Hideaki Yoshifuji / 吉藤英明        9        34.62%    1         25.00%
Arnaldo Carvalho de Melo            7        26.92%    1         25.00%
Gao Feng                            5        19.23%    1         25.00%
Daniel Lezcano                      5        19.23%    1         25.00%
Total                               26       100.00%   4         100.00%

EXPORT_SYMBOL(udp_proc_unregister);

/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                             int bucket)
{
        struct inet_sock *inet = inet_sk(sp);
        __be32 dest = inet->inet_daddr;
        __be32 src  = inet->inet_rcv_saddr;
        __u16 destp = ntohs(inet->inet_dport);
        __u16 srcp  = ntohs(inet->inet_sport);

        seq_printf(f, "%5d: %08X:%04X %08X:%04X"
                   " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
                   bucket, src, srcp, dest, destp, sp->sk_state,
                   sk_wmem_alloc_get(sp),
                   sk_rmem_alloc_get(sp),
                   0, 0L, 0,
                   from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
                   0, sock_i_ino(sp),
                   atomic_read(&sp->sk_refcnt), sp,
                   atomic_read(&sp->sk_drops));
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     110      79.14%    1         12.50%
Eric Dumazet                        14       10.07%    4         50.00%
Eric W. Biedermann                  8        5.76%     1         12.50%
Pavel Emelyanov                     6        4.32%     1         12.50%
Tetsuo Handa                        1        0.72%     1         12.50%
Total                               139      100.00%   8         100.00%


int udp4_seq_show(struct seq_file *seq, void *v)
{
        seq_setwidth(seq, 127);
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, " sl local_address rem_address st tx_queue "
                         "rx_queue tr tm->when retrnsmt uid timeout "
                         "inode ref pointer drops");
        else {
                struct udp_iter_state *state = seq->private;

                udp4_format_sock(v, seq, state->bucket);
        }
        seq_pad(seq, '\n');
        return 0;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     56       81.16%    1         25.00%
Tetsuo Handa                        11       15.94%    1         25.00%
Eric Dumazet                        1        1.45%     1         25.00%
Pavel Emelyanov                     1        1.45%     1         25.00%
Total                               69       100.00%   4         100.00%

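The seq_printf() format in udp4_format_sock() fixes the field layout of /proc/net/udp, so the file can be parsed column by column. A minimal reader sketch; the sscanf pattern mirrors the format string above, and the address decoding assumes the usual little-endian in-memory presentation of the __be32 (e.g. "0100007F" for 127.0.0.1):

#include <stdio.h>

int main(void)
{
        char line[512];
        unsigned int laddr, lport, drops;
        FILE *f = fopen("/proc/net/udp", "r");

        if (!f)
                return 1;
        fgets(line, sizeof(line), f);   /* skip the header row */
        while (fgets(line, sizeof(line), f)) {
                /* sl, local addr:port, remote addr:port, st, tx:rx,
                 * tr:tm->when, retrnsmt, uid, timeout, inode, ref,
                 * pointer, drops -- capture local addr/port and drops */
                if (sscanf(line,
                           "%*d: %8X:%4X %*X:%*X %*X %*X:%*X %*X:%*X %*X %*u %*d %*u %*d %*s %u",
                           &laddr, &lport, &drops) == 3)
                        printf("%u.%u.%u.%u:%u drops=%u\n",
                               laddr & 0xff, (laddr >> 8) & 0xff,
                               (laddr >> 16) & 0xff, laddr >> 24,
                               lport, drops);
        }
        fclose(f);
        return 0;
}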
static const struct file_operations udp_afinfo_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = udp_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net
};

/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
        .name      = "udp",
        .family    = AF_INET,
        .udp_table = &udp_table,
        .seq_fops  = &udp_afinfo_seq_fops,
        .seq_ops   = {
                .show = udp4_seq_show,
        },
};
static int __net_init udp4_proc_init_net(struct net *net)
{
        return udp_proc_register(net, &udp4_seq_afinfo);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Pavel Emelyanov                     12       57.14%    1         33.33%
David S. Miller                     8        38.10%    1         33.33%
Alexey Dobriyan                     1        4.76%     1         33.33%
Total                               21       100.00%   3         100.00%


static void __net_exit udp4_proc_exit_net(struct net *net)
{
        udp_proc_unregister(net, &udp4_seq_afinfo);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Pavel Emelyanov                     15       75.00%    1         25.00%
David S. Miller                     2        10.00%    1         25.00%
Daniel Lezcano                      2        10.00%    1         25.00%
Alexey Dobriyan                     1        5.00%     1         25.00%
Total                               20       100.00%   4         100.00%

static struct pernet_operations udp4_net_ops = {
        .init = udp4_proc_init_net,
        .exit = udp4_proc_exit_net,
};
int __init udp4_proc_init(void)
{
        return register_pernet_subsys(&udp4_net_ops);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Pavel Emelyanov                     13       86.67%    1         50.00%
David S. Miller                     2        13.33%    1         50.00%
Total                               15       100.00%   2         100.00%


void udp4_proc_exit(void)
{
        unregister_pernet_subsys(&udp4_net_ops);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     11       84.62%    1         50.00%
Pavel Emelyanov                     2        15.38%    1         50.00%
Total                               13       100.00%   2         100.00%

#endif /* CONFIG_PROC_FS */

static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtoul(str, 0, &uhash_entries);
        if (ret)
                return 0;

        if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
                uhash_entries = UDP_HTABLE_SIZE_MIN;
        return 1;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Eric Dumazet                        41       73.21%    1         50.00%
Eldad Zack                          15       26.79%    1         50.00%
Total                               56       100.00%   2         100.00%

__setup("uhash_entries=", set_uhash_entries);
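For example, booting with uhash_entries=1024 on the kernel command line requests a 1024-entry UDP hash table; values between 1 and UDP_HTABLE_SIZE_MIN - 1 are rounded up by the handler above, and leaving the parameter unset lets alloc_large_system_hash() size the table from available memory in udp_table_init() below.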
void __init udp_table_init(struct udp_table *table, const char *name)
{
        unsigned int i;

        table->hash = alloc_large_system_hash(name,
                                              2 * sizeof(struct udp_hslot),
                                              uhash_entries,
                                              21, /* one slot per 2 MB */
                                              0,
                                              &table->log,
                                              &table->mask,
                                              UDP_HTABLE_SIZE_MIN,
                                              64 * 1024);

        table->hash2 = table->hash + (table->mask + 1);
        for (i = 0; i <= table->mask; i++) {
                INIT_HLIST_HEAD(&table->hash[i].head);
                table->hash[i].count = 0;
                spin_lock_init(&table->hash[i].lock);
        }
        for (i = 0; i <= table->mask; i++) {
                INIT_HLIST_HEAD(&table->hash2[i].head);
                table->hash2[i].count = 0;
                spin_lock_init(&table->hash2[i].lock);
        }
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Eric Dumazet                        182      98.91%    5         83.33%
Tim Bird                            2        1.09%     1         16.67%
Total                               184      100.00%   6         100.00%


u32 udp_flow_hashrnd(void)
{
        static u32 hashrnd __read_mostly;

        net_get_random_once(&hashrnd, sizeof(hashrnd));

        return hashrnd;
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Tom Herbert                         26       100.00%   1         100.00%
Total                               26       100.00%   1         100.00%

EXPORT_SYMBOL(udp_flow_hashrnd);
void __init udp_init(void)
{
        unsigned long limit;
        unsigned int i;

        udp_table_init(&udp_table, "UDP");
        limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
        sysctl_udp_mem[0] = limit / 4 * 3;
        sysctl_udp_mem[1] = limit;
        sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

        sysctl_udp_rmem_min = SK_MEM_QUANTUM;
        sysctl_udp_wmem_min = SK_MEM_QUANTUM;

        /* 16 spinlocks per cpu */
        udp_busylocks_log = ilog2(nr_cpu_ids) + 4;
        udp_busylocks = kmalloc(sizeof(spinlock_t) << udp_busylocks_log,
                                GFP_KERNEL);
        if (!udp_busylocks)
                panic("UDP: failed to alloc udp_busylocks\n");
        for (i = 0; i < (1U << udp_busylocks_log); i++)
                spin_lock_init(udp_busylocks + i);
}

Contributors

Person                              Tokens   Prop      Commits   CommitProp
Eric Dumazet                        74       54.41%    4         80.00%
Hideo Aoki                          62       45.59%    1         20.00%
Total                               136      100.00%   5         100.00%


Overall Contributors

Person                              Tokens   Prop      Commits   CommitProp
David S. Miller                     4075     32.75%    22        7.31%
Eric Dumazet                        3257     26.17%    80        26.58%
Shawn Bohrer                        591      4.75%     4         1.33%
Paolo Abeni                         545      4.38%     7         2.33%
Tom Herbert                         507      4.07%     13        4.32%
Craig Gallek                        355      2.85%     4         1.33%
Herbert Xu                          315      2.53%     5         1.66%
Hideaki Yoshifuji / 吉藤英明        292      2.35%     11        3.65%
Arnaldo Carvalho de Melo            205      1.65%     7         2.33%
Alexey Kuznetsov                    201      1.62%     2         0.66%
Pavel Emelyanov                     197      1.58%     17        5.65%
Gerrit Renker                       163      1.31%     4         1.33%
Linus Torvalds (pre-git)            136      1.09%     17        5.65%
David Held                          125      1.00%     2         0.66%
KOVACS Krisztian                    114      0.92%     2         0.66%
Stephen Hemminger                   93       0.75%     3         1.00%
Denis V. Lunev                      93       0.75%     8         2.66%
Hideo Aoki                          93       0.75%     1         0.33%
Hannes Frederic Sowa                90       0.72%     7         2.33%
Robert Shearman                     78       0.63%     1         0.33%
David Ahern                         74       0.59%     2         0.66%
Tom Parkin                          54       0.43%     1         0.33%
Edward Cree                         54       0.43%     2         0.66%
Eric Garver                         53       0.43%     1         0.33%
Joe Perches                         53       0.43%     5         1.66%
Daniel Lezcano                      50       0.40%     2         0.66%
samanthakumar                       45       0.36%     2         0.66%
Dmitry Mishin                       41       0.33%     1         0.33%
Arjan van de Ven                    40       0.32%     1         0.33%
Xuemin Su                           26       0.21%     1         0.33%
Soheil Hassas Yeganeh               26       0.21%     1         0.33%
Américo Wang                        23       0.18%     3         1.00%
Vitaly Mayatskikh                   20       0.16%     1         0.33%
Derek Atkins                        20       0.16%     1         0.33%
Björn Mork                          18       0.14%     1         0.33%
Francesco Fusco                     17       0.14%     1         0.33%
Rick Jones                          16       0.13%     1         0.33%
Erich E. Hoover                     16       0.13%     1         0.33%
Eldad Zack                          15       0.12%     1         0.33%
Julian Anastasov                    14       0.11%     1         0.33%
Linus Torvalds                      14       0.11%     3         1.00%
Daniel Borkmann                     14       0.11%     2         0.66%
Steffen Hurrle                      14       0.11%     1         0.33%
Eric W. Biedermann                  13       0.10%     3         1.00%
Anton Arapov                        13       0.10%     1         0.33%
Tetsuo Handa                        12       0.10%     1         0.33%
Michal Kubeček                      10       0.08%     1         0.33%
Wang Chen                           10       0.08%     1         0.33%
James Chapman                       10       0.08%     2         0.66%
Shawn Landden                       10       0.08%     1         0.33%
Satoru Moriya                       10       0.08%     1         0.33%
Jesper Dangaard Brouer              8        0.06%     2         0.66%
Eliezer Tamir                       7        0.06%     2         0.66%
Xufeng Zhang                        7        0.06%     1         0.33%
Patrick Ohly                        7        0.06%     1         0.33%
Alexey Dobriyan                     6        0.05%     2         0.66%
Pablo Neira Ayuso                   6        0.05%     1         0.33%
Arnd Bergmann                       6        0.05%     1         0.33%
Harvey Harrison                     6        0.05%     1         0.33%
Steffen Klassert                    5        0.04%     1         0.33%
David Majnemer                      5        0.04%     1         0.33%
Gao Feng                            5        0.04%     1         0.33%
Al Viro                             5        0.04%     4         1.33%
Thomas Graf                         5        0.04%     1         0.33%
Josef Bacik                         4        0.03%     1         0.33%
Lorenzo Colitti                     4        0.03%     1         0.33%
Thomas Gleixner                     3        0.02%     1         0.33%
Tejun Heo                           3        0.02%     1         0.33%
Alexander Duyck                     3        0.02%     2         0.66%
Duan Jiong                          3        0.02%     1         0.33%
Atis Elsts                          3        0.02%     1         0.33%
Tim Bird                            2        0.02%     1         0.33%
Ian Morris                          2        0.02%     1         0.33%
Octavian Purdila                    2        0.02%     1         0.33%
Willem de Bruijn                    1        0.01%     1         0.33%
Oliver Hartkopp                     1        0.01%     1         0.33%
Alan Cox                            1        0.01%     1         0.33%
Brian Haley                         1        0.01%     1         0.33%
Aruna-Hewapathirane                 1        0.01%     1         0.33%
Neil Horman                         1        0.01%     1         0.33%
Jorge Boncompte                     1        0.01%     1         0.33%
Total                               12444    100.00%   301       100.00%
Created with cregit.