cregit-Linux: how code gets into the kernel
Release 4.8, net/ipv4/udp.c (directory: net/ipv4)

/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The User Datagram Protocol (UDP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() calls
 *              Alan Cox        :       stopped close while in use off icmp
 *                                      messages. Not a fix but a botch that
 *                                      for udp at least is 'valid'.
 *              Alan Cox        :       Fixed icmp handling properly
 *              Alan Cox        :       Correct error for oversized datagrams
 *              Alan Cox        :       Tidied select() semantics.
 *              Alan Cox        :       udp_err() fixed properly, also now
 *                                      select and read wake correctly on errors
 *              Alan Cox        :       udp_send verify_area moved to avoid mem leak
 *              Alan Cox        :       UDP can count its memory
 *              Alan Cox        :       send to an unknown connection causes
 *                                      an ECONNREFUSED off the icmp, but
 *                                      does NOT close.
 *              Alan Cox        :       Switched to new sk_buff handlers. No more backlog!
 *              Alan Cox        :       Using generic datagram code. Even smaller and the PEEK
 *                                      bug no longer crashes it.
 *              Fred Van Kempen :       Net2e support for sk->broadcast.
 *              Alan Cox        :       Uses skb_free_datagram
 *              Alan Cox        :       Added get/set sockopt support.
 *              Alan Cox        :       Broadcasting without option set returns EACCES.
 *              Alan Cox        :       No wakeup calls. Instead we now use the callbacks.
 *              Alan Cox        :       Use ip_tos and ip_ttl
 *              Alan Cox        :       SNMP Mibs
 *              Alan Cox        :       MSG_DONTROUTE, and 0.0.0.0 support.
 *              Matt Dillon     :       UDP length checks.
 *              Alan Cox        :       Smarter af_inet used properly.
 *              Alan Cox        :       Use new kernel side addressing.
 *              Alan Cox        :       Incorrect return on truncated datagram receive.
 *      Arnt Gulbrandsen        :       New udp_send and stuff
 *              Alan Cox        :       Cache last socket
 *              Alan Cox        :       Route cache
 *              Jon Peatfield   :       Minor efficiency fix to sendto().
 *              Mike Shaver     :       RFC1122 checks.
 *              Alan Cox        :       Nonblocking error fix.
 *      Willy Konynenberg       :       Transparent proxying support.
 *              Mike McLagan    :       Routing by source
 *              David S. Miller :       New socket lookup architecture.
 *                                      Last socket cache retained as it
 *                                      does have a high hit rate.
 *              Olaf Kirch      :       Don't linearise iovec on sendmsg.
 *              Andi Kleen      :       Some cleanups, cache destination entry
 *                                      for connect.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *              Melvin Smith    :       Check msg_name not msg_namelen in sendto(),
 *                                      return ENOTCONN for unconnected sockets (POSIX)
 *              Janos Farkas    :       don't deliver multi/broadcasts to a different
 *                                      bound-to-device socket
 *      Hirokazu Takahashi      :       HW checksumming for outgoing UDP
 *                                      datagrams.
 *      Hirokazu Takahashi      :       sendfile() on UDP works now.
 *              Arnaldo C. Melo :       convert /proc/net/udp to seq_file
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov:               allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 *      Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *      James Chapman           :       Add L2TP encapsulation type.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */


#define pr_fmt(fmt) "UDP: " fmt

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>


struct udp_table udp_table __read_mostly;

EXPORT_SYMBOL(udp_table);


long sysctl_udp_mem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_udp_mem);


int sysctl_udp_rmem_min __read_mostly;

EXPORT_SYMBOL(sysctl_udp_rmem_min);


int sysctl_udp_wmem_min __read_mostly;

EXPORT_SYMBOL(sysctl_udp_wmem_min);


atomic_long_t udp_memory_allocated;

EXPORT_SYMBOL(udp_memory_allocated);


#define MAX_UDP_PORTS 65536

#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)


static int udp_lib_lport_inuse(struct net *net, __u16 num,
			       const struct udp_hslot *hslot,
			       unsigned long *bitmap,
			       struct sock *sk,
			       int (*saddr_comp)(const struct sock *sk1,
						 const struct sock *sk2,
						 bool match_wildcard),
			       unsigned int log)
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			if (!bitmap)
				return 1;
			__set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
		}
	}
	return 0;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
eric dumazet                  113        55.39%     6          31.58%
tom herbert                   31         15.20%     1          5.26%
gerrit renker                 19         9.31%      2          10.53%
craig gallek                  12         5.88%      1          5.26%
pavel emelianov               9          4.41%      1          5.26%
hideaki yoshifuji             7          3.43%      2          10.53%
joe perches                   6          2.94%      1          5.26%
david s. miller               3          1.47%      2          10.53%
pre-git                       2          0.98%      1          5.26%
al viro                       1          0.49%      1          5.26%
stephen hemminger             1          0.49%      1          5.26%
Total                         204        100.00%    19         100.00%

/*
 * Note: we still hold spinlock of primary hash chain, so no other writer
 * can insert/delete a socket with local_port == num
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
				struct udp_hslot *hslot2,
				struct sock *sk,
				int (*saddr_comp)(const struct sock *sk1,
						  const struct sock *sk2,
						  bool match_wildcard))
{
	struct sock *sk2;
	kuid_t uid = sock_i_uid(sk);
	int res = 0;

	spin_lock(&hslot2->lock);
	udp_portaddr_for_each_entry(sk2, &hslot2->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    (udp_sk(sk2)->udp_port_hash == num) &&
		    (!sk2->sk_reuse || !sk->sk_reuse) &&
		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    (!sk2->sk_reuseport || !sk->sk_reuseport ||
		     rcu_access_pointer(sk->sk_reuseport_cb) ||
		     !uid_eq(uid, sock_i_uid(sk2))) &&
		    saddr_comp(sk, sk2, true)) {
			res = 1;
			break;
		}
	}
	spin_unlock(&hslot2->lock);
	return res;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
eric dumazet                  151        77.04%     1          25.00%
tom herbert                   31         15.82%     1          25.00%
craig gallek                  12         6.12%      1          25.00%
joe perches                   2          1.02%      1          25.00%
Total                         196        100.00%    4          100.00%


static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
				  int (*saddr_same)(const struct sock *sk1,
						    const struct sock *sk2,
						    bool match_wildcard))
{
	struct net *net = sock_net(sk);
	kuid_t uid = sock_i_uid(sk);
	struct sock *sk2;

	sk_for_each(sk2, &hslot->head) {
		if (net_eq(sock_net(sk2), net) &&
		    sk2 != sk &&
		    sk2->sk_family == sk->sk_family &&
		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
		    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
		    (*saddr_same)(sk, sk2, false)) {
			return reuseport_add_sock(sk, sk2);
		}
	}

	/* Initial allocation may have already happened via setsockopt */
	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return reuseport_alloc(sk);
	return 0;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
craig gallek                  184        99.46%     1          50.00%
eric dumazet                  1          0.54%      1          50.00%
Total                         185        100.00%    2          100.00%

/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
		     int (*saddr_comp)(const struct sock *sk1,
				       const struct sock *sk2,
				       bool match_wildcard),
		     unsigned int hash2_nulladdr)
{
	struct udp_hslot *hslot, *hslot2;
	struct udp_table *udptable = sk->sk_prot->h.udp_table;
	int error = 1;
	struct net *net = sock_net(sk);

	if (!snum) {
		int low, high, remaining;
		unsigned int rand;
		unsigned short first, last;
		DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

		inet_get_local_port_range(net, &low, &high);
		remaining = (high - low) + 1;

		rand = prandom_u32();
		first = reciprocal_scale(rand, remaining) + low;
		/*
		 * force rand to be an odd multiple of UDP_HTABLE_SIZE
		 */
		rand = (rand | 1) * (udptable->mask + 1);
		last = first + udptable->mask + 1;
		do {
			hslot = udp_hashslot(udptable, net, first);
			bitmap_zero(bitmap, PORTS_PER_CHAIN);
			spin_lock_bh(&hslot->lock);
			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
					    saddr_comp, udptable->log);

			snum = first;
			/*
			 * Iterate on all possible values of snum for this hash.
			 * Using steps of an odd multiple of UDP_HTABLE_SIZE
			 * give us randomization and full range coverage.
			 */
			do {
				if (low <= snum && snum <= high &&
				    !test_bit(snum >> udptable->log, bitmap) &&
				    !inet_is_local_reserved_port(net, snum))
					goto found;
				snum += rand;
			} while (snum != first);
			spin_unlock_bh(&hslot->lock);
		} while (++first != last);
		goto fail;
	} else {
		hslot = udp_hashslot(udptable, net, snum);
		spin_lock_bh(&hslot->lock);
		if (hslot->count > 10) {
			int exist;
			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

			slot2 &= udptable->mask;
			hash2_nulladdr &= udptable->mask;

			hslot2 = udp_hashslot2(udptable, slot2);
			if (hslot->count < hslot2->count)
				goto scan_primary_hash;

			exist = udp_lib_lport_inuse2(net, snum, hslot2,
						     sk, saddr_comp);
			if (!exist && (hash2_nulladdr != slot2)) {
				hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
				exist = udp_lib_lport_inuse2(net, snum, hslot2,
							     sk, saddr_comp);
			}
			if (exist)
				goto fail_unlock;
			else
				goto found;
		}
scan_primary_hash:
		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
					saddr_comp, 0))
			goto fail_unlock;
	}
found:
	inet_sk(sk)->inet_num = snum;
	udp_sk(sk)->udp_port_hash = snum;
	udp_sk(sk)->udp_portaddr_hash ^= snum;
	if (sk_unhashed(sk)) {
		if (sk->sk_reuseport &&
		    udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
			inet_sk(sk)->inet_num = 0;
			udp_sk(sk)->udp_port_hash = 0;
			udp_sk(sk)->udp_portaddr_hash ^= snum;
			goto fail_unlock;
		}

		sk_add_node_rcu(sk, &hslot->head);
		hslot->count++;
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
		spin_lock(&hslot2->lock);
		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
		    sk->sk_family == AF_INET6)
			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		else
			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
					   &hslot2->head);
		hslot2->count++;
		spin_unlock(&hslot2->lock);
	}
	sock_set_flag(sk, SOCK_RCU_FREE);
	error = 0;
fail_unlock:
	spin_unlock_bh(&hslot->lock);
fail:
	return error;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
eric dumazet                  414        60.53%     15         30.00%
craig gallek                  83         12.13%     2          4.00%
pre-git                       50         7.31%      8          16.00%
pavel emelianov               28         4.09%      5          10.00%
stephen hemminger             25         3.65%      2          4.00%
david s. miller               22         3.22%      5          10.00%
gerrit renker                 20         2.92%      2          4.00%
anton arapov                  13         1.90%      1          2.00%
americo wang                  8          1.17%      2          4.00%
arnaldo carvalho de melo      7          1.02%      2          4.00%
hideaki yoshifuji             4          0.58%      2          4.00%
daniel borkmann               4          0.58%      1          2.00%
linus torvalds                3          0.44%      1          2.00%
eric w. biederman             2          0.29%      1          2.00%
aruna-hewapathirane           1          0.15%      1          2.00%
Total                         684        100.00%    50         100.00%

EXPORT_SYMBOL(udp_lib_get_port);
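The unbound-port search in udp_lib_get_port() leans on a small arithmetic property: rand is forced to be an odd multiple of the hash-table size (a power of two), so repeatedly adding it to the 16-bit snum keeps every candidate in the same primary hash chain and visits each port of that chain exactly once before wrapping back to first. The stand-alone program below is an illustrative sketch of that property only, not kernel code; TABLE_SIZE and the starting values are made-up example numbers.

/* Illustrative sketch only: checks that stepping a 16-bit port by an odd
 * multiple of the hash-table size visits every port of one chain exactly
 * once, mirroring the "snum += rand" loop in udp_lib_get_port() above.
 * All names and constants here are hypothetical example values.
 */
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 256			/* assume a 256-slot primary hash */
#define PORTS_PER_CHAIN (65536 / TABLE_SIZE)

int main(void)
{
	unsigned int rand = 0x12345678;	/* stand-in for prandom_u32() */
	unsigned short first = 40000;
	unsigned short snum;
	unsigned char seen[PORTS_PER_CHAIN];
	unsigned int visited = 0;

	/* same trick as the kernel: force an odd multiple of TABLE_SIZE */
	rand = (rand | 1) * TABLE_SIZE;

	memset(seen, 0, sizeof(seen));
	snum = first;
	do {
		/* every candidate keeps the same low bits, so it stays
		 * in the same hash chain ...
		 */
		if ((snum & (TABLE_SIZE - 1)) != (first & (TABLE_SIZE - 1)))
			return 1;
		/* ... and no candidate is generated twice */
		if (seen[snum / TABLE_SIZE]++)
			return 1;
		visited++;
		snum += rand;		/* u16 arithmetic wraps modulo 65536 */
	} while (snum != first);

	printf("visited %u of %u ports in the chain\n",
	       visited, PORTS_PER_CHAIN);
	return visited == PORTS_PER_CHAIN ? 0 : 1;
}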
/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          0.0.0.0 only equals to 0.0.0.0
 */
int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
			 bool match_wildcard)
{
	struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

	if (!ipv6_only_sock(sk2)) {
		if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
			return 1;
		if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
			return match_wildcard;
	}
	return 0;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
david s. miller               41         51.90%     3          42.86%
craig gallek                  26         32.91%     1          14.29%
gerrit renker                 7          8.86%      1          14.29%
eric dumazet                  4          5.06%      1          14.29%
hideaki yoshifuji             1          1.27%      1          14.29%
Total                         79         100.00%    7          100.00%


static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
			      unsigned int port)
{
	return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
eric dumazet                  36         100.00%    3          100.00%
Total                         36         100.00%    3          100.00%


int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
	unsigned int hash2_partial =
		udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
eric dumazet                  52         68.42%     3          42.86%
david s. miller               20         26.32%     1          14.29%
pre-git                       2          2.63%      1          14.29%
pavel emelianov               1          1.32%      1          14.29%
gerrit renker                 1          1.32%      1          14.29%
Total                         76         100.00%    7          100.00%

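udp4_portaddr_hash() only XORs the port in after the address has been mixed, which is what lets udp_v4_get_port() store the address-only value (port 0) as a partial hash and lets udp_lib_get_port() complete it later with "udp_portaddr_hash ^= snum". The tiny user-space sketch below just checks that identity; mix() is a hypothetical stand-in for jhash_1word(saddr, net_hash_mix(net)), and the address and port are arbitrary example values.

/* Illustrative sketch only: the XOR-at-the-end identity behind the partial
 * secondary hash.  mix() is a made-up stand-in for the kernel's jhash step.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t mix(uint32_t saddr)
{
	return saddr * 0x9e3779b9u;	/* any fixed mixing function will do */
}

static uint32_t portaddr_hash(uint32_t saddr, uint16_t port)
{
	return mix(saddr) ^ port;	/* same shape as udp4_portaddr_hash() */
}

int main(void)
{
	uint32_t saddr = 0xc0a80001;	/* 192.168.0.1, example address */
	uint16_t snum = 5353;		/* example port */

	/* hash(addr, port) == hash(addr, 0) ^ port, because the port is
	 * only XOR-ed in after the address has been mixed.
	 */
	assert(portaddr_hash(saddr, snum) ==
	       (portaddr_hash(saddr, 0) ^ snum));
	return 0;
}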

static int compute_score(struct sock *sk, struct net *net,
			 __be32 saddr, __be16 sport,
			 __be32 daddr, unsigned short hnum, int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    ipv6_only_sock(sk))
		return -1;

	score = (sk->sk_family == PF_INET) ? 2 : 1;
	inet = inet_sk(sk);

	if (inet->inet_rcv_saddr) {
		if (inet->inet_rcv_saddr != daddr)
			return -1;
		score += 4;
	}

	if (inet->inet_daddr) {
		if (inet->inet_daddr != saddr)
			return -1;
		score += 4;
	}

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score += 4;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}
	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;
	return score;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
david s. miller               107        52.71%     1          7.14%
eric dumazet                  50         24.63%     4          28.57%
joe perches                   21         10.34%     1          7.14%
hideaki yoshifuji             8          3.94%      3          21.43%
tom herbert                   6          2.96%      1          7.14%
pre-git                       6          2.96%      1          7.14%
xuemin su                     3          1.48%      1          7.14%
gerrit renker                 1          0.49%      1          7.14%
pavel emelianov               1          0.49%      1          7.14%
Total                         203        100.00%    14         100.00%

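As a worked example of the scoring above: for a datagram arriving on device dif from saddr:sport to daddr:hnum, an IPv4 socket that is bound to daddr and hnum, connected to saddr:sport, and bound to that device passes all four optional tests and scores 2 + 4 + 4 + 4 + 4 = 18 (19 if it was last active on the receiving CPU), while a plain INADDR_ANY socket on the same port scores only 2, so the more specific socket always wins the lookup. Any mismatch in a field the socket did specify disqualifies it outright with -1.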

static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
		       const __u16 lport, const __be32 faddr,
		       const __be16 fport)
{
	static u32 udp_ehash_secret __read_mostly;

	net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));

	return __inet_ehashfn(laddr, lport, faddr, fport,
			      udp_ehash_secret + net_hash_mix(net));
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
hannes frederic sowa          61         96.83%     2          66.67%
eric dumazet                  2          3.17%      1          33.33%
Total                         63         100.00%    3          100.00%

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
				     __be32 saddr, __be16 sport,
				     __be32 daddr, unsigned int hnum, int dif,
				     struct udp_hslot *hslot2,
				     struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = 0;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			badness = score;
			result = sk;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
eric dumazet                  110        52.38%     3          30.00%
tom herbert                   68         32.38%     1          10.00%
craig gallek                  26         12.38%     3          30.00%
daniel borkmann               4          1.90%      1          10.00%
hannes frederic sowa          1          0.48%      1          10.00%
xuemin su                     1          0.48%      1          10.00%
Total                         210        100.00%    10         100.00%

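When reuseport_select_sock() has no socket array or BPF program to consult, the "score == badness" branch above falls back to picking one of the equally scoring SO_REUSEPORT sockets by reservoir sampling: the k-th tied socket replaces the current result when reciprocal_scale(hash, matches) is zero, i.e. with probability roughly 1/k, which gives each of the N tied sockets a 1/N chance overall. The user-space simulation below is an illustrative sketch of that selection rule only; rand32() is a hypothetical stand-in for the kernel's per-packet hash and its next_pseudo_random32() chain.

/* Illustrative sketch only: simulates the matches/reciprocal_scale()
 * fallback used above to pick one of N equal-scoring reuseport sockets.
 * Expect each socket to be chosen about 1/N of the time.
 */
#include <stdio.h>
#include <stdint.h>

#define NSOCKS 4
#define TRIALS 1000000

/* mimics the kernel's reciprocal_scale(): maps a 32-bit value into [0, n) */
static uint32_t reciprocal_scale(uint32_t val, uint32_t n)
{
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

static uint32_t rng_state = 0x12345678;

static uint32_t rand32(void)
{
	/* xorshift32: small stand-in PRNG with full 32-bit range */
	rng_state ^= rng_state << 13;
	rng_state ^= rng_state >> 17;
	rng_state ^= rng_state << 5;
	return rng_state;
}

int main(void)
{
	unsigned long hits[NSOCKS] = { 0 };

	for (long t = 0; t < TRIALS; t++) {
		uint32_t hash = rand32();	/* per-packet hash stand-in */
		int result = 0;			/* first tied socket found */
		int matches = 1;

		for (int sk = 1; sk < NSOCKS; sk++) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;	/* replace with prob ~1/matches */
			hash = rand32();	/* next_pseudo_random32() stand-in */
		}
		hits[result]++;
	}

	for (int sk = 0; sk < NSOCKS; sk++)
		printf("socket %d: %.3f\n", sk, (double)hits[sk] / TRIALS);
	return 0;
}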
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
		__be16 sport, __be32 daddr, __be16 dport,
		int dif, struct udp_table *udptable, struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp4_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp4_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;

			hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				return result;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp4_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif,
						  hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = 0;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp_ehashfn(net, daddr, hnum,
						   saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
eric dumazet                  278        65.88%     7          43.75%
tom herbert                   73         17.30%     1          6.25%
craig gallek                  29         6.87%      3          18.75%
xuemin su                     21         4.98%      1          6.25%
david s. miller               15         3.55%      1          6.25%
daniel borkmann               4          0.95%      1          6.25%
hannes frederic sowa          1          0.24%      1          6.25%
jorge boncompte               1          0.24%      1          6.25%
Total                         422        100.00%    16         100.00%

EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
						 __be16 sport, __be16 dport,
						 struct udp_table *udptable)
{
	const struct iphdr *iph = ip_hdr(skb);

	return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 udptable, skb);
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
kovacs krisztian              64         94.12%     1          33.33%
craig gallek                  2          2.94%      1          33.33%
eric dumazet                  2          2.94%      1          33.33%
Total                         68         100.00%    3          100.00%


struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
tom herbert                   29         93.55%     1          50.00%
alexander duyck               2          6.45%      1          50.00%
Total                         31         100.00%    2          100.00%

EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
			     __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, &udp_table, NULL);
	if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
kovacs krisztian              43         58.11%     1          25.00%
eric dumazet                  29         39.19%     2          50.00%
craig gallek                  2          2.70%      1          25.00%
Total                         74         100.00%    4          100.00%

EXPORT_SYMBOL_GPL(udp4_lib_lookup);
#endif
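The comment above udp4_lib_lookup() spells out its calling convention: the lookup itself must run under rcu_read_lock(), and when it returns a socket it has already taken a reference with atomic_inc_not_zero(), so the caller may leave the RCU section and must release the socket with sock_put() when done. The fragment below is a hedged sketch of a hypothetical in-kernel caller, not code from this file.

/* Illustrative sketch only: how a hypothetical in-kernel caller would use
 * udp4_lib_lookup() given the calling convention documented above.  The
 * surrounding function and its arguments are made up for the example.
 */
static void example_lookup(struct net *net, __be32 saddr, __be16 sport,
			   __be32 daddr, __be16 dport, int dif)
{
	struct sock *sk;

	rcu_read_lock();
	sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, dif);
	rcu_read_unlock();

	if (!sk)
		return;			/* no matching UDP socket */

	/* ... use the socket; a reference is held, so it cannot go away ... */

	sock_put(sk);			/* drop the reference taken by the lookup */
}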
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
				       __be16 loc_port, __be32 loc_addr,
				       __be16 rmt_port, __be32 rmt_addr,
				       int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
	    (inet->inet_dport != rmt_port && inet->inet_dport) ||
	    (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
	    ipv6_only_sock(sk) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
		return false;
	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
		return false;
	return true;
}

Contributors

Person                        Tokens     Prop       Commits    CommitProp
david s. miller               90         62.50%     1          20.00%
shawn bohrer                  29         20.14%     1          20.00%
eric dumazet                  25         17.36%     3          60.00%
Total                         144        100.00%    5          100.00%

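__udp_is_mcast_sock() defers the final multicast decision to ip_mc_sf_allow(), which enforces the per-socket source filters that user space installs with the IGMPv3 source-specific socket options. As a usage-level illustration (a sketch with arbitrary example addresses and port, not code from this file), a receiver that joins a source-specific group like this will only have group datagrams from that one source delivered past the check above:

/* Illustrative sketch only: a user-space receiver joining a source-specific
 * multicast group.  The source filter installed here is what ip_mc_sf_allow()
 * consults before a multicast datagram is delivered to the socket.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in local;
	struct ip_mreq_source mreq;

	if (fd < 0)
		return 1;

	memset(&local, 0, sizeof(local));
	local.sin_family = AF_INET;
	local.sin_addr.s_addr = htonl(INADDR_ANY);
	local.sin_port = htons(5000);			/* example port */
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
		return 1;

	memset(&mreq, 0, sizeof(mreq));
	inet_pton(AF_INET, "232.1.1.1", &mreq.imr_multiaddr);		/* group */
	inet_pton(AF_INET, "198.51.100.7", &mreq.imr_sourceaddr);	/* source */
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0) {
		perror("IP_ADD_SOURCE_MEMBERSHIP");
		return 1;
	}

	/* ... recvfrom(fd, ...) now only sees group traffic from that source ... */
	close(fd);
	return 0;
}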
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet.  We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
	struct inet_sock *inet;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2))