cregit-Linux: how code gets into the kernel

Release 4.11 net/ipv4/udp.c

Directory: net/ipv4
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The User Datagram Protocol (UDP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *              Alan Cox        :       verify_area() calls
 *              Alan Cox        :       stopped close while in use off icmp
 *                                      messages. Not a fix but a botch that
 *                                      for udp at least is 'valid'.
 *              Alan Cox        :       Fixed icmp handling properly
 *              Alan Cox        :       Correct error for oversized datagrams
 *              Alan Cox        :       Tidied select() semantics.
 *              Alan Cox        :       udp_err() fixed properly, also now
 *                                      select and read wake correctly on errors
 *              Alan Cox        :       udp_send verify_area moved to avoid mem leak
 *              Alan Cox        :       UDP can count its memory
 *              Alan Cox        :       send to an unknown connection causes
 *                                      an ECONNREFUSED off the icmp, but
 *                                      does NOT close.
 *              Alan Cox        :       Switched to new sk_buff handlers. No more backlog!
 *              Alan Cox        :       Using generic datagram code. Even smaller and the PEEK
 *                                      bug no longer crashes it.
 *              Fred Van Kempen :       Net2e support for sk->broadcast.
 *              Alan Cox        :       Uses skb_free_datagram
 *              Alan Cox        :       Added get/set sockopt support.
 *              Alan Cox        :       Broadcasting without option set returns EACCES.
 *              Alan Cox        :       No wakeup calls. Instead we now use the callbacks.
 *              Alan Cox        :       Use ip_tos and ip_ttl
 *              Alan Cox        :       SNMP Mibs
 *              Alan Cox        :       MSG_DONTROUTE, and 0.0.0.0 support.
 *              Matt Dillon     :       UDP length checks.
 *              Alan Cox        :       Smarter af_inet used properly.
 *              Alan Cox        :       Use new kernel side addressing.
 *              Alan Cox        :       Incorrect return on truncated datagram receive.
 *      Arnt Gulbrandsen        :       New udp_send and stuff
 *              Alan Cox        :       Cache last socket
 *              Alan Cox        :       Route cache
 *              Jon Peatfield   :       Minor efficiency fix to sendto().
 *              Mike Shaver     :       RFC1122 checks.
 *              Alan Cox        :       Nonblocking error fix.
 *      Willy Konynenberg       :       Transparent proxying support.
 *              Mike McLagan    :       Routing by source
 *              David S. Miller :       New socket lookup architecture.
 *                                      Last socket cache retained as it
 *                                      does have a high hit rate.
 *              Olaf Kirch      :       Don't linearise iovec on sendmsg.
 *              Andi Kleen      :       Some cleanups, cache destination entry
 *                                      for connect.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *              Melvin Smith    :       Check msg_name not msg_namelen in sendto(),
 *                                      return ENOTCONN for unconnected sockets (POSIX)
 *              Janos Farkas    :       don't deliver multi/broadcasts to a different
 *                                      bound-to-device socket
 *      Hirokazu Takahashi      :       HW checksumming for outgoing UDP
 *                                      datagrams.
 *      Hirokazu Takahashi      :       sendfile() on UDP works now.
 *              Arnaldo C. Melo :       convert /proc/net/udp to seq_file
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov:               allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 *      Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *      James Chapman           :       Add L2TP encapsulation type.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */


#define pr_fmt(fmt) "UDP: " fmt

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>


struct udp_table udp_table __read_mostly;

EXPORT_SYMBOL(udp_table);


long sysctl_udp_mem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_udp_mem);


int sysctl_udp_rmem_min __read_mostly;

EXPORT_SYMBOL(sysctl_udp_rmem_min);


int sysctl_udp_wmem_min __read_mostly;

EXPORT_SYMBOL(sysctl_udp_wmem_min);


atomic_long_t udp_memory_allocated;

EXPORT_SYMBOL(udp_memory_allocated);


#define MAX_UDP_PORTS 65536

#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)

/* IPCB reference means this can not be used from early demux */

static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) if (!net->ipv4.sysctl_udp_l3mdev_accept && skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) return true; #endif return false; }

Contributors

Person | Tokens | Prop | Commits | CommitProp
Robert Shearman | 51 | 100.00% | 1 | 100.00%
Total | 51 | 100.00% | 1 | 100.00%


static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, struct sock *sk, unsigned int log) { struct sock *sk2; kuid_t uid = sock_i_uid(sk); sk_for_each(sk2, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && inet_rcv_saddr_equal(sk, sk2, true)) { if (sk2->sk_reuseport && sk->sk_reuseport && !rcu_access_pointer(sk->sk_reuseport_cb) && uid_eq(uid, sock_i_uid(sk2))) { if (!bitmap) return 0; } else { if (!bitmap) return 1; __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); } } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet8845.60%628.57%
Eric Garver2713.99%14.76%
Tom Herbert2311.92%14.76%
Gerrit Renker199.84%29.52%
Pavel Emelyanov94.66%14.76%
Hideaki Yoshifuji / 吉藤英明73.63%29.52%
Craig Gallek63.11%14.76%
Joe Perches63.11%14.76%
David S. Miller31.55%29.52%
Linus Torvalds (pre-git)21.04%14.76%
Al Viro10.52%14.76%
Stephen Hemminger10.52%14.76%
Josef Bacik10.52%14.76%
Total193100.00%21100.00%

/* * Note: we still hold spinlock of primary hash chain, so no other writer * can insert/delete a socket with local_port == num */
static int udp_lib_lport_inuse2(struct net *net, __u16 num, struct udp_hslot *hslot2, struct sock *sk) { struct sock *sk2; kuid_t uid = sock_i_uid(sk); int res = 0; spin_lock(&hslot2->lock); udp_portaddr_for_each_entry(sk2, &hslot2->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && inet_rcv_saddr_equal(sk, sk2, true)) { if (sk2->sk_reuseport && sk->sk_reuseport && !rcu_access_pointer(sk->sk_reuseport_cb) && uid_eq(uid, sock_i_uid(sk2))) { res = 0; } else { res = 1; } break; } } spin_unlock(&hslot2->lock); return res; }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet12669.61%116.67%
Eric Garver2312.71%116.67%
Tom Herbert2312.71%116.67%
Craig Gallek63.31%116.67%
Joe Perches21.10%116.67%
Josef Bacik10.55%116.67%
Total181100.00%6100.00%


static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot) { struct net *net = sock_net(sk); kuid_t uid = sock_i_uid(sk); struct sock *sk2; sk_for_each(sk2, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && sk2->sk_family == sk->sk_family && ipv6_only_sock(sk2) == ipv6_only_sock(sk) && (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) && (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) && inet_rcv_saddr_equal(sk, sk2, false)) { return reuseport_add_sock(sk, sk2); } } /* Initial allocation may have already happened via setsockopt */ if (!rcu_access_pointer(sk->sk_reuseport_cb)) return reuseport_alloc(sk); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Craig Gallek15898.75%133.33%
Josef Bacik10.62%133.33%
Eric Dumazet10.62%133.33%
Total160100.00%3100.00%

/** * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 * * @sk: socket struct in question * @snum: port number to look up * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, * with NULL address */
int udp_lib_get_port(struct sock *sk, unsigned short snum, unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; struct udp_table *udptable = sk->sk_prot->h.udp_table; int error = 1; struct net *net = sock_net(sk); if (!snum) { int low, high, remaining; unsigned int rand; unsigned short first, last; DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rand = prandom_u32(); first = reciprocal_scale(rand, remaining) + low; /* * force rand to be an odd multiple of UDP_HTABLE_SIZE */ rand = (rand | 1) * (udptable->mask + 1); last = first + udptable->mask + 1; do { hslot = udp_hashslot(udptable, net, first); bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, udptable->log); snum = first; /* * Iterate on all possible values of snum for this hash. * Using steps of an odd multiple of UDP_HTABLE_SIZE * give us randomization and full range coverage. */ do { if (low <= snum && snum <= high && !test_bit(snum >> udptable->log, bitmap) && !inet_is_local_reserved_port(net, snum)) goto found; snum += rand; } while (snum != first); spin_unlock_bh(&hslot->lock); cond_resched(); } while (++first != last); goto fail; } else { hslot = udp_hashslot(udptable, net, snum); spin_lock_bh(&hslot->lock); if (hslot->count > 10) { int exist; unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; slot2 &= udptable->mask; hash2_nulladdr &= udptable->mask; hslot2 = udp_hashslot2(udptable, slot2); if (hslot->count < hslot2->count) goto scan_primary_hash; exist = udp_lib_lport_inuse2(net, snum, hslot2, sk); if (!exist && (hash2_nulladdr != slot2)) { hslot2 = udp_hashslot2(udptable, hash2_nulladdr); exist = udp_lib_lport_inuse2(net, snum, hslot2, sk); } if (exist) goto fail_unlock; else goto found; } scan_primary_hash: if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0)) goto fail_unlock; } found: inet_sk(sk)->inet_num = snum; udp_sk(sk)->udp_port_hash = 
snum; udp_sk(sk)->udp_portaddr_hash ^= snum; if (sk_unhashed(sk)) { if (sk->sk_reuseport && udp_reuseport_add_sock(sk, hslot)) { inet_sk(sk)->inet_num = 0; udp_sk(sk)->udp_port_hash = 0; udp_sk(sk)->udp_portaddr_hash ^= snum; goto fail_unlock; } sk_add_node_rcu(sk, &hslot->head); hslot->count++; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock(&hslot2->lock); if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport && sk->sk_family == AF_INET6) hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); else hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); hslot2->count++; spin_unlock(&hslot2->lock); } sock_set_flag(sk, SOCK_RCU_FREE); error = 0; fail_unlock: spin_unlock_bh(&hslot->lock); fail: return error; }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet40661.98%1530.61%
Craig Gallek7811.91%24.08%
Linus Torvalds (pre-git)507.63%816.33%
Pavel Emelyanov284.27%510.20%
Stephen Hemminger253.82%24.08%
Gerrit Renker172.60%12.04%
Anton Arapov131.98%12.04%
Américo Wang81.22%24.08%
Arnaldo Carvalho de Melo71.07%24.08%
David S. Miller60.92%48.16%
Daniel Borkmann40.61%12.04%
Hideaki Yoshifuji / 吉藤英明40.61%24.08%
Linus Torvalds30.46%12.04%
Eric Garver30.46%12.04%
Eric W. Biedermann20.31%12.04%
Aruna-Hewapathirane10.15%12.04%
Total655100.00%49100.00%

EXPORT_SYMBOL(udp_lib_get_port);
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr, unsigned int port) { return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet36100.00%3100.00%
Total36100.00%3100.00%


int udp_v4_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); unsigned int hash2_partial = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; return udp_lib_get_port(sk, snum, hash2_nulladdr); }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet5168.92%342.86%
David S. Miller1925.68%114.29%
Linus Torvalds (pre-git)22.70%114.29%
Pavel Emelyanov11.35%114.29%
Gerrit Renker11.35%114.29%
Total74100.00%7100.00%


static int compute_score(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned short hnum, int dif, bool exact_dif) { int score; struct inet_sock *inet; if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || ipv6_only_sock(sk)) return -1; score = (sk->sk_family == PF_INET) ? 2 : 1; inet = inet_sk(sk); if (inet->inet_rcv_saddr) { if (inet->inet_rcv_saddr != daddr) return -1; score += 4; } if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if || exact_dif) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) score++; return score; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10751.44%16.67%
Eric Dumazet5024.04%426.67%
Joe Perches2110.10%16.67%
Hideaki Yoshifuji / 吉藤英明83.85%320.00%
Tom Herbert62.88%16.67%
Linus Torvalds (pre-git)62.88%16.67%
Robert Shearman52.40%16.67%
Xuemin Su31.44%16.67%
Gerrit Renker10.48%16.67%
Pavel Emelyanov10.48%16.67%
Total208100.00%15100.00%


static u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport) { static u32 udp_ehash_secret __read_mostly; net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret)); return __inet_ehashfn(laddr, lport, faddr, fport, udp_ehash_secret + net_hash_mix(net)); }

Contributors

PersonTokensPropCommitsCommitProp
Hannes Frederic Sowa6196.83%266.67%
Eric Dumazet23.17%133.33%
Total63100.00%3100.00%

/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif, bool exact_dif, struct udp_hslot *hslot2, struct sk_buff *skb) { struct sock *sk, *result; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; result = NULL; badness = 0; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); if (result) return result; matches = 1; } badness = score; result = sk; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } } return result; }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet11051.16%327.27%
Tom Herbert6831.63%19.09%
Craig Gallek2612.09%327.27%
Robert Shearman52.33%19.09%
Daniel Borkmann41.86%19.09%
Hannes Frederic Sowa10.47%19.09%
Xuemin Su10.47%19.09%
Total215100.00%11100.00%

/* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. -DaveM */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, struct udp_table *udptable, struct sk_buff *skb) { struct sock *sk, *result; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; bool exact_dif = udp_lib_exact_dif_match(net, skb); int score, badness, matches = 0, reuseport = 0; u32 hash = 0; if (hslot->count > 10) { hash2 = udp4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, exact_dif, hslot2, skb); if (!result) { unsigned int old_slot2 = slot2; hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); slot2 = hash2 & udptable->mask; /* avoid searching the same slot again. */ if (unlikely(slot2 == old_slot2)) return result; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, exact_dif, hslot2, skb); } return result; } begin: result = NULL; badness = 0; sk_for_each_rcu(sk, &hslot->head) { score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); result = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); if (result) return result; matches = 1; } result = sk; badness = score; } else if (score == badness && reuseport) { matches++; if (reciprocal_scale(hash, matches) == 0) result = sk; hash = next_pseudo_random32(hash); } } return result; }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet27863.47%741.18%
Tom Herbert7316.67%15.88%
Craig Gallek296.62%317.65%
Xuemin Su214.79%15.88%
Robert Shearman163.65%15.88%
David S. Miller153.42%15.88%
Daniel Borkmann40.91%15.88%
Hannes Frederic Sowa10.23%15.88%
Jorge Boncompte10.23%15.88%
Total438100.00%17100.00%

EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { const struct iphdr *iph = ip_hdr(skb); return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, iph->daddr, dport, inet_iif(skb), udptable, skb); }

Contributors

PersonTokensPropCommitsCommitProp
KOVACS Krisztian6494.12%133.33%
Craig Gallek22.94%133.33%
Eric Dumazet22.94%133.33%
Total68100.00%3100.00%


struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport) { return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table); }

Contributors

PersonTokensPropCommitsCommitProp
Tom Herbert2993.55%150.00%
Alexander Duyck26.45%150.00%
Total31100.00%2100.00%

EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb); /* Must be called under rcu_read_lock(). * Does increment socket refcount. */ #if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \ IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \ IS_ENABLED(CONFIG_NF_SOCKET_IPV4)
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { struct sock *sk; sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table, NULL); if (sk && !atomic_inc_not_zero(&sk->sk_refcnt)) sk = NULL; return sk; }

Contributors

PersonTokensPropCommitsCommitProp
KOVACS Krisztian4358.11%125.00%
Eric Dumazet2939.19%250.00%
Craig Gallek22.70%125.00%
Total74100.00%4100.00%

EXPORT_SYMBOL_GPL(udp4_lib_lookup); #endif
static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif, unsigned short hnum) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || (inet->inet_daddr && inet->inet_daddr != rmt_addr) || (inet->inet_dport != rmt_port && inet->inet_dport) || (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || ipv6_only_sock(sk) || (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) return false; if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) return false; return true; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller9062.50%120.00%
Shawn Bohrer2920.14%120.00%
Eric Dumazet2517.36%360.00%
Total144100.00%5100.00%

/* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. * Header points to the ip header of the error packet. We move * on past this. Then (as it used to claim before adjustment) * header points to the first 8 bytes of the udp header. We need * to find the appropriate port. */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) { struct inet_sock *inet; const struct iphdr *iph = (const struct iphdr *)skb->data; struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; int harderr; int err; struct net *net = dev_net(skb->dev); sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, udptable, NULL); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; /* No socket for error */ } err = 0; harderr = 0; inet = inet_sk(sk); switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: goto out; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) {