Contributors: 19

Author                          Tokens   Token %   Commits   Commit %
Hideaki Yoshifuji / 吉藤英明           214    32.42%         4      7.55%
Steffen Klassert                   106    16.06%         1      1.89%
Eric Dumazet                        98    14.85%        10     18.87%
Linus Torvalds (pre-git)            91    13.79%        15     28.30%
David S. Miller                     88    13.33%         6     11.32%
Wei Dong                            15     2.27%         1      1.89%
Richard Gobert                      13     1.97%         1      1.89%
Vasiliy Kulikov                      6     0.91%         1      1.89%
Tom Herbert                          5     0.76%         2      3.77%
Arnaldo Carvalho de Melo             5     0.76%         3      5.66%
Robert Shearman                      4     0.61%         1      1.89%
Willem de Bruijn                     4     0.61%         1      1.89%
Craig Gallek                         3     0.45%         1      1.89%
Thomas Gleixner                      2     0.30%         1      1.89%
Pavel Emelyanov                      2     0.30%         1      1.89%
Jason A. Donenfeld                   1     0.15%         1      1.89%
Kuniyuki Iwashima                    1     0.15%         1      1.89%
Al Viro                              1     0.15%         1      1.89%
Joe Perches                          1     0.15%         1      1.89%
Total                              660                  53


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	common UDP/RAW code
 *	Linux INET implementation
 *
 * Authors:
 * 	Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/in.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/sock_reuseport.h>

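/* Connect a datagram socket (UDP, raw and other datagram protocols) to an
 * IPv4 destination: validate the address, resolve a route, record the
 * source/destination address and port on the socket and mark it
 * TCP_ESTABLISHED.  The caller must hold the socket lock;
 * ip4_datagram_connect() below is the lock-taking wrapper installed as the
 * ->connect handler.
 */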
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
	struct flowi4 *fl4;
	struct rtable *rt;
	__be32 saddr;
	int oif;
	int err;


	if (addr_len < sizeof(*usin))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	sk_dst_reset(sk);

	oif = sk->sk_bound_dev_if;
	saddr = inet->inet_saddr;
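	/* For a multicast destination, fall back to the interface and source
	 * address configured via IP_MULTICAST_IF; a socket not bound to a
	 * device uses the IP_UNICAST_IF index instead.
	 */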
	if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
		if (!oif || netif_index_is_l3_master(sock_net(sk), oif))
			oif = READ_ONCE(inet->mc_index);
		if (!saddr)
			saddr = READ_ONCE(inet->mc_addr);
	} else if (!oif) {
		oif = READ_ONCE(inet->uc_index);
	}
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr, oif,
			      sk->sk_protocol, inet->inet_sport,
			      usin->sin_port, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		goto out;
	}

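	/* Connecting to a broadcast address requires SO_BROADCAST. */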
	if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
		ip_rt_put(rt);
		err = -EACCES;
		goto out;
	}
	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;	/* Update source address */
	if (!inet->inet_rcv_saddr) {
		inet->inet_rcv_saddr = fl4->saddr;
		if (sk->sk_prot->rehash)
			sk->sk_prot->rehash(sk);
	}
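	/* Commit the destination, mark the socket connected, and seed the
	 * flow hash and IP ID counter for subsequent transmits.
	 */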
	inet->inet_daddr = fl4->daddr;
	inet->inet_dport = usin->sin_port;
	reuseport_has_conns_set(sk);
	sk->sk_state = TCP_ESTABLISHED;
	sk_set_txhash(sk);
	atomic_set(&inet->inet_id, get_random_u16());

	sk_dst_set(sk, &rt->dst);
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL(__ip4_datagram_connect);

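/* Lock-taking wrapper around __ip4_datagram_connect(); datagram protocols
 * install this as their ->connect handler.
 */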
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip4_datagram_connect(sk, uaddr, addr_len);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(ip4_datagram_connect);
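
/*
 * Usage sketch (userspace, illustrative): connect() on a UDP socket reaches
 * ip4_datagram_connect() through the protocol's ->connect hook.  Afterwards
 * the 4-tuple is fixed, send() can be used without a destination address,
 * and ICMP errors (e.g. ECONNREFUSED) are reported on the socket:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family      = AF_INET,
 *		.sin_port        = htons(53),
 *		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 *	};
 *
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0)
 *		send(fd, "query", 5, 0);
 *
 * The send() above uses the route and 4-tuple cached at connect() time.
 */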

/* Called from release_sock() once the socket lock is dropped: if the
 * cached route was invalidated while the socket was owned by the user
 * (e.g. by an ICMP redirect), rebuild it here.
 *
 * Because the UDP xmit path can manipulate sk_dst_cache without holding
 * the socket lock, we need to use sk_dst_set() here,
 * even if we own the socket lock.
 */
void ip4_datagram_release_cb(struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct dst_entry *dst;
	struct flowi4 fl4;
	struct rtable *rt;

	rcu_read_lock();

	dst = __sk_dst_get(sk);
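	/* Only rebuild the route if a cached dst exists, has been marked
	 * obsolete, and no longer passes its ->check() validation.
	 */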
	if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
		rcu_read_unlock();
		return;
	}
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);

	dst = !IS_ERR(rt) ? &rt->dst : NULL;
	sk_dst_set(sk, dst);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
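
/*
 * Wiring sketch (illustrative, abridged): datagram protocols install these
 * helpers in their struct proto; UDP (net/ipv4/udp.c) hooks them up roughly
 * as follows.
 *
 *	struct proto udp_prot = {
 *		.name		= "UDP",
 *		.connect	= ip4_datagram_connect,
 *		.release_cb	= ip4_datagram_release_cb,
 *		...
 *	};
 */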