Release 4.11 net/core/sock.c
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Generic socket support routines. Memory allocators, socket lock/release
* handler for protocols to use and generic option handler.
*
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Florian La Roche, <flla@stud.uni-sb.de>
* Alan Cox, <A.Cox@swansea.ac.uk>
*
* Fixes:
* Alan Cox : Numerous verify_area() problems
* Alan Cox : Connecting on a connecting socket
* now returns an error for tcp.
* Alan Cox : sock->protocol is set correctly,
* and is not sometimes left as 0.
* Alan Cox : connect handles icmp errors on a
* connect properly. Unfortunately there
* is a restart syscall nasty there. I
* can't match BSD without hacking the C
* library. Ideas urgently sought!
* Alan Cox : Disallow bind() to addresses that are
* not ours - especially broadcast ones!!
* Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
* Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
* instead they leave that for the DESTROY timer.
* Alan Cox : Clean up error flag in accept
* Alan Cox : TCP ack handling is buggy, the DESTROY timer
* was buggy. Put a remove_sock() in the handler
* for memory when we hit 0. Also altered the timer
* code. The ACK stuff can wait and needs major
* TCP layer surgery.
* Alan Cox : Fixed TCP ack bug, removed remove sock
* and fixed timer/inet_bh race.
* Alan Cox : Added zapped flag for TCP
* Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
* Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
* Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
* Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
* Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
* Rick Sladkey : Relaxed UDP rules for matching packets.
* C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
* Pauline Middelink : identd support
* Alan Cox : Fixed connect() taking signals I think.
* Alan Cox : SO_LINGER supported
* Alan Cox : Error reporting fixes
* Anonymous : inet_create tidied up (sk->reuse setting)
* Alan Cox : inet sockets don't set sk->type!
* Alan Cox : Split socket option code
* Alan Cox : Callbacks
* Alan Cox : Nagle flag for Charles & Johannes stuff
* Alex : Removed restriction on inet fioctl
* Alan Cox : Splitting INET from NET core
* Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
* Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
* Alan Cox : Split IP from generic code
* Alan Cox : New kfree_skbmem()
* Alan Cox : Make SO_DEBUG superuser only.
* Alan Cox : Allow anyone to clear SO_DEBUG
* (compatibility fix)
* Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
* Alan Cox : Allocator for a socket is settable.
* Alan Cox : SO_ERROR includes soft errors.
* Alan Cox : Allow NULL arguments on some SO_ opts
* Alan Cox : Generic socket allocation to make hooks
* easier (suggested by Craig Metz).
* Michael Pall : SO_ERROR returns positive errno again
* Steve Whitehouse: Added default destructor to free
* protocol private data.
* Steve Whitehouse: Added various other default routines
* common to several socket families.
* Chris Evans : Call suser() check last on F_SETOWN
* Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
* Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
* Andi Kleen : Fix write_space callback
* Chris Evans : Security fixes - signedness again
* Arnaldo C. Melo : cleanups, use skb_queue_purge
*
* To Fix:
*
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>
#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <trace/events/sock.h>
#ifdef CONFIG_INET
#include <net/tcp.h>
#endif
#include <net/busy_poll.h>
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
/**
* sk_ns_capable - General socket capability test
* @sk: Socket to use a capability on or through
* @user_ns: The user namespace of the capability to use
* @cap: The capability to use
*
* Test to see if the opener of the socket had the capability @cap when
* the socket was created and whether the current process has @cap in the
* user namespace @user_ns.
*/
bool sk_ns_capable(const struct sock *sk,
struct user_namespace *user_ns, int cap)
{
return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
ns_capable(user_ns, cap);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biederman | 40 | 100.00% | 1 | 100.00% |
Total | 40 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(sk_ns_capable);
/**
* sk_capable - Socket global capability test
* @sk: Socket to use a capability on or through
* @cap: The global capability to use
*
* Test to see if the opener of the socket had the capability @cap when
* the socket was created and whether the current process has @cap in all
* user namespaces.
*/
bool sk_capable(const struct sock *sk, int cap)
{
return sk_ns_capable(sk, &init_user_ns, cap);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biederman | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(sk_capable);
/**
* sk_net_capable - Network namespace socket capability test
* @sk: Socket to use a capability on or through
* @cap: The capability to use
*
* Test to see if the opener of the socket had the capability @cap when the
* socket was created and whether the current process has @cap over the
* network namespace the socket is a member of.
*/
bool sk_net_capable(const struct sock *sk, int cap)
{
return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biederman | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(sk_net_capable);
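A hedged usage sketch (the handler and option below are hypothetical, not from this file): a protocol's setsockopt path could gate a privileged option on sk_net_capable(), which requires both that the socket's opener had CAP_NET_ADMIN at creation and that the current task has it in the socket's network namespace.
static int proto_set_privileged_opt(struct sock *sk, int val)
{
	if (!sk_net_capable(sk, CAP_NET_ADMIN))
		return -EPERM;
	/* ... apply the privileged setting ... */
	return 0;
}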
/*
* Each address family might have different locking rules, so we have
* one slock key per address family and separate keys for internal and
* userspace sockets.
*/
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];
/*
* Make lock validator output more readable. (we pre-construct these
* strings build-time, so that runtime initialization of socket
* locks is fast):
*/
#define _sock_locks(x) \
x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \
x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \
x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \
x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \
x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \
x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \
x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \
x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \
x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \
x "27" , x "28" , x "AF_CAN" , \
x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \
x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \
x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \
x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \
x "AF_QIPCRTR", x "AF_SMC" , x "AF_MAX"
static const char *const af_family_key_strings[AF_MAX+1] = {
_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
_sock_locks("clock-")
};
static const char *const af_family_kern_key_strings[AF_MAX+1] = {
_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
_sock_locks("k-clock-")
};
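/* Expansion example: _sock_locks("sk_lock-") yields "sk_lock-AF_UNSPEC",
 * "sk_lock-AF_UNIX", "sk_lock-AF_INET", ... so lockdep reports name the
 * address family, and the "k-" variants mark kernel-internal sockets.
 */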
/*
* sk_callback_lock locking rules are per-address-family,
* so split the lock classes by using a per-AF key:
*/
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];
/* Take into consideration the size of the struct sk_buff overhead in the
* determination of these values, since that is non-constant across
* platforms. This makes socket queueing behavior and performance
* not depend upon such differences.
*/
#define _SK_MEM_PACKETS 256
#define _SK_MEM_OVERHEAD SKB_TRUESIZE(256)
#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
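/* Illustrative reading of these defaults: the budget is 256 packets, each
 * charged at SKB_TRUESIZE(256), i.e. 256 bytes of payload plus the
 * platform-dependent struct sk_buff and skb_shared_info overhead.
 */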
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
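/* Worked example (assuming 64-bit unsigned long and UIO_MAXIOV == 1024):
 * 8 * (2 * 1024 + 512) = 8 * 2560 = 20480 bytes.
 */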
int sysctl_tstamp_allow_data __read_mostly = 1;
struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);
/**
* sk_set_memalloc - sets %SOCK_MEMALLOC
* @sk: socket to set it on
*
* Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
* It's the responsibility of the admin to adjust min_free_kbytes
* to meet the requirements
*/
void sk_set_memalloc(struct sock *sk)
{
sock_set_flag(sk, SOCK_MEMALLOC);
sk->sk_allocation |= __GFP_MEMALLOC;
static_key_slow_inc(&memalloc_socks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 29 | 100.00% | 2 | 100.00% |
Total | 29 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(sk_set_memalloc);
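A hedged sketch of the intended kind of caller (the function below is hypothetical): a swap-over-network transport marks its kernel socket so that packet processing for it may dip into the emergency memory reserves.
static void swap_transport_mark(struct socket *sock)
{
	/* Subsequent allocations for sock->sk may use __GFP_MEMALLOC. */
	sk_set_memalloc(sock->sk);
}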
void sk_clear_memalloc(struct sock *sk)
{
sock_reset_flag(sk, SOCK_MEMALLOC);
sk->sk_allocation &= ~__GFP_MEMALLOC;
static_key_slow_dec(&memalloc_socks);
/*
* SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
* progress of swapping. SOCK_MEMALLOC may be cleared while
* it has rmem allocations due to the last swapfile being deactivated
* but there is a risk that the socket is unusable due to exceeding
* the rmem limits. Reclaim the reserves and obey rmem limits again.
*/
sk_mem_reclaim(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 36 | 100.00% | 4 | 100.00% |
Total | 36 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(sk_clear_memalloc);
int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
int ret;
unsigned long pflags = current->flags;
/* these should have been dropped before queueing */
BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
current->flags |= PF_MEMALLOC;
ret = sk->sk_backlog_rcv(sk, skb);
tsk_restore_flags(current, pflags, PF_MEMALLOC);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mel Gorman | 67 | 100.00% | 1 | 100.00% |
Total | 67 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(__sk_backlog_rcv);
static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
struct timeval tv;
if (optlen < sizeof(tv))
return -EINVAL;
if (copy_from_user(&tv, optval, sizeof(tv)))
return -EFAULT;
if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
return -EDOM;
if (tv.tv_sec < 0) {
static int warned __read_mostly;
*timeo_p = 0;
if (warned < 10 && net_ratelimit()) {
warned++;
pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
__func__, current->comm, task_pid_nr(current));
}
return 0;
}
*timeo_p = MAX_SCHEDULE_TIMEOUT;
if (tv.tv_sec == 0 && tv.tv_usec == 0)
return 0;
if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP(tv.tv_usec, USEC_PER_SEC / HZ);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 107 | 58.15% | 1 | 12.50% |
Vasily Averin | 61 | 33.15% | 1 | 12.50% |
Gao Feng | 5 | 2.72% | 1 | 12.50% |
Joe Perches | 4 | 2.17% | 1 | 12.50% |
Pavel Emelyanov | 3 | 1.63% | 1 | 12.50% |
Ilpo Järvinen | 2 | 1.09% | 1 | 12.50% |
Andrew Morton | 1 | 0.54% | 1 | 12.50% |
Linus Torvalds | 1 | 0.54% | 1 | 12.50% |
Total | 184 | 100.00% | 8 | 100.00% |
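For reference, the userspace side of this conversion, as a sketch (HZ == 1000 is assumed only for the arithmetic): a 2.5 s timeout is stored as 2 * HZ + DIV_ROUND_UP(500000, USEC_PER_SEC / HZ) = 2500 jiffies.
#include <sys/socket.h>
#include <sys/time.h>

static int set_rcv_timeout_2500ms(int fd)
{
	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };

	/* Lands in sock_set_timeout() via sk->sk_rcvtimeo. */
	return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}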
static void sock_warn_obsolete_bsdism(const char *name)
{
static int warned;
static char warncomm[TASK_COMM_LEN];
if (strcmp(warncomm, current->comm) && warned < 5) {
strcpy(warncomm, current->comm);
pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
warncomm, name);
warned++;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Andi Kleen | 40 | 66.67% | 1 | 25.00% |
James Morris | 17 | 28.33% | 1 | 25.00% |
Joe Perches | 2 | 3.33% | 1 | 25.00% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 1.67% | 1 | 25.00% |
Total | 60 | 100.00% | 4 | 100.00% |
static bool sock_needs_netstamp(const struct sock *sk)
{
switch (sk->sk_family) {
case AF_UNSPEC:
case AF_UNIX:
return false;
default:
return true;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hannes Frederic Sowa | 33 | 100.00% | 1 | 100.00% |
Total | 33 | 100.00% | 1 | 100.00% |
static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
if (sk->sk_flags & flags) {
sk->sk_flags &= ~flags;
if (sock_needs_netstamp(sk) &&
!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
net_disable_timestamp();
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Christoph Hellwig | 20 | 39.22% | 1 | 20.00% |
Eric Dumazet | 18 | 35.29% | 1 | 20.00% |
Patrick Ohly | 6 | 11.76% | 1 | 20.00% |
Hannes Frederic Sowa | 5 | 9.80% | 1 | 20.00% |
Patrick McHardy | 2 | 3.92% | 1 | 20.00% |
Total | 51 | 100.00% | 5 | 100.00% |
int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
atomic_inc(&sk->sk_drops);
trace_sock_rcvqueue_full(sk, skb);
return -ENOMEM;
}
if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
atomic_inc(&sk->sk_drops);
return -ENOBUFS;
}
skb->dev = NULL;
skb_set_owner_r(skb, sk);
/* We escape from the RCU-protected region, so make sure we don't
* leak a non-refcounted dst.
*/
skb_dst_force(skb);
spin_lock_irqsave(&list->lock, flags);
sock_skb_set_dropcount(sk, skb);
__skb_queue_tail(list, skb);
spin_unlock_irqrestore(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 70 | 42.42% | 1 | 11.11% |
Neil Horman | 39 | 23.64% | 1 | 11.11% |
Eric Dumazet | 27 | 16.36% | 2 | 22.22% |
Hideo Aoki | 16 | 9.70% | 1 | 11.11% |
Satoru Moriya | 7 | 4.24% | 1 | 11.11% |
Eyal Birger | 3 | 1.82% | 1 | 11.11% |
Mel Gorman | 2 | 1.21% | 1 | 11.11% |
samanthakumar | 1 | 0.61% | 1 | 11.11% |
Total | 165 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(__sock_queue_rcv_skb);
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
err = sk_filter(sk, skb);
if (err)
return err;
return __sock_queue_rcv_skb(sk, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
samanthakumar | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(sock_queue_rcv_skb);
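The usual caller pattern, sketched with a hypothetical handler: input paths hand the skb to the socket layer and free it themselves on failure, since sock_queue_rcv_skb() does not consume the skb when it returns an error.
static void proto_deliver(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);	/* dropped by sk_filter() or rcvbuf/rmem limits */
}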
int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
const int nested, unsigned int trim_cap, bool refcounted)
{
int rc = NET_RX_SUCCESS;
if (sk_filter_trim_cap(sk, skb, trim_cap))
goto discard_and_relse;
skb->dev = NULL;
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
}
if (nested)
bh_lock_sock_nested(sk);
else
bh_lock_sock(sk);
if (!sock_owned_by_user(sk)) {
/*
* trylock + unlock semantics:
*/
mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
atomic_inc(&sk->sk_drops);
goto discard_and_relse;
}
bh_unlock_sock(sk);
out:
if (refcounted)
sock_put(sk);
return rc;
discard_and_relse:
kfree_skb(skb);
goto out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 89 | 44.28% | 1 | 11.11% |
Eric Dumazet | 35 | 17.41% | 3 | 33.33% |
Ingo Molnar | 33 | 16.42% | 1 | 11.11% |
Yi Zhu | 22 | 10.95% | 2 | 22.22% |
Arnaldo Carvalho de Melo | 14 | 6.97% | 1 | 11.11% |
Willem de Bruijn | 8 | 3.98% | 1 | 11.11% |
Total | 201 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(__sk_receive_skb);
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
struct dst_entry *dst = __sk_dst_get(sk);
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
sk_tx_queue_clear(sk);
sk->sk_dst_pending_confirm = 0;
RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
dst_release(dst);
return NULL;
}
return dst;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 61 | 77.22% | 1 | 20.00% |
Julian Anastasov | 6 | 7.59% | 1 | 20.00% |
Eric Dumazet | 6 | 7.59% | 1 | 20.00% |
Krishna Kumar | 5 | 6.33% | 1 | 20.00% |
Stephen Hemminger | 1 | 1.27% | 1 | 20.00% |
Total | 79 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(__sk_dst_check);
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
struct dst_entry *dst = sk_dst_get(sk);
if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
sk_dst_reset(sk);
dst_release(dst);
return NULL;
}
return dst;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denys Vlasenko | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(sk_dst_check);
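A hedged sketch of the transmit-side pattern (the function is hypothetical): revalidate the cached route before each use; a NULL return means the dst went stale and was released, so a fresh lookup is needed.
static struct dst_entry *tx_route(struct sock *sk)
{
	struct dst_entry *dst = sk_dst_check(sk, 0);

	if (!dst) {
		/* Protocol-specific route lookup would go here. */
	}
	return dst;
}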
static int sock_setbindtodevice(struct sock *sk, char __user *optval,
int optlen)
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
struct net *net = sock_net(sk);
char devname[IFNAMSIZ];
int index;
/* Sorry... */
ret = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_RAW))
goto out;
ret = -EINVAL;
if (optlen < 0)
goto out;
/* Bind this socket to a particular device like "eth0",
* as specified in the passed interface name. If the
* name is "" or the option length is zero the socket
* is not bound.
*/
if (optlen > IFNAMSIZ - 1)
optlen = IFNAMSIZ - 1;
memset(devname, 0, sizeof(devname));
ret = -EFAULT;
if (copy_from_user(devname, optval, optlen))
goto out;
index = 0;
if (devname[0] != '\0') {
struct net_device *dev;
rcu_read_lock();
dev = dev_get_by_name_rcu(net, devname);
if (dev)
index = dev->ifindex;
rcu_read_unlock();
ret = -ENODEV;
if (!dev)
goto out;
}
lock_sock(sk);
sk->sk_bound_dev_if = index;
sk_dst_reset(sk);
release_sock(sk);
ret = 0;
out:
#endif
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 179 | 82.11% | 2 | 28.57% |
Eric Dumazet | 21 | 9.63% | 1 | 14.29% |
Eric W. Biederman | 14 | 6.42% | 2 | 28.57% |
Hideaki Yoshifuji / 吉藤英明 | 3 | 1.38% | 1 | 14.29% |
Brian Haley | 1 | 0.46% | 1 | 14.29% |
Total | 218 | 100.00% | 7 | 100.00% |
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
int __user *optlen, int len)
{
int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
struct net *net = sock_net(sk);
char devname[IFNAMSIZ];
if (sk->sk_bound_dev_if == 0) {
len = 0;
goto zero;
}
ret = -EINVAL;
if (len < IFNAMSIZ)
goto out;
ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
if (ret)
goto out;
len = strlen(devname) + 1;
ret = -EFAULT;
if (copy_to_user(optval, devname, len))
goto out;
zero:
ret = -EFAULT;
if (put_user(len, optlen))
goto out;
ret = 0;
out:
#endif
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Brian Haley | 153 | 96.84% | 1 | 50.00% |
Nicolas Schichan | 5 | 3.16% | 1 | 50.00% |
Total | 158 | 100.00% | 2 | 100.00% |
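The userspace counterparts of these two handlers, as a sketch (setting the option needs CAP_NET_RAW, per the check above; an empty name clears the binding):
#include <string.h>
#include <net/if.h>
#include <sys/socket.h>

static int bind_and_query_device(int fd)
{
	char name[IFNAMSIZ] = "eth0";
	socklen_t len = sizeof(name);

	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen(name) + 1) < 0)
		return -1;
	return getsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, &len);
}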
static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
if (valbool)
sock_set_flag(sk, bit);
else
sock_reset_flag(sk, bit);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
bool sk_mc_loop(struct sock *sk)
{
if (dev_recursion_level())
return false;
if (!sk)
return true;
switch (sk->sk_family) {
case AF_INET:
return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
return inet6_sk(sk)->mc_loop;
#endif
}
WARN_ON(1);
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hannes Frederic Sowa | 72 | 100.00% | 1 | 100.00% |
Total | 72 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(sk_mc_loop);
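sk_mc_loop() reads the per-socket flag that userspace toggles with IP_MULTICAST_LOOP (or IPV6_MULTICAST_LOOP); a minimal sketch:
#include <netinet/in.h>
#include <sys/socket.h>

static int disable_mcast_loopback(int fd)
{
	unsigned char loop = 0;	/* 0: don't deliver our own multicast back to us */

	return setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop));
}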
/*
* This is meant for all protocols to use and covers goings on
* at the socket level. Everything here is generic.
*/
int sock_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
int val;
int valbool;
struct linger ling;
int ret = 0;
/*
* Options without arguments
*/
if (optname == SO_BINDTODEVICE)
return sock_setbindtodevice(sk, optval, optlen);
if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
return -EFAULT;
valbool = val ? 1 : 0;
lock_sock(sk);
switch (optname) {
case SO_DEBUG:
if (val && !capable(CAP_NET_ADMIN))
ret = -EACCES;
else
sock_valbool_flag(sk, SOCK_DBG, valbool);
break;
case SO_REUSEADDR:
sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
break;
case SO_REUSEPORT:
sk->sk_reuseport = valbool;
break;
case SO_TYPE:
case SO_PROTOCOL:
case SO_DOMAIN:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
break;
case SO_SNDBUF:
/* Don't error on this; BSD doesn't, and if you think
* about it, this is right. Otherwise apps would have to
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints.
*/
val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
/* Wake up sending tasks if we upped the value. */
sk->sk_write_space(sk);
break;
case SO_SNDBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_sndbuf;
case SO_RCVBUF:
/* Don't error on this; BSD doesn't, and if you think
* about it, this is right. Otherwise apps would have to
* play 'guess the biggest size' games. RCVBUF/SNDBUF
* are treated in BSD as hints.
*/
val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
/*
* We double it on the way in to account for
* "struct sk_buff" etc. overhead. Applications
* assume that the SO_RCVBUF setting they make will
* allow that much actual data to be received on that
* socket.
*
* Applications are unaware that "struct sk_buff" and
* other overheads allocate from the receive buffer
* during socket buffer allocation.
*
* And after considering the possible alternatives,
* returning the value we actually used in getsockopt
* is the most desirable behavior.
*/
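/* Example: assuming sysctl_rmem_max allows it, a request of 65536
 * is stored as sk_rcvbuf = 131072, and getsockopt(SO_RCVBUF)
 * reports 131072.
 */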
sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_RCVBUFFORCE:
if (!capable(CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
goto set_rcvbuf;
case SO_KEEPALIVE:
if (sk->sk_prot->keepalive)
sk->sk_prot->keepalive(sk, valbool);
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
break;
case SO_OOBINLINE:
sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
break;
case SO_NO_CHECK:
sk->sk_no_check_tx = valbool;
break;
case SO_PRIORITY:
if ((val >= 0 && val <= 6) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk->sk_priority = val;
else
ret = -EPERM;
break;
case SO_LINGER:
if (optlen < sizeof(ling)) {
ret = -EINVAL; /* 1003.1g */
break;
}
if (copy_from_user(&ling, optval, sizeof(ling))) {
ret = -EFAULT;
break;
}
if (!ling.l_onoff)
sock_reset_flag(sk, SOCK_LINGER);
else {
#if (BITS_PER_LONG == 32)
if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
else
#endif
sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
sock_set_flag(sk, SOCK_LINGER);
}
break;
case SO_BSDCOMPAT:
sock_warn_obsolete_bsdism("setsockopt");
break;
case SO_PASSCRED:
if (valbool)
set_bit(SOCK_PASSCRED, &sock->flags);
else
clear_bit(SOCK_PASSCRED, &sock->flags);
break;
case SO_TIMESTAMP:
case SO_TIMESTAMPNS:
if (valbool) {
if (optname == SO_TIMESTAMP)
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
else
sock_set_flag(sk, SOCK_RCVTSTAMPNS);
sock_set_flag(sk, SOCK_RCVTSTAMP);
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
} else {
sock_reset_flag(sk, SOCK_RCVTSTAMP);
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
}
break;
case SO_TIMESTAMPING:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
break;
}
if (val & SOF_TIMESTAMPING_OPT_ID &&
!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
if (sk->sk_protocol == IPPROTO_TCP &&
sk->sk_type == SOCK_STREAM) {
if ((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN)) {
ret = -EINVAL;
break;
}
sk->sk_tskey = tcp_sk(sk)->snd_una;
} else {
sk->sk_tskey = 0;
}
}
if (val & SOF_TIMESTAMPING_OPT_STATS &&
!(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
ret = -EINVAL;
break;
}
sk->sk_tsflags = val;
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
sock_enable_timestamp(sk,
SOCK_TIMESTAMPING_RX_SOFTWARE);
else
sock_disable_timestamp(sk,
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
break;
case SO_RCVLOWAT:
if (val < 0)
val = INT_MAX;
sk->sk_rcvlowat = val ? : 1;
break;
case SO_RCVTIMEO:
ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
break;
case SO_SNDTIMEO:
ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
break;
case SO_ATTACH_FILTER:
ret