/*
 * (cregit-Linux extraction; release 4.15, net/socket.c, directory: net)
 *
 * NET		An implementation of the SOCKET network access protocol.
 *
 * Version:	@(#)socket.c	1.1.93	18/02/95
 *
 * Authors:	Orest Zborowski, <obz@Kodak.COM>
 *		Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 * Fixes:
 *		Anonymous	:	NOTSOCK/BADF cleanup. Error fix in
 *					shutdown()
 *		Alan Cox	:	verify_area() fixes
 *		Alan Cox	:	Removed DDI
 *		Jonathan Kamens	:	SOCK_DGRAM reconnect bug
 *		Alan Cox	:	Moved a load of checks to the very
 *					top level.
 *		Alan Cox	:	Move address structures to/from user
 *					mode above the protocol layers.
 *		Rob Janssen	:	Allow 0 length sends.
 *		Alan Cox	:	Asynchronous I/O support (cribbed from the
 *					tty drivers).
 *		Niibe Yutaka	:	Asynchronous I/O for writes (4.4BSD style)
 *		Jeff Uphoff	:	Made max number of sockets command-line
 *					configurable.
 *		Matti Aarnio	:	Made the number of sockets dynamic,
 *					to be allocated when needed, and mr.
 *					Uphoff's max is used as max to be
 *					allowed to allocate.
 *		Linus		:	Argh. removed all the socket allocation
 *					altogether: it's in the inode now.
 *		Alan Cox	:	Made sock_alloc()/sock_release() public
 *					for NetROM and future kernel nfsd type
 *					stuff.
 *		Alan Cox	:	sendmsg/recvmsg basics.
 *		Tom Dyas	:	Export net symbols.
 *		Marcin Dalecki	:	Fixed problems with CONFIG_NET="n".
 *		Alan Cox	:	Added thread locking to sys_* calls
 *					for sockets. May have errors at the
 *					moment.
 *		Kevin Buhr	:	Fixed the dumb errors in the above.
 *		Andi Kleen	:	Some small cleanups, optimizations,
 *					and fixed a copy_from_user() bug.
 *		Tigran Aivazian	:	sys_send(args) calls sys_sendto(args, NULL, 0)
 *		Tigran Aivazian	:	Made listen(2) backlog sanity checks
 *					protocol-independent
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	This module is effectively the top level interface to the BSD socket
 *	paradigm.
 *
 *	Based upon Swansea University Computer Society NET3.039
 */

#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/thread_info.h>
#include <linux/rcupdate.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/if_bridge.h>
#include <linux/if_frad.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kmod.h>
#include <linux/audit.h>
#include <linux/wireless.h>
#include <linux/nsproxy.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/xattr.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include <net/compat.h>
#include <net/wext.h>
#include <net/cls_cgroup.h>

#include <net/sock.h>
#include <linux/netfilter.h>

#include <linux/if_tun.h>
#include <linux/ipv6_route.h>
#include <linux/route.h>
#include <linux/sockios.h>
#include <linux/atalk.h>
#include <net/busy_poll.h>
#include <linux/errqueue.h>


/* Busy-poll budget tunables; presumably set via the net.core sysctls — confirm. */
unsigned int sysctl_net_busy_read __read_mostly;

unsigned int sysctl_net_busy_poll __read_mostly;

/* Forward declarations for the methods wired into socket_file_ops below. */
static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);

static int sock_close(struct inode *inode, struct file *file);
static unsigned int sock_poll(struct file *file,
			      struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static long compat_sock_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg);
static int sock_fasync(int fd, struct file *filp, int on);
static ssize_t sock_sendpage(struct file *file, struct page *page,
			     int offset, size_t size, loff_t *ppos, int more);
static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags);

/*
 *	Socket files have a set of 'special' operations as well as the generic
 *	file ones. These don't appear in the operation structures but are done
 *	directly via the socketcall() multiplexor.
 */

/* VFS entry points for socket files; dispatched via file->f_op. */
static const struct file_operations socket_file_ops = {
	.owner =	THIS_MODULE,
	.llseek =	no_llseek,
	.read_iter =	sock_read_iter,
	.write_iter =	sock_write_iter,
	.poll =		sock_poll,
	.unlocked_ioctl = sock_ioctl,
	.compat_ioctl = compat_sock_ioctl,
	.mmap =		sock_mmap,
	.release =	sock_close,
	.fasync =	sock_fasync,
	.sendpage =	sock_sendpage,
	.splice_write = generic_splice_sendpage,
	.splice_read =	sock_splice_read,
};	/* closing brace/semicolon restored — lost in extraction */

/*
 *	The protocol list. Each protocol is registered in here.
 */

/* Presumably serializes writers of net_families[]; registration code not in view. */
static DEFINE_SPINLOCK(net_family_lock);

/* Registered protocol families, indexed by family number; readers use RCU. */
static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;

/*
 *	Statistics counters of the socket lists
 */

static DEFINE_PER_CPU(int, sockets_in_use);

/*
 * Support routines.
 * Move socket addresses back and forth across the kernel/user
 * divide and look after the messy bits.
 */

/**
 *	move_addr_to_kernel	-	copy a socket address into kernel space
 *	@uaddr: Address in user space
 *	@kaddr: Address in kernel space
 *	@ulen: Length in user space
 *
 *	The address is copied into kernel space. If the provided address is
 *	too long an error code of -EINVAL is returned. If the copy gives
 *	invalid addresses -EFAULT is returned. On a success 0 is returned.
 */

int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); }


Linus Torvalds (pre-git)5479.41%342.86%
Hideaki Yoshifuji / 吉藤英明68.82%114.29%
David Woodhouse68.82%114.29%
Linus Torvalds11.47%114.29%
Maciej Żenczykowski11.47%114.29%

/** * move_addr_to_user - copy an address to user space * @kaddr: kernel space address * @klen: length of address in kernel * @uaddr: user space address * @ulen: pointer to user length field * * The value pointed to by ulen on entry is the buffer length available. * This is overwritten with the buffer space used. -EINVAL is returned * if an overlong buffer is specified or a negative buffer size. -EFAULT * is returned if either the buffer or the length field are not * accessible. * After copying the data up to the limit the user specifies, the true * length of the data is written over the length limit the user * specified. Zero is returned for a success. */
static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen, void __user *uaddr, int __user *ulen) { int err; int len; BUG_ON(klen > sizeof(struct sockaddr_storage)); err = get_user(len, ulen); if (err) return err; if (len > klen) len = klen; if (len < 0) return -EINVAL; if (len) { if (audit_sockaddr(klen, kaddr)) return -ENOMEM; if (copy_to_user(uaddr, kaddr, len)) return -EFAULT; } /* * "fromlen shall refer to the value before truncation.." * 1003.1g */ return __put_user(klen, ulen); }


Linus Torvalds (pre-git)8671.67%646.15%
Steve Grubb1310.83%17.69%
Hannes Frederic Sowa119.17%17.69%
Stephen Hemminger65.00%215.38%
Linus Torvalds21.67%17.69%
Maciej Żenczykowski10.83%17.69%
Hideaki Yoshifuji / 吉藤英明10.83%17.69%

static struct kmem_cache *sock_inode_cachep __read_mostly;
static struct inode *sock_alloc_inode(struct super_block *sb) { struct socket_alloc *ei; struct socket_wq *wq; ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL); if (!ei) return NULL; wq = kmalloc(sizeof(*wq), GFP_KERNEL); if (!wq) { kmem_cache_free(sock_inode_cachep, ei); return NULL; } init_waitqueue_head(&wq->wait); wq->fasync_list = NULL; wq->flags = 0; RCU_INIT_POINTER(ei->socket.wq, wq); ei->socket.state = SS_UNCONNECTED; ei->socket.flags = 0; ei->socket.ops = NULL; ei-> = NULL; ei->socket.file = NULL; return &ei->vfs_inode; }


Eric Dumazet5034.01%233.33%
Al Viro4631.29%116.67%
Petr Vandrovec4429.93%116.67%
Nicolai Stange64.08%116.67%
Christoph Lameter10.68%116.67%

static void sock_destroy_inode(struct inode *inode) { struct socket_alloc *ei; struct socket_wq *wq; ei = container_of(inode, struct socket_alloc, vfs_inode); wq = rcu_dereference_protected(ei->socket.wq, 1); kfree_rcu(wq, rcu); kmem_cache_free(sock_inode_cachep, ei); }


Eric Dumazet3660.00%240.00%
Al Viro2135.00%120.00%
Lai Jiangshan23.33%120.00%
Neil Brown11.67%120.00%

static void init_once(void *foo) { struct socket_alloc *ei = (struct socket_alloc *)foo; inode_init_once(&ei->vfs_inode); }


Al Viro2790.00%150.00%
Christoph Lameter310.00%150.00%

static void init_inodecache(void) { sock_inode_cachep = kmem_cache_create("sock_inode_cache", sizeof(struct socket_alloc), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT), init_once); BUG_ON(sock_inode_cachep == NULL); }


Al Viro2969.05%116.67%
yuan linyu511.90%116.67%
Paul Jackson49.52%233.33%
Vladimir Davydov24.76%116.67%
Andrew Morton24.76%116.67%

static const struct super_operations sockfs_ops = { .alloc_inode = sock_alloc_inode, .destroy_inode = sock_destroy_inode, .statfs = simple_statfs, }; /* * sockfs_dname() is called from d_path(). */
static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]", d_inode(dentry)->i_ino); }


Al Viro3592.11%150.00%
David Howells37.89%150.00%

static const struct dentry_operations sockfs_dentry_operations = { .d_dname = sockfs_dname, };
static int sockfs_xattr_get(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *suffix, void *value, size_t size) { if (value) { if (dentry->d_name.len + 1 > size) return -ERANGE; memcpy(value, dentry->, dentry->d_name.len + 1); } return dentry->d_name.len + 1; }


Andreas Gruenbacher84100.00%1100.00%

#define XATTR_SOCKPROTONAME_SUFFIX "sockprotoname" #define XATTR_NAME_SOCKPROTONAME (XATTR_SYSTEM_PREFIX XATTR_SOCKPROTONAME_SUFFIX) #define XATTR_NAME_SOCKPROTONAME_LEN (sizeof(XATTR_NAME_SOCKPROTONAME)-1) static const struct xattr_handler sockfs_xattr_handler = { .name = XATTR_NAME_SOCKPROTONAME, .get = sockfs_xattr_get, };
static int sockfs_security_xattr_set(const struct xattr_handler *handler, struct dentry *dentry, struct inode *inode, const char *suffix, const void *value, size_t size, int flags) { /* Handled by LSM. */ return -EAGAIN; }


Andreas Gruenbacher43100.00%1100.00%

static const struct xattr_handler sockfs_security_xattr_handler = { .prefix = XATTR_SECURITY_PREFIX, .set = sockfs_security_xattr_set, }; static const struct xattr_handler *sockfs_xattr_handlers[] = { &sockfs_xattr_handler, &sockfs_security_xattr_handler, NULL };
static struct dentry *sockfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_pseudo_xattr(fs_type, "socket:", &sockfs_ops, sockfs_xattr_handlers, &sockfs_dentry_operations, SOCKFS_MAGIC); }


Al Viro3786.05%457.14%
Andreas Gruenbacher36.98%114.29%
Linus Torvalds (pre-git)24.65%114.29%
Andries E. Brouwer12.33%114.29%

static struct vfsmount *sock_mnt __read_mostly; static struct file_system_type sock_fs_type = { .name = "sockfs", .mount = sockfs_mount, .kill_sb = kill_anon_super, }; /* * Obtains the first available file descriptor and sets it up for use. * * These functions create file structures and maps them to fd space * of the current process. On success it returns file descriptor * and file struct implicitly stored in sock->file. * Note that another thread may close file descriptor before we return * from this function. We use the fact that now we do not refer * to socket after mapping. If one day we will need it, this * function will increment ref. count on file by 1. * * In any case returned fd MAY BE not valid! * This race condition is unavoidable * with shared fd spaces, we cannot solve it inside kernel, * but we take care of internal coherence yet. */
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname) { struct qstr name = { .name = "" }; struct path path; struct file *file; if (dname) { = dname; name.len = strlen(; } else if (sock->sk) { = sock->sk->sk_prot_creator->name; name.len = strlen(; } path.dentry = d_alloc_pseudo(sock_mnt->mnt_sb, &name); if (unlikely(!path.dentry)) { sock_release(sock); return ERR_PTR(-ENOMEM); } path.mnt = mntget(sock_mnt); d_instantiate(path.dentry, SOCK_INODE(sock)); file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &socket_file_ops); if (IS_ERR(file)) { /* drop dentry, keep inode for a bit */ ihold(d_inode(path.dentry)); path_put(&path); /* ... and now kill it properly */ sock_release(sock); return file; } sock->file = file; file->f_flags = O_RDWR | (flags & O_NONBLOCK); file->private_data = sock; return file; }


Al Viro9541.13%728.00%
Masatake YAMATO6025.97%14.00%
Linus Torvalds (pre-git)3515.15%624.00%
Dave Hansen93.90%14.00%
David S. Miller83.46%14.00%
Ulrich Drepper83.46%28.00%
Benjamin LaHaise41.73%14.00%
Anatol Pomozov31.30%14.00%
David Howells31.30%14.00%
Josef 'Jeff' Sipek31.30%14.00%
Eric Dumazet20.87%28.00%
Nicholas Piggin10.43%14.00%

static int sock_map_fd(struct socket *sock, int flags) { struct file *newfile; int fd = get_unused_fd_flags(flags); if (unlikely(fd < 0)) { sock_release(sock); return fd; } newfile = sock_alloc_file(sock, flags, NULL); if (likely(!IS_ERR(newfile))) { fd_install(fd, newfile); return fd; } put_unused_fd(fd); return PTR_ERR(newfile); }


Al Viro4650.55%440.00%
David S. Miller2628.57%110.00%
Linus Torvalds (pre-git)1010.99%220.00%
Ulrich Drepper44.40%110.00%
Eric Dumazet33.30%110.00%
Masatake YAMATO22.20%110.00%

struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; }


Linus Torvalds (pre-git)2255.00%360.00%
Benjamin LaHaise1845.00%240.00%

EXPORT_SYMBOL(sock_from_file);

/**
 *	sockfd_lookup - Go from a file number to its socket slot
 *	@fd: file handle
 *	@err: pointer to an error code return
 *
 *	The file handle passed in is locked and the socket it is bound
 *	to is returned. If an error occurs the err pointer is overwritten
 *	with a negative errno code and NULL is returned. The function checks
 *	for both invalid handles and passing a handle which is not a socket.
 *
 *	On a success the socket object pointer is returned.
 */
struct socket *sockfd_lookup(int fd, int *err)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file) {
		*err = -EBADF;
		return NULL;
	}

	sock = sock_from_file(file, err);
	if (!sock)
		fput(file);	/* not a socket: drop the reference we took */
	return sock;
}


Benjamin LaHaise6492.75%150.00%
Stephen Hemminger57.25%150.00%

static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { struct fd f = fdget(fd); struct socket *sock; *err = -EBADF; if (f.file) { sock = sock_from_file(f.file, err); if (likely(sock)) { *fput_needed = f.flags; return sock; } fdput(f); } return NULL; }


Benjamin LaHaise5463.53%133.33%
Al Viro2529.41%133.33%
Hua Zhong67.06%133.33%

static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer, size_t size) { ssize_t len; ssize_t used = 0; len = security_inode_listsecurity(d_inode(dentry), buffer, size); if (len < 0) return len; used += len; if (buffer) { if (size < used) return -ERANGE; buffer += len; } len = (XATTR_NAME_SOCKPROTONAME_LEN + 1); used += len; if (buffer) { if (size < used) return -ERANGE; memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len); buffer += len; } return used; }


Masatake YAMATO11497.44%150.00%
David Howells32.56%150.00%

static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr) { int err = simple_setattr(dentry, iattr); if (!err && (iattr->ia_valid & ATTR_UID)) { struct socket *sock = SOCKET_I(d_inode(dentry)); sock->sk->sk_uid = iattr->ia_uid; } return err; }


Lorenzo Colitti5886.57%133.33%
Eric Biggers811.94%133.33%
Tobias Klauser11.49%133.33%

static const struct inode_operations sockfs_inode_ops = { .listxattr = sockfs_listxattr, .setattr = sockfs_setattr, }; /** * sock_alloc - allocate a socket * * Allocate a new inode and socket object. The two are bound together * and initialised. The socket is then returned. If we are out of inodes * NULL is returned. */
struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode_pseudo(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_op = &sockfs_inode_ops; this_cpu_add(sockets_in_use, 1); return sock; }


Linus Torvalds (pre-git)5764.04%535.71%
Masatake YAMATO77.87%17.14%
Christoph Hellwig77.87%17.14%
Linus Torvalds55.62%17.14%
David Howells44.49%17.14%
Ravikiran G. Thirumalai44.49%17.14%
Eric Dumazet33.37%214.29%
Alex Shi11.12%17.14%
Al Viro11.12%17.14%

EXPORT_SYMBOL(sock_alloc); /** * sock_release - close a socket * @sock: socket to close * * The socket is released from the protocol stack if it has a release * callback, and the inode is then released if the socket is bound to * an inode not a file. */
void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) pr_err("%s: fasync list not empty!\n", __func__); this_cpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; }


Linus Torvalds (pre-git)5555.00%430.77%
Arnaldo Carvalho de Melo2424.00%215.38%
Eric Dumazet99.00%323.08%
Ravikiran G. Thirumalai44.00%17.69%
Yang Yingliang44.00%17.69%
Al Viro33.00%17.69%
Alex Shi11.00%17.69%

void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags) { u8 flags = *tx_flags; if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE) flags |= SKBTX_HW_TSTAMP; if (tsflags & SOF_TIMESTAMPING_TX_SOFTWARE) flags |= SKBTX_SW_TSTAMP; if (tsflags & SOF_TIMESTAMPING_TX_SCHED) flags |= SKBTX_SCHED_TSTAMP; *tx_flags = flags; }


Patrick Ohly1426.42%111.11%
Eric Dumazet1222.64%111.11%
Willem de Bruijn1120.75%333.33%
Oliver Hartkopp815.09%111.11%
Soheil Hassas Yeganeh59.43%111.11%
Johannes Berg23.77%111.11%
Daniel Borkmann11.89%111.11%

static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg) { int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg)); BUG_ON(ret == -EIOCBQUEUED); return ret; }


Al Viro1838.30%222.22%
Anton Blanchard1327.66%111.11%
Linus Torvalds (pre-git)1327.66%333.33%
Benjamin LaHaise24.26%222.22%
Ying Xue12.13%111.11%

int sock_sendmsg(struct socket *sock, struct msghdr *msg) { int err = security_socket_sendmsg(sock, msg, msg_data_left(msg)); return err ?: sock_sendmsg_nosec(sock, msg); }


Anton Blanchard1843.90%114.29%
James Morris921.95%114.29%
Gu Zheng717.07%114.29%
Al Viro49.76%228.57%
Ying Xue24.88%114.29%
Linus Torvalds (pre-git)12.44%114.29%

int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size); return sock_sendmsg(sock, msg); }


Al Viro52100.00%3100.00%

int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, struct kvec *vec, size_t num, size_t size) { struct socket *sock = sk->sk_socket; if (!sock->ops->sendmsg_locked) return sock_no_sendmsg_locked(sk, msg, size); iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size); return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg)); }


Tom Herbert8797.75%150.00%
John Fastabend22.25%150.00%

static bool skb_is_err_queue(const struct sk_buff *skb) { /* pkt_type of skbs enqueued on the error queue are set to * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do * in recvmsg, since skbs received on a local socket will never * have a pkt_type of PACKET_OUTGOING. */ return skb->pkt_type == PACKET_OUTGOING; }


Soheil Hassas Yeganeh20100.00%1100.00%

/* On transmit, software and hardware timestamps are returned independently.
 * As the two skb clones share the hardware timestamp, which may be updated
 * before the software timestamp is received, a hardware TX timestamp may be
 * returned only if there is no software TX timestamp. Ignore false software
 * timestamps, which may be made in the __sock_recv_timestamp() call when the
 * option SO_TIMESTAMP(NS) is enabled on the socket, even when the skb has a
 * hardware timestamp.
 */
static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
{
	return skb->tstamp && !false_tstamp && skb_is_err_queue(skb);
}


Miroslav Lichvar28100.00%1100.00%

static void put_ts_pktinfo(struct msghdr *msg, struct sk_buff *skb) { struct scm_ts_pktinfo ts_pktinfo; struct net_device *orig_dev; if (!skb_mac_header_was_set(skb)) return; memset(&ts_pktinfo, 0, sizeof(ts_pktinfo)); rcu_read_lock(); orig_dev = dev_get_by_napi_id(skb_napi_id(skb)); if (orig_dev) ts_pktinfo.if_index = orig_dev->ifindex; rcu_read_unlock(); ts_pktinfo.pkt_length = skb->len - skb_mac_offset(skb); put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_PKTINFO, sizeof(ts_pktinfo), &ts_pktinfo); }


Miroslav Lichvar105100.00%1100.00%

/* * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) */
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP); struct scm_timestamping tss; int empty = 1, false_tstamp = 0; struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb)