Release 4.11 net/unix/af_unix.c
/*
* NET4: Implementation of BSD Unix domain sockets.
*
* Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
* Niibe Yutaka : async I/O support.
* Carsten Paeth : PF_UNIX check, address fixes.
* Alan Cox : Limit size of allocated blocks.
* Alan Cox : Fixed the stupid socketpair bug.
* Alan Cox : BSD compatibility fine tuning.
* Alan Cox : Fixed a bug in connect when interrupted.
* Alan Cox : Sorted out a proper draft version of
* file descriptor passing hacked up from
* Mike Shaver's work.
* Marty Leisner : Fixes to fd passing
* Nick Nevin : recvmsg bugfix.
* Alan Cox : Started proper garbage collector
* Heiko EiBfeldt : Missing verify_area check
* Alan Cox : Started POSIXisms
* Andreas Schwab : Replace inode by dentry for proper
* reference counting
* Kirk Petersen : Made this a module
* Christoph Rohland : Elegant non-blocking accept/connect algorithm.
* Lots of bug fixes.
* Alexey Kuznetsov : Repaired (I hope) bugs introduced
* by the above two patches.
* Andrea Arcangeli : If possible we block in connect(2)
* if the max backlog of the listen socket
* has been reached. This won't break
* old apps and it avoids a huge number
* of hashed socks (for unix_gc()
* performance reasons).
* Security fix that limits the max
* number of socks to 2*max_files and
* the number of skb queueable in the
* dgram receiver.
* Artur Skawina : Hash function optimizations
* Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
* Malcolm Beattie : Set peercred for socketpair
* Michal Ostrowski : Module initialization cleanup.
* Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
* the core infrastructure is doing that
* for all net proto families now (2.5.69+)
*
*
* Known differences from reference BSD that was tested:
*
* [TO FIX]
* ECONNREFUSED is not returned from one end of a connected() socket to the
* other the moment one end closes.
* fstat() doesn't return st_dev=0, and gives the blksize as high water mark
* and a fake inode identifier (nor the BSD first socket fstat twice bug).
* [NOT TO FIX]
* accept() returns a path name even if the connecting socket has closed
* in the meantime (BSD loses the path and gives up).
* accept() returns 0 length path for an unbound connector. BSD returns 16
* and a null first byte in the path (but not for gethost/peername - BSD bug ??)
* socketpair(...SOCK_RAW..) doesn't panic the kernel.
* BSD af_unix apparently forgets to block properly on connect.
* (need to check this with the POSIX spec in detail)
*
* Differences from 2.0.0-11-... (ANK)
* Bug fixes and improvements.
* - client shutdown killed server socket.
* - removed all useless cli/sti pairs.
*
* Semantic changes/extensions.
* - generic control message passing.
* - SCM_CREDENTIALS control message.
* - "Abstract" (not FS based) socket bindings.
* Abstract names are sequences of bytes (not zero terminated)
* that start with a zero byte, so that this name space does
* not intersect with BSD names.
*/
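The abstract namespace mentioned above is easiest to see from userspace. Below is a minimal, hypothetical sketch (standard POSIX/Linux sockets API, not part of this file; names, lengths and error handling are illustrative only) that binds one socket to a filesystem path and another to an abstract name whose first byte is zero:

/* Hypothetical userspace illustration of filesystem vs. abstract binds.
 * Not part of af_unix.c; error handling omitted for brevity.
 */
#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_fs_example(void)
{
	struct sockaddr_un a;
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	strcpy(a.sun_path, "/tmp/example.sock");	/* FS object, NUL terminated */
	return bind(fd, (struct sockaddr *)&a,
		    offsetof(struct sockaddr_un, sun_path) +
		    strlen(a.sun_path) + 1);
}

static int bind_abstract_example(void)
{
	struct sockaddr_un a;
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	memset(&a, 0, sizeof(a));
	a.sun_family = AF_UNIX;
	a.sun_path[0] = '\0';				/* leading zero => abstract name */
	memcpy(a.sun_path + 1, "example", 7);		/* bytes, not a C string */
	return bind(fd, (struct sockaddr *)&a,
		    offsetof(struct sockaddr_un, sun_path) + 1 + 7);
}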
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
#include <linux/file.h>
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;
static struct hlist_head *unix_sockets_unbound(void *addr)
{
unsigned long hash = (unsigned long)addr;
hash ^= hash >> 16;
hash ^= hash >> 8;
hash %= UNIX_HASH_SIZE;
return &unix_socket_table[UNIX_HASH_SIZE + hash];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 46 | 97.87% | 1 | 50.00% |
Linus Torvalds (pre-git) | 1 | 2.13% | 1 | 50.00% |
Total | 47 | 100.00% | 2 | 100.00% |
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
UNIXCB(skb).secid = scm->secid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Catherine Zhang | 23 | 85.19% | 2 | 66.67% |
Stephen D. Smalley | 4 | 14.81% | 1 | 33.33% |
Total | 27 | 100.00% | 3 | 100.00% |
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->secid = UNIXCB(skb).secid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Catherine Zhang | 25 | 89.29% | 2 | 66.67% |
Stephen D. Smalley | 3 | 10.71% | 1 | 33.33% |
Total | 28 | 100.00% | 3 | 100.00% |
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
return (scm->secid == UNIXCB(skb).secid);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen D. Smalley | 31 | 100.00% | 1 | 100.00% |
Total | 31 | 100.00% | 1 | 100.00% |
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Catherine Zhang | 15 | 93.75% | 2 | 66.67% |
Andrew Morton | 1 | 6.25% | 1 | 33.33% |
Total | 16 | 100.00% | 3 | 100.00% |
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Catherine Zhang | 16 | 100.00% | 1 | 100.00% |
Total | 16 | 100.00% | 1 | 100.00% |
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen D. Smalley | 20 | 100.00% | 1 | 100.00% |
Total | 20 | 100.00% | 1 | 100.00% |
#endif /* CONFIG_SECURITY_NETWORK */
/*
* SMP locking strategy:
* hash table is protected with spinlock unix_table_lock
* each socket state is protected by separate spin lock.
*/
static inline unsigned int unix_hash_fold(__wsum n)
{
unsigned int hash = (__force unsigned int)csum_fold(n);
hash ^= hash>>8;
return hash&(UNIX_HASH_SIZE-1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 21 | 52.50% | 3 | 42.86% |
Al Viro | 11 | 27.50% | 1 | 14.29% |
Anton Blanchard | 3 | 7.50% | 1 | 14.29% |
Eric Dumazet | 3 | 7.50% | 1 | 14.29% |
Linus Torvalds | 2 | 5.00% | 1 | 14.29% |
Total | 40 | 100.00% | 7 | 100.00% |
#define unix_peer(sk) (unix_sk(sk)->peer)
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
return unix_peer(osk) == sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 19 | 76.00% | 2 | 50.00% |
Arnaldo Carvalho de Melo | 4 | 16.00% | 1 | 25.00% |
Linus Torvalds | 2 | 8.00% | 1 | 25.00% |
Total | 25 | 100.00% | 4 | 100.00% |
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 26 | 81.25% | 5 | 71.43% |
Arnaldo Carvalho de Melo | 4 | 12.50% | 1 | 14.29% |
Linus Torvalds | 2 | 6.25% | 1 | 14.29% |
Total | 32 | 100.00% | 7 | 100.00% |
static inline int unix_recvq_full(struct sock const *sk)
{
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rainer Weikusat | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
struct sock *unix_peer_get(struct sock *s)
{
struct sock *peer;
unix_state_lock(s);
peer = unix_peer(s);
if (peer)
sock_hold(peer);
unix_state_unlock(s);
return peer;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 38 | 82.61% | 6 | 75.00% |
Arnaldo Carvalho de Melo | 6 | 13.04% | 1 | 12.50% |
David S. Miller | 2 | 4.35% | 1 | 12.50% |
Total | 46 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(unix_peer_get);
static inline void unix_release_addr(struct unix_address *addr)
{
if (atomic_dec_and_test(&addr->refcnt))
kfree(addr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 25 | 92.59% | 2 | 50.00% |
Linus Torvalds | 1 | 3.70% | 1 | 25.00% |
David S. Miller | 1 | 3.70% | 1 | 25.00% |
Total | 27 | 100.00% | 4 | 100.00% |
/*
* Check unix socket name:
* - it should not be zero length.
* - if it does not start with a zero byte, it should be NUL terminated (FS object)
* - if it starts with a zero byte, it is an abstract name.
*/
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
if (len <= sizeof(short) || len > sizeof(*sunaddr))
return -EINVAL;
if (!sunaddr || sunaddr->sun_family != AF_UNIX)
return -EINVAL;
if (sunaddr->sun_path[0]) {
/*
* This may look like an off by one error but it is a bit more
* subtle. 108 is the longest valid AF_UNIX path for a binding.
* sun_path[108] doesn't as such exist. However in kernel space
* we are guaranteed that it is a valid memory location in our
* kernel address buffer.
*/
((char *)sunaddr)[len] = 0;
len = strlen(sunaddr->sun_path)+1+sizeof(short);
return len;
}
*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
return len;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 115 | 98.29% | 2 | 50.00% |
Eric Dumazet | 1 | 0.85% | 1 | 25.00% |
Lucas De Marchi | 1 | 0.85% | 1 | 25.00% |
Total | 117 | 100.00% | 4 | 100.00% |
static void __unix_remove_socket(struct sock *sk)
{
sk_del_node_init(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 11 | 68.75% | 3 | 60.00% |
Arnaldo Carvalho de Melo | 5 | 31.25% | 2 | 40.00% |
Total | 16 | 100.00% | 5 | 100.00% |
static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
WARN_ON(!sk_unhashed(sk));
sk_add_node(sk, list);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 19 | 59.38% | 3 | 42.86% |
Arnaldo Carvalho de Melo | 10 | 31.25% | 2 | 28.57% |
Ilpo Järvinen | 2 | 6.25% | 1 | 14.29% |
David S. Miller | 1 | 3.12% | 1 | 14.29% |
Total | 32 | 100.00% | 7 | 100.00% |
static inline void unix_remove_socket(struct sock *sk)
{
spin_lock(&unix_table_lock);
__unix_remove_socket(sk);
spin_unlock(&unix_table_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 24 | 82.76% | 2 | 40.00% |
David S. Miller | 2 | 6.90% | 1 | 20.00% |
Arnaldo Carvalho de Melo | 2 | 6.90% | 1 | 20.00% |
Linus Torvalds | 1 | 3.45% | 1 | 20.00% |
Total | 29 | 100.00% | 5 | 100.00% |
static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
spin_lock(&unix_table_lock);
__unix_insert_socket(list, sk);
spin_unlock(&unix_table_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 29 | 80.56% | 2 | 33.33% |
Arnaldo Carvalho de Melo | 4 | 11.11% | 2 | 33.33% |
David S. Miller | 2 | 5.56% | 1 | 16.67% |
Linus Torvalds | 1 | 2.78% | 1 | 16.67% |
Total | 36 | 100.00% | 6 | 100.00% |
static struct sock *__unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
int len, int type, unsigned int hash)
{
struct sock *s;
sk_for_each(s, &unix_socket_table[hash ^ type]) {
struct unix_sock *u = unix_sk(s);
if (!net_eq(sock_net(s), net))
continue;
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
goto found;
}
s = NULL;
found:
return s;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 48 | 47.52% | 3 | 30.00% |
Arnaldo Carvalho de Melo | 19 | 18.81% | 2 | 20.00% |
David S. Miller | 14 | 13.86% | 1 | 10.00% |
Denis V. Lunev | 11 | 10.89% | 1 | 10.00% |
Hideaki Yoshifuji / 吉藤英明 | 8 | 7.92% | 2 | 20.00% |
Eric Dumazet | 1 | 0.99% | 1 | 10.00% |
Total | 101 | 100.00% | 10 | 100.00% |
static inline struct sock *unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
int len, int type,
unsigned int hash)
{
struct sock *s;
spin_lock(&unix_table_lock);
s = __unix_find_socket_byname(net, sunname, len, type, hash);
if (s)
sock_hold(s);
spin_unlock(&unix_table_lock);
return s;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 58 | 79.45% | 3 | 37.50% |
Denis V. Lunev | 7 | 9.59% | 1 | 12.50% |
Arnaldo Carvalho de Melo | 4 | 5.48% | 1 | 12.50% |
David S. Miller | 2 | 2.74% | 1 | 12.50% |
Linus Torvalds | 1 | 1.37% | 1 | 12.50% |
Eric Dumazet | 1 | 1.37% | 1 | 12.50% |
Total | 73 | 100.00% | 8 | 100.00% |
static struct sock *unix_find_socket_byinode(struct inode *i)
{
struct sock *s;
spin_lock(&unix_table_lock);
sk_for_each(s,
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
if (dentry && d_backing_inode(dentry) == i) {
sock_hold(s);
goto found;
}
}
s = NULL;
found:
spin_unlock(&unix_table_lock);
return s;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 54 | 64.29% | 4 | 36.36% |
Arnaldo Carvalho de Melo | 20 | 23.81% | 2 | 18.18% |
David S. Miller | 5 | 5.95% | 2 | 18.18% |
Al Viro | 2 | 2.38% | 1 | 9.09% |
David Howells | 2 | 2.38% | 1 | 9.09% |
Miklos Szeredi | 1 | 1.19% | 1 | 9.09% |
Total | 84 | 100.00% | 11 | 100.00% |
/* Support code for asymmetrically connected dgram sockets
*
* If a datagram socket is connected to a socket not itself connected
* to the first socket (eg, /dev/log), clients may only enqueue more
* messages if the present receive queue of the server socket is not
* "too large". This means there's a second writeability condition
* poll and sendmsg need to test. The dgram recv code will do a wake
* up on the peer_wait wait queue of a socket upon reception of a
* datagram which needs to be propagated to sleeping would-be writers
* since these might not have sent anything so far. This can't be
* accomplished via poll_wait because the lifetime of the server
* socket might be less than that of its clients if these break their
* association with it or if the server socket is closed while clients
* are still connected to it and there's no way to inform "a polling
* implementation" that it should let go of a certain wait queue
*
* In order to propagate a wake up, a wait_queue_t of the client
* socket is enqueued on the peer_wait queue of the server socket
* whose wake function does a wake_up on the ordinary client socket
* wait queue. This connection is established whenever a write (or
* poll for write) hit the flow control condition and broken when the
* association to the server socket is dissolved or after a wake up
* was relayed.
*/
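From userspace, the condition this support code propagates is simply that a connected datagram sender may have to wait for POLLOUT while the receiver's queue is over its backlog. A minimal, hypothetical sketch of such a sender (the fd is assumed to be a datagram socket already connect()ed to a log-style server path):

/* Hypothetical userspace sketch: a connected datagram client waits for
 * POLLOUT before sending; this is the writeability condition relayed via
 * the peer_wait machinery described above. Error handling is abbreviated.
 */
#include <stddef.h>
#include <poll.h>
#include <sys/socket.h>

static int send_when_writable(int fd, const void *buf, size_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };

	/* Blocks until the peer's receive queue has drained enough. */
	if (poll(&pfd, 1, -1) < 0)
		return -1;
	return send(fd, buf, len, 0);
}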
static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
void *key)
{
struct unix_sock *u;
wait_queue_head_t *u_sleep;
u = container_of(q, struct unix_sock, peer_wake);
__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
q);
u->peer_wake.private = NULL;
/* relaying can only happen while the wq still exists */
u_sleep = sk_sleep(&u->sk);
if (u_sleep)
wake_up_interruptible_poll(u_sleep, key);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rainer Weikusat | 91 | 100.00% | 1 | 100.00% |
Total | 91 | 100.00% | 1 | 100.00% |
static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
struct unix_sock *u, *u_other;
int rc;
u = unix_sk(sk);
u_other = unix_sk(other);
rc = 0;
spin_lock(&u_other->peer_wait.lock);
if (!u->peer_wake.private) {
u->peer_wake.private = other;
__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
rc = 1;
}
spin_unlock(&u_other->peer_wait.lock);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rainer Weikusat | 104 | 100.00% | 1 | 100.00% |
Total | 104 | 100.00% | 1 | 100.00% |
static void unix_dgram_peer_wake_disconnect(struct sock *sk,
struct sock *other)
{
struct unix_sock *u, *u_other;
u = unix_sk(sk);
u_other = unix_sk(other);
spin_lock(&u_other->peer_wait.lock);
if (u->peer_wake.private == other) {
__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
u->peer_wake.private = NULL;
}
spin_unlock(&u_other->peer_wait.lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rainer Weikusat | 91 | 100.00% | 1 | 100.00% |
Total | 91 | 100.00% | 1 | 100.00% |
static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
struct sock *other)
{
unix_dgram_peer_wake_disconnect(sk, other);
wake_up_interruptible_poll(sk_sleep(sk),
POLLOUT |
POLLWRNORM |
POLLWRBAND);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rainer Weikusat | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
/* preconditions:
* - unix_peer(sk) == other
* - association is stable
*/
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
int connected;
connected = unix_dgram_peer_wake_connect(sk, other);
if (unix_recvq_full(other))
return 1;
if (connected)
unix_dgram_peer_wake_disconnect(sk, other);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Rainer Weikusat | 52 | 100.00% | 1 | 100.00% |
Total | 52 | 100.00% | 1 | 100.00% |
static int unix_writable(const struct sock *sk)
{
return sk->sk_state != TCP_LISTEN &&
(atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 26 | 74.29% | 3 | 60.00% |
Eric Dumazet | 7 | 20.00% | 1 | 20.00% |
Arnaldo Carvalho de Melo | 2 | 5.71% | 1 | 20.00% |
Total | 35 | 100.00% | 5 | 100.00% |
static void unix_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
if (unix_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
POLLOUT | POLLWRNORM | POLLWRBAND);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 38 | 54.29% | 7 | 63.64% |
Eric Dumazet | 30 | 42.86% | 2 | 18.18% |
Pavel Emelyanov | 1 | 1.43% | 1 | 9.09% |
Herbert Xu | 1 | 1.43% | 1 | 9.09% |
Total | 70 | 100.00% | 11 | 100.00% |
/* When a dgram socket disconnects (or changes its peer), we clear its receive
* queue of packets that arrived from the previous peer. First, this allows
* flow control based only on wmem_alloc; second, an sk connected to a peer
* may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
if (!skb_queue_empty(&sk->sk_receive_queue)) {
skb_queue_purge(&sk->sk_receive_queue);
wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
/* If one link of bidirectional dgram pipe is disconnected,
* we signal error. Messages are lost. Do not make this,
* when peer was not connected to us.
*/
if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
other->sk_err = ECONNRESET;
other->sk_error_report(other);
}
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 67 | 82.72% | 1 | 16.67% |
Arnaldo Carvalho de Melo | 7 | 8.64% | 2 | 33.33% |
David S. Miller | 5 | 6.17% | 2 | 33.33% |
James Morris | 2 | 2.47% | 1 | 16.67% |
Total | 81 | 100.00% | 6 | 100.00% |
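A hedged userspace illustration of the semantics above (paths hypothetical, error handling omitted): reconnecting a datagram socket to a new peer discards anything still queued from the old peer, and an old peer that was connected back to us is signalled ECONNRESET.

/* Hypothetical userspace sketch of unix_dgram_disconnected() semantics. */
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>

static void set_addr(struct sockaddr_un *a, const char *path)
{
	memset(a, 0, sizeof(*a));
	a->sun_family = AF_UNIX;
	strcpy(a->sun_path, path);
}

static void reconnect_example(int fd)
{
	struct sockaddr_un a;

	set_addr(&a, "/run/peer-a.sock");
	connect(fd, (struct sockaddr *)&a, sizeof(a));	/* datagrams from A may queue */

	set_addr(&a, "/run/peer-b.sock");
	connect(fd, (struct sockaddr *)&a, sizeof(a));	/* queue from A is purged; if A
							 * was connected to us, its next
							 * operation reports ECONNRESET */
}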
static void unix_sock_destructor(struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
skb_queue_purge(&sk->sk_receive_queue);
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_info("Attempt to release alive unix socket: %p\n", sk);
return;
}
if (u->addr)
unix_release_addr(u->addr);
atomic_long_dec(&unix_nr_socks);
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 79 | 58.96% | 3 | 23.08% |
Eric Dumazet | 18 | 13.43% | 2 | 15.38% |
David S. Miller | 18 | 13.43% | 2 | 15.38% |
Arnaldo Carvalho de Melo | 10 | 7.46% | 3 | 23.08% |
Ilpo Järvinen | 4 | 2.99% | 1 | 7.69% |
James Morris | 3 | 2.24% | 1 | 7.69% |
Wang Weidong | 2 | 1.49% | 1 | 7.69% |
Total | 134 | 100.00% | 13 | 100.00% |
static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
struct path path;
struct sock *skpair;
struct sk_buff *skb;
int state;
unix_remove_socket(sk);
/* Clear state */
unix_state_lock(sk);
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
path = u->path;
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
sk->sk_state = TCP_CLOSE;
unix_state_unlock(sk);
wake_up_interruptible_all(&u->peer_wait);
skpair = unix_peer(sk);
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
unix_state_lock(skpair);
/* No more writes */
skpair->sk_shutdown = SHUTDOWN_MASK;
if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
skpair->sk_err = ECONNRESET;
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
}
unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
}
/* Try to flush out this socket. Throw out buffers at least */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state == TCP_LISTEN)
unix_release_sock(skb->sk, 1);
/* passed fds are erased in the kfree_skb hook */
UNIXCB(skb).consumed = skb->len;
kfree_skb(skb);
}
if (path.dentry)
path_put(&path);
sock_put(sk);
/* ---- Socket is dead now and most probably destroyed ---- */
/*
* Fixme: BSD difference: In BSD all sockets connected to us get
* ECONNRESET and we die on the spot. In Linux we behave
* like files and pipes do and wait for the last
* dereference.
*
* Can't we simply set sock->err?
*
* What the above comment does talk about? --ANK(980817)
*/
if (unix_tot_inflight)
unix_gc(); /* Garbage collect fds */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 219 | 75.26% | 11 | 50.00% |
David S. Miller | 18 | 6.19% | 2 | 9.09% |
Arnaldo Carvalho de Melo | 14 | 4.81% | 2 | 9.09% |
Al Viro | 13 | 4.47% | 1 | 4.55% |
Hannes Frederic Sowa | 11 | 3.78% | 1 | 4.55% |
Rainer Weikusat | 7 | 2.41% | 1 | 4.55% |
Steven Dake | 6 | 2.06% | 1 | 4.55% |
Paul Moore | 1 | 0.34% | 1 | 4.55% |
Alan Cox | 1 | 0.34% | 1 | 4.55% |
Pavel Emelyanov | 1 | 0.34% | 1 | 4.55% |
Total | 291 | 100.00% | 22 | 100.00% |
static void init_peercred(struct sock *sk)
{
put_pid(sk->sk_peer_pid);
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
sk->sk_peer_pid = get_pid(task_tgid(current));
sk->sk_peer_cred = get_current_cred();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 50 | 100.00% | 1 | 100.00% |
Total | 50 | 100.00% | 1 | 100.00% |
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
put_pid(sk->sk_peer_pid);
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric W. Biedermann | 58 | 100.00% | 1 | 100.00% |
Total | 58 | 100.00% | 1 | 100.00% |
static int unix_listen(struct socket *sock, int backlog)
{
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
struct pid *old_pid = NULL;
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
if (!u->addr)
goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
init_peercred(sk);
err = 0;
out_unlock:
unix_state_unlock(sk);
put_pid(old_pid);
out:
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 118 | 74.21% | 7 | 50.00% |
David S. Miller | 14 | 8.81% | 2 | 14.29% |
Eric W. Biedermann | 13 | 8.18% | 1 | 7.14% |
Steven Dake | 7 | 4.40% | 1 | 7.14% |
Arnaldo Carvalho de Melo | 5 | 3.14% | 1 | 7.14% |
Pavel Emelyanov | 1 | 0.63% | 1 | 7.14% |
David Howells | 1 | 0.63% | 1 | 7.14% |
Total | 159 | 100.00% | 14 | 100.00% |
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
struct pipe_inode_info *, size_t size,
unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
int);
static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
if (mutex_lock_interruptible(&u->iolock))
return -EINTR;
sk->sk_peek_off = val;
mutex_unlock(&u->iolock);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pavel Emelyanov | 41 | 74.55% | 1 | 33.33% |
Sasha Levin | 12 | 21.82% | 1 | 33.33% |
Linus Torvalds | 2 | 3.64% | 1 | 33.33% |
Total | 55 | 100.00% | 3 | 100.00% |
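unix_set_peek_off() backs the SO_PEEK_OFF socket option. A minimal, hypothetical userspace sketch of how the option is typically used together with MSG_PEEK (assuming a libc that exposes SO_PEEK_OFF; buffer handling illustrative only):

/* Hypothetical userspace sketch: with SO_PEEK_OFF set to 0, successive
 * MSG_PEEK reads advance the peek offset instead of re-reading the same
 * bytes from the head of the queue.
 */
#include <sys/socket.h>

static int peek_in_order(int fd, char *buf, int chunk)
{
	int off = 0;

	if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)) < 0)
		return -1;

	/* Each call peeks the next 'chunk' bytes without consuming them. */
	recv(fd, buf, chunk, MSG_PEEK);
	recv(fd, buf, chunk, MSG_PEEK);
	return 0;
}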
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_stream_connect,
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
.poll = unix_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_stream_sendmsg,
.recvmsg = unix_stream_recvmsg,
.mmap = sock_no_mmap,
.sendpage = unix_stream_sendpage,
.splice_read = unix_stream_splice_read,
.set_peek_off = unix_set_peek_off,
};
static const struct proto_ops unix_dgram_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_dgram_connect,
.socketpair = unix_socketpair,
.accept = sock_no_accept,
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = sock_no_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_dgram_sendmsg,
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
.set_peek_off = unix_set_peek_off,
};
static const struct proto_ops unix_seqpacket_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_stream_connect,
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_seqpacket_sendmsg,
.recvmsg = unix_seqpacket_recvmsg,