cregit-Linux: how code gets into the kernel

Release 4.8: net/unix/garbage.c

Directory: net/unix
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
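The cycle Al Viro describes above is easy to construct from user space. The following stand-alone sketch (illustrative only, not part of this file) sends each end of a socketpair across the other end via SCM_RIGHTS and then closes both descriptors; after that, the only remaining references are the in-flight ones, which is exactly the garbage this collector exists to reclaim:

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_fd(int via, int fd)
{
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	char ctrl[CMSG_SPACE(sizeof(int))];
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = ctrl;
	msg.msg_controllen = sizeof(ctrl);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	sendmsg(via, &msg, 0);
}

int main(void)
{
	int sv[2];

	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
	send_fd(sv[0], sv[1]);	/* sv[1] is now in flight in sv[0]'s queue */
	send_fd(sv[1], sv[0]);	/* sv[0] is now in flight in sv[1]'s queue */
	close(sv[0]);		/* drop both external references; the     */
	close(sv[1]);		/* sockets now only reference each other  */
	return 0;
}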

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/* PF_UNIX ? */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}


author                      tokens  tokens%  commits  commits%
al viro                         15   16.13%        3    30.00%
arnaldo carvalho de melo         4    4.30%        1    10.00%
matthew wilcox                   4    4.30%        1    10.00%
jason eastman                    2    2.15%        1    10.00%

/* Keep the number of times in flight count for the file
 * descriptor if it is for an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
	}
	user->unix_inflight++;
	spin_unlock(&unix_gc_lock);
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                  58   51.79%        1    10.00%
willy tarreau                   12   10.71%        1    10.00%
hannes frederic sowa             5    4.46%        1    10.00%
david s. miller                  3    2.68%        1    10.00%
arnaldo carvalho de melo         2    1.79%        1    10.00%
al viro                          1    0.89%        1    10.00%
pavel emelianov                  1    0.89%        1    10.00%

void unix_notinflight(struct user_struct *user, struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	spin_lock(&unix_gc_lock);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		BUG_ON(list_empty(&u->link));

		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
	}
	user->unix_inflight--;
	spin_unlock(&unix_gc_lock);
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                  36   40.00%        1    10.00%
willy tarreau                   12   13.33%        1    10.00%
hannes frederic sowa             5    5.56%        1    10.00%
david s. miller                  3    3.33%        1    10.00%
arnaldo carvalho de melo         2    2.22%        1    10.00%
al viro                          1    1.11%        1    10.00%
pavel emelianov                  1    1.11%        1    10.00%
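Neither helper is called from this file. For context, here is a hedged sketch of the attach side, modeled on unix_attach_fds() in net/unix/af_unix.c (simplified; error handling and the descriptor-recursion bookkeeping are elided): every descriptor attached to an outgoing skb is counted as in flight, and the symmetric detach path calls unix_notinflight() when the skb is received or destroyed.

/* Hedged sketch, modeled on unix_attach_fds() in net/unix/af_unix.c;
 * simplified for illustration.
 */
static int unix_attach_fds_sketch(struct scm_cookie *scm, struct sk_buff *skb)
{
	int i;

	/* Pin a private copy of the fd list to the skb ... */
	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
	if (!UNIXCB(skb).fp)
		return -ENOMEM;

	/* ... and count every passed descriptor as in flight. */
	for (i = scm->fp->count - 1; i >= 0; i--)
		unix_inflight(scm->fp->user, scm->fp->fp[i]);
	return 0;
}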

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                  82   41.21%        2    12.50%
eric dumazet                     7    3.52%        1     6.25%
jason eastman                    4    2.01%        1     6.25%
arnaldo carvalho de melo         4    2.01%        2    12.50%
ilpo jarvinen                    4    2.01%        1     6.25%
linus torvalds                   2    1.01%        1     6.25%
pavel emelianov                  2    1.01%        1     6.25%
andrew morton                    1    0.50%        1     6.25%
david s. miller                  1    0.50%        1     6.25%

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                 128   71.91%        1     8.33%
jason eastman                    4    2.25%        1     8.33%
david s. miller                  4    2.25%        2    16.67%
ilpo jarvinen                    4    2.25%        1     8.33%
arnaldo carvalho de melo         1    0.56%        1     8.33%
pavel emelianov                  1    0.56%        1     8.33%

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                  15   78.95%        1    33.33%
pavel emelianov                  3   15.79%        1    33.33%
al viro                          1    5.26%        1    33.33%

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                  15   78.95%        1    33.33%
pavel emelianov                  3   15.79%        1    33.33%
al viro                          1    5.26%        1    33.33%

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                  28   65.12%        1    14.29%
eric dumazet                     7   16.28%        1    14.29%
pavel emelianov                  2    4.65%        1    14.29%
al viro                          1    2.33%        1    14.29%
jason eastman                    1    2.33%        1    14.29%

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If number of inflight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}


author                      tokens  tokens%  commits  commits%
dann frazier                    16   55.17%        1    33.33%
eric dumazet                    12   41.38%        1    33.33%
jason eastman                    1    3.45%        1    33.33%
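For context, a hedged sketch of where this throttle sits (the AF_UNIX sendmsg implementations in net/unix/af_unix.c call wait_for_unix_gc() on entry): a task flooding the system with in-flight descriptors is held behind a full collection instead of growing the graph without bound.

/* Illustrative only: the shape of the call site in the sendmsg paths. */
static int unix_sendmsg_sketch(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	wait_for_unix_gc();	/* may force a GC and/or wait for one to end */
	/* ... the normal send path continues here ... */
	return len;
}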

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non candidate sockets _can_ be
	 * added to queue, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight reference to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates,
	 * which have remaining references. Do this recursively, so
	 * only those remain, which form cyclic references.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/* Now gc_candidates contains only garbage. Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}


author                      tokens  tokens%  commits  commits%
miklos szeredi                 315   81.40%        2    16.67%
eric dumazet                    28    7.24%        1     8.33%
dann frazier                     6    1.55%        1     8.33%
jason eastman                    5    1.29%        1     8.33%
al viro                          4    1.03%        1     8.33%
david s. miller                  2    0.52%        2    16.67%
linus torvalds                   1    0.26%        1     8.33%
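When does the collector actually run? Besides the overload path in wait_for_unix_gc(), the final close of an AF_UNIX socket kicks it whenever any descriptors are in flight. A hedged sketch of the relevant fragment of unix_release_sock() in net/unix/af_unix.c:

/* Hedged sketch of the fragment of unix_release_sock() that drives GC. */
static void unix_release_sock_sketch(struct sock *sk)
{
	/* ... socket teardown elided ... */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}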

Overall Contributors

author                      tokens  tokens%  commits  commits%
miklos szeredi                 696   55.37%        2     5.71%
eric dumazet                    58    4.61%        2     5.71%
dann frazier                    35    2.78%        1     2.86%
al viro                         24    1.91%        4    11.43%
willy tarreau                   24    1.91%        1     2.86%
jason eastman                   18    1.43%        1     2.86%
arnaldo carvalho de melo        17    1.35%        3     8.57%
pavel emelianov                 15    1.19%        2     5.71%
david s. miller                 13    1.03%        3     8.57%
hannes frederic sowa            10    0.80%        1     2.86%
ilpo jarvinen                    8    0.64%        1     2.86%
matthew wilcox                   4    0.32%        1     2.86%
arjan van de ven                 3    0.24%        1     2.86%
linus torvalds                   3    0.24%        2     5.71%
andrew morton                    1    0.08%        1     2.86%