cregit-Linux how code gets into the kernel

Release 4.8 net/netlink/af_netlink.c

Directory: net/netlink
/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *                              Patrick McHardy <kaber@trash.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
 *                               - inc module use count of module that owns
 *                                 the kernel socket in case userspace opens
 *                                 socket of same protocol
 *                               - remove all module support, since netlink is
 *                                 mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>

#include "af_netlink.h"


/* Per-protocol multicast listener bitmap.
 * masks[] is a flexible-array-style bitmap (one bit per multicast group,
 * NLGRPLONGS words) rebuilt by netlink_update_listeners(); the rcu head
 * allows the old bitmap to be freed after an RCU grace period when it is
 * replaced.
 */
struct listeners {
	
struct rcu_head		rcu;		/* for deferred free of a stale bitmap */
	
unsigned long		masks[0];	/* group-membership bits, [NLGRPLONGS(groups)] */
};

/* state bits */

#define NETLINK_S_CONGESTED		0x0	/* bit index in nlk->state: receive queue congested (see netlink_overrun/netlink_rcv_wake) */

/* flags */

#define NETLINK_F_KERNEL_SOCKET		0x1	/* socket belongs to the kernel side (tested by netlink_is_kernel()) */

#define NETLINK_F_RECV_PKTINFO		0x2	/* NOTE(review): presumably NETLINK_PKTINFO sockopt — setter not in this chunk */

#define NETLINK_F_BROADCAST_SEND_ERROR	0x4	/* NOTE(review): presumably report broadcast delivery errors — setter not in this chunk */

#define NETLINK_F_RECV_NO_ENOBUFS	0x8	/* suppress ENOBUFS on overrun (see netlink_overrun()) */

#define NETLINK_F_LISTEN_ALL_NSID	0x10	/* NOTE(review): presumably listen across net namespaces — setter not in this chunk */

#define NETLINK_F_CAP_ACK		0x20	/* NOTE(review): presumably trim payload from acks — setter not in this chunk */


static inline int netlink_is_kernel(struct sock *sk) { return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET; }

Contributors

PersonTokensPropCommitsCommitProp
denis v. lunevdenis v. lunev2195.45%150.00%
nicolas dichtelnicolas dichtel14.55%150.00%
Total22100.00%2100.00%

struct netlink_table *nl_table __read_mostly; EXPORT_SYMBOL_GPL(nl_table); static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static void netlink_skb_destructor(struct sk_buff *skb); /* nl_table locking explained: * Lookup and traversal are protected with an RCU read-side lock. Insertion * and removal are protected with per bucket lock while using RCU list * modification primitives and may run in parallel to RCU protected lookups. * Destruction of the Netlink socket may only occur *after* nl_table_lock has * been acquired * either during or after the socket has been removed from * the list and after an RCU grace period. */ DEFINE_RWLOCK(nl_table_lock); EXPORT_SYMBOL_GPL(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock)); static ATOMIC_NOTIFIER_HEAD(netlink_chain); static DEFINE_SPINLOCK(netlink_tap_lock); static struct list_head netlink_tap_all __read_mostly; static const struct rhashtable_params netlink_rhashtable_params;
static inline u32 netlink_group_mask(u32 group) { return group ? 1 << (group - 1) : 0; }

Contributors

PersonTokensPropCommitsCommitProp
patrick mchardypatrick mchardy2295.65%150.00%
stephen hemmingerstephen hemminger14.35%150.00%
Total23100.00%2100.00%


static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, gfp_t gfp_mask) { unsigned int len = skb_end_offset(skb); struct sk_buff *new; new = alloc_skb(len, gfp_mask); if (new == NULL) return NULL; NETLINK_CB(new).portid = NETLINK_CB(skb).portid; NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; NETLINK_CB(new).creds = NETLINK_CB(skb).creds; memcpy(skb_put(new, len), skb->data, len); return new; }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann110100.00%1100.00%
Total110100.00%1100.00%


int netlink_add_tap(struct netlink_tap *nt) { if (unlikely(nt->dev->type != ARPHRD_NETLINK)) return -EINVAL; spin_lock(&netlink_tap_lock); list_add_rcu(&nt->list, &netlink_tap_all); spin_unlock(&netlink_tap_lock); __module_get(nt->module); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann60100.00%1100.00%
Total60100.00%1100.00%

EXPORT_SYMBOL_GPL(netlink_add_tap);
static int __netlink_remove_tap(struct netlink_tap *nt) { bool found = false; struct netlink_tap *tmp; spin_lock(&netlink_tap_lock); list_for_each_entry(tmp, &netlink_tap_all, list) { if (nt == tmp) { list_del_rcu(&nt->list); found = true; goto out; } } pr_warn("__netlink_remove_tap: %p not found\n", nt); out: spin_unlock(&netlink_tap_lock); if (found) module_put(nt->module); return found ? 0 : -ENODEV; }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann9398.94%150.00%
stephen hemmingerstephen hemminger11.06%150.00%
Total94100.00%2100.00%


int netlink_remove_tap(struct netlink_tap *nt) { int ret; ret = __netlink_remove_tap(nt); synchronize_net(); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann26100.00%1100.00%
Total26100.00%1100.00%

EXPORT_SYMBOL_GPL(netlink_remove_tap);
static bool netlink_filter_tap(const struct sk_buff *skb) { struct sock *sk = skb->sk; /* We take the more conservative approach and * whitelist socket protocols that may pass. */ switch (sk->sk_protocol) { case NETLINK_ROUTE: case NETLINK_USERSOCK: case NETLINK_SOCK_DIAG: case NETLINK_NFLOG: case NETLINK_XFRM: case NETLINK_FIB_LOOKUP: case NETLINK_NETFILTER: case NETLINK_GENERIC: return true; } return false; }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann5795.00%150.00%
varka bhadramvarka bhadram35.00%150.00%
Total60100.00%2100.00%


static int __netlink_deliver_tap_skb(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *nskb; struct sock *sk = skb->sk; int ret = -ENOMEM; dev_hold(dev); if (is_vmalloc_addr(skb->head)) nskb = netlink_to_full_skb(skb, GFP_ATOMIC); else nskb = skb_clone(skb, GFP_ATOMIC); if (nskb) { nskb->dev = dev; nskb->protocol = htons((u16) sk->sk_protocol); nskb->pkt_type = netlink_is_kernel(sk) ? PACKET_KERNEL : PACKET_USER; skb_reset_network_header(nskb); ret = dev_queue_xmit(nskb); if (unlikely(ret > 0)) ret = net_xmit_errno(ret); } dev_put(dev); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann144100.00%5100.00%
Total144100.00%5100.00%


static void __netlink_deliver_tap(struct sk_buff *skb) { int ret; struct netlink_tap *tmp; if (!netlink_filter_tap(skb)) return; list_for_each_entry_rcu(tmp, &netlink_tap_all, list) { ret = __netlink_deliver_tap_skb(skb, tmp->dev); if (unlikely(ret)) break; } }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann57100.00%2100.00%
Total57100.00%2100.00%


static void netlink_deliver_tap(struct sk_buff *skb) { rcu_read_lock(); if (unlikely(!list_empty(&netlink_tap_all))) __netlink_deliver_tap(skb); rcu_read_unlock(); }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann34100.00%1100.00%
Total34100.00%1100.00%


static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src, struct sk_buff *skb) { if (!(netlink_is_kernel(dst) && netlink_is_kernel(src))) netlink_deliver_tap(skb); }

Contributors

PersonTokensPropCommitsCommitProp
daniel borkmanndaniel borkmann41100.00%1100.00%
Total41100.00%1100.00%


static void netlink_overrun(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) { if (!test_and_set_bit(NETLINK_S_CONGESTED, &nlk_sk(sk)->state)) { sk->sk_err = ENOBUFS; sk->sk_error_report(sk); } } atomic_inc(&sk->sk_drops); }

Contributors

PersonTokensPropCommitsCommitProp
patrick mchardypatrick mchardy7197.26%150.00%
nicolas dichtelnicolas dichtel22.74%150.00%
Total73100.00%2100.00%


static void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (skb_queue_empty(&sk->sk_receive_queue)) clear_bit(NETLINK_S_CONGESTED, &nlk->state); if (!test_bit(NETLINK_S_CONGESTED, &nlk->state)) wake_up_interruptible(&nlk->wait); }

Contributors

PersonTokensPropCommitsCommitProp
patrick mchardypatrick mchardy6096.77%150.00%
nicolas dichtelnicolas dichtel23.23%150.00%
Total62100.00%2100.00%


static void netlink_skb_destructor(struct sk_buff *skb) { if (is_vmalloc_addr(skb->head)) { if (!skb->cloned || !atomic_dec_return(&(skb_shinfo(skb)->dataref))) vfree(skb->head); skb->head = NULL; } if (skb->sk != NULL) sock_rfree(skb); }

Contributors

PersonTokensPropCommitsCommitProp
florian westphalflorian westphal5782.61%150.00%
patrick mchardypatrick mchardy1217.39%150.00%
Total69100.00%2100.00%


static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) { WARN_ON(skb->sk != NULL); skb->sk = sk; skb->destructor = netlink_skb_destructor; atomic_add(skb->truesize, &sk->sk_rmem_alloc); sk_mem_charge(sk, skb->truesize); }

Contributors

PersonTokensPropCommitsCommitProp
florian westphalflorian westphal4068.97%133.33%
patrick mchardypatrick mchardy1831.03%266.67%
Total58100.00%3100.00%


static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->cb_running) { if (nlk->cb.done) nlk->cb.done(&nlk->cb); module_put(nlk->cb.module); kfree_skb(nlk->cb.skb); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); return; } WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(nlk_sk(sk)->groups); }

Contributors

PersonTokensPropCommitsCommitProp
florian westphalflorian westphal4938.28%19.09%
pre-gitpre-git3728.91%218.18%
patrick mchardypatrick mchardy2922.66%327.27%
arnaldo carvalho de meloarnaldo carvalho de melo53.91%218.18%
ilpo jarvinenilpo jarvinen32.34%19.09%
david s. millerdavid s. miller32.34%19.09%
james morrisjames morris21.56%19.09%
Total128100.00%11100.00%

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. Exclusive sleep solves * this, _but_ remember, it adds useless work on UP machines. */
/* Take the table write lock and wait until every lockless reader that bumped
 * nl_table_users (via netlink_lock_table()) has drained. The lock is
 * dropped and re-taken around each schedule() so readers can finish; the
 * exclusive waitqueue entry avoids a thundering herd of writers (see the
 * comment above). May sleep; returns with nl_table_lock write-held.
 */
void netlink_table_grab(void) __acquires (nl_table_lock) { might_sleep(); write_lock_irq(&nl_table_lock); if (atomic_read(&nl_table_users)) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&nl_table_wait, &wait); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (atomic_read(&nl_table_users) == 0) break; write_unlock_irq(&nl_table_lock); schedule(); write_lock_irq(&nl_table_lock); } __set_current_state(TASK_RUNNING); remove_wait_queue(&nl_table_wait, &wait); } }

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git8889.80%457.14%
eric dumazeteric dumazet44.08%114.29%
arjan van de venarjan van de ven33.06%114.29%
johannes bergjohannes berg33.06%114.29%
Total98100.00%7100.00%


void netlink_table_ungrab(void) __releases (nl_table_lock) { write_unlock_irq(&nl_table_lock); wake_up(&nl_table_wait); }

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git1878.26%360.00%
eric dumazeteric dumazet417.39%120.00%
arjan van de venarjan van de ven14.35%120.00%
Total23100.00%5100.00%


static inline void netlink_lock_table(void) { /* read_lock() synchronizes us to netlink_table_grab */ read_lock(&nl_table_lock); atomic_inc(&nl_table_users); read_unlock(&nl_table_lock); }

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git2796.43%266.67%
patrick mchardypatrick mchardy13.57%133.33%
Total28100.00%3100.00%


static inline void netlink_unlock_table(void) { if (atomic_dec_and_test(&nl_table_users)) wake_up(&nl_table_wait); }

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git2295.65%266.67%
patrick mchardypatrick mchardy14.35%133.33%
Total23100.00%3100.00%

struct netlink_compare_arg { possible_net_t pnet; u32 portid; }; /* Doing sizeof directly may yield 4 extra bytes on 64-bit. */ #define netlink_compare_arg_len \ (offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
static inline int netlink_compare(struct rhashtable_compare_arg *arg, const void *ptr) { const struct netlink_compare_arg *x = arg->key; const struct netlink_sock *nlk = ptr; return nlk->portid != x->portid || !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet)); }

Contributors

PersonTokensPropCommitsCommitProp
herbert xuherbert xu2640.62%250.00%
thomas grafthomas graf2335.94%125.00%
gao fenggao feng1523.44%125.00%
Total64100.00%4100.00%


static void netlink_compare_arg_init(struct netlink_compare_arg *arg, struct net *net, u32 portid) { memset(arg, 0, sizeof(*arg)); write_pnet(&arg->pnet, net); arg->portid = portid; }

Contributors

PersonTokensPropCommitsCommitProp
herbert xuherbert xu48100.00%1100.00%
Total48100.00%1100.00%


static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid, struct net *net) { struct netlink_compare_arg arg; netlink_compare_arg_init(&arg, net, portid); return rhashtable_lookup_fast(&table->hash, &arg, netlink_rhashtable_params); }

Contributors

PersonTokensPropCommitsCommitProp
thomas grafthomas graf3367.35%250.00%
herbert xuherbert xu1224.49%125.00%
gao fenggao feng48.16%125.00%
Total49100.00%4100.00%


static int __netlink_insert(struct netlink_table *table, struct sock *sk) { struct netlink_compare_arg arg; netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid); return rhashtable_lookup_insert_key(&table->hash, &arg, &nlk_sk(sk)->node, netlink_rhashtable_params); }

Contributors

PersonTokensPropCommitsCommitProp
ying xueying xue4066.67%125.00%
herbert xuherbert xu2033.33%375.00%
Total60100.00%4100.00%


static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) { struct netlink_table *table = &nl_table[protocol]; struct sock *sk; rcu_read_lock(); sk = __netlink_lookup(table, portid, net); if (sk) sock_hold(sk); rcu_read_unlock(); return sk; }

Contributors

PersonTokensPropCommitsCommitProp
patrick mchardypatrick mchardy2132.81%112.50%
pre-gitpre-git1726.56%225.00%
thomas grafthomas graf914.06%112.50%
herbert xuherbert xu812.50%112.50%
gao fenggao feng46.25%112.50%
arnaldo carvalho de meloarnaldo carvalho de melo34.69%112.50%
eric w. biedermaneric w. biederman23.12%112.50%
Total64100.00%8100.00%

static const struct proto_ops netlink_ops;
static void netlink_update_listeners(struct sock *sk) { struct netlink_table *tbl = &nl_table[sk->sk_protocol]; unsigned long mask; unsigned int i; struct listeners *listeners; listeners = nl_deref_protected(tbl->listeners); if (!listeners) return; for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { mask = 0; sk_for_each_bound(sk, &tbl->mc_list) { if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) mask |= nlk_sk(sk)->groups[i]; } listeners->masks[i] = mask; } /* this function is only called with the netlink table "grabbed", which * makes sure updates are visible before bind or setsockopt return. */ }

Contributors

PersonTokensPropCommitsCommitProp
patrick mchardypatrick mchardy8167.50%125.00%
eric dumazeteric dumazet2218.33%250.00%
johannes bergjohannes berg1714.17%125.00%
Total120100.00%4100.00%


static int netlink_insert(struct sock *sk, u32 portid) { struct netlink_table *table = &nl_table[sk->sk_protocol]; int err; lock_sock(sk); err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; if (nlk_sk(sk)->bound) goto err; err = -ENOMEM; if (BITS_PER_LONG > 32 && unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX)) goto err; nlk_sk(sk)->portid = portid; sock_hold(sk); err = __netlink_insert(table, sk); if (err) { /* In case the hashtable backend returns with -EBUSY * from here, it must not escape to the caller. */ if (unlikely(err == -EBUSY)) err = -EOVERFLOW; if (err == -EEXIST) err = -EADDRINUSE; sock_put(sk); goto err; } /* We need to ensure that the socket is hashed and visible. */ smp_wmb(); nlk_sk(sk)->bound = portid; err: release_sock(sk); return err; }

Contributors

PersonTokensPropCommitsCommitProp
herbert xuherbert xu12569.44%650.00%
thomas grafthomas graf2413.33%216.67%
daniel borkmanndaniel borkmann168.89%18.33%
ying xueying xue105.56%18.33%
gao fenggao feng31.67%18.33%
eric w. biedermaneric w. biederman21.11%18.33%
Total180100.00%12100.00%


static void netlink_remove(struct sock *sk) { struct netlink_table *table; table = &nl_table[sk->sk_protocol]; if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node, netlink_rhashtable_params)) { WARN_ON(atomic_read(&sk->sk_refcnt) == 1); __sock_put(sk); } netlink_table_grab(); if (nlk_sk(sk)->subscriptions) { __sk_del_bind_node(sk); netlink_update_listeners(sk); } if (sk->sk_protocol == NETLINK_GENERIC) atomic_inc(&genl_sk_destructing_cnt); netlink_table_ungrab(); }

Contributors

PersonTokensPropCommitsCommitProp
thomas grafthomas graf4743.52%112.50%
herbert xuherbert xu3330.56%225.00%
johannes bergjohannes berg2119.44%225.00%
pre-gitpre-git65.56%225.00%
patrick mchardypatrick mchardy10.93%112.50%
Total108100.00%8100.00%

static struct proto netlink_proto = { .name = "NETLINK", .owner = THIS_MODULE, .obj_size = sizeof(struct netlink_sock), };
static int __netlink_create(struct net *net, struct socket *sock, struct mutex *cb_mutex, int protocol, int kern) { struct sock *sk; struct netlink_sock *nlk; sock->ops = &netlink_ops; sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern); if (!sk) return -ENOMEM; sock_init_data(sock, sk); nlk = nlk_sk(sk); if (cb_mutex) { nlk->cb_mutex = cb_mutex; } else { nlk->cb_mutex = &nlk->cb_def_mutex; mutex_init(nlk->cb_mutex); } init_waitqueue_head(&nlk->wait); sk->sk_destruct = netlink_sock_destruct; sk->sk_protocol = protocol; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
patrick mchardypatrick mchardy10173.72%333.33%
pre-gitpre-git1712.41%111.11%
eric w. biedermaneric w. biederman128.76%222.22%
david s. millerdavid s. miller42.92%111.11%
eric dumazeteric dumazet21.46%111.11%
arnaldo carvalho de meloarnaldo carvalho de melo10.73%111.11%
Total137100.00%9100.00%


static int netlink_create(struct net *net, struct socket *sock, int protocol, int kern) { struct module *module = NULL; struct mutex *cb_mutex; struct netlink_sock *nlk; int (*bind)(struct net *net, int group); void (*unbind)(struct net *net, int group); int err = 0; sock->state = SS_UNCONNECTED; if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; if (protocol < 0 || protocol >= MAX_LINKS) return -EPROTONOSUPPORT; netlink_lock_table(); #ifdef CONFIG_MODULES if (!nl_table[protocol].registered) { netlink_unlock_table(); request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol); netlink_lock_table(); } #endif if (nl_table[protocol].registered && try_module_get(nl_table[protocol].module)) module = nl_table[protocol].module; else err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; bind = nl_table[protocol].bind; unbind = nl_table[protocol].unbind; netlink_unlock_table(); if (err < 0) goto out; err = __netlink_create(net, sock, cb_mutex, protocol, kern); if (err < 0) goto out_module; local_bh_disable(); sock_prot_inuse_add(net, &netlink_proto, 1); local_bh_enable(); nlk = nlk_sk(sock->sk); nlk->module = module; nlk->netlink_bind = bind; nlk->netlink_unbind = unbind; out: return err; out_module: module_put(module); goto out; }

Contributors

PersonTokensPropCommitsCommitProp
patrick mchardypatrick mchardy11538.08%526.32%
pre-gitpre-git5016.56%210.53%
harald welteharald welte3210.60%15.26%
richard guy briggsrichard guy briggs268.61%15.26%
pablo neira ayusopablo neira ayuso247.95%15.26%
alexey dobriyanalexey dobriyan154.97%15.26%
johannes bergjohannes berg113.64%210.53%
eric dumazeteric dumazet103.31%15.26%
eric w. biedermaneric w. biederman92.98%210.53%
david s. millerdavid s. miller61.99%15.26%
eric pariseric paris30.99%15.26%
arnaldo carvalho de meloarnaldo carvalho de melo10.33%15.26%
Total302100.00%19100.00%


static void deferred_put_nlk_sk(struct rcu_head *head) { struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu); sock_put(&nlk->sk); }

Contributors

PersonTokensPropCommitsCommitProp
thomas grafthomas graf34100.00%1100.00%
Total34100.00%1100.00%


static int netlink_release(struct socket *sock) { struct sock *sk = sock->sk; struct netlink_sock *nlk; if (!sk) return 0; netlink_remove(sk); sock_orphan(sk); nlk = nlk_sk(sk); /* * OK. Socket is unlinked, any packets that arrive now * will be purged. */ /* must not acquire netlink_table_lock in any way again before unbind * and notifying genetlink is done as otherwise it might deadlock */ if (nlk->netlink_unbind) { int i; for (i = 0; i < nlk->ngroups; i++) if (test_bit(i, nlk->groups)) nlk->netlink_unbind(sock_net(sk), i + 1); } if (sk->sk_protocol == NETLINK_GENERIC && atomic_dec_return(&genl_sk_destructing_cnt) == 0) wake_up(