Release 4.11 net/netlink/af_netlink.c
/*
* NETLINK Kernel-user communication protocol.
*
* Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
* Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
* added netlink_proto_exit
* Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
* use nlk_sk, as sk->protinfo is on a diet 8)
* Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org>
* - inc module use count of module that owns
* the kernel socket in case userspace opens
* socket of same protocol
* - remove all module support, since netlink is
* mandatory if CONFIG_NET=y these days
*/
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#include "af_netlink.h"
struct listeners {
struct rcu_head rcu;
unsigned long masks[0];
};
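The zero-length masks[] member makes struct listeners a variable-size object: one bit per multicast group is allocated right after the rcu head. A minimal sizing sketch, assuming the NLGRPSZ()/NLGRPLONGS() helpers from af_netlink.h and mirroring the allocation pattern used by netlink_kernel_create() later in this file:
/* Sketch: allocate a listeners bitmap for 'groups' multicast groups.
 * NLGRPSZ(groups) is the bitmap size in bytes, rounded up to whole longs.
 */
struct listeners *listeners;

listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
	return -ENOMEM;
/* masks[0 .. NLGRPLONGS(groups) - 1] may now be indexed safely. */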
/* state bits */
#define NETLINK_S_CONGESTED 0x0
/* flags */
#define NETLINK_F_KERNEL_SOCKET 0x1
#define NETLINK_F_RECV_PKTINFO 0x2
#define NETLINK_F_BROADCAST_SEND_ERROR 0x4
#define NETLINK_F_RECV_NO_ENOBUFS 0x8
#define NETLINK_F_LISTEN_ALL_NSID 0x10
#define NETLINK_F_CAP_ACK 0x20
static inline int netlink_is_kernel(struct sock *sk)
{
return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denis V. Lunev | 21 | 95.45% | 1 | 50.00% |
Nicolas Dichtel | 1 | 4.55% | 1 | 50.00% |
Total | 22 | 100.00% | 2 | 100.00% |
struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL(nl_table);
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
"nlk_cb_mutex-ROUTE",
"nlk_cb_mutex-1",
"nlk_cb_mutex-USERSOCK",
"nlk_cb_mutex-FIREWALL",
"nlk_cb_mutex-SOCK_DIAG",
"nlk_cb_mutex-NFLOG",
"nlk_cb_mutex-XFRM",
"nlk_cb_mutex-SELINUX",
"nlk_cb_mutex-ISCSI",
"nlk_cb_mutex-AUDIT",
"nlk_cb_mutex-FIB_LOOKUP",
"nlk_cb_mutex-CONNECTOR",
"nlk_cb_mutex-NETFILTER",
"nlk_cb_mutex-IP6_FW",
"nlk_cb_mutex-DNRTMSG",
"nlk_cb_mutex-KOBJECT_UEVENT",
"nlk_cb_mutex-GENERIC",
"nlk_cb_mutex-17",
"nlk_cb_mutex-SCSITRANSPORT",
"nlk_cb_mutex-ECRYPTFS",
"nlk_cb_mutex-RDMA",
"nlk_cb_mutex-CRYPTO",
"nlk_cb_mutex-SMC",
"nlk_cb_mutex-23",
"nlk_cb_mutex-24",
"nlk_cb_mutex-25",
"nlk_cb_mutex-26",
"nlk_cb_mutex-27",
"nlk_cb_mutex-28",
"nlk_cb_mutex-29",
"nlk_cb_mutex-30",
"nlk_cb_mutex-31",
"nlk_cb_mutex-MAX_LINKS"
};
static int netlink_dump(struct sock *sk);
static void netlink_skb_destructor(struct sk_buff *skb);
/* nl_table locking explained:
* Lookup and traversal are protected with an RCU read-side lock. Insertion
* and removal are protected with per bucket lock while using RCU list
* modification primitives and may run in parallel to RCU protected lookups.
* Destruction of the Netlink socket may only occur *after* nl_table_lock has
* been acquired, either during or after the socket has been removed from
* the list, and after an RCU grace period.
*/
DEFINE_RWLOCK(nl_table_lock);
EXPORT_SYMBOL_GPL(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);
#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock))
static BLOCKING_NOTIFIER_HEAD(netlink_chain);
static DEFINE_SPINLOCK(netlink_tap_lock);
static struct list_head netlink_tap_all __read_mostly;
static const struct rhashtable_params netlink_rhashtable_params;
static inline u32 netlink_group_mask(u32 group)
{
return group ? 1 << (group - 1) : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 22 | 95.65% | 1 | 50.00% |
Stephen Hemminger | 1 | 4.35% | 1 | 50.00% |
Total | 23 | 100.00% | 2 | 100.00% |
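Group numbers are 1-based on the wire while the bitmaps are 0-based; a few worked values make the mapping concrete:
/* Worked examples (illustrative):
 * netlink_group_mask(0) == 0x0   -- no group
 * netlink_group_mask(1) == 0x1   -- group 1 maps to bit 0
 * netlink_group_mask(5) == 0x10  -- group 5 maps to bit 4
 */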
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
gfp_t gfp_mask)
{
unsigned int len = skb_end_offset(skb);
struct sk_buff *new;
new = alloc_skb(len, gfp_mask);
if (new == NULL)
return NULL;
NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
memcpy(skb_put(new, len), skb->data, len);
return new;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 110 | 100.00% | 1 | 100.00% |
Total | 110 | 100.00% | 1 | 100.00% |
int netlink_add_tap(struct netlink_tap *nt)
{
if (unlikely(nt->dev->type != ARPHRD_NETLINK))
return -EINVAL;
spin_lock(&netlink_tap_lock);
list_add_rcu(&nt->list, &netlink_tap_all);
spin_unlock(&netlink_tap_lock);
__module_get(nt->module);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 60 | 100.00% | 1 | 100.00% |
Total | 60 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(netlink_add_tap);
static int __netlink_remove_tap(struct netlink_tap *nt)
{
bool found = false;
struct netlink_tap *tmp;
spin_lock(&netlink_tap_lock);
list_for_each_entry(tmp, &netlink_tap_all, list) {
if (nt == tmp) {
list_del_rcu(&nt->list);
found = true;
goto out;
}
}
pr_warn("__netlink_remove_tap: %p not found\n", nt);
out:
spin_unlock(&netlink_tap_lock);
if (found)
module_put(nt->module);
return found ? 0 : -ENODEV;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 93 | 98.94% | 1 | 50.00% |
Stephen Hemminger | 1 | 1.06% | 1 | 50.00% |
Total | 94 | 100.00% | 2 | 100.00% |
int netlink_remove_tap(struct netlink_tap *nt)
{
int ret;
ret = __netlink_remove_tap(nt);
synchronize_net();
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(netlink_remove_tap);
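A tap owner supplies a live net_device of type ARPHRD_NETLINK and pins its own module for as long as packets may be delivered. A registration sketch modeled loosely on the in-tree nlmon driver; my_tap_dev and the init/exit names are hypothetical:
static struct netlink_tap my_tap = {
	.module = THIS_MODULE, /* released again by netlink_remove_tap() */
};

static int __init my_tap_init(void)
{
	my_tap.dev = my_tap_dev;         /* hypothetical ARPHRD_NETLINK device */
	return netlink_add_tap(&my_tap); /* -EINVAL for any other dev type */
}

static void __exit my_tap_exit(void)
{
	netlink_remove_tap(&my_tap);     /* synchronize_net() runs inside */
}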
static bool netlink_filter_tap(const struct sk_buff *skb)
{
struct sock *sk = skb->sk;
/* We take the more conservative approach and
* whitelist socket protocols that may pass.
*/
switch (sk->sk_protocol) {
case NETLINK_ROUTE:
case NETLINK_USERSOCK:
case NETLINK_SOCK_DIAG:
case NETLINK_NFLOG:
case NETLINK_XFRM:
case NETLINK_FIB_LOOKUP:
case NETLINK_NETFILTER:
case NETLINK_GENERIC:
return true;
}
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 57 | 95.00% | 1 | 50.00% |
Varka Bhadram | 3 | 5.00% | 1 | 50.00% |
Total | 60 | 100.00% | 2 | 100.00% |
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
struct net_device *dev)
{
struct sk_buff *nskb;
struct sock *sk = skb->sk;
int ret = -ENOMEM;
dev_hold(dev);
if (is_vmalloc_addr(skb->head))
nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
else
nskb = skb_clone(skb, GFP_ATOMIC);
if (nskb) {
nskb->dev = dev;
nskb->protocol = htons((u16) sk->sk_protocol);
nskb->pkt_type = netlink_is_kernel(sk) ?
PACKET_KERNEL : PACKET_USER;
skb_reset_network_header(nskb);
ret = dev_queue_xmit(nskb);
if (unlikely(ret > 0))
ret = net_xmit_errno(ret);
}
dev_put(dev);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 144 | 100.00% | 5 | 100.00% |
Total | 144 | 100.00% | 5 | 100.00% |
static void __netlink_deliver_tap(struct sk_buff *skb)
{
int ret;
struct netlink_tap *tmp;
if (!netlink_filter_tap(skb))
return;
list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
ret = __netlink_deliver_tap_skb(skb, tmp->dev);
if (unlikely(ret))
break;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 57 | 100.00% | 2 | 100.00% |
Total | 57 | 100.00% | 2 | 100.00% |
static void netlink_deliver_tap(struct sk_buff *skb)
{
rcu_read_lock();
if (unlikely(!list_empty(&netlink_tap_all)))
__netlink_deliver_tap(skb);
rcu_read_unlock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
struct sk_buff *skb)
{
if (!(netlink_is_kernel(dst) && netlink_is_kernel(src)))
netlink_deliver_tap(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Daniel Borkmann | 41 | 100.00% | 1 | 100.00% |
Total | 41 | 100.00% | 1 | 100.00% |
static void netlink_overrun(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (!(nlk->flags & NETLINK_F_RECV_NO_ENOBUFS)) {
if (!test_and_set_bit(NETLINK_S_CONGESTED,
&nlk_sk(sk)->state)) {
sk->sk_err = ENOBUFS;
sk->sk_error_report(sk);
}
}
atomic_inc(&sk->sk_drops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 71 | 97.26% | 1 | 50.00% |
Nicolas Dichtel | 2 | 2.74% | 1 | 50.00% |
Total | 73 | 100.00% | 2 | 100.00% |
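The NETLINK_F_RECV_NO_ENOBUFS flag tested here is set via the NETLINK_NO_ENOBUFS socket option; a userspace listener that prefers silent drops over ENOBUFS errors enables it like this (sketch, assuming <sys/socket.h> and <linux/netlink.h>):
/* Userspace sketch: opt out of ENOBUFS reports on receive overrun. */
int on = 1;

if (setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &on, sizeof(on)) < 0)
	perror("setsockopt(NETLINK_NO_ENOBUFS)");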
static void netlink_rcv_wake(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (skb_queue_empty(&sk->sk_receive_queue))
clear_bit(NETLINK_S_CONGESTED, &nlk->state);
if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 60 | 96.77% | 1 | 50.00% |
Nicolas Dichtel | 2 | 3.23% | 1 | 50.00% |
Total | 62 | 100.00% | 2 | 100.00% |
static void netlink_skb_destructor(struct sk_buff *skb)
{
if (is_vmalloc_addr(skb->head)) {
if (!skb->cloned ||
!atomic_dec_return(&(skb_shinfo(skb)->dataref)))
vfree(skb->head);
skb->head = NULL;
}
if (skb->sk != NULL)
sock_rfree(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 57 | 82.61% | 1 | 50.00% |
Patrick McHardy | 12 | 17.39% | 1 | 50.00% |
Total | 69 | 100.00% | 2 | 100.00% |
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
WARN_ON(skb->sk != NULL);
skb->sk = sk;
skb->destructor = netlink_skb_destructor;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 40 | 68.97% | 1 | 33.33% |
Patrick McHardy | 18 | 31.03% | 2 | 66.67% |
Total | 58 | 100.00% | 3 | 100.00% |
static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
if (nlk->cb_running) {
if (nlk->cb.done)
nlk->cb.done(&nlk->cb);
module_put(nlk->cb.module);
kfree_skb(nlk->cb.skb);
}
skb_queue_purge(&sk->sk_receive_queue);
if (!sock_flag(sk, SOCK_DEAD)) {
printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(nlk_sk(sk)->groups);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 37 | 28.91% | 2 | 16.67% |
Florian Westphal | 32 | 25.00% | 1 | 8.33% |
Patrick McHardy | 25 | 19.53% | 3 | 25.00% |
Herbert Xu | 21 | 16.41% | 1 | 8.33% |
Arnaldo Carvalho de Melo | 5 | 3.91% | 2 | 16.67% |
David S. Miller | 3 | 2.34% | 1 | 8.33% |
Ilpo Järvinen | 3 | 2.34% | 1 | 8.33% |
James Morris | 2 | 1.56% | 1 | 8.33% |
Total | 128 | 100.00% | 12 | 100.00% |
static void netlink_sock_destruct_work(struct work_struct *work)
{
struct netlink_sock *nlk = container_of(work, struct netlink_sock,
work);
sk_free(&nlk->sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 34 | 100.00% | 2 | 100.00% |
Total | 34 | 100.00% | 2 | 100.00% |
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP, but it is _very_ bad on
* SMP: when several writers sleep and a reader wakes them up, all but one
* immediately hit the write lock and grab all the CPUs. An exclusive sleep
* solves this, _but_ remember that it adds useless work on UP machines.
*/
void netlink_table_grab(void)
__acquires(nl_table_lock)
{
might_sleep();
write_lock_irq(&nl_table_lock);
if (atomic_read(&nl_table_users)) {
DECLARE_WAITQUEUE(wait, current);
add_wait_queue_exclusive(&nl_table_wait, &wait);
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&nl_table_users) == 0)
break;
write_unlock_irq(&nl_table_lock);
schedule();
write_lock_irq(&nl_table_lock);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nl_table_wait, &wait);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 88 | 89.80% | 4 | 57.14% |
Eric Dumazet | 4 | 4.08% | 1 | 14.29% |
Arjan van de Ven | 3 | 3.06% | 1 | 14.29% |
Johannes Berg | 3 | 3.06% | 1 | 14.29% |
Total | 98 | 100.00% | 7 | 100.00% |
void netlink_table_ungrab(void)
__releases(nl_table_lock)
{
write_unlock_irq(&nl_table_lock);
wake_up(&nl_table_wait);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 18 | 78.26% | 3 | 60.00% |
Eric Dumazet | 4 | 17.39% | 1 | 20.00% |
Arjan van de Ven | 1 | 4.35% | 1 | 20.00% |
Total | 23 | 100.00% | 5 | 100.00% |
static inline void
netlink_lock_table(void)
{
/* read_lock() synchronizes us to netlink_table_grab */
read_lock(&nl_table_lock);
atomic_inc(&nl_table_users);
read_unlock(&nl_table_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 27 | 96.43% | 2 | 66.67% |
Patrick McHardy | 1 | 3.57% | 1 | 33.33% |
Total | 28 | 100.00% | 3 | 100.00% |
static inline void
netlink_unlock_table(void)
{
if (atomic_dec_and_test(&nl_table_users))
wake_up(&nl_table_wait);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 22 | 95.65% | 2 | 66.67% |
Patrick McHardy | 1 | 4.35% | 1 | 33.33% |
Total | 23 | 100.00% | 3 | 100.00% |
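Taken together, the four helpers form a two-level scheme: netlink_lock_table()/netlink_unlock_table() take a cheap shared reference in nl_table_users, while netlink_table_grab()/netlink_table_ungrab() wait out all such readers before a mutation. Usage sketch:
/* Reader side: pin the tables while consulting nl_table[]. */
netlink_lock_table();
/* ... read nl_table[protocol].registered, .module, ... */
netlink_unlock_table();

/* Writer side: exclude all readers while mutating. */
netlink_table_grab();
/* ... update mc_list, listeners, groups, ... */
netlink_table_ungrab();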
struct netlink_compare_arg
{
possible_net_t pnet;
u32 portid;
};
/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
#define netlink_compare_arg_len \
(offsetof(struct netlink_compare_arg, portid) + sizeof(u32))
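Concretely, on a 64-bit build with CONFIG_NET_NS the key is 12 bytes while the struct is padded to 16; stopping the rhashtable key length at the last member keeps tail padding out of the hash. An illustrative layout, under those assumptions:
/* Illustrative 64-bit layout with CONFIG_NET_NS:
 *   pnet   at offset 0, 8 bytes (struct net *)
 *   portid at offset 8, 4 bytes (u32)
 *   netlink_compare_arg_len            = 8 + 4 = 12
 *   sizeof(struct netlink_compare_arg) = 16 (4 bytes of tail padding)
 */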
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
const void *ptr)
{
const struct netlink_compare_arg *x = arg->key;
const struct netlink_sock *nlk = ptr;
return nlk->portid != x->portid ||
!net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 26 | 40.62% | 2 | 50.00% |
Thomas Graf | 23 | 35.94% | 1 | 25.00% |
Gao Feng | 15 | 23.44% | 1 | 25.00% |
Total | 64 | 100.00% | 4 | 100.00% |
static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
struct net *net, u32 portid)
{
memset(arg, 0, sizeof(*arg));
write_pnet(&arg->pnet, net);
arg->portid = portid;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 48 | 100.00% | 1 | 100.00% |
Total | 48 | 100.00% | 1 | 100.00% |
static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
struct net *net)
{
struct netlink_compare_arg arg;
netlink_compare_arg_init(&arg, net, portid);
return rhashtable_lookup_fast(&table->hash, &arg,
netlink_rhashtable_params);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 33 | 67.35% | 2 | 50.00% |
Herbert Xu | 12 | 24.49% | 1 | 25.00% |
Gao Feng | 4 | 8.16% | 1 | 25.00% |
Total | 49 | 100.00% | 4 | 100.00% |
static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{
struct netlink_compare_arg arg;
netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
return rhashtable_lookup_insert_key(&table->hash, &arg,
&nlk_sk(sk)->node,
netlink_rhashtable_params);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ying Xue | 40 | 66.67% | 1 | 25.00% |
Herbert Xu | 20 | 33.33% | 3 | 75.00% |
Total | 60 | 100.00% | 4 | 100.00% |
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{
struct netlink_table *table = &nl_table[protocol];
struct sock *sk;
rcu_read_lock();
sk = __netlink_lookup(table, portid, net);
if (sk)
sock_hold(sk);
rcu_read_unlock();
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 21 | 32.81% | 1 | 12.50% |
Linus Torvalds (pre-git) | 17 | 26.56% | 2 | 25.00% |
Thomas Graf | 9 | 14.06% | 1 | 12.50% |
Herbert Xu | 8 | 12.50% | 1 | 12.50% |
Gao Feng | 4 | 6.25% | 1 | 12.50% |
Arnaldo Carvalho de Melo | 3 | 4.69% | 1 | 12.50% |
Eric W. Biedermann | 2 | 3.12% | 1 | 12.50% |
Total | 64 | 100.00% | 8 | 100.00% |
static const struct proto_ops netlink_ops;
static void
netlink_update_listeners(struct sock *sk)
{
struct netlink_table *tbl = &nl_table[sk->sk_protocol];
unsigned long mask;
unsigned int i;
struct listeners *listeners;
listeners = nl_deref_protected(tbl->listeners);
if (!listeners)
return;
for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
mask = 0;
sk_for_each_bound(sk, &tbl->mc_list) {
if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
mask |= nlk_sk(sk)->groups[i];
}
listeners->masks[i] = mask;
}
/* this function is only called with the netlink table "grabbed", which
* makes sure updates are visible before bind or setsockopt return. */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 81 | 67.50% | 1 | 25.00% |
Eric Dumazet | 22 | 18.33% | 2 | 50.00% |
Johannes Berg | 17 | 14.17% | 1 | 25.00% |
Total | 120 | 100.00% | 4 | 100.00% |
static int netlink_insert(struct sock *sk, u32 portid)
{
struct netlink_table *table = &nl_table[sk->sk_protocol];
int err;
lock_sock(sk);
err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
if (nlk_sk(sk)->bound)
goto err;
err = -ENOMEM;
if (BITS_PER_LONG > 32 &&
unlikely(atomic_read(&table->hash.nelems) >= UINT_MAX))
goto err;
nlk_sk(sk)->portid = portid;
sock_hold(sk);
err = __netlink_insert(table, sk);
if (err) {
/* In case the hashtable backend returns with -EBUSY
* from here, it must not escape to the caller.
*/
if (unlikely(err == -EBUSY))
err = -EOVERFLOW;
if (err == -EEXIST)
err = -EADDRINUSE;
sock_put(sk);
goto err;
}
/* We need to ensure that the socket is hashed and visible. */
smp_wmb();
nlk_sk(sk)->bound = portid;
err:
release_sock(sk);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 125 | 69.44% | 6 | 50.00% |
Thomas Graf | 24 | 13.33% | 2 | 16.67% |
Daniel Borkmann | 16 | 8.89% | 1 | 8.33% |
Ying Xue | 10 | 5.56% | 1 | 8.33% |
Gao Feng | 3 | 1.67% | 1 | 8.33% |
Eric W. Biedermann | 2 | 1.11% | 1 | 8.33% |
Total | 180 | 100.00% | 12 | 100.00% |
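The smp_wmb() before publishing 'bound' pairs with an smp_rmb() on the read side: a reader that observes a non-zero 'bound' may then rely on 'portid' being current. A reader-side sketch of the pairing, as done by the bind path in this file:
/* Reader side (cf. netlink_bind()): */
bool bound = nlk->bound;

if (bound) {
	/* Ensure nlk->portid is up-to-date once 'bound' is seen. */
	smp_rmb();
	if (nladdr->nl_pid != nlk->portid)
		return -EINVAL;
}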
static void netlink_remove(struct sock *sk)
{
struct netlink_table *table;
table = &nl_table[sk->sk_protocol];
if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
netlink_rhashtable_params)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
netlink_table_grab();
if (nlk_sk(sk)->subscriptions) {
__sk_del_bind_node(sk);
netlink_update_listeners(sk);
}
if (sk->sk_protocol == NETLINK_GENERIC)
atomic_inc(&genl_sk_destructing_cnt);
netlink_table_ungrab();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 47 | 43.52% | 1 | 12.50% |
Herbert Xu | 33 | 30.56% | 2 | 25.00% |
Johannes Berg | 21 | 19.44% | 2 | 25.00% |
Linus Torvalds (pre-git) | 6 | 5.56% | 2 | 25.00% |
Patrick McHardy | 1 | 0.93% | 1 | 12.50% |
Total | 108 | 100.00% | 8 | 100.00% |
static struct proto netlink_proto = {
.name = "NETLINK",
.owner = THIS_MODULE,
.obj_size = sizeof(struct netlink_sock),
};
static int __netlink_create(struct net *net, struct socket *sock,
struct mutex *cb_mutex, int protocol,
int kern)
{
struct sock *sk;
struct netlink_sock *nlk;
sock->ops = &netlink_ops;
sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
nlk = nlk_sk(sk);
if (cb_mutex) {
nlk->cb_mutex = cb_mutex;
} else {
nlk->cb_mutex = &nlk->cb_def_mutex;
mutex_init(nlk->cb_mutex);
lockdep_set_class_and_name(nlk->cb_mutex,
nlk_cb_mutex_keys + protocol,
nlk_cb_mutex_key_strings[protocol]);
}
init_waitqueue_head(&nlk->wait);
sk->sk_destruct = netlink_sock_destruct;
sk->sk_protocol = protocol;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 101 | 66.01% | 3 | 30.00% |
Linus Torvalds (pre-git) | 17 | 11.11% | 1 | 10.00% |
Herbert Xu | 16 | 10.46% | 1 | 10.00% |
Eric W. Biedermann | 12 | 7.84% | 2 | 20.00% |
David S. Miller | 4 | 2.61% | 1 | 10.00% |
Eric Dumazet | 2 | 1.31% | 1 | 10.00% |
Arnaldo Carvalho de Melo | 1 | 0.65% | 1 | 10.00% |
Total | 153 | 100.00% | 10 | 100.00% |
static int netlink_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
int (*bind)(struct net *net, int group);
void (*unbind)(struct net *net, int group);
int err = 0;
sock->state = SS_UNCONNECTED;
if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
if (protocol < 0 || protocol >= MAX_LINKS)
return -EPROTONOSUPPORT;
netlink_lock_table();
#ifdef CONFIG_MODULES
if (!nl_table[protocol].registered) {
netlink_unlock_table();
request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
netlink_lock_table();
}
#endif
if (nl_table[protocol].registered &&
try_module_get(nl_table[protocol].module))
module = nl_table[protocol].module;
else
err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
bind = nl_table[protocol].bind;
unbind = nl_table[protocol].unbind;
netlink_unlock_table();
if (err < 0)
goto out;
err = __netlink_create(net, sock, cb_mutex, protocol, kern);
if (err < 0)
goto out_module;
local_bh_disable();
sock_prot_inuse_add(net, &netlink_proto, 1);
local_bh_enable();
nlk = nlk_sk(sock->sk);
nlk->module = module;
nlk->netlink_bind = bind;
nlk->netlink_unbind = unbind;
out:
return err;
out_module:
module_put(module);
goto out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 115 | 38.08% | 5 | 26.32% |
Linus Torvalds (pre-git) | 50 | 16.56% | 2 | 10.53% |
Harald Welte | 32 | 10.60% | 1 | 5.26% |
Richard Guy Briggs | 26 | 8.61% | 1 | 5.26% |
Pablo Neira Ayuso | 24 | 7.95% | 1 | 5.26% |
Alexey Dobriyan | 15 | 4.97% | 1 | 5.26% |
Johannes Berg | 11 | 3.64% | 2 | 10.53% |
Eric Dumazet | 10 | 3.31% | 1 | 5.26% |
Eric W. Biedermann | 9 | 2.98% | 2 | 10.53% |
David S. Miller | 6 | 1.99% | 1 | 5.26% |
Eric Paris | 3 | 0.99% | 1 | 5.26% |
Arnaldo Carvalho de Melo | 1 | 0.33% | 1 | 5.26% |
Total | 302 | 100.00% | 19 | 100.00% |
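netlink_create() backs userspace socket(AF_NETLINK, ...) calls; kernel-side endpoints are created with netlink_kernel_create() from <linux/netlink.h>, which marks the socket NETLINK_F_KERNEL_SOCKET. A minimal sketch; my_nl_rcv, my_proto_init, and the choice of NETLINK_USERSOCK are hypothetical:
static struct sock *my_nl_sk;

static void my_nl_rcv(struct sk_buff *skb)
{
	/* called for each skb unicast to the kernel socket */
}

static int __init my_proto_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.input = my_nl_rcv,
	};

	my_nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
	return my_nl_sk ? 0 : -ENOMEM;
}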
static void deferred_put_nlk_sk(struct rcu_head *head)
{
struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
struct sock *sk = &nlk->sk;
if (!atomic_dec_and_test(&sk->sk_refcnt))
return;
if (nlk->cb_running && nlk->cb.done) {
INIT_WORK(&nlk->work, netlink_sock_destruct_work);
schedule_work(&nlk->work);
return;
}
sk_free(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
|