Release 4.11 net/netrom/af_netrom.c
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
* Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
* Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/stat.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/termios.h> /* For TIOCINQ/OUTQ */
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/netrom.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/arp.h>
#include <linux/init.h>
static int nr_ndevs = 4;
int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL;
int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS;
int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL;
int sysctl_netrom_transport_timeout = NR_DEFAULT_T1;
int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2;
int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2;
int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4;
int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW;
int sysctl_netrom_transport_no_activity_timeout = NR_DEFAULT_IDLE;
int sysctl_netrom_routing_control = NR_DEFAULT_ROUTING;
int sysctl_netrom_link_fails_count = NR_DEFAULT_FAILS;
int sysctl_netrom_reset_circuit = NR_DEFAULT_RESET;
static unsigned short circuit = 0x101;
static HLIST_HEAD(nr_list);
static DEFINE_SPINLOCK(nr_list_lock);
static const struct proto_ops nr_proto_ops;
/*
* NETROM network devices are virtual network devices encapsulating NETROM
* frames into AX.25 which will be sent through an AX.25 device, so form a
* special "super class" of normal net devices; split their locks off into a
* separate class since they always nest.
*/
static struct lock_class_key nr_netdev_xmit_lock_key;
static struct lock_class_key nr_netdev_addr_lock_key;
static void nr_set_lockdep_one(struct net_device *dev,
struct netdev_queue *txq,
void *_unused)
{
lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 31 | 100.00% | 2 | 100.00% |
Total | 31 | 100.00% | 2 | 100.00% |
static void nr_set_lockdep_key(struct net_device *dev)
{
lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 31 | 100.00% | 3 | 100.00% |
Total | 31 | 100.00% | 3 | 100.00% |
/*
* Socket removal during an interrupt is now safe.
*/
static void nr_remove_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_del_node_init(sk);
spin_unlock_bh(&nr_list_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 21 | 75.00% | 2 | 40.00% |
Ralf Bächle | 6 | 21.43% | 2 | 40.00% |
Arnaldo Carvalho de Melo | 1 | 3.57% | 1 | 20.00% |
Total | 28 | 100.00% | 5 | 100.00% |
/*
* Kill all bound sockets on a dropped device.
*/
static void nr_kill_by_device(struct net_device *dev)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (nr_sk(s)->device == dev)
nr_disconnect(s, ENETUNREACH);
spin_unlock_bh(&nr_list_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 31 | 59.62% | 3 | 42.86% |
Ralf Bächle | 12 | 23.08% | 2 | 28.57% |
Arnaldo Carvalho de Melo | 6 | 11.54% | 1 | 14.29% |
David S. Miller | 3 | 5.77% | 1 | 14.29% |
Total | 52 | 100.00% | 7 | 100.00% |
/*
* Handle device status changes.
*/
static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (event != NETDEV_DOWN)
return NOTIFY_DONE;
nr_kill_by_device(dev);
nr_rt_device_down(dev);
return NOTIFY_DONE;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 48 | 70.59% | 6 | 60.00% |
Eric W. Biedermann | 9 | 13.24% | 1 | 10.00% |
Hideaki Yoshifuji / 吉藤英明 | 8 | 11.76% | 2 | 20.00% |
Jiri Pirko | 3 | 4.41% | 1 | 10.00% |
Total | 68 | 100.00% | 10 | 100.00% |
/*
* Add a socket to the bound sockets list.
*/
static void nr_insert_socket(struct sock *sk)
{
spin_lock_bh(&nr_list_lock);
sk_add_node(sk, &nr_list);
spin_unlock_bh(&nr_list_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 20 | 64.52% | 2 | 40.00% |
Ralf Bächle | 6 | 19.35% | 2 | 40.00% |
Arnaldo Carvalho de Melo | 5 | 16.13% | 1 | 20.00% |
Total | 31 | 100.00% | 5 | 100.00% |
/*
* Find a socket that wants to accept the Connect Request we just
* received.
*/
static struct sock *nr_find_listener(ax25_address *addr)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
bh_lock_sock(s);
goto found;
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 43 | 56.58% | 2 | 25.00% |
Arnaldo Carvalho de Melo | 16 | 21.05% | 2 | 25.00% |
Jeroen Vreeken | 7 | 9.21% | 1 | 12.50% |
Ralf Bächle | 6 | 7.89% | 2 | 25.00% |
David S. Miller | 4 | 5.26% | 1 | 12.50% |
Total | 76 | 100.00% | 8 | 100.00% |
/*
* Find a connected NET/ROM socket given my circuit IDs.
*/
static struct sock *nr_find_socket(unsigned char index, unsigned char id)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 45 | 53.57% | 3 | 33.33% |
Arnaldo Carvalho de Melo | 16 | 19.05% | 1 | 11.11% |
Ralf Bächle | 8 | 9.52% | 3 | 33.33% |
David S. Miller | 8 | 9.52% | 1 | 11.11% |
Jeroen Vreeken | 7 | 8.33% | 1 | 11.11% |
Total | 84 | 100.00% | 9 | 100.00% |
/*
* Find a connected NET/ROM socket given their circuit IDs.
*/
static struct sock *nr_find_peer(unsigned char index, unsigned char id,
ax25_address *dest)
{
struct sock *s;
spin_lock_bh(&nr_list_lock);
sk_for_each(s, &nr_list) {
struct nr_sock *nr = nr_sk(s);
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
bh_lock_sock(s);
goto found;
}
}
s = NULL;
found:
spin_unlock_bh(&nr_list_lock);
return s;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 59 | 59.60% | 3 | 33.33% |
Arnaldo Carvalho de Melo | 16 | 16.16% | 1 | 11.11% |
David S. Miller | 9 | 9.09% | 1 | 11.11% |
Ralf Bächle | 8 | 8.08% | 3 | 33.33% |
Jeroen Vreeken | 7 | 7.07% | 1 | 11.11% |
Total | 99 | 100.00% | 9 | 100.00% |
/*
* Find next free circuit ID.
*/
static unsigned short nr_find_next_circuit(void)
{
unsigned short id = circuit;
unsigned char i, j;
struct sock *sk;
for (;;) {
i = id / 256;
j = id % 256;
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
bh_unlock_sock(sk);
}
id++;
}
return id;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 68 | 80.95% | 1 | 50.00% |
Jeroen Vreeken | 16 | 19.05% | 1 | 50.00% |
Total | 84 | 100.00% | 2 | 100.00% |
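The circuit counter packs a circuit index and a circuit ID into one 16-bit value; the loop above skips any value whose high or low byte is zero. A small standalone illustration of that split (not part of the kernel source):

#include <stdio.h>

int main(void)
{
        unsigned short id;

        for (id = 0x0100; id <= 0x0103; id++) {
                unsigned char index = id / 256;   /* becomes nr->my_index */
                unsigned char cid   = id % 256;   /* becomes nr->my_id    */

                if (index == 0 || cid == 0)
                        printf("0x%04x skipped (zero byte)\n", id);
                else
                        printf("0x%04x -> index 0x%02x, id 0x%02x\n",
                               id, index, cid);
        }
        return 0;
}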
/*
* Deferred destroy.
*/
void nr_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void nr_destroy_timer(unsigned long data)
{
struct sock *sk=(struct sock *)data;
bh_lock_sock(sk);
sock_hold(sk);
nr_destroy_socket(sk);
bh_unlock_sock(sk);
sock_put(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jeroen Vreeken | 29 | 61.70% | 1 | 33.33% |
Linus Torvalds (pre-git) | 18 | 38.30% | 2 | 66.67% |
Total | 47 | 100.00% | 3 | 100.00% |
/*
* This is called from user mode and the timers. Thus it protects itself
* against interrupt users but doesn't worry about being called during
* work. Once it is removed from the queue no interrupt or bottom half
* will touch it and we are (fairly 8-) ) safe.
*/
void nr_destroy_socket(struct sock *sk)
{
struct sk_buff *skb;
nr_remove_socket(sk);
nr_stop_heartbeat(sk);
nr_stop_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr_clear_queues(sk); /* Flush the queues */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (skb->sk != sk) { /* A pending connection */
/* Queue the unaccepted socket for death */
sock_set_flag(skb->sk, SOCK_DEAD);
nr_start_heartbeat(skb->sk);
nr_sk(skb->sk)->state = NR_STATE_0;
}
kfree_skb(skb);
}
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
sk->sk_timer.function = nr_destroy_timer;
sk->sk_timer.expires = jiffies + 2 * HZ;
add_timer(&sk->sk_timer);
} else
sock_put(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 135 | 85.99% | 6 | 40.00% |
Arnaldo Carvalho de Melo | 8 | 5.10% | 2 | 13.33% |
Vinay K. Nallamothu | 4 | 2.55% | 1 | 6.67% |
David S. Miller | 3 | 1.91% | 1 | 6.67% |
James Morris | 2 | 1.27% | 1 | 6.67% |
Ralf Bächle | 2 | 1.27% | 1 | 6.67% |
Jeroen Vreeken | 2 | 1.27% | 2 | 13.33% |
Eric Dumazet | 1 | 0.64% | 1 | 6.67% |
Total | 157 | 100.00% | 15 | 100.00% |
/*
* Handling for system calls applied via the various interfaces to a
* NET/ROM socket object.
*/
static int nr_setsockopt(struct socket *sock, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
unsigned long opt;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (optlen < sizeof(unsigned int))
return -EINVAL;
if (get_user(opt, (unsigned int __user *)optval))
return -EFAULT;
switch (optname) {
case NETROM_T1:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t1 = opt * HZ;
return 0;
case NETROM_T2:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t2 = opt * HZ;
return 0;
case NETROM_N2:
if (opt < 1 || opt > 31)
return -EINVAL;
nr->n2 = opt;
return 0;
case NETROM_T4:
if (opt < 1 || opt > ULONG_MAX / HZ)
return -EINVAL;
nr->t4 = opt * HZ;
return 0;
case NETROM_IDLE:
if (opt > ULONG_MAX / (60 * HZ))
return -EINVAL;
nr->idle = opt * 60 * HZ;
return 0;
default:
return -ENOPROTOOPT;
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 205 | 81.67% | 7 | 58.33% |
Xi Wang | 33 | 13.15% | 1 | 8.33% |
David S. Miller | 9 | 3.59% | 2 | 16.67% |
Al Viro | 2 | 0.80% | 1 | 8.33% |
Ralf Bächle | 2 | 0.80% | 1 | 8.33% |
Total | 251 | 100.00% | 12 | 100.00% |
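For reference, a minimal userspace sketch of setting one of the SOL_NETROM options validated above. It assumes the libax25 development headers are installed; SOL_NETROM itself comes from <linux/socket.h>, so a guarded fallback definition is included.

#include <stdio.h>
#include <sys/socket.h>
#include <netrom/netrom.h>      /* NETROM_T1 ... NETROM_IDLE (libax25 headers assumed) */

#ifndef SOL_NETROM
#define SOL_NETROM 259          /* value from <linux/socket.h> */
#endif

int main(void)
{
        unsigned int t1 = 120;  /* seconds; nr_setsockopt() multiplies by HZ */
        int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);

        if (s < 0) {
                perror("socket");
                return 1;
        }
        if (setsockopt(s, SOL_NETROM, NETROM_T1, &t1, sizeof(t1)) < 0)
                perror("setsockopt(NETROM_T1)");
        return 0;
}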
static int nr_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
int val = 0;
int len;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
switch (optname) {
case NETROM_T1:
val = nr->t1 / HZ;
break;
case NETROM_T2:
val = nr->t2 / HZ;
break;
case NETROM_N2:
val = nr->n2;
break;
case NETROM_T4:
val = nr->t4 / HZ;
break;
case NETROM_IDLE:
val = nr->idle / (60 * HZ);
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, len, sizeof(int));
if (put_user(len, optlen))
return -EFAULT;
return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 173 | 86.50% | 9 | 60.00% |
Linus Torvalds | 15 | 7.50% | 3 | 20.00% |
David S. Miller | 8 | 4.00% | 1 | 6.67% |
Al Viro | 2 | 1.00% | 1 | 6.67% |
Ralf Bächle | 2 | 1.00% | 1 | 6.67% |
Total | 200 | 100.00% | 15 | 100.00% |
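Reading an option back mirrors the code above: the kernel converts the stored jiffies value to seconds and copies at most sizeof(int) bytes. A hedged helper sketch, where fd is any NET/ROM socket and the headers are as in the previous example:

#include <stdio.h>
#include <sys/socket.h>
#include <netrom/netrom.h>      /* libax25 header assumed */

#ifndef SOL_NETROM
#define SOL_NETROM 259
#endif

static int nr_get_t1_seconds(int fd)
{
        int val = 0;
        socklen_t len = sizeof(val);

        if (getsockopt(fd, SOL_NETROM, NETROM_T1, &val, &len) < 0) {
                perror("getsockopt(NETROM_T1)");
                return -1;
        }
        printf("T1 = %d s (%u bytes returned)\n", val, (unsigned int)len);
        return val;
}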
static int nr_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
lock_sock(sk);
if (sk->sk_state != TCP_LISTEN) {
memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
release_sock(sk);
return 0;
}
release_sock(sk);
return -EOPNOTSUPP;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 60 | 73.17% | 4 | 57.14% |
Jeroen Vreeken | 15 | 18.29% | 1 | 14.29% |
David S. Miller | 4 | 4.88% | 1 | 14.29% |
Arnaldo Carvalho de Melo | 3 | 3.66% | 1 | 14.29% |
Total | 82 | 100.00% | 7 | 100.00% |
static struct proto nr_proto = {
.name = "NETROM",
.owner = THIS_MODULE,
.obj_size = sizeof(struct nr_sock),
};
static int nr_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct nr_sock *nr;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
if (sock->type != SOCK_SEQPACKET || protocol != 0)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, kern);
if (sk == NULL)
return -ENOMEM;
nr = nr_sk(sk);
sock_init_data(sock, sk);
sock->ops = &nr_proto_ops;
sk->sk_protocol = protocol;
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
nr->t1 =
msecs_to_jiffies(sysctl_netrom_transport_timeout);
nr->t2 =
msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
nr->n2 =
msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
nr->t4 =
msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
nr->idle =
msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
nr->window = sysctl_netrom_transport_requested_window_size;
nr->bpqext = 1;
nr->state = NR_STATE_0;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 148 | 70.14% | 11 | 50.00% |
Eric W. Biedermann | 19 | 9.00% | 2 | 9.09% |
Ralf Bächle | 17 | 8.06% | 2 | 9.09% |
Arnaldo Carvalho de Melo | 10 | 4.74% | 2 | 9.09% |
Octavian Purdila | 5 | 2.37% | 1 | 4.55% |
Pavel Emelyanov | 4 | 1.90% | 1 | 4.55% |
David S. Miller | 3 | 1.42% | 1 | 4.55% |
Eric Paris | 3 | 1.42% | 1 | 4.55% |
Vinay K. Nallamothu | 2 | 0.95% | 1 | 4.55% |
Total | 211 | 100.00% | 22 | 100.00% |
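nr_create() only accepts SOCK_SEQPACKET with protocol 0, so the corresponding userspace call is fixed. A minimal sketch:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        /* Any other type/protocol combination fails with ESOCKTNOSUPPORT. */
        int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);

        if (s < 0) {
                perror("socket(AF_NETROM, SOCK_SEQPACKET, 0)");
                return 1;
        }
        printf("NET/ROM socket created, fd %d\n", s);
        return 0;
}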
static struct sock *nr_make_new(struct sock *osk)
{
struct sock *sk;
struct nr_sock *nr, *onr;
if (osk->sk_type != SOCK_SEQPACKET)
return NULL;
sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot, 0);
if (sk == NULL)
return NULL;
nr = nr_sk(sk);
sock_init_data(NULL, sk);
sk->sk_type = osk->sk_type;
sk->sk_priority = osk->sk_priority;
sk->sk_protocol = osk->sk_protocol;
sk->sk_rcvbuf = osk->sk_rcvbuf;
sk->sk_sndbuf = osk->sk_sndbuf;
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
skb_queue_head_init(&nr->ack_queue);
skb_queue_head_init(&nr->reseq_queue);
skb_queue_head_init(&nr->frag_queue);
nr_init_timers(sk);
onr = nr_sk(osk);
nr->t1 = onr->t1;
nr->t2 = onr->t2;
nr->n2 = onr->n2;
nr->t4 = onr->t4;
nr->idle = onr->idle;
nr->window = onr->window;
nr->device = onr->device;
nr->bpqext = onr->bpqext;
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 171 | 72.46% | 10 | 45.45% |
Arnaldo Carvalho de Melo | 22 | 9.32% | 2 | 9.09% |
David S. Miller | 21 | 8.90% | 1 | 4.55% |
Thomas Graf | 6 | 2.54% | 2 | 9.09% |
Pavel Emelyanov | 4 | 1.69% | 1 | 4.55% |
Eric W. Biedermann | 4 | 1.69% | 2 | 9.09% |
Ralf Bächle | 3 | 1.27% | 2 | 9.09% |
Hideaki Yoshifuji / 吉藤英明 | 3 | 1.27% | 1 | 4.55% |
Vinay K. Nallamothu | 2 | 0.85% | 1 | 4.55% |
Total | 236 | 100.00% | 22 | 100.00% |
static int nr_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct nr_sock *nr;
if (sk == NULL) return 0;
sock_hold(sk);
sock_orphan(sk);
lock_sock(sk);
nr = nr_sk(sk);
switch (nr->state) {
case NR_STATE_0:
case NR_STATE_1:
case NR_STATE_2:
nr_disconnect(sk, 0);
nr_destroy_socket(sk);
break;
case NR_STATE_3:
nr_clear_queues(sk);
nr->n2count = 0;
nr_write_internal(sk, NR_DISCREQ);
nr_start_t1timer(sk);
nr_stop_t2timer(sk);
nr_stop_t4timer(sk);
nr_stop_idletimer(sk);
nr->state = NR_STATE_2;
sk->sk_state = TCP_CLOSE;
sk->sk_shutdown |= SEND_SHUTDOWN;
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DESTROY);
break;
default:
break;
}
sock->sk = NULL;
release_sock(sk);
sock_put(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 136 | 75.14% | 10 | 55.56% |
Stephen Hemminger | 10 | 5.52% | 1 | 5.56% |
Jeroen Vreeken | 10 | 5.52% | 1 | 5.56% |
David S. Miller | 10 | 5.52% | 1 | 5.56% |
Arnaldo Carvalho de Melo | 6 | 3.31% | 2 | 11.11% |
Jarek Poplawski | 5 | 2.76% | 1 | 5.56% |
Ralf Bächle | 2 | 1.10% | 1 | 5.56% |
James Morris | 2 | 1.10% | 1 | 5.56% |
Total | 181 | 100.00% | 18 | 100.00% |
static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr;
struct net_device *dev;
ax25_uid_assoc *user;
ax25_address *source;
lock_sock(sk);
if (!sock_flag(sk, SOCK_ZAPPED)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) {
release_sock(sk);
return -EINVAL;
}
if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) {
release_sock(sk);
return -EINVAL;
}
if (addr->fsa_ax25.sax25_family != AF_NETROM) {
release_sock(sk);
return -EINVAL;
}
if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
release_sock(sk);
return -EADDRNOTAVAIL;
}
/*
* Only the super user can set an arbitrary user callsign.
*/
if (addr->fsa_ax25.sax25_ndigis == 1) {
if (!capable(CAP_NET_BIND_SERVICE)) {
dev_put(dev);
release_sock(sk);
return -EPERM;
}
nr->user_addr = addr->fsa_digipeater[0];
nr->source_addr = addr->fsa_ax25.sax25_call;
} else {
source = &addr->fsa_ax25.sax25_call;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
release_sock(sk);
dev_put(dev);
return -EPERM;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
}
nr->device = dev;
nr_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
dev_put(dev);
release_sock(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 252 | 67.56% | 12 | 60.00% |
Jeroen Vreeken | 72 | 19.30% | 1 | 5.00% |
Ralf Bächle | 27 | 7.24% | 2 | 10.00% |
Thomas Graf | 10 | 2.68% | 1 | 5.00% |
David S. Miller | 8 | 2.14% | 1 | 5.00% |
David Howells | 2 | 0.54% | 1 | 5.00% |
Arnaldo Carvalho de Melo | 1 | 0.27% | 1 | 5.00% |
Zhao Hongjiang | 1 | 0.27% | 1 | 5.00% |
Total | 373 | 100.00% | 20 | 100.00% |
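A hedged userspace sketch of binding to a local NET/ROM address. It assumes libax25 (<netax25/ax25.h> and <netax25/axlib.h> with ax25_aton()); the callsign is a placeholder and must match the address of one of the nr%d devices, otherwise nr_bind() returns -EADDRNOTAVAIL.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netax25/ax25.h>       /* struct full_sockaddr_ax25 (libax25, assumed) */
#include <netax25/axlib.h>      /* ax25_aton() (libax25, assumed) */

#define MY_CALL "MYCALL-1"      /* placeholder local NET/ROM callsign */

int main(void)
{
        struct full_sockaddr_ax25 addr;
        int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);

        if (s < 0) {
                perror("socket");
                return 1;
        }
        memset(&addr, 0, sizeof(addr));
        if (ax25_aton(MY_CALL, &addr) < 0) {
                fprintf(stderr, "invalid callsign\n");
                return 1;
        }
        addr.fsa_ax25.sax25_family = AF_NETROM;  /* nr_bind() insists on AF_NETROM */
        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                perror("bind");
                return 1;
        }
        return 0;
}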
static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr;
ax25_address *source = NULL;
ax25_uid_assoc *user;
struct net_device *dev;
int err = 0;
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
goto out_release; /* Connect completed during a ERESTARTSYS event */
}
if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sock->state = SS_UNCONNECTED;
err = -ECONNREFUSED;
goto out_release;
}
if (sk->sk_state == TCP_ESTABLISHED) {
err = -EISCONN; /* No reconnect on a seqpacket socket */
goto out_release;
}
sk->sk_state = TCP_CLOSE;
sock->state = SS_UNCONNECTED;
if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) {
err = -EINVAL;
goto out_release;
}
if (addr->sax25_family != AF_NETROM) {
err = -EINVAL;
goto out_release;
}
if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */
sock_reset_flag(sk, SOCK_ZAPPED);
if ((dev = nr_dev_first()) == NULL) {
err = -ENETUNREACH;
goto out_release;
}
source = (ax25_address *)dev->dev_addr;
user = ax25_findbyuid(current_euid());
if (user) {
nr->user_addr = user->call;
ax25_uid_put(user);
} else {
if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) {
dev_put(dev);
err = -EPERM;
goto out_release;
}
nr->user_addr = *source;
}
nr->source_addr = *source;
nr->device = dev;
dev_put(dev);
nr_insert_socket(sk); /* Finish the bind */
}
nr->dest_addr = addr->sax25_call;
release_sock(sk);
circuit = nr_find_next_circuit();
lock_sock(sk);
nr->my_index = circuit / 256;
nr->my_id = circuit % 256;
circuit++;
/* Move to connecting socket, start sending Connect Requests */
sock->state = SS_CONNECTING;
sk->sk_state = TCP_SYN_SENT;
nr_establish_data_link(sk);
nr->state = NR_STATE_1;
nr_start_heartbeat(sk);
/* Now the loop */
if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
err = -EINPROGRESS;
goto out_release;
}
/*
* A Connect Ack with Choke or timeout or failed routing will go to
* closed.
*/
if (sk->sk_state == TCP_SYN_SENT) {
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
if (sk->sk_state != TCP_SYN_SENT)
break;
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
}
if (sk->sk_state != TCP_ESTABLISHED) {
sock->state = SS_UNCONNECTED;
err = sock_error(sk); /* Always set at this point */
goto out_release;
}
sock->state = SS_CONNECTED;
out_release:
release_sock(sk);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 351 | 60.62% | 17 | 60.71% |
Ralf Bächle | 144 | 24.87% | 4 | 14.29% |
Jeroen Vreeken | 48 | 8.29% | 2 | 7.14% |
Thomas Graf | 10 | 1.73% | 1 | 3.57% |
Arnaldo Carvalho de Melo | 10 | 1.73% | 1 | 3.57% |
David S. Miller | 8 | 1.38% | 1 | 3.57% |
Eric Dumazet | 6 | 1.04% | 1 | 3.57% |
David Howells | 2 | 0.35% | 1 | 3.57% |
Total | 579 | 100.00% | 28 | 100.00% |
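Connecting follows the same pattern; an unbound socket is auto-bound to the first nr device in the SOCK_ZAPPED branch above. A sketch under the same libax25 assumptions, with a placeholder destination node:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netax25/ax25.h>       /* libax25 headers assumed */
#include <netax25/axlib.h>

#define DEST_CALL "NODE-2"      /* placeholder remote NET/ROM node */

int main(void)
{
        struct full_sockaddr_ax25 dest;
        int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);

        if (s < 0) {
                perror("socket");
                return 1;
        }
        memset(&dest, 0, sizeof(dest));
        if (ax25_aton(DEST_CALL, &dest) < 0) {
                fprintf(stderr, "invalid callsign\n");
                return 1;
        }
        dest.fsa_ax25.sax25_family = AF_NETROM;
        /* Blocks until the Connect Ack arrives unless O_NONBLOCK is set. */
        if (connect(s, (struct sockaddr *)&dest, sizeof(struct sockaddr_ax25)) < 0) {
                perror("connect");
                return 1;
        }
        printf("connected to %s\n", DEST_CALL);
        return 0;
}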
static int nr_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sk_buff *skb;
struct sock *newsk;
DEFINE_WAIT(wait);
struct sock *sk;
int err = 0;
if ((sk = sock->sk) == NULL)
return -EINVAL;
lock_sock(sk);
if (sk->sk_type != SOCK_SEQPACKET) {
err = -EOPNOTSUPP;
goto out_release;
}
if (sk->sk_state != TCP_LISTEN) {
err = -EINVAL;
goto out_release;
}
/*
* The write queue this time is holding sockets ready to use
* hooked into the SABM we saved
*/
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
skb = skb_dequeue(&sk->sk_receive_queue);
if (skb)
break;
if (flags & O_NONBLOCK) {
err = -EWOULDBLOCK;
break;
}
if (!signal_pending(current)) {
release_sock(sk);
schedule();
lock_sock(sk);
continue;
}
err = -ERESTARTSYS;
break;
}
finish_wait(sk_sleep(sk), &wait);
if (err)
goto out_release;
newsk = skb->sk;
sock_graft(newsk, newsock);
/* Now attach up the new socket */
kfree_skb(skb);
sk_acceptq_removed(sk);
out_release:
release_sock(sk);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 109 | 46.19% | 11 | 55.00% |
Ralf Bächle | 100 | 42.37% | 3 | 15.00% |
Jeroen Vreeken | 9 | 3.81% | 2 | 10.00% |
Eric Dumazet | 6 | 2.54% | 1 | 5.00% |
Arnaldo Carvalho de Melo | 5 | 2.12% | 1 | 5.00% |
David S. Miller | 4 | 1.69% | 1 | 5.00% |
David Howells | 3 | 1.27% | 1 | 5.00% |
Total | 236 | 100.00% | 20 | 100.00% |
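On the listening side, accept() sleeps until nr_rx_frame() queues an incoming connect request on the listener's receive queue. A hedged server sketch under the same libax25 assumptions, again with a placeholder callsign:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netax25/ax25.h>       /* libax25 headers assumed */
#include <netax25/axlib.h>

#define MY_CALL "MYCALL-1"      /* placeholder local NET/ROM callsign */

int main(void)
{
        struct full_sockaddr_ax25 addr;
        int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);
        int c;

        if (s < 0) {
                perror("socket");
                return 1;
        }
        memset(&addr, 0, sizeof(addr));
        if (ax25_aton(MY_CALL, &addr) < 0) {
                fprintf(stderr, "invalid callsign\n");
                return 1;
        }
        addr.fsa_ax25.sax25_family = AF_NETROM;
        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(s, 5) < 0) {
                perror("bind/listen");
                return 1;
        }
        c = accept(s, NULL, NULL);      /* new fd for the incoming circuit */
        if (c < 0) {
                perror("accept");
                return 1;
        }
        printf("incoming NET/ROM circuit accepted, fd %d\n", c);
        return 0;
}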
static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr;
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
memset(&sax->fsa_ax25, 0, sizeof(struct sockaddr_ax25));
lock_sock(sk);
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 1;
sax->fsa_ax25.sax25_call = nr->user_addr;
memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
sax->fsa_digipeater[0] = nr->dest_addr;
*uaddr_len = sizeof(struct full_sockaddr_ax25);
} else {
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 0;
sax->fsa_ax25.sax25_call = nr->source_addr;
*uaddr_len = sizeof(struct sockaddr_ax25);
}
release_sock(sk);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 150 | 71.43% | 7 | 53.85% |
Jeroen Vreeken | 17 | 8.10% | 1 | 7.69% |
Eric Dumazet | 16 | 7.62% | 1 | 7.69% |
Dan Carpenter | 16 | 7.62% | 1 | 7.69% |
David S. Miller | 8 | 3.81% | 1 | 7.69% |
Ralf Bächle | 2 | 0.95% | 1 | 7.69% |
Arnaldo Carvalho de Melo | 1 | 0.48% | 1 | 7.69% |
Total | 210 | 100.00% | 13 | 100.00% |
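As nr_getname() shows, getpeername() returns a full_sockaddr_ax25 whose first digipeater slot carries the remote node address, while getsockname() returns only a sockaddr_ax25 with the bound source address. A sketch assuming libax25's ax25_ntoa():

#include <stdio.h>
#include <sys/socket.h>
#include <netax25/ax25.h>       /* libax25 headers assumed */
#include <netax25/axlib.h>      /* ax25_ntoa() (assumed) */

static void nr_print_peer(int fd)
{
        struct full_sockaddr_ax25 peer;
        socklen_t len = sizeof(peer);

        if (getpeername(fd, (struct sockaddr *)&peer, &len) < 0) {
                perror("getpeername");
                return;
        }
        printf("peer user callsign: %s\n", ax25_ntoa(&peer.fsa_ax25.sax25_call));
        printf("peer node address : %s\n", ax25_ntoa(&peer.fsa_digipeater[0]));
}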
int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
{
struct sock *sk;
struct sock *make;
struct nr_sock *nr_make;
ax25_address *src, *dest, *user;
unsigned short circuit_index, circuit_id;
unsigned short peer_circuit_index, peer_circuit_id;
unsigned short frametype, flags, window, timeout;
int ret;
skb->sk = NULL; /* Initially we don't know who it's for */
/*
* skb->data points to the netrom frame start
*/
src = (ax25_address *)(skb->data + 0);
dest = (ax25_address *)(skb->data + 7);
circuit_index = skb->data[15];
circuit_id = skb->data[16];
peer_circuit_index = skb->data[17];
peer_circuit_id = skb->data[18];
frametype = skb->data[19] & 0x0F;
flags = skb->data[19] & 0xF0;
/*
* Check for an incoming IP over NET/ROM frame.
*/
if (frametype == NR_PROTOEXT &&
circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) {
skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
skb_reset_transport_header(skb);
return nr_rx_ip(skb, dev);
}
/*
* Find an existing socket connection, based on circuit ID, if it's
* a Connect Request base it on their circuit ID.
*
* Circuit ID 0/0 is not valid but it could still be a "reset" for a
* circuit that no longer exists at the other end ...
*/
sk = NULL;
if (circuit_index == 0 && circuit_id == 0) {
if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG)
sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src);
} else {
if (frametype == NR_CONNREQ)
sk = nr_find_peer(circuit_index, circuit_id, src);
else
sk = nr_find_socket(circuit_index, circuit_id);
}
if (sk != NULL) {
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
nr_sk(sk)->bpqext = 1;
else
nr_sk(sk)->bpqext = 0;
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
return ret;
}
/*
* Now it should be a CONNREQ.
*/
if (frametype != NR_CONNREQ) {
/*
* Here it would be nice to be able to send a reset but
* NET/ROM doesn't have one. We've tried to extend the protocol
* by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that
* apparently kills BPQ boxes... :-(
* So now we try to follow the established behaviour of
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
if (sysctl_netrom_reset_circuit &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
return 0;
}
sk = nr_find_listener(dest);
user = (ax25_address *)(skb->data + 21);
if (sk == NULL || sk_acceptq_is_full(sk) ||
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
bh_unlock_sock(sk);
return 0;
}
window = skb->data[20];
skb->sk = make;
make->sk_state = TCP_ESTABLISHED;
/* Fill in his circuit details */
nr_make = nr_sk(make);
nr_make->source_addr = *dest;
nr_make->dest_addr = *src;
nr_make->user_addr = *user;
nr_make->your_index = circuit_index;
nr_make->your_id = circuit_id;
bh_unlock_sock(sk);
circuit = nr_find_next_circuit();
bh_lock_sock(sk);
nr_make->my_index = circuit / 256;
nr_make->my_id = circuit % 256;
circuit++;
/* Window negotiation */
if (window < nr_make->window)
nr_make->window = window;
/* L4 timeout negotiation */
if (skb->len == 37) {
timeout = skb->data[36] * 256 + skb->data[35];
if (timeout * HZ < nr_make->t1)
nr_make->t1 = timeout * HZ;
nr_make->bpqext = 1;
} else {
nr_make->bpqext = 0;
}
nr_write_internal(make, NR_CONNACK);
nr_make->condition = 0x00;
nr_make->vs = 0;
nr_make->va = 0;
nr_make->vr = 0;
nr_make->vl = 0;
nr_make->state = NR_STATE_3;
sk_acceptq_added(sk);
skb_queue_head(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
bh_unlock_sock(sk);
nr_insert_socket(make);
nr_start_heartbeat(make);
nr_start_idletimer(make);
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 565 | 81.18% | 16 | 59.26% |
Ralf Bächle | 41 | 5.89% | 4 | 14.81% |
Jeroen Vreeken | 37 | 5.32% | 1 | 3.70% |
David S. Miller | 34 | 4.89% | 1 | 3.70% |
Arnaldo Carvalho de Melo | 17 | 2.44% | 4 | 14.81% |
James Morris | 2 | 0.29% | 1 | 3.70% |
Total | 696 | 100.00% | 27 | 100.00% |
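The fixed offsets read at the top of nr_rx_frame() correspond to the NET/ROM network header (source, destination, TTL) followed by the five-byte transport header. An illustrative layout, not taken from the kernel source, which works on raw skb->data offsets instead:

struct nr_header_view {                 /* byte offsets into skb->data          */
        unsigned char source[7];        /*  0..6  : origin node callsign        */
        unsigned char dest[7];          /*  7..13 : destination node callsign   */
        unsigned char ttl;              /* 14     : network time-to-live        */
        unsigned char circuit_index;    /* 15     : circuit index               */
        unsigned char circuit_id;       /* 16     : circuit id                  */
        unsigned char peer_index;       /* 17     : peer circuit index          */
        unsigned char peer_id;          /* 18     : peer circuit id             */
        unsigned char opcode;           /* 19     : low nibble = frame type,
                                         *          high nibble = flags
                                         *          (e.g. NR_CHOKE_FLAG)        */
};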
static int nr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name);
int err;
struct sockaddr_ax25 sax;
struct sk_buff *skb;
unsigned char *asmptr;
int size;
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
return -EINVAL;
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED)) {
err = -EADDRNOTAVAIL;
goto out;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
goto out;
}
if (nr->device == NULL) {
err = -ENETUNREACH;
goto out;
}
if (usax) {
if (msg->msg_namelen < sizeof(sax)) {
err = -EINVAL;
goto out;
}
sax = *usax;
if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) {
err = -EISCONN;
goto out;
}
if (sax.sax25_family != AF_NETROM) {
err = -EINVAL;
goto out;
}
} else {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -ENOTCONN;
goto out;
}
sax.sax25_family = AF_NETROM;
sax.sax25_call = nr->dest_addr;
}
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
if (len > 65536) {
err = -EMSGSIZE;
goto out;
}
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
goto out;
skb_reserve(skb, size - len);
skb_reset_transport_header(skb);
/*
* Push down the NET/ROM header
*/
asmptr = skb_push(skb, NR_TRANSPORT_LEN);
/* Build a NET/ROM Transport header */
*asmptr++ = nr->your_index;
*asmptr++ = nr->your_id;
*asmptr++ = 0; /* To be filled in later */
*asmptr++ = 0; /* Ditto */
*asmptr++ = NR_INFO;
/*
* Put the data on the end
*/
skb_put(skb, len);
/* User data follows immediately after the NET/ROM transport header */
if (memcpy_from_msg(skb_transport_header(skb), msg, len)) {
kfree_skb(skb);
err = -EFAULT;
goto out;
}
if (sk->sk_state != TCP_ESTABLISHED) {
kfree_skb(skb);
err = -ENOTCONN;
goto out;
}
nr_output(sk, skb); /* Shove it onto the queue */
err = len;
out:
release_sock(sk);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 315 | 67.45% | 11 | 45.83% |
Jeroen Vreeken | 80 | 17.13% | 1 | 4.17% |
Chris Wright | 17 | 3.64% | 1 | 4.17% |
Arnaldo Carvalho de Melo | 12 | 2.57% | 2 | 8.33% |
Alan Cox | 9 | 1.93% | 1 | 4.17% |
David S. Miller | 8 | 1.71% | 1 | 4.17% |
Jean Delvare | 8 | 1.71% | 1 | 4.17% |
Steffen Hurrle | 7 | 1.50% | 1 | 4.17% |
Thomas Graf | 5 | 1.07% | 1 | 4.17% |
Olaf Hering | 2 | 0.43% | 1 | 4.17% |
Ralf Bächle | 2 | 0.43% | 1 | 4.17% |
Al Viro | 1 | 0.21% | 1 | 4.17% |
Stephen Hemminger | 1 | 0.21% | 1 | 4.17% |
Total | 467 | 100.00% | 24 | 100.00% |
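Once connected, a plain send() is enough; nr_sendmsg() builds the transport header itself. Supplying an address via sendto() only succeeds when it matches the connected peer, otherwise -EISCONN is returned. A small helper sketch:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

static int nr_send_text(int fd, const char *text)
{
        /* Each call becomes one NET/ROM record; frames above 65536 bytes
         * are rejected by nr_sendmsg() with -EMSGSIZE. */
        ssize_t n = send(fd, text, strlen(text), 0);

        if (n < 0)
                perror("send");
        return (int)n;
}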
static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags)
{
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name);
size_t copied;
struct sk_buff *skb;
int er;
/*
* This works for seqpacket too. The receiver has ordered the queue for
* us! We do one quick check first though
*/
lock_sock(sk);
if (sk->sk_state != TCP_ESTABLISHED) {
release_sock(sk);
return -ENOTCONN;
}
/* Now we can treat all alike */
if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
release_sock(sk);
return er;
}
skb_reset_transport_header(skb);
copied = skb->len;
if (copied > size) {
copied = size;
msg->msg_flags |= MSG_TRUNC;
}
er = skb_copy_datagram_msg(skb, 0, msg, copied);
if (er < 0) {
skb_free_datagram(sk, skb);
release_sock(sk);
return er;
}
if (sax != NULL) {
memset(sax, 0, sizeof(*sax));
sax->sax25_family = AF_NETROM;
skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
AX25_ADDR_LEN);
msg->msg_namelen = sizeof(*sax);
}
skb_free_datagram(sk, skb);
release_sock(sk);
return copied;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 166 | 66.94% | 12 | 52.17% |
Alan Cox | 25 | 10.08% | 1 | 4.35% |
Jeroen Vreeken | 24 | 9.68% | 1 | 4.35% |
Mathias Krause | 12 | 4.84% | 1 | 4.35% |
Arnaldo Carvalho de Melo | 9 | 3.63% | 3 | 13.04% |
Steffen Hurrle | 7 | 2.82% | 1 | 4.35% |
Stephen Hemminger | 2 | 0.81% | 1 | 4.35% |
Hannes Frederic Sowa | 1 | 0.40% | 1 | 4.35% |
David S. Miller | 1 | 0.40% | 1 | 4.35% |
Wei Yongjun | 1 | 0.40% | 1 | 4.35% |
Total | 248 | 100.00% | 23 | 100.00% |
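Receiving is record-oriented as well: each call returns one queued frame, truncated (with MSG_TRUNC set in msg_flags) if the buffer is too small. A helper sketch:

#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

static void nr_read_one(int fd)
{
        char buf[236];          /* conventional NET/ROM user payload size */
        ssize_t n = recv(fd, buf, sizeof(buf), 0);

        if (n < 0)
                perror("recv");
        else
                printf("received %zd bytes\n", n);
}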
static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
int ret;
switch (cmd) {
case TIOCOUTQ: {
long amount;
lock_sock(sk);
amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
if (amount < 0)
amount = 0;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case TIOCINQ: {
struct sk_buff *skb;
long amount = 0L;
lock_sock(sk);
/* These two are safe on a single CPU system as only user tasks fiddle here */
if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
amount = skb->len;
release_sock(sk);
return put_user(amount, (int __user *)argp);
}
case SIOCGSTAMP:
lock_sock(sk);
ret = sock_get_timestamp(sk, argp);
release_sock(sk);
return ret;
case SIOCGSTAMPNS:
lock_sock(sk);
ret = sock_get_timestampns(sk, argp);
release_sock(sk);
return ret;
case SIOCGIFADDR:
case SIOCSIFADDR:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
case SIOCGIFBRDADDR:
case SIOCSIFBRDADDR:
case SIOCGIFNETMASK:
case SIOCSIFNETMASK:
case SIOCGIFMETRIC:
case SIOCSIFMETRIC:
return -EINVAL;
case SIOCADDRT:
case SIOCDELRT:
case SIOCNRDECOBS:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
return nr_rt_ioctl(cmd, argp);
default:
return -ENOIOCTLCMD;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 196 | 69.26% | 9 | 52.94% |
Eric Dumazet | 26 | 9.19% | 2 | 11.76% |
Jeroen Vreeken | 23 | 8.13% | 1 | 5.88% |
Al Viro | 18 | 6.36% | 1 | 5.88% |
Christoph Hellwig | 17 | 6.01% | 2 | 11.76% |
Arnaldo Carvalho de Melo | 2 | 0.71% | 1 | 5.88% |
Andi Kleen | 1 | 0.35% | 1 | 5.88% |
Total | 283 | 100.00% | 17 | 100.00% |
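Note that for NET/ROM sockets TIOCOUTQ reports the remaining send-buffer space rather than the number of queued bytes, and TIOCINQ the length of the next pending frame. A sketch, assuming TIOCOUTQ/TIOCINQ are visible via <sys/ioctl.h> as on Linux:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

static void nr_queue_status(int fd)
{
        int outq = 0, inq = 0;

        if (ioctl(fd, TIOCOUTQ, &outq) < 0)
                perror("ioctl(TIOCOUTQ)");
        if (ioctl(fd, TIOCINQ, &inq) < 0)
                perror("ioctl(TIOCINQ)");
        printf("send space %d bytes, next frame %d bytes\n", outq, inq);
}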
#ifdef CONFIG_PROC_FS
static void *nr_info_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_bh(&nr_list_lock);
return seq_hlist_start_head(&nr_list, *pos);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 13 | 40.62% | 1 | 12.50% |
Linus Torvalds (pre-git) | 8 | 25.00% | 3 | 37.50% |
Ralf Bächle | 5 | 15.62% | 2 | 25.00% |
Li Zefan | 4 | 12.50% | 1 | 12.50% |
Arnaldo Carvalho de Melo | 2 | 6.25% | 1 | 12.50% |
Total | 32 | 100.00% | 8 | 100.00% |
static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
return seq_hlist_next(v, &nr_list, pos);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 26 | 83.87% | 1 | 50.00% |
Li Zefan | 5 | 16.13% | 1 | 50.00% |
Total | 31 | 100.00% | 2 | 100.00% |
static void nr_info_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&nr_list_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 21 | 100.00% | 1 | 100.00% |
Total | 21 | 100.00% | 1 | 100.00% |
static int nr_info_show(struct seq_file *seq, void *v)
{
struct sock *s = sk_entry(v);
struct net_device *dev;
struct nr_sock *nr;
const char *devname;
char buf[11];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n");
else {
bh_lock_sock(s);
nr = nr_sk(s);
if ((dev = nr->device) == NULL)
devname = "???";
else
devname = dev->name;
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr));
seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr));
seq_printf(seq,
"%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n",
ax2asc(buf, &nr->source_addr),
devname,
nr->my_index,
nr->my_id,
nr->your_index,
nr->your_id,
nr->state,
nr->vs,
nr->vr,
nr->va,
ax25_display_timer(&nr->t1timer) / HZ,
nr->t1 / HZ,
ax25_display_timer(&nr->t2timer) / HZ,
nr->t2 / HZ,
ax25_display_timer(&nr->t4timer) / HZ,
nr->t4 / HZ,
ax25_display_timer(&nr->idletimer) / (60 * HZ),
nr->idle / (60 * HZ),
nr->n2count,
nr->n2,
nr->window,
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
bh_unlock_sock(s);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 199 | 66.56% | 6 | 37.50% |
Stephen Hemminger | 55 | 18.39% | 1 | 6.25% |
Jeroen Vreeken | 14 | 4.68% | 1 | 6.25% |
Ralf Bächle | 14 | 4.68% | 2 | 12.50% |
David S. Miller | 6 | 2.01% | 1 | 6.25% |
Al Viro | 3 | 1.00% | 1 | 6.25% |
Li Zefan | 3 | 1.00% | 1 | 6.25% |
Eric Dumazet | 2 | 0.67% | 1 | 6.25% |
Arnaldo Carvalho de Melo | 2 | 0.67% | 1 | 6.25% |
Joe Perches | 1 | 0.33% | 1 | 6.25% |
Total | 299 | 100.00% | 16 | 100.00% |
static const struct seq_operations nr_info_seqops = {
.start = nr_info_start,
.next = nr_info_next,
.stop = nr_info_stop,
.show = nr_info_show,
};
static int nr_info_open(struct inode *inode, struct file *file)
{
return seq_open(file, &nr_info_seqops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Stephen Hemminger | 22 | 88.00% | 1 | 50.00% |
Linus Torvalds (pre-git) | 3 | 12.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
static const struct file_operations nr_info_fops = {
.owner = THIS_MODULE,
.open = nr_info_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
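The seq_file handlers above back /proc/net/nr, one line per bound socket in the format printed by nr_info_show(). A trivial reader sketch:

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/net/nr", "r");

        if (f == NULL) {
                perror("/proc/net/nr");
                return 1;
        }
        while (fgets(line, sizeof(line), f) != NULL)
                fputs(line, stdout);
        fclose(f);
        return 0;
}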
static const struct net_proto_family nr_family_ops = {
.family = PF_NETROM,
.create = nr_create,
.owner = THIS_MODULE,
};
static const struct proto_ops nr_proto_ops = {
.family = PF_NETROM,
.owner = THIS_MODULE,
.release = nr_release,
.bind = nr_bind,
.connect = nr_connect,
.socketpair = sock_no_socketpair,
.accept = nr_accept,
.getname = nr_getname,
.poll = datagram_poll,
.ioctl = nr_ioctl,
.listen = nr_listen,
.shutdown = sock_no_shutdown,
.setsockopt = nr_setsockopt,
.getsockopt = nr_getsockopt,
.sendmsg = nr_sendmsg,
.recvmsg = nr_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
static struct notifier_block nr_dev_notifier = {
.notifier_call = nr_device_event,
};
static struct net_device **dev_nr;
static struct ax25_protocol nr_pid = {
.pid = AX25_P_NETROM,
.func = nr_route_frame
};
static struct ax25_linkfail nr_linkfail_notifier = {
.func = nr_link_failed,
};
static int __init nr_proto_init(void)
{
int i;
int rc = proto_register(&nr_proto, 0);
if (rc != 0)
goto out;
if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
return -1;
}
dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL);
if (dev_nr == NULL) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
return -1;
}
for (i = 0; i < nr_ndevs; i++) {
char name[IFNAMSIZ];
struct net_device *dev;
sprintf(name, "nr%d", i);
dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
if (!dev) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
goto fail;
}
dev->base_addr = i;
if (register_netdev(dev)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
free_netdev(dev);
goto fail;
}
nr_set_lockdep_key(dev);
dev_nr[i] = dev;
}
if (sock_register(&nr_family_ops)) {
printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
goto fail;
}
register_netdevice_notifier(&nr_dev_notifier);
ax25_register_pid(&nr_pid);
ax25_linkfail_register(&nr_linkfail_notifier);
#ifdef CONFIG_SYSCTL
nr_register_sysctl();
#endif
nr_loopback_init();
proc_create("nr", S_IRUGO, init_net.proc_net, &nr_info_fops);
proc_create("nr_neigh", S_IRUGO, init_net.proc_net, &nr_neigh_fops);
proc_create("nr_nodes", S_IRUGO, init_net.proc_net, &nr_nodes_fops);
out:
return rc;
fail:
while (--i >= 0) {
unregister_netdev(dev_nr[i]);
free_netdev(dev_nr[i]);
}
kfree(dev_nr);
proto_unregister(&nr_proto);
rc = -1;
goto out;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 124 | 36.80% | 13 | 46.43% |
Stephen Hemminger | 110 | 32.64% | 3 | 10.71% |
Arnaldo Carvalho de Melo | 35 | 10.39% | 1 | 3.57% |
Linus Torvalds | 22 | 6.53% | 2 | 7.14% |
Alexander Viro | 16 | 4.75% | 1 | 3.57% |
Gao Feng | 15 | 4.45% | 1 | 3.57% |
Ralf Bächle | 10 | 2.97% | 4 | 14.29% |
Tom Gundersen | 2 | 0.59% | 1 | 3.57% |
Jeroen Vreeken | 2 | 0.59% | 1 | 3.57% |
David S. Miller | 1 | 0.30% | 1 | 3.57% |
Total | 337 | 100.00% | 28 | 100.00% |
module_init(nr_proto_init);
module_param(nr_ndevs, int, 0);
MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices");
MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_NETROM);
static void __exit nr_exit(void)
{
int i;
remove_proc_entry("nr", init_net.proc_net);
remove_proc_entry("nr_neigh", init_net.proc_net);
remove_proc_entry("nr_nodes", init_net.proc_net);
nr_loopback_clear();
nr_rt_free();
#ifdef CONFIG_SYSCTL
nr_unregister_sysctl();
#endif
ax25_linkfail_release(&nr_linkfail_notifier);
ax25_protocol_release(AX25_P_NETROM);
unregister_netdevice_notifier(&nr_dev_notifier);
sock_unregister(PF_NETROM);
for (i = 0; i < nr_ndevs; i++) {
struct net_device *dev = dev_nr[i];
if (dev) {
unregister_netdev(dev);
free_netdev(dev);
}
}
kfree(dev_nr);
proto_unregister(&nr_proto);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 73 | 57.48% | 7 | 53.85% |
Gao Feng | 15 | 11.81% | 1 | 7.69% |
Jeroen Vreeken | 14 | 11.02% | 1 | 7.69% |
Stephen Hemminger | 10 | 7.87% | 1 | 7.69% |
Alexander Viro | 7 | 5.51% | 1 | 7.69% |
Arnaldo Carvalho de Melo | 6 | 4.72% | 1 | 7.69% |
Ralf Bächle | 2 | 1.57% | 1 | 7.69% |
Total | 127 | 100.00% | 13 | 100.00% |
module_exit(nr_exit);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 4137 | 64.14% | 60 | 34.88% |
Ralf Bächle | 531 | 8.23% | 18 | 10.47% |
Jeroen Vreeken | 433 | 6.71% | 4 | 2.33% |
Stephen Hemminger | 345 | 5.35% | 8 | 4.65% |
Arnaldo Carvalho de Melo | 252 | 3.91% | 11 | 6.40% |
David S. Miller | 229 | 3.55% | 7 | 4.07% |
Eric Dumazet | 59 | 0.91% | 6 | 3.49% |
Linus Torvalds | 42 | 0.65% | 4 | 2.33% |
Eric W. Biedermann | 35 | 0.54% | 4 | 2.33% |
Alan Cox | 34 | 0.53% | 2 | 1.16% |
Xi Wang | 33 | 0.51% | 1 | 0.58% |
Thomas Graf | 31 | 0.48% | 2 | 1.16% |
Gao Feng | 30 | 0.47% | 2 | 1.16% |
Rusty Russell | 29 | 0.45% | 2 | 1.16% |
Al Viro | 26 | 0.40% | 4 | 2.33% |
Alexander Viro | 23 | 0.36% | 1 | 0.58% |
Chris Wright | 17 | 0.26% | 1 | 0.58% |
Christoph Hellwig | 17 | 0.26% | 2 | 1.16% |
Dan Carpenter | 16 | 0.25% | 1 | 0.58% |
Steffen Hurrle | 14 | 0.22% | 1 | 0.58% |
Mathias Krause | 12 | 0.19% | 1 | 0.58% |
Li Zefan | 12 | 0.19% | 1 | 0.58% |
Hideaki Yoshifuji / 吉藤英明 | 11 | 0.17% | 3 | 1.74% |
Jean Delvare | 8 | 0.12% | 1 | 0.58% |
Vinay K. Nallamothu | 8 | 0.12% | 1 | 0.58% |
Pavel Emelyanov | 8 | 0.12% | 1 | 0.58% |
David Howells | 7 | 0.11% | 2 | 1.16% |
James Morris | 6 | 0.09% | 1 | 0.58% |
Octavian Purdila | 5 | 0.08% | 1 | 0.58% |
Jarek Poplawski | 5 | 0.08% | 1 | 0.58% |
Dave Jones | 5 | 0.08% | 1 | 0.58% |
Thomas Gleixner | 4 | 0.06% | 1 | 0.58% |
Randy Dunlap | 3 | 0.05% | 1 | 0.58% |
Eric Paris | 3 | 0.05% | 1 | 0.58% |
Tejun Heo | 3 | 0.05% | 1 | 0.58% |
Jiri Pirko | 3 | 0.05% | 1 | 0.58% |
Olaf Hering | 2 | 0.03% | 1 | 0.58% |
Tom Gundersen | 2 | 0.03% | 1 | 0.58% |
Andi Kleen | 1 | 0.02% | 1 | 0.58% |
Philippe De Muyter | 1 | 0.02% | 1 | 0.58% |
Fabian Frederick | 1 | 0.02% | 1 | 0.58% |
Zhao Hongjiang | 1 | 0.02% | 1 | 0.58% |
Wei Yongjun | 1 | 0.02% | 1 | 0.58% |
Arjan van de Ven | 1 | 0.02% | 1 | 0.58% |
Adrian Bunk | 1 | 0.02% | 1 | 0.58% |
Hannes Frederic Sowa | 1 | 0.02% | 1 | 0.58% |
Joe Perches | 1 | 0.02% | 1 | 0.58% |
Ingo Molnar | 1 | 0.02% | 1 | 0.58% |
Total | 6450 | 100.00% | 172 | 100.00% |