Release 4.11 net/bluetooth/af_bluetooth.c
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth address family and sockets. */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/stringify.h>
#include <linux/sched/signal.h>
#include <asm/ioctls.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
#include "leds.h"
#include "selftest.h"
/* Bluetooth sockets */
#define BT_MAX_PROTO 8
/* Protocol families registered via bt_sock_register(), indexed by the
 * BTPROTO_* number. Guarded by bt_proto_lock.
 */
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
/* Serializes bt_proto[] registration/unregistration against lookups in
 * bt_sock_create().
 */
static DEFINE_RWLOCK(bt_proto_lock);

/* Per-protocol lockdep class keys and names so that socket locks of
 * different Bluetooth protocols end up in distinct lockdep classes
 * (applied by bt_sock_reclassify_lock()). Index order must match the
 * BTPROTO_* numbering used for bt_proto[] above.
 */
static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

/* Matching lockdep keys/names for the sk's spinlock (slock). */
static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
/* Re-key @sk's socket lock and slock into the per-protocol lockdep
 * classes for @proto, so lockdep can distinguish nested locking of
 * sockets belonging to different Bluetooth protocols.
 * @proto must be a valid BTPROTO_* index (no bounds check here).
 */
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	/* Reclassifying is only safe before the socket is in use. */
	BUG_ON(!sock_allow_reclassification(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
			bt_key_strings[proto], &bt_lock_key[proto]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcel Holtmann | 47 | 85.45% | 2 | 50.00% |
Octavian Purdila | 6 | 10.91% | 1 | 25.00% |
Hannes Frederic Sowa | 2 | 3.64% | 1 | 25.00% |
Total | 55 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_reclassify_lock);
/* Register a Bluetooth protocol family in the BTPROTO_* slot @proto.
 * Returns 0 on success, -EINVAL for an out-of-range @proto, or
 * -EEXIST if the slot is already taken.
 */
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int ret;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (!bt_proto[proto]) {
		bt_proto[proto] = ops;
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	write_unlock(&bt_proto_lock);

	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 36 | 52.94% | 1 | 16.67% |
Marcel Holtmann | 22 | 32.35% | 1 | 16.67% |
Maksim Krasnyanskiy | 5 | 7.35% | 2 | 33.33% |
Marcus Meissner | 4 | 5.88% | 1 | 16.67% |
Stephen Hemminger | 1 | 1.47% | 1 | 16.67% |
Total | 68 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(bt_sock_register);
/* Drop whatever protocol family is registered in slot @proto.
 * Out-of-range values are silently ignored.
 */
void bt_sock_unregister(int proto)
{
	if (proto >= 0 && proto < BT_MAX_PROTO) {
		write_lock(&bt_proto_lock);
		bt_proto[proto] = NULL;
		write_unlock(&bt_proto_lock);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 16 | 42.11% | 1 | 16.67% |
Marcel Holtmann | 12 | 31.58% | 1 | 16.67% |
Maksim Krasnyanskiy | 4 | 10.53% | 2 | 33.33% |
Marcus Meissner | 4 | 10.53% | 1 | 16.67% |
David Herrmann | 2 | 5.26% | 1 | 16.67% |
Total | 38 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(bt_sock_unregister);
/* AF_BLUETOOTH socket(2) handler: dispatch socket creation to the
 * protocol registered for @proto, auto-loading its module on first use.
 * Returns 0 on success or a negative errno.
 */
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	/* Bluetooth sockets only exist in the initial network namespace. */
	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	/* Best-effort module autoload; the slot is re-checked under the
	 * lock below, so a failed/racing load is handled correctly.
	 */
	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		/* Pin the owning module across the create() call. */
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			/* Give the new sock per-protocol lockdep classes. */
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 52 | 35.62% | 2 | 16.67% |
Maksim Krasnyanskiy | 40 | 27.40% | 2 | 16.67% |
Eric W. Biedermann | 18 | 12.33% | 1 | 8.33% |
Marcel Holtmann | 13 | 8.90% | 2 | 16.67% |
Octavian Purdila | 7 | 4.79% | 1 | 8.33% |
Dave Young | 6 | 4.11% | 1 | 8.33% |
Eric Paris | 5 | 3.42% | 1 | 8.33% |
Marcus Meissner | 4 | 2.74% | 1 | 8.33% |
Randy Dunlap | 1 | 0.68% | 1 | 8.33% |
Total | 146 | 100.00% | 12 | 100.00% |
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
write_lock(&l->lock);
sk_add_node(sk, &l->head);
write_unlock(&l->lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 32 | 78.05% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 5 | 12.20% | 1 | 25.00% |
Gustavo Fernando Padovan | 2 | 4.88% | 1 | 25.00% |
Maksim Krasnyanskiy | 2 | 4.88% | 1 | 25.00% |
Total | 41 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_link);
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
write_lock(&l->lock);
sk_del_node_init(sk);
write_unlock(&l->lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds | 29 | 80.56% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 3 | 8.33% | 1 | 25.00% |
Gustavo Fernando Padovan | 2 | 5.56% | 1 | 25.00% |
Maksim Krasnyanskiy | 2 | 5.56% | 1 | 25.00% |
Total | 36 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_unlink);
/* Queue child @sk on @parent's accept queue, taking a reference on the
 * child that is dropped again in bt_accept_unlink().
 * NOTE(review): accept_q and sk_ack_backlog are modified without any
 * explicit lock here — presumably callers hold the parent's socket
 * lock; confirm against the protocol implementations before relying
 * on this from a new call site.
 */
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 42 | 67.74% | 2 | 50.00% |
Linus Torvalds | 19 | 30.65% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 1 | 1.61% | 1 | 25.00% |
Total | 62 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_accept_enqueue);
/* Take @sk off its parent's accept queue, clear the parent link and
 * drop the reference held since bt_accept_enqueue().
 * NOTE(review): bt_sk(sk)->parent is dereferenced unconditionally —
 * callers must guarantee the socket is still linked to a parent when
 * this is invoked.
 */
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 40 | 71.43% | 2 | 50.00% |
Linus Torvalds | 14 | 25.00% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 2 | 3.57% | 1 | 25.00% |
Total | 56 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_accept_unlink);
/* Pop the first ready child socket off @parent's accept queue.
 * A child is taken when it is BT_CONNECTED, when @newsock is NULL
 * (cleanup callers accept any child), or when the parent has
 * BT_SK_DEFER_SETUP set. Children already in BT_CLOSED are unlinked
 * and skipped. If @newsock is given, the child is grafted onto it.
 * Returns the dequeued child, or NULL if nothing is ready.
 */
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* _safe iteration: bt_accept_unlink() removes entries as we go. */
	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		lock_sock(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 115 | 80.42% | 2 | 20.00% |
Gustavo Fernando Padovan | 9 | 6.29% | 2 | 20.00% |
Geliang Tang | 7 | 4.90% | 1 | 10.00% |
Marcel Holtmann | 7 | 4.90% | 2 | 20.00% |
Yichen Zhao | 2 | 1.40% | 1 | 10.00% |
Arnaldo Carvalho de Melo | 2 | 1.40% | 1 | 10.00% |
Vinicius Costa Gomes | 1 | 0.70% | 1 | 10.00% |
Total | 143 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(bt_accept_dequeue);
/* Generic datagram recvmsg for Bluetooth sockets.
 * Copies one queued skb into @msg, truncating to @len (and setting
 * MSG_TRUNC in msg_flags) when the datagram is larger. Returns the
 * number of bytes copied — or the full datagram length if the caller
 * passed MSG_TRUNC — 0 when the receive side was shut down, or a
 * negative errno. MSG_OOB is not supported.
 */
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	size_t skblen;	/* original datagram length, for MSG_TRUNC */
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		/* Treat a shut-down receive side as EOF, not an error. */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		/* Optional per-protocol hook to fill in the source address. */
		if (msg->msg_name && bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 144 | 63.44% | 2 | 14.29% |
Marcel Holtmann | 45 | 19.82% | 3 | 21.43% |
Denis Kenzior | 18 | 7.93% | 1 | 7.14% |
Andrei Emeltchenko | 5 | 2.20% | 1 | 7.14% |
Stephen Hemminger | 4 | 1.76% | 1 | 7.14% |
Ezequiel García | 4 | 1.76% | 1 | 7.14% |
Arnaldo Carvalho de Melo | 4 | 1.76% | 2 | 14.29% |
Luiz Augusto von Dentz | 1 | 0.44% | 1 | 7.14% |
David S. Miller | 1 | 0.44% | 1 | 7.14% |
Neil Horman | 1 | 0.44% | 1 | 7.14% |
Total | 227 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL(bt_sock_recvmsg);
/* Sleep (interruptibly) until data arrives on @sk's receive queue, an
 * error or receive shutdown is signalled, a signal is pending, or
 * @timeo expires. Called with the sock lock held; the lock is dropped
 * around schedule_timeout() and re-taken before returning.
 * Returns the remaining timeout.
 */
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		/* Set state before the checks to avoid a lost wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mat Martineau | 128 | 96.97% | 1 | 50.00% |
Eric Dumazet | 4 | 3.03% | 1 | 50.00% |
Total | 132 | 100.00% | 2 | 100.00% |
/* Stream-style recvmsg for Bluetooth sockets: keep pulling skbs off
 * the receive queue (blocking per the socket timeout) until @size
 * bytes or at least the SO_RCVLOWAT target have been copied.
 * Partially consumed skbs are trimmed in place and requeued so the
 * remainder is returned by the next call; with MSG_PEEK nothing is
 * consumed. Returns bytes copied, or a negative errno if nothing was
 * copied. MSG_OOB is not supported.
 */
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			/* Queue empty: stop if we already met the low-water
			 * target, hit an error/shutdown, or may not block;
			 * otherwise wait for more data.
			 */
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			/* Copy to user space failed: put the skb back and
			 * report EFAULT only if nothing was copied yet.
			 */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			/* Consume @chunk bytes from the skb: first from the
			 * linear head, then walking the frag list, trimming
			 * skb->len/data_len to match.
			 */
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			/* Leftover bytes go back on the queue for next time. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Mat Martineau | 438 | 98.87% | 2 | 50.00% |
Andrei Emeltchenko | 4 | 0.90% | 1 | 25.00% |
David S. Miller | 1 | 0.23% | 1 | 25.00% |
Total | 443 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
/* Report readability for a listening socket: POLLIN if any queued
 * child is fully connected, or is awaiting setup while the parent has
 * BT_SK_DEFER_SETUP enabled.
 */
static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct bt_sock *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &bt_sk(parent)->accept_q, accept_q) {
		struct sock *child = (struct sock *)pos;

		if (child->sk_state == BT_CONNECTED)
			return POLLIN | POLLRDNORM;
		if (child->sk_state == BT_CONNECT2 &&
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcel Holtmann | 70 | 83.33% | 2 | 50.00% |
Gustavo Fernando Padovan | 7 | 8.33% | 1 | 25.00% |
Geliang Tang | 7 | 8.33% | 1 | 25.00% |
Total | 84 | 100.00% | 4 | 100.00% |
/* poll(2) handler shared by the Bluetooth socket protocols.
 * Returns the POLL* event mask for @sk.
 */
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	/* Listening sockets report readiness from the accept queue only. */
	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	/* While connecting/configuring, never report writability. */
	if (sk->sk_state == BT_CONNECT ||
	    sk->sk_state == BT_CONNECT2 ||
	    sk->sk_state == BT_CONFIG)
		return mask;

	/* Writable unless the protocol suspended TX or buffers are full;
	 * otherwise arm the async "space available" notification.
	 */
	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 129 | 58.11% | 2 | 15.38% |
Marcel Holtmann | 22 | 9.91% | 1 | 7.69% |
Maxim Krasnyansky | 15 | 6.76% | 1 | 7.69% |
Gustavo Fernando Padovan | 14 | 6.31% | 2 | 15.38% |
Jacob E Keller | 13 | 5.86% | 2 | 15.38% |
Davide Libenzi | 12 | 5.41% | 1 | 7.69% |
Eric Dumazet | 9 | 4.05% | 3 | 23.08% |
Arnaldo Carvalho de Melo | 8 | 3.60% | 1 | 7.69% |
Total | 222 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(bt_sock_poll);
/* Common ioctl handler for Bluetooth sockets: TIOCOUTQ (unsent bytes),
 * TIOCINQ (length of the next queued datagram), and the SIOCGSTAMP/
 * SIOCGSTAMPNS timestamp queries. Unknown commands return -ENOIOCTLCMD
 * so callers can fall through to protocol-specific handling.
 */
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		/* Free space left in the send buffer, clamped at zero. */
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		/* Peek (don't consume) the head of the receive queue. */
		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcel Holtmann | 213 | 99.53% | 2 | 66.67% |
Eric Dumazet | 1 | 0.47% | 1 | 33.33% |
Total | 214 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(bt_sock_ioctl);
/* This function expects the sk lock to be held when called.
 * Sleep (interruptibly) until sk->sk_state equals @state. The sk lock
 * is dropped around schedule_timeout() and re-taken afterwards.
 * Returns 0 on success, -EINPROGRESS if @timeo ran out, a
 * sock_intr_errno() value if interrupted by a signal, or the pending
 * socket error.
 */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* Re-arm before re-checking to avoid a lost wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 120 | 82.76% | 2 | 28.57% |
Peter Hurley | 11 | 7.59% | 1 | 14.29% |
Eric Dumazet | 6 | 4.14% | 1 | 14.29% |
Benjamin LaHaise | 4 | 2.76% | 1 | 14.29% |
Arnaldo Carvalho de Melo | 3 | 2.07% | 1 | 14.29% |
Marcel Holtmann | 1 | 0.69% | 1 | 14.29% |
Total | 145 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(bt_sock_wait_state);
/* This function expects the sk lock to be held when called.
 * Sleep (interruptibly) until the protocol clears BT_SK_SUSPEND, i.e.
 * until the socket is ready to transmit again. The send timeout is
 * derived from @flags (O_NONBLOCK => no wait). The sk lock is dropped
 * around schedule_timeout() and re-taken afterwards.
 * Returns 0 when ready, -EAGAIN on timeout, a sock_intr_errno() value
 * on signal, or the pending socket error.
 */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		/* Re-arm before re-checking to avoid a lost wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Johan Hedberg | 164 | 100.00% | 1 | 100.00% |
Total | 164 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(bt_sock_wait_ready);
#ifdef CONFIG_PROC_FS
/* Per-open iterator state for the /proc/net/<proto> socket listing. */
struct bt_seq_state {
	struct bt_sock_list *l;	/* list being walked, set in bt_seq_open() */
};

/* seq_file start: take the list lock (released in bt_seq_stop()) and
 * position the iterator; position 0 yields SEQ_START_TOKEN so that
 * bt_seq_show() prints the header line first.
 */
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
/* seq_file step: advance to the next socket on the list. */
static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *state = seq->private;

	return seq_hlist_next(v, &state->l->head, pos);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
/* seq_file stop: release the list lock taken in bt_seq_start(). */
static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *state = seq->private;

	read_unlock(&state->l->lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
/* seq_file show: emit the header line (for SEQ_START_TOKEN) or one
 * socket's stats, then append the protocol's optional extra columns.
 */
static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *state = seq->private;
	struct bt_sock_list *list = state->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);
	}

	/* Both branches share the same tail: custom columns + newline. */
	if (list->custom_seq_show) {
		seq_putc(seq, ' ');
		list->custom_seq_show(seq, v);
	}
	seq_putc(seq, '\n');

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 174 | 89.69% | 1 | 20.00% |
Andrei Emeltchenko | 10 | 5.15% | 1 | 20.00% |
Eric W. Biedermann | 8 | 4.12% | 1 | 20.00% |
Marcel Holtmann | 2 | 1.03% | 2 | 40.00% |
Total | 194 | 100.00% | 5 | 100.00% |
/* seq_file iterator ops for the /proc/net/<proto> socket listings. */
static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};
/* Open handler: allocate the iterator state and bind it to the
 * protocol's socket list stashed in the proc entry's data pointer.
 */
static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_sock_list *sk_list;
	struct bt_seq_state *s;

	sk_list = PDE_DATA(inode);
	s = __seq_open_private(file, &bt_seq_ops,
			       sizeof(struct bt_seq_state));
	if (!s)
		return -ENOMEM;

	s->l = sk_list;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 65 | 97.01% | 1 | 33.33% |
Al Viro | 1 | 1.49% | 1 | 33.33% |
Andrei Emeltchenko | 1 | 1.49% | 1 | 33.33% |
Total | 67 | 100.00% | 3 | 100.00% |
/* File ops for the per-protocol proc entries created by bt_procfs_init(). */
static const struct file_operations bt_fops = {
	.open    = bt_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private
};
/* Create /proc/net/<name> listing the sockets on @sk_list; @seq_show,
 * if non-NULL, appends per-protocol columns to each line.
 * Returns 0 on success or -ENOMEM.
 */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;

	sk_list->custom_seq_show = seq_show;

	pde = proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list);
	return pde ? 0 : -ENOMEM;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 56 | 84.85% | 1 | 25.00% |
Al Viro | 6 | 9.09% | 2 | 50.00% |
Gao Feng | 4 | 6.06% | 1 | 25.00% |
Total | 66 | 100.00% | 4 | 100.00% |
/* Remove the /proc/net/<name> entry created by bt_procfs_init(). */
void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 19 | 79.17% | 1 | 50.00% |
Gao Feng | 5 | 20.83% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
#else
/* Stub used when CONFIG_PROC_FS is disabled: no proc entry, no error. */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
/* Stub used when CONFIG_PROC_FS is disabled: nothing to remove. */
void bt_procfs_cleanup(struct net *net, const char *name)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masatake YAMATO | 14 | 100.00% | 1 | 100.00% |
Total | 14 | 100.00% | 1 | 100.00% |
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);
/* The AF_BLUETOOTH address family, registered in bt_init(). */
static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
__stringify(BT_SUBSYS_REVISION)
/* Subsystem init: run self-tests, set up debugfs/LED/sysfs state,
 * register the AF_BLUETOOTH family, then bring up HCI sockets, L2CAP,
 * SCO and the management interface. Later steps unwind the earlier
 * ones on failure.
 */
static int __init bt_init(void)
{
	int err;

	/* Ensure struct bt_skb_cb fits in skb->cb at build time. */
	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	bt_leds_init();

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	err = mgmt_init();
	if (err < 0) {
		sco_exit();
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	/* Reached once hci_sock_init() has succeeded. */
	hci_sock_cleanup();

error:
	/* Undo the family/sysfs registration done above. */
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Marcel Holtmann | 68 | 36.76% | 6 | 50.00% |
Gustavo Fernando Padovan | 62 | 33.51% | 1 | 8.33% |
Maksim Krasnyanskiy | 32 | 17.30% | 3 | 25.00% |
Johan Hedberg | 22 | 11.89% | 1 | 8.33% |
Eyal Birger | 1 | 0.54% | 1 | 8.33% |
Total | 185 | 100.00% | 12 | 100.00% |
/* Module unload: tear everything down in the reverse order of bt_init(). */
static void __exit bt_exit(void)
{
	mgmt_exit();
	sco_exit();
	l2cap_exit();
	hci_sock_cleanup();
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();
	bt_leds_cleanup();
	debugfs_remove_recursive(bt_debugfs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 16 | 43.24% | 1 | 14.29% |
Marcel Holtmann | 12 | 32.43% | 4 | 57.14% |
Gustavo Fernando Padovan | 6 | 16.22% | 1 | 14.29% |
Johan Hedberg | 3 | 8.11% | 1 | 14.29% |
Total | 37 | 100.00% | 7 | 100.00% |
subsys_initcall(bt_init);
module_exit(bt_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Maksim Krasnyanskiy | 718 | 21.02% | 7 | 7.53% |
Marcel Holtmann | 640 | 18.74% | 25 | 26.88% |
Masatake YAMATO | 588 | 17.22% | 1 | 1.08% |
Mat Martineau | 571 | 16.72% | 2 | 2.15% |
Linus Torvalds | 229 | 6.71% | 3 | 3.23% |
Johan Hedberg | 196 | 5.74% | 3 | 3.23% |
Gustavo Fernando Padovan | 102 | 2.99% | 5 | 5.38% |
Dave Young | 69 | 2.02% | 1 | 1.08% |
Arnaldo Carvalho de Melo | 35 | 1.02% | 4 | 4.30% |
Al Viro | 35 | 1.02% | 3 | 3.23% |
Eric W. Biedermann | 26 | 0.76% | 2 | 2.15% |
Andrei Emeltchenko | 20 | 0.59% | 3 | 3.23% |
Eric Dumazet | 20 | 0.59% | 4 | 4.30% |
Octavian Purdila | 18 | 0.53% | 1 | 1.08% |
Denis Kenzior | 18 | 0.53% | 1 | 1.08% |
Maxim Krasnyansky | 15 | 0.44% | 1 | 1.08% |
Geliang Tang | 14 | 0.41% | 1 | 1.08% |
Jacob E Keller | 13 | 0.38% | 2 | 2.15% |
Davide Libenzi | 12 | 0.35% | 1 | 1.08% |
Marcus Meissner | 12 | 0.35% | 1 | 1.08% |
Stephen Hemminger | 11 | 0.32% | 3 | 3.23% |
Peter Hurley | 11 | 0.32% | 1 | 1.08% |
Gao Feng | 9 | 0.26% | 2 | 2.15% |
Eric Paris | 5 | 0.15% | 1 | 1.08% |
Benjamin LaHaise | 4 | 0.12% | 1 | 1.08% |
Ezequiel García | 4 | 0.12% | 1 | 1.08% |
Ingo Molnar | 3 | 0.09% | 1 | 1.08% |
David Herrmann | 2 | 0.06% | 1 | 1.08% |
Hannes Frederic Sowa | 2 | 0.06% | 1 | 1.08% |
Yichen Zhao | 2 | 0.06% | 1 | 1.08% |
David S. Miller | 2 | 0.06% | 1 | 1.08% |
Jan Engelhardt | 2 | 0.06% | 1 | 1.08% |
Randy Dunlap | 1 | 0.03% | 1 | 1.08% |
Luiz Augusto von Dentz | 1 | 0.03% | 1 | 1.08% |
Neil Horman | 1 | 0.03% | 1 | 1.08% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.03% | 1 | 1.08% |
Fabian Frederick | 1 | 0.03% | 1 | 1.08% |
Eyal Birger | 1 | 0.03% | 1 | 1.08% |
Vinicius Costa Gomes | 1 | 0.03% | 1 | 1.08% |
Total | 3415 | 100.00% | 93 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.