Release 4.18 net/bluetooth/af_bluetooth.c
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth address family and sockets. */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/stringify.h>
#include <linux/sched/signal.h>
#include <asm/ioctls.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
#include "leds.h"
#include "selftest.h"
/* Bluetooth sockets */
/* Up to BT_MAX_PROTO protocol backends (L2CAP, HCI, SCO, ...) may be
 * registered.  The BTPROTO_* constants index bt_proto[] and the lockdep
 * key/name tables below, so the order of the strings must match the
 * BTPROTO_* numbering.
 */
#define BT_MAX_PROTO 8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);	/* protects bt_proto[] */

/* Per-protocol lockdep classes/names for the socket "sk" lock */
static struct lock_class_key bt_lock_key[BT_MAX_PROTO];

static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

/* Per-protocol lockdep classes/names for the socket spinlock */
static struct lock_class_key bt_slock_key[BT_MAX_PROTO];

static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
/* Re-key the socket's lockdep classes to the per-protocol ones above.
 *
 * Once the protocol for an AF_BLUETOOTH socket is known, its sk lock and
 * slock are given protocol-specific lockdep classes so that locking one
 * Bluetooth protocol's sockets is not confused with another's.  @proto
 * indexes the key/name tables, so it must be a valid BTPROTO_* value.
 */
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	/* Reclassifying a lock that may already be held would be unsafe. */
	BUG_ON(!sock_allow_reclassification(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
			bt_key_strings[proto], &bt_lock_key[proto]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Marcel Holtmann | 47 | 85.45% | 2 | 50.00% |
| Octavian Purdila | 6 | 10.91% | 1 | 25.00% |
| Hannes Frederic Sowa | 2 | 3.64% | 1 | 25.00% |
| Total | 55 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_reclassify_lock);
/* Register @ops as the backend for Bluetooth protocol @proto.
 *
 * Returns 0 on success, -EINVAL for an out-of-range protocol number or
 * -EEXIST if a backend for that protocol is already registered.
 */
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int ret;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (!bt_proto[proto]) {
		bt_proto[proto] = ops;
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	write_unlock(&bt_proto_lock);

	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Linus Torvalds | 36 | 52.94% | 1 | 16.67% |
| Marcel Holtmann | 22 | 32.35% | 1 | 16.67% |
| Maksim Krasnyanskiy | 5 | 7.35% | 2 | 33.33% |
| Marcus Meissner | 4 | 5.88% | 1 | 16.67% |
| Stephen Hemminger | 1 | 1.47% | 1 | 16.67% |
| Total | 68 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(bt_sock_register);
/* Remove the backend registered for Bluetooth protocol @proto.
 * Out-of-range protocol numbers are silently ignored.
 */
void bt_sock_unregister(int proto)
{
	if (proto >= 0 && proto < BT_MAX_PROTO) {
		write_lock(&bt_proto_lock);
		bt_proto[proto] = NULL;
		write_unlock(&bt_proto_lock);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Linus Torvalds | 16 | 42.11% | 1 | 16.67% |
| Marcel Holtmann | 12 | 31.58% | 1 | 16.67% |
| Marcus Meissner | 4 | 10.53% | 1 | 16.67% |
| Maksim Krasnyanskiy | 4 | 10.53% | 2 | 33.33% |
| David Herrmann | 2 | 5.26% | 1 | 16.67% |
| Total | 38 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(bt_sock_unregister);
/* socket(2) backend for AF_BLUETOOTH: dispatch socket creation to the
 * protocol module registered for @proto.
 *
 * Only the initial network namespace is supported.  Returns 0 on success
 * or a negative errno (-EAFNOSUPPORT, -EINVAL, -EPROTONOSUPPORT, or
 * whatever the protocol's ->create() returns).
 */
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	/* Try to autoload the protocol module via its "bt-proto-%d" alias. */
	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		/* Hold a module reference across ->create() so the backend
		 * cannot be unloaded while it is constructing the socket.
		 */
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Linus Torvalds | 52 | 35.62% | 2 | 16.67% |
| Maksim Krasnyanskiy | 40 | 27.40% | 2 | 16.67% |
| Eric W. Biedermann | 18 | 12.33% | 1 | 8.33% |
| Marcel Holtmann | 13 | 8.90% | 2 | 16.67% |
| Octavian Purdila | 7 | 4.79% | 1 | 8.33% |
| Dave Young | 6 | 4.11% | 1 | 8.33% |
| Eric Paris | 5 | 3.42% | 1 | 8.33% |
| Marcus Meissner | 4 | 2.74% | 1 | 8.33% |
| Randy Dunlap | 1 | 0.68% | 1 | 8.33% |
| Total | 146 | 100.00% | 12 | 100.00% |
/* Add @sk to the protocol's global socket list @l (used e.g. for the
 * /proc/net listing).
 */
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Linus Torvalds | 32 | 78.05% | 1 | 25.00% |
| Arnaldo Carvalho de Melo | 5 | 12.20% | 1 | 25.00% |
| Maksim Krasnyanskiy | 2 | 4.88% | 1 | 25.00% |
| Gustavo Fernando Padovan | 2 | 4.88% | 1 | 25.00% |
| Total | 41 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_link);
/* Remove @sk from the protocol's global socket list @l. */
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Linus Torvalds | 29 | 80.56% | 1 | 25.00% |
| Arnaldo Carvalho de Melo | 3 | 8.33% | 1 | 25.00% |
| Gustavo Fernando Padovan | 2 | 5.56% | 1 | 25.00% |
| Maksim Krasnyanskiy | 2 | 5.56% | 1 | 25.00% |
| Total | 36 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_unlink);
/* Queue child socket @sk on @parent's accept queue.
 *
 * Takes a reference on @sk; the matching sock_put() is in
 * bt_accept_unlink().  The child's sk lock serialises the list insertion
 * against concurrent bt_accept_unlink()/bt_accept_dequeue().
 *
 * NOTE(review): lock_sock() may sleep — confirm no caller invokes this
 * from atomic (BH) context.
 */
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	lock_sock(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	release_sock(sk);
	/* One more connection now awaits accept() on the listener. */
	parent->sk_ack_backlog++;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 42 | 58.33% | 2 | 40.00% |
| Linus Torvalds | 19 | 26.39% | 1 | 20.00% |
| Dean Jenkins | 10 | 13.89% | 1 | 20.00% |
| Arnaldo Carvalho de Melo | 1 | 1.39% | 1 | 20.00% |
| Total | 72 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(bt_accept_enqueue);
/* Remove @sk from its parent's accept queue and drop the reference taken
 * by bt_accept_enqueue().
 *
 * Calling function must hold the sk lock.
 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
 */
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 45 | 80.36% | 2 | 50.00% |
| Linus Torvalds | 9 | 16.07% | 1 | 25.00% |
| Arnaldo Carvalho de Melo | 2 | 3.57% | 1 | 25.00% |
| Total | 56 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_accept_unlink);
/* Pop the first "ready" child from @parent's accept queue.
 *
 * A child is ready when it is BT_CONNECTED, when no @newsock was
 * supplied, or when the parent uses deferred setup.  Children found in
 * BT_CLOSED state are unlinked and skipped.  If @newsock is given the
 * returned sock is grafted onto it.  Returns NULL if nothing is ready.
 */
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

restart:
	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		/* Prevent early freeing of sk due to unlink and sock_kill */
		sock_hold(sk);
		lock_sock(sk);

		/* Check sk has not already been unlinked via
		 * bt_accept_unlink() due to serialisation caused by sk locking
		 */
		if (!bt_sk(sk)->parent) {
			BT_DBG("sk %p, already unlinked", sk);
			release_sock(sk);
			sock_put(sk);

			/* Restart the loop as sk is no longer in the list
			 * and also avoid a potential infinite loop because
			 * list_for_each_entry_safe() is not thread safe.
			 */
			goto restart;
		}

		/* sk is safely in the parent list so reduce reference count */
		sock_put(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 115 | 60.21% | 2 | 18.18% |
| Dean Jenkins | 48 | 25.13% | 1 | 9.09% |
| Gustavo Fernando Padovan | 9 | 4.71% | 2 | 18.18% |
| Geliang Tang | 7 | 3.66% | 1 | 9.09% |
| Marcel Holtmann | 7 | 3.66% | 2 | 18.18% |
| Yichen Zhao | 2 | 1.05% | 1 | 9.09% |
| Arnaldo Carvalho de Melo | 2 | 1.05% | 1 | 9.09% |
| Vinicius Costa Gomes | 1 | 0.52% | 1 | 9.09% |
| Total | 191 | 100.00% | 11 | 100.00% |
EXPORT_SYMBOL(bt_accept_dequeue);
/* Datagram-style recvmsg shared by the Bluetooth protocols: return (at
 * most) one queued packet.
 *
 * If @len is smaller than the packet, the copy is truncated and
 * MSG_TRUNC is set; when the caller passed MSG_TRUNC the full packet
 * length is returned instead of the number of bytes copied.  MSG_OOB is
 * not supported.
 */
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	size_t skblen;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		/* A shut-down receive side reads as EOF, not an error. */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		/* Let the protocol fill in msg_name via its hook, if set. */
		if (msg->msg_name && bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 144 | 63.44% | 2 | 14.29% |
| Marcel Holtmann | 45 | 19.82% | 3 | 21.43% |
| Denis Kenzior | 18 | 7.93% | 1 | 7.14% |
| Andrei Emeltchenko | 5 | 2.20% | 1 | 7.14% |
| Stephen Hemminger | 4 | 1.76% | 1 | 7.14% |
| Ezequiel García | 4 | 1.76% | 1 | 7.14% |
| Arnaldo Carvalho de Melo | 4 | 1.76% | 2 | 14.29% |
| Neil Horman | 1 | 0.44% | 1 | 7.14% |
| Luiz Augusto von Dentz | 1 | 0.44% | 1 | 7.14% |
| David S. Miller | 1 | 0.44% | 1 | 7.14% |
| Total | 227 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL(bt_sock_recvmsg);
/* Sleep (interruptibly, for at most @timeo jiffies) until data arrives
 * on sk->sk_receive_queue, the socket errors out or is shut down, or a
 * signal is pending.
 *
 * Called with the sk lock held; the lock is dropped around the actual
 * sleep and re-taken before returning.  Returns the remaining timeout.
 */
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Mat Martineau | 128 | 96.97% | 1 | 50.00% |
| Eric Dumazet | 4 | 3.03% | 1 | 50.00% |
| Total | 132 | 100.00% | 2 | 100.00% |
/* Stream-style recvmsg: copy data across skb boundaries until @size
 * bytes have been read or at least the SO_RCVLOWAT target is satisfied.
 *
 * Without MSG_PEEK, consumed bytes are pulled from the head skb (walking
 * its fragment list when the linear part is exhausted) and a partially
 * read skb is requeued at the head of the receive queue.  MSG_OOB is not
 * supported.  Returns the number of bytes copied, or a negative errno if
 * nothing was copied.
 */
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			/* Queue empty: done if the low-water target is met,
			 * otherwise report errors/shutdown or wait for data.
			 */
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			/* Copy to user space failed: requeue untouched. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			/* Consume what was copied from the linear part, then
			 * from the fragment list if needed.
			 */
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				/* Unread bytes remain: requeue at the head. */
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Mat Martineau | 438 | 98.87% | 2 | 50.00% |
| Andrei Emeltchenko | 4 | 0.90% | 1 | 25.00% |
| David S. Miller | 1 | 0.23% | 1 | 25.00% |
| Total | 443 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
/* Poll helper for listening sockets: report readable (EPOLLIN) when at
 * least one child on the accept queue could be returned by accept(2),
 * i.e. it is BT_CONNECTED, or BT_CONNECT2 with deferred setup enabled
 * on the parent.
 */
static inline __poll_t bt_accept_poll(struct sock *parent)
{
	struct bt_sock *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &bt_sk(parent)->accept_q, accept_q) {
		struct sock *sk = (struct sock *)cur;
		bool deferred = test_bit(BT_SK_DEFER_SETUP,
					 &bt_sk(parent)->flags);

		if (sk->sk_state == BT_CONNECTED ||
		    (deferred && sk->sk_state == BT_CONNECT2))
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Marcel Holtmann | 66 | 79.52% | 2 | 33.33% |
| Gustavo Fernando Padovan | 7 | 8.43% | 1 | 16.67% |
| Geliang Tang | 7 | 8.43% | 1 | 16.67% |
| Linus Torvalds | 2 | 2.41% | 1 | 16.67% |
| Al Viro | 1 | 1.20% | 1 | 16.67% |
| Total | 83 | 100.00% | 6 | 100.00% |
/* poll(2)/select(2) handler shared by the Bluetooth socket protocols.
 *
 * Listening sockets only report readiness of their accept queue.  For
 * other sockets the usual readable/writable/error/hangup conditions
 * apply; sockets still in connection setup never report writability.
 */
__poll_t bt_sock_poll(struct file *file, struct socket *sock,
		      poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= EPOLLHUP;

	/* Connection still being established: not writable yet. */
	if (sk->sk_state == BT_CONNECT ||
	    sk->sk_state == BT_CONNECT2 ||
	    sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 100 | 45.45% | 1 | 7.14% |
| Linus Torvalds | 33 | 15.00% | 2 | 14.29% |
| Marcel Holtmann | 22 | 10.00% | 1 | 7.14% |
| Maxim Krasnyansky | 15 | 6.82% | 1 | 7.14% |
| Gustavo Fernando Padovan | 14 | 6.36% | 2 | 14.29% |
| Jacob E Keller | 12 | 5.45% | 2 | 14.29% |
| Davide Libenzi | 11 | 5.00% | 1 | 7.14% |
| Arnaldo Carvalho de Melo | 7 | 3.18% | 1 | 7.14% |
| Eric Dumazet | 4 | 1.82% | 2 | 14.29% |
| Al Viro | 2 | 0.91% | 1 | 7.14% |
| Total | 220 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL(bt_sock_poll);
/* ioctl(2) handler shared by the Bluetooth socket protocols.
 *
 * TIOCOUTQ: free space left in the send buffer (clamped at 0).
 * TIOCINQ:  length of the first packet in the receive queue.
 * SIOCGSTAMP/SIOCGSTAMPNS: timestamp of the last received packet.
 * Anything else is left to the caller via -ENOIOCTLCMD.
 */
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		/* Hold the sk lock so the head skb cannot vanish under us. */
		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Marcel Holtmann | 213 | 99.53% | 2 | 66.67% |
| Eric Dumazet | 1 | 0.47% | 1 | 33.33% |
| Total | 214 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(bt_sock_ioctl);
/* This function expects the sk lock to be held when called.
 *
 * Sleep (interruptibly) until sk->sk_state equals @state.  Returns 0 on
 * success, -EINPROGRESS if @timeo expires, sock_intr_errno() if a signal
 * arrives, or the socket error if one is set.  The sk lock is dropped
 * around each sleep and re-taken before returning.
 */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 120 | 82.76% | 2 | 28.57% |
| Peter Hurley | 11 | 7.59% | 1 | 14.29% |
| Eric Dumazet | 6 | 4.14% | 1 | 14.29% |
| Benjamin LaHaise | 4 | 2.76% | 1 | 14.29% |
| Arnaldo Carvalho de Melo | 3 | 2.07% | 1 | 14.29% |
| Marcel Holtmann | 1 | 0.69% | 1 | 14.29% |
| Total | 145 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(bt_sock_wait_state);
/* This function expects the sk lock to be held when called.
 *
 * Sleep (interruptibly) until the socket's BT_SK_SUSPEND flag clears,
 * using the send timeout (or no wait at all with O_NONBLOCK in @flags).
 * Returns 0 when ready, -EAGAIN on timeout, sock_intr_errno() on a
 * signal, or the socket error if one is set.  The sk lock is dropped
 * around each sleep and re-taken before returning.
 */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Johan Hedberg | 164 | 100.00% | 1 | 100.00% |
| Total | 164 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(bt_sock_wait_ready);
#ifdef CONFIG_PROC_FS
/* seq_file iterator over a protocol's socket list.  The list's read
 * lock is held from ->start() until ->stop().
 */
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 53 | 86.89% | 1 | 50.00% |
| Christoph Hellwig | 8 | 13.11% | 1 | 50.00% |
| Total | 61 | 100.00% | 2 | 100.00% |
/* Advance the socket-list iterator to the element after @v. */
static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	return seq_hlist_next(v, &l->head, pos);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 40 | 83.33% | 1 | 50.00% |
| Christoph Hellwig | 8 | 16.67% | 1 | 50.00% |
| Total | 48 | 100.00% | 2 | 100.00% |
/* Release the list lock taken in bt_seq_start(). */
static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	read_unlock(&l->lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 40 | 83.33% | 1 | 50.00% |
| Christoph Hellwig | 8 | 16.67% | 1 | 50.00% |
| Total | 48 | 100.00% | 2 | 100.00% |
/* Emit one line of the /proc/net listing: the column header for
 * SEQ_START_TOKEN, otherwise the per-socket counters.  A protocol may
 * append extra columns through its custom_seq_show() hook; that hook
 * and the trailing newline were previously duplicated in both branches
 * and are now emitted once after the if/else.
 */
static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   refcount_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);
	}

	/* Protocol-specific columns and newline, common to both branches. */
	if (l->custom_seq_show) {
		seq_putc(seq, ' ');
		l->custom_seq_show(seq, v);
	}
	seq_putc(seq, '\n');

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 162 | 84.82% | 1 | 14.29% |
| Andrei Emeltchenko | 10 | 5.24% | 1 | 14.29% |
| Eric W. Biedermann | 8 | 4.19% | 1 | 14.29% |
| Christoph Hellwig | 8 | 4.19% | 1 | 14.29% |
| Marcel Holtmann | 2 | 1.05% | 2 | 28.57% |
| Elena Reshetova | 1 | 0.52% | 1 | 14.29% |
| Total | 191 | 100.00% | 7 | 100.00% |
/* seq_file operations backing each protocol's /proc/net entry */
static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next = bt_seq_next,
	.stop = bt_seq_stop,
	.show = bt_seq_show,
};
/* Create a /proc/net entry named @name listing the sockets in @sk_list.
 *
 * @seq_show may be NULL, or a hook appending protocol-specific columns
 * to each output line.  Returns 0 on success or -ENOMEM.
 */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;

	sk_list->custom_seq_show = seq_show;

	pde = proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops,
				   sk_list);

	return pde ? 0 : -ENOMEM;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 56 | 84.85% | 1 | 25.00% |
| Gao Feng | 4 | 6.06% | 1 | 25.00% |
| Al Viro | 4 | 6.06% | 1 | 25.00% |
| Christoph Hellwig | 2 | 3.03% | 1 | 25.00% |
| Total | 66 | 100.00% | 4 | 100.00% |
/* Remove the /proc/net entry created by bt_procfs_init(). */
void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 19 | 79.17% | 1 | 50.00% |
| Gao Feng | 5 | 20.83% | 1 | 50.00% |
| Total | 24 | 100.00% | 2 | 100.00% |
#else
/* CONFIG_PROC_FS disabled: creating the /proc entry is a successful
 * no-op.
 */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 37 | 100.00% | 1 | 100.00% |
| Total | 37 | 100.00% | 1 | 100.00% |
/* CONFIG_PROC_FS disabled: nothing to remove. */
void bt_procfs_cleanup(struct net *net, const char *name)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Masatake YAMATO | 14 | 100.00% | 1 | 100.00% |
| Total | 14 | 100.00% | 1 | 100.00% |
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);
/* AF_BLUETOOTH family, registered with the socket layer in bt_init() */
static const struct net_proto_family bt_sock_family_ops = {
	.owner = THIS_MODULE,
	.family = PF_BLUETOOTH,
	.create = bt_sock_create,
};
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
__stringify(BT_SUBSYS_REVISION)
/* Subsystem bootstrap: selftest, debugfs root, LED triggers, sysfs
 * class, the AF_BLUETOOTH socket family, the HCI socket interface,
 * L2CAP, SCO and the management interface.
 *
 * On failure everything set up so far is torn down in reverse order.
 * Previously the LED triggers and the debugfs directory were leaked on
 * any error after their creation; the cleanup_led tail fixes that.
 */
static int __init bt_init(void)
{
	int err;

	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	bt_leds_init();

	err = bt_sysfs_init();
	if (err < 0)
		goto cleanup_led;

	err = sock_register(&bt_sock_family_ops);
	if (err)
		goto cleanup_sysfs;

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err)
		goto unregister_socket;

	err = l2cap_init();
	if (err)
		goto cleanup_socket;

	err = sco_init();
	if (err)
		goto cleanup_cap;

	err = mgmt_init();
	if (err)
		goto cleanup_sco;

	return 0;

cleanup_sco:
	sco_exit();
cleanup_cap:
	l2cap_exit();
cleanup_socket:
	hci_sock_cleanup();
unregister_socket:
	sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
	bt_sysfs_cleanup();
cleanup_led:
	bt_leds_cleanup();
	debugfs_remove_recursive(bt_debugfs);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Marcel Holtmann | 58 | 34.32% | 6 | 46.15% |
| Gustavo Fernando Padovan | 46 | 27.22% | 1 | 7.69% |
| Maksim Krasnyanskiy | 29 | 17.16% | 3 | 23.08% |
| SF Markus Elfring | 20 | 11.83% | 1 | 7.69% |
| Johan Hedberg | 15 | 8.88% | 1 | 7.69% |
| Eyal Birger | 1 | 0.59% | 1 | 7.69% |
| Total | 169 | 100.00% | 13 | 100.00% |
/* Module unload: tear everything down in reverse order of bt_init(). */
static void __exit bt_exit(void)
{
	mgmt_exit();

	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	bt_leds_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 15 | 40.54% | 1 | 12.50% |
| Marcel Holtmann | 12 | 32.43% | 4 | 50.00% |
| Gustavo Fernando Padovan | 6 | 16.22% | 1 | 12.50% |
| Johan Hedberg | 3 | 8.11% | 1 | 12.50% |
| Linus Torvalds | 1 | 2.70% | 1 | 12.50% |
| Total | 37 | 100.00% | 8 | 100.00% |
/* Initialise at subsys_initcall time; module metadata below. */
subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| Maksim Krasnyanskiy | 690 | 20.66% | 7 | 7.07% |
| Marcel Holtmann | 625 | 18.72% | 25 | 25.25% |
| Mat Martineau | 571 | 17.10% | 2 | 2.02% |
| Masatake YAMATO | 468 | 14.02% | 1 | 1.01% |
| Linus Torvalds | 261 | 7.82% | 5 | 5.05% |
| Johan Hedberg | 189 | 5.66% | 3 | 3.03% |
| Gustavo Fernando Padovan | 86 | 2.58% | 5 | 5.05% |
| Dave Young | 69 | 2.07% | 1 | 1.01% |
| Dean Jenkins | 59 | 1.77% | 2 | 2.02% |
| Arnaldo Carvalho de Melo | 34 | 1.02% | 4 | 4.04% |
| Christoph Hellwig | 34 | 1.02% | 1 | 1.01% |
| Eric W. Biedermann | 26 | 0.78% | 2 | 2.02% |
| SF Markus Elfring | 20 | 0.60% | 1 | 1.01% |
| Andrei Emeltchenko | 19 | 0.57% | 2 | 2.02% |
| Denis Kenzior | 18 | 0.54% | 1 | 1.01% |
| Octavian Purdila | 18 | 0.54% | 1 | 1.01% |
| Maxim Krasnyansky | 15 | 0.45% | 1 | 1.01% |
| Eric Dumazet | 15 | 0.45% | 4 | 4.04% |
| Geliang Tang | 14 | 0.42% | 1 | 1.01% |
| Jacob E Keller | 12 | 0.36% | 2 | 2.02% |
| Marcus Meissner | 12 | 0.36% | 1 | 1.01% |
| Davide Libenzi | 11 | 0.33% | 1 | 1.01% |
| Stephen Hemminger | 11 | 0.33% | 3 | 3.03% |
| Peter Hurley | 11 | 0.33% | 1 | 1.01% |
| Gao Feng | 9 | 0.27% | 2 | 2.02% |
| Al Viro | 7 | 0.21% | 2 | 2.02% |
| Eric Paris | 5 | 0.15% | 1 | 1.01% |
| Ezequiel García | 4 | 0.12% | 1 | 1.01% |
| Benjamin LaHaise | 4 | 0.12% | 1 | 1.01% |
| Ingo Molnar | 3 | 0.09% | 1 | 1.01% |
| David Herrmann | 2 | 0.06% | 1 | 1.01% |
| Jan Engelhardt | 2 | 0.06% | 1 | 1.01% |
| Hannes Frederic Sowa | 2 | 0.06% | 1 | 1.01% |
| Yichen Zhao | 2 | 0.06% | 1 | 1.01% |
| David S. Miller | 2 | 0.06% | 1 | 1.01% |
| Elena Reshetova | 1 | 0.03% | 1 | 1.01% |
| Luiz Augusto von Dentz | 1 | 0.03% | 1 | 1.01% |
| Neil Horman | 1 | 0.03% | 1 | 1.01% |
| Eyal Birger | 1 | 0.03% | 1 | 1.01% |
| Fabian Frederick | 1 | 0.03% | 1 | 1.01% |
| Randy Dunlap | 1 | 0.03% | 1 | 1.01% |
| Lin Zhang | 1 | 0.03% | 1 | 1.01% |
| Hideaki Yoshifuji / 吉藤英明 | 1 | 0.03% | 1 | 1.01% |
| Vinicius Costa Gomes | 1 | 0.03% | 1 | 1.01% |
| Ying Xue | | 0.00% | 0 | 0.00% |
| Total | 3339 | 100.00% | 99 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.