Release 4.7 net/bluetooth/af_bluetooth.c
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth address family and sockets. */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <asm/ioctls.h>
#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>
#include "selftest.h"
/* Bluetooth sockets */
#define BT_MAX_PROTO 8

/* Registered protocol families, indexed by BTPROTO_* number, guarded by
 * bt_proto_lock (readers: socket creation; writers: (un)register).
 */
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

/* Per-protocol lockdep classes and names for the socket lock (sk_lock),
 * so lockdep can distinguish e.g. L2CAP from RFCOMM lock usage.  Order
 * must match the BTPROTO_* numbering.
 */
static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

/* Same, for the spinlock protecting the socket (slock) */
static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
/* Re-key @sk's lock and slock lockdep classes to the per-BTPROTO names
 * above, so each Bluetooth protocol gets its own lockdep class.
 */
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	/* Reclassification is only legal while nobody else can lock the sk */
	BUG_ON(!sock_allow_reclassification(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
			bt_key_strings[proto], &bt_lock_key[proto]);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| marcel holtmann | 47 | 85.45% | 2 | 50.00% |
| octavian purdila | 6 | 10.91% | 1 | 25.00% |
| hannes frederic sowa | 2 | 3.64% | 1 | 25.00% |
| Total | 55 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_reclassify_lock);
/* Register @ops as the handler for one BTPROTO_* slot.
 *
 * Returns 0 on success, -EINVAL for an out-of-range @proto and -EEXIST
 * if the slot is already occupied.
 */
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int ret = -EEXIST;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (!bt_proto[proto]) {
		bt_proto[proto] = ops;
		ret = 0;
	}

	write_unlock(&bt_proto_lock);

	return ret;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 36 | 52.94% | 1 | 16.67% |
marcel holtmann | marcel holtmann | 22 | 32.35% | 1 | 16.67% |
maksim krasnyanskiy | maksim krasnyanskiy | 5 | 7.35% | 2 | 33.33% |
marcus meissner | marcus meissner | 4 | 5.88% | 1 | 16.67% |
stephen hemminger | stephen hemminger | 1 | 1.47% | 1 | 16.67% |
| Total | 68 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(bt_sock_register);
/* Clear the handler registered for @proto; out-of-range values are
 * silently ignored.
 */
void bt_sock_unregister(int proto)
{
	if (proto >= 0 && proto < BT_MAX_PROTO) {
		write_lock(&bt_proto_lock);
		bt_proto[proto] = NULL;
		write_unlock(&bt_proto_lock);
	}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 16 | 42.11% | 1 | 16.67% |
marcel holtmann | marcel holtmann | 12 | 31.58% | 1 | 16.67% |
marcus meissner | marcus meissner | 4 | 10.53% | 1 | 16.67% |
maksim krasnyanskiy | maksim krasnyanskiy | 4 | 10.53% | 2 | 33.33% |
david herrmann | david herrmann | 2 | 5.26% | 1 | 16.67% |
| Total | 38 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(bt_sock_unregister);
/* PF_BLUETOOTH socket(2) backend: dispatch creation to the registered
 * protocol's create handler.  Only the initial network namespace is
 * supported.
 */
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	/* Unlocked peek: try to autoload the protocol module before taking
	 * bt_proto_lock, since request_module() may sleep.
	 */
	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		/* Module reference pins the handler across the create hook */
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 52 | 35.62% | 2 | 16.67% |
maksim krasnyanskiy | maksim krasnyanskiy | 40 | 27.40% | 2 | 16.67% |
eric w. biederman | eric w. biederman | 18 | 12.33% | 1 | 8.33% |
marcel holtmann | marcel holtmann | 13 | 8.90% | 2 | 16.67% |
octavian purdila | octavian purdila | 7 | 4.79% | 1 | 8.33% |
dave young | dave young | 6 | 4.11% | 1 | 8.33% |
eric paris | eric paris | 5 | 3.42% | 1 | 8.33% |
marcus meissner | marcus meissner | 4 | 2.74% | 1 | 8.33% |
randy dunlap | randy dunlap | 1 | 0.68% | 1 | 8.33% |
| Total | 146 | 100.00% | 12 | 100.00% |
/* Add @sk to the per-protocol socket list @l under its writer lock. */
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 32 | 78.05% | 1 | 25.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 5 | 12.20% | 1 | 25.00% |
maksim krasnyanskiy | maksim krasnyanskiy | 2 | 4.88% | 1 | 25.00% |
gustavo f. padovan | gustavo f. padovan | 2 | 4.88% | 1 | 25.00% |
| Total | 41 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_link);
/* Remove @sk from the per-protocol socket list @l under its writer lock. */
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
linus torvalds | linus torvalds | 29 | 80.56% | 1 | 25.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 3 | 8.33% | 1 | 25.00% |
gustavo f. padovan | gustavo f. padovan | 2 | 5.56% | 1 | 25.00% |
maksim krasnyanskiy | maksim krasnyanskiy | 2 | 5.56% | 1 | 25.00% |
| Total | 36 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_unlink);
/* Queue child @sk on @parent's accept queue.
 *
 * Takes a reference on @sk; it is dropped again by bt_accept_unlink().
 */
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 42 | 67.74% | 2 | 50.00% |
linus torvalds | linus torvalds | 19 | 30.65% | 1 | 25.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 1 | 1.61% | 1 | 25.00% |
| Total | 62 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_accept_enqueue);
/* Remove @sk from its parent's accept queue and drop the reference taken
 * by bt_accept_enqueue().  @sk must currently be on an accept queue
 * (bt_sk(sk)->parent is dereferenced unconditionally).
 */
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	/* pairs with sock_hold() in bt_accept_enqueue() */
	sock_put(sk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 40 | 71.43% | 2 | 50.00% |
linus torvalds | linus torvalds | 14 | 25.00% | 1 | 25.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 2 | 3.57% | 1 | 25.00% |
| Total | 56 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_accept_unlink);
/* Pop the first ready child socket off @parent's accept queue.
 *
 * A child is handed out when it is BT_CONNECTED, when no @newsock was
 * supplied, or when the parent has BT_SK_DEFER_SETUP set.  BT_CLOSED
 * children found on the way are unlinked and skipped.  If @newsock is
 * given, the returned sock is grafted onto it.
 *
 * Returns the dequeued sock (with the enqueue reference transferred to
 * the caller) or NULL if nothing is ready.
 */
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		/* Each child is inspected under its own socket lock */
		lock_sock(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 115 | 80.42% | 2 | 20.00% |
marcel holtmann | marcel holtmann | 7 | 4.90% | 2 | 20.00% |
geliang tang | geliang tang | 7 | 4.90% | 1 | 10.00% |
gustavo padovan | gustavo padovan | 6 | 4.20% | 1 | 10.00% |
gustavo f. padovan | gustavo f. padovan | 3 | 2.10% | 1 | 10.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 2 | 1.40% | 1 | 10.00% |
yichen zhao | yichen zhao | 2 | 1.40% | 1 | 10.00% |
vinicius costa gomes | vinicius costa gomes | 1 | 0.70% | 1 | 10.00% |
| Total | 143 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(bt_accept_dequeue);
/* Generic datagram recvmsg for Bluetooth sockets.
 *
 * Pulls one skb off the receive queue, copies at most @len bytes into
 * @msg (setting MSG_TRUNC if the datagram was larger) and lets the
 * protocol fill in msg_name via its skb_msg_name hook, if any.
 *
 * Returns the number of bytes copied, 0 on an empty queue after
 * RCV_SHUTDOWN (orderly EOF), or a negative errno.  MSG_OOB is not
 * supported.
 */
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		/* No data after shutdown means EOF, not an error */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		if (bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 144 | 70.59% | 2 | 18.18% |
marcel holtmann | marcel holtmann | 45 | 22.06% | 3 | 27.27% |
andrei emeltchenko | andrei emeltchenko | 5 | 2.45% | 1 | 9.09% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 4 | 1.96% | 2 | 18.18% |
stephen hemminger | stephen hemminger | 4 | 1.96% | 1 | 9.09% |
david s. miller | david s. miller | 1 | 0.49% | 1 | 9.09% |
neil horman | neil horman | 1 | 0.49% | 1 | 9.09% |
| Total | 204 | 100.00% | 11 | 100.00% |
EXPORT_SYMBOL(bt_sock_recvmsg);
/* Sleep until data arrives on @sk's receive queue, or until a socket
 * error, shutdown, pending signal or timeout expiry.
 *
 * Called with the socket lock held; the lock is dropped while sleeping
 * and re-taken before returning.  Returns the remaining timeout.
 */
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		/* State must be set before the condition checks to avoid
		 * missing a wakeup between test and schedule.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
mat martineau | mat martineau | 128 | 96.97% | 1 | 50.00% |
eric dumazet | eric dumazet | 4 | 3.03% | 1 | 50.00% |
| Total | 132 | 100.00% | 2 | 100.00% |
/* Stream-style recvmsg for Bluetooth sockets (used e.g. by RFCOMM).
 *
 * Copies up to @size bytes out of queued skbs, blocking (subject to
 * MSG_DONTWAIT / rcvtimeo) until at least the low-water-mark target is
 * met.  Unless MSG_PEEK is set, consumed bytes are pulled from the skb
 * head and its frag list; a partially consumed skb is requeued at the
 * head of the receive queue.
 *
 * Returns bytes copied, or a negative errno if nothing was copied.
 * MSG_OOB is not supported.
 */
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	/* MSG_WAITALL raises the target to the full request size */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			/* Queue empty: stop if satisfied, on error/shutdown,
			 * or when non-blocking; otherwise wait for data.
			 */
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			/* Copy fault: put the skb back untouched */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			/* Consume @chunk bytes: first from the linear head,
			 * then from the frag list, adjusting skb accounting
			 * (len/data_len) as fragments are drained.
			 */
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				/* Leftover bytes: requeue for the next read */
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
mat martineau | mat martineau | 438 | 98.87% | 2 | 50.00% |
andrei emeltchenko | andrei emeltchenko | 4 | 0.90% | 1 | 25.00% |
david s. miller | david s. miller | 1 | 0.23% | 1 | 25.00% |
| Total | 443 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
/* Poll helper for listening sockets: report readable if any queued child
 * is ready to be accepted.
 */
static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct bt_sock *bt, *tmp;

	list_for_each_entry_safe(bt, tmp, &bt_sk(parent)->accept_q, accept_q) {
		struct sock *sk = (struct sock *)bt;

		if (sk->sk_state == BT_CONNECTED)
			return POLLIN | POLLRDNORM;
		if (sk->sk_state == BT_CONNECT2 &&
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
marcel holtmann | marcel holtmann | 70 | 83.33% | 2 | 50.00% |
gustavo padovan | gustavo padovan | 7 | 8.33% | 1 | 25.00% |
geliang tang | geliang tang | 7 | 8.33% | 1 | 25.00% |
| Total | 84 | 100.00% | 4 | 100.00% |
/* poll(2)/select(2) backend for Bluetooth sockets.
 *
 * Listening sockets delegate to bt_accept_poll(); otherwise the mask is
 * built from error queue, shutdown, receive queue and write space, with
 * writability suppressed while in a transitional connect/config state or
 * while BT_SK_SUSPEND is set.
 */
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	/* Never report writable while a connection is being set up */
	if (sk->sk_state == BT_CONNECT ||
	    sk->sk_state == BT_CONNECT2 ||
	    sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		/* Ask for a SIGIO once write space becomes available */
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 129 | 58.11% | 2 | 15.38% |
marcel holtmann | marcel holtmann | 22 | 9.91% | 1 | 7.69% |
maxim krasnyansky | maxim krasnyansky | 15 | 6.76% | 1 | 7.69% |
gustavo padovan | gustavo padovan | 14 | 6.31% | 2 | 15.38% |
jacob e keller | jacob e keller | 13 | 5.86% | 2 | 15.38% |
davide libenzi | davide libenzi | 12 | 5.41% | 1 | 7.69% |
eric dumazet | eric dumazet | 9 | 4.05% | 3 | 23.08% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 8 | 3.60% | 1 | 7.69% |
| Total | 222 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(bt_sock_poll);
/* Common ioctl handling for Bluetooth sockets.
 *
 * Supports TIOCOUTQ (remaining send buffer space, i.e. sk_sndbuf minus
 * currently allocated write memory, clamped at 0), TIOCINQ (length of
 * the skb at the head of the receive queue, 0 if empty) and the
 * SIOCGSTAMP/SIOCGSTAMPNS timestamp queries.  Anything else returns
 * -ENOIOCTLCMD so the caller can fall through to other handlers.
 */
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		/* Lock held so the head skb cannot vanish under us */
		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
marcel holtmann | marcel holtmann | 213 | 99.53% | 2 | 66.67% |
eric dumazet | eric dumazet | 1 | 0.47% | 1 | 33.33% |
| Total | 214 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(bt_sock_ioctl);
/* This function expects the sk lock to be held when called */
/* This function expects the sk lock to be held when called */
/* Sleep until sk->sk_state equals @state.
 *
 * The socket lock is dropped while sleeping and re-taken afterwards.
 * Returns 0 on success, -EINPROGRESS if the timeout expires, a
 * signal-derived errno if interrupted, or a pending socket error.
 */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 120 | 82.76% | 2 | 28.57% |
peter hurley | peter hurley | 11 | 7.59% | 1 | 14.29% |
eric dumazet | eric dumazet | 6 | 4.14% | 1 | 14.29% |
benjamin lahaise | benjamin lahaise | 4 | 2.76% | 1 | 14.29% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 3 | 2.07% | 1 | 14.29% |
marcel holtmann | marcel holtmann | 1 | 0.69% | 1 | 14.29% |
| Total | 145 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(bt_sock_wait_state);
/* This function expects the sk lock to be held when called */
/* This function expects the sk lock to be held when called */
/* Sleep until the BT_SK_SUSPEND flag is cleared, using the socket's send
 * timeout (non-blocking if O_NONBLOCK is set in @flags).
 *
 * The socket lock is dropped while sleeping and re-taken afterwards.
 * Returns 0 when no longer suspended, -EAGAIN on timeout, a
 * signal-derived errno if interrupted, or a pending socket error.
 */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
johan hedberg | johan hedberg | 164 | 100.00% | 1 | 100.00% |
| Total | 164 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(bt_sock_wait_ready);
#ifdef CONFIG_PROC_FS
/* Iterator state stashed in seq_file->private for /proc socket listings */
struct bt_seq_state {
	struct bt_sock_list *l;
};
/* seq_file start: take the list's reader lock (released in bt_seq_stop)
 * and position the iterator, yielding SEQ_START_TOKEN for the header.
 */
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *state = seq->private;
	struct bt_sock_list *list = state->l;

	read_lock(&list->lock);
	return seq_hlist_start_head(&list->head, *pos);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 64 | 100.00% | 1 | 100.00% |
| Total | 64 | 100.00% | 1 | 100.00% |
/* seq_file next: advance the iterator to the following list node */
static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *state = seq->private;

	return seq_hlist_next(v, &state->l->head, pos);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 51 | 100.00% | 1 | 100.00% |
| Total | 51 | 100.00% | 1 | 100.00% |
/* seq_file stop: drop the reader lock taken in bt_seq_start() */
static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *state = seq->private;
	struct bt_sock_list *list = state->l;

	read_unlock(&list->lock);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 51 | 100.00% | 1 | 100.00% |
| Total | 51 | 100.00% | 1 | 100.00% |
/* seq_file show: emit the header line for SEQ_START_TOKEN, otherwise one
 * line per socket.  Either way the protocol's custom_seq_show hook (if
 * set) may append extra columns before the trailing newline.
 */
static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *state = seq->private;
	struct bt_sock_list *list = state->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);
	}

	/* Common tail for both header and data rows */
	if (list->custom_seq_show) {
		seq_putc(seq, ' ');
		list->custom_seq_show(seq, v);
	}
	seq_putc(seq, '\n');

	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 174 | 89.69% | 1 | 20.00% |
andrei emeltchenko | andrei emeltchenko | 10 | 5.15% | 1 | 20.00% |
eric w. biederman | eric w. biederman | 8 | 4.12% | 1 | 20.00% |
marcel holtmann | marcel holtmann | 2 | 1.03% | 2 | 40.00% |
| Total | 194 | 100.00% | 5 | 100.00% |
/* seq_file iteration ops for the per-protocol /proc listings */
static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};
/* open() for the /proc entry: allocate iterator state and bind it to the
 * socket list stored in the proc entry's data.
 */
static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_seq_state *state;

	state = __seq_open_private(file, &bt_seq_ops, sizeof(*state));
	if (!state)
		return -ENOMEM;

	state->l = PDE_DATA(inode);
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 65 | 97.01% | 1 | 33.33% |
andrei emeltchenko | andrei emeltchenko | 1 | 1.49% | 1 | 33.33% |
al viro | al viro | 1 | 1.49% | 1 | 33.33% |
| Total | 67 | 100.00% | 3 | 100.00% |
/* file_operations for the seq_file-backed /proc socket listings */
static const struct file_operations bt_fops = {
	.open = bt_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};
/* Create a /proc/net/<name> entry listing the sockets in @sk_list, with
 * @seq_show as optional per-protocol extra-column callback.
 * Returns 0 on success or -ENOMEM.
 */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;

	sk_list->custom_seq_show = seq_show;

	pde = proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list);
	return pde ? 0 : -ENOMEM;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 56 | 84.85% | 1 | 25.00% |
al viro | al viro | 6 | 9.09% | 2 | 50.00% |
gao feng | gao feng | 4 | 6.06% | 1 | 25.00% |
| Total | 66 | 100.00% | 4 | 100.00% |
/* Remove the /proc/net/<name> entry created by bt_procfs_init() */
void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 19 | 79.17% | 1 | 50.00% |
gao feng | gao feng | 5 | 20.83% | 1 | 50.00% |
| Total | 24 | 100.00% | 2 | 100.00% |
#else
/* CONFIG_PROC_FS=n stub: nothing to create, always succeeds */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 37 | 100.00% | 1 | 100.00% |
| Total | 37 | 100.00% | 1 | 100.00% |
/* CONFIG_PROC_FS=n stub: nothing to remove */
void bt_procfs_cleanup(struct net *net, const char *name)
{
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
masatake yamato | masatake yamato | 14 | 100.00% | 1 | 100.00% |
| Total | 14 | 100.00% | 1 | 100.00% |
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);
/* PF_BLUETOOTH family descriptor handed to sock_register() in bt_init() */
static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

/* Root of the Bluetooth debugfs hierarchy, shared with sub-modules */
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);
/* Subsystem init: self-tests, sysfs, PF_BLUETOOTH family registration,
 * then the core components (HCI sockets, L2CAP, SCO, mgmt).  Later
 * failures unwind the earlier steps in reverse order.
 */
static int __init bt_init(void)
{
	int err;

	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", BT_SUBSYS_VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	/* debugfs is best-effort: failure here is deliberately ignored */
	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	err = mgmt_init();
	if (err < 0) {
		sco_exit();
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
marcel holtmann | marcel holtmann | 65 | 35.71% | 5 | 45.45% |
gustavo f. padovan | gustavo f. padovan | 62 | 34.07% | 1 | 9.09% |
maksim krasnyanskiy | maksim krasnyanskiy | 32 | 17.58% | 3 | 27.27% |
johan hedberg | johan hedberg | 22 | 12.09% | 1 | 9.09% |
eyal birger | eyal birger | 1 | 0.55% | 1 | 9.09% |
| Total | 182 | 100.00% | 11 | 100.00% |
/* Subsystem teardown: mirrors bt_init() in reverse order */
static void __exit bt_exit(void)
{
	mgmt_exit();

	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 16 | 47.06% | 1 | 16.67% |
marcel holtmann | marcel holtmann | 9 | 26.47% | 3 | 50.00% |
gustavo f. padovan | gustavo f. padovan | 6 | 17.65% | 1 | 16.67% |
johan hedberg | johan hedberg | 3 | 8.82% | 1 | 16.67% |
| Total | 34 | 100.00% | 6 | 100.00% |
subsys_initcall(bt_init);
module_exit(bt_exit);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " BT_SUBSYS_VERSION);
MODULE_VERSION(BT_SUBSYS_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
maksim krasnyanskiy | maksim krasnyanskiy | 718 | 21.29% | 7 | 7.95% |
marcel holtmann | marcel holtmann | 624 | 18.50% | 24 | 27.27% |
masatake yamato | masatake yamato | 588 | 17.43% | 1 | 1.14% |
mat martineau | mat martineau | 571 | 16.93% | 2 | 2.27% |
linus torvalds | linus torvalds | 229 | 6.79% | 3 | 3.41% |
johan hedberg | johan hedberg | 196 | 5.81% | 3 | 3.41% |
gustavo f. padovan | gustavo f. padovan | 75 | 2.22% | 3 | 3.41% |
dave young | dave young | 69 | 2.05% | 1 | 1.14% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 35 | 1.04% | 4 | 4.55% |
al viro | al viro | 35 | 1.04% | 3 | 3.41% |
gustavo padovan | gustavo padovan | 27 | 0.80% | 2 | 2.27% |
eric w. biederman | eric w. biederman | 26 | 0.77% | 2 | 2.27% |
eric dumazet | eric dumazet | 20 | 0.59% | 4 | 4.55% |
andrei emeltchenko | andrei emeltchenko | 20 | 0.59% | 3 | 3.41% |
octavian purdila | octavian purdila | 18 | 0.53% | 1 | 1.14% |
maxim krasnyansky | maxim krasnyansky | 15 | 0.44% | 1 | 1.14% |
geliang tang | geliang tang | 14 | 0.42% | 1 | 1.14% |
jacob e keller | jacob e keller | 13 | 0.39% | 2 | 2.27% |
davide libenzi | davide libenzi | 12 | 0.36% | 1 | 1.14% |
marcus meissner | marcus meissner | 12 | 0.36% | 1 | 1.14% |
peter hurley | peter hurley | 11 | 0.33% | 1 | 1.14% |
stephen hemminger | stephen hemminger | 11 | 0.33% | 3 | 3.41% |
gao feng | gao feng | 9 | 0.27% | 2 | 2.27% |
eric paris | eric paris | 5 | 0.15% | 1 | 1.14% |
benjamin lahaise | benjamin lahaise | 4 | 0.12% | 1 | 1.14% |
yichen zhao | yichen zhao | 2 | 0.06% | 1 | 1.14% |
david s. miller | david s. miller | 2 | 0.06% | 1 | 1.14% |
hannes frederic sowa | hannes frederic sowa | 2 | 0.06% | 1 | 1.14% |
jan engelhardt | jan engelhardt | 2 | 0.06% | 1 | 1.14% |
david herrmann | david herrmann | 2 | 0.06% | 1 | 1.14% |
eyal birger | eyal birger | 1 | 0.03% | 1 | 1.14% |
fabian frederick | fabian frederick | 1 | 0.03% | 1 | 1.14% |
vinicius costa gomes | vinicius costa gomes | 1 | 0.03% | 1 | 1.14% |
randy dunlap | randy dunlap | 1 | 0.03% | 1 | 1.14% |
hideaki yoshifuji | hideaki yoshifuji | 1 | 0.03% | 1 | 1.14% |
neil horman | neil horman | 1 | 0.03% | 1 | 1.14% |
| Total | 3373 | 100.00% | 88 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.