Release 4.11 fs/ncpfs/sock.c
/*
* linux/fs/ncpfs/sock.c
*
* Copyright (C) 1992, 1993 Rick Sladkey
*
* Modified 1995, 1996 by Volker Lendecke to be usable for ncp
* Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <net/scm.h>
#include <net/sock.h>
#include <linux/ipx.h>
#include <linux/poll.h>
#include <linux/file.h>
#include "ncp_fs.h"
#include "ncpsign_kernel.h"
static int _recv(struct socket *sock, void *buf, int size, unsigned flags)
{
struct msghdr msg = {NULL, };
struct kvec iov = {buf, size};
return kernel_recvmsg(sock, &msg, &iov, 1, size, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 30 | 52.63% | 1 | 33.33% |
Linus Torvalds (pre-git) | 27 | 47.37% | 2 | 66.67% |
Total | 57 | 100.00% | 3 | 100.00% |
/* Transmit @len bytes from @buff over @sock in one sendmsg call. */
static int _send(struct socket *sock, const void *buff, int len)
{
	struct kvec vec;
	struct msghdr msg = { .msg_flags = 0 };

	vec.iov_base = (void *) buff;
	vec.iov_len = len;
	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len);
	return sock_sendmsg(sock, &msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Al Viro | 54 | 69.23% | 2 | 40.00% |
Linus Torvalds (pre-git) | 24 | 30.77% | 3 | 60.00% |
Total | 78 | 100.00% | 5 | 100.00% |
/* One in-flight NCP request plus the state needed to match its reply. */
struct ncp_request_reply {
	struct list_head req;		/* link in server->tx.requests queue */
	wait_queue_head_t wq;		/* waiters blocked until status == RQ_DONE */
	atomic_t refs;			/* refcount; last ncp_req_put() frees */
	unsigned char* reply_buf;	/* caller's buffer for the reply copy */
	size_t datalen;			/* reply_buf capacity, then actual reply size */
	int result;			/* final RPC result / negative errno */
	/* RQ_ABANDONED: caller gave up while the request was on the wire */
	enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status;
	struct iov_iter from;		/* transmit position for (re)sends */
	struct kvec tx_iov[3];		/* [0] TCP header, [1] packet, [2] UDP signature */
	u_int16_t tx_type;		/* request type, sampled from the packet */
	u_int32_t sign[6];		/* TCP record header and/or packet signature */
};
/*
 * Allocate and minimally initialise a request slot.
 *
 * The caller owns the initial reference; release it with ncp_req_put().
 * Returns NULL on allocation failure.
 */
static inline struct ncp_request_reply* ncp_alloc_req(void)
{
	struct ncp_request_reply *req;

	/* sizeof(*req) keeps the allocation size tied to the variable's type */
	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	init_waitqueue_head(&req->wq);
	atomic_set(&req->refs, 1);
	req->status = RQ_IDLE;

	return req;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 66 | 100.00% | 1 | 100.00% |
Total | 66 | 100.00% | 1 | 100.00% |
/* Take an extra reference on @req. */
static void ncp_req_get(struct ncp_request_reply *req)
{
	atomic_inc(&req->refs);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 19 | 100.00% | 1 | 100.00% |
Total | 19 | 100.00% | 1 | 100.00% |
/* Drop a reference on @req; frees it when the last one goes away. */
static void ncp_req_put(struct ncp_request_reply *req)
{
	if (atomic_dec_and_test(&req->refs))
		kfree(req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
/*
 * Socket data-ready callback.  Chains to the socket's original callback
 * (saved in server->data_ready), then kicks the receive worker so the
 * socket is drained in process context.
 */
void ncp_tcp_data_ready(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->data_ready(sk);
	schedule_work(&server->rcv.tq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 21 | 58.33% | 1 | 20.00% |
Linus Torvalds (pre-git) | 11 | 30.56% | 1 | 20.00% |
Al Viro | 2 | 5.56% | 1 | 20.00% |
Arnaldo Carvalho de Melo | 1 | 2.78% | 1 | 20.00% |
Ingo Molnar | 1 | 2.78% | 1 | 20.00% |
Total | 36 | 100.00% | 5 | 100.00% |
/*
 * Socket error callback.  Chains to the saved callback, then schedules
 * the receive worker, which will observe the error on the next recv.
 */
void ncp_tcp_error_report(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	server->error_report(sk);
	schedule_work(&server->rcv.tq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 25 | 69.44% | 1 | 25.00% |
Linus Torvalds (pre-git) | 9 | 25.00% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 1 | 2.78% | 1 | 25.00% |
Ingo Molnar | 1 | 2.78% | 1 | 25.00% |
Total | 36 | 100.00% | 4 | 100.00% |
/*
 * Socket write-space callback: if a request is mid-transmission
 * (tx.creq set), kick the transmit worker to push more bytes.
 */
void ncp_tcp_write_space(struct sock *sk)
{
	struct ncp_server *server = sk->sk_user_data;

	/* We do not need any locking: we first set tx.creq, and then we do sendmsg,
	   not vice versa... */
	server->write_space(sk);
	if (server->tx.creq)
		schedule_work(&server->tx.tq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 33 | 73.33% | 1 | 25.00% |
Linus Torvalds (pre-git) | 10 | 22.22% | 1 | 25.00% |
Ingo Molnar | 1 | 2.22% | 1 | 25.00% |
Arnaldo Carvalho de Melo | 1 | 2.22% | 1 | 25.00% |
Total | 45 | 100.00% | 4 | 100.00% |
/*
 * UDP retransmit timer callback; @v carries the server pointer.
 * Defers the real work to process context via the timeout workqueue.
 */
void ncpdgram_timeout_call(unsigned long v)
{
	struct ncp_server *server = (struct ncp_server *) v;

	schedule_work(&server->timeout_tq);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 23 | 82.14% | 1 | 20.00% |
Linus Torvalds (pre-git) | 4 | 14.29% | 3 | 60.00% |
Ingo Molnar | 1 | 3.57% | 1 | 20.00% |
Total | 28 | 100.00% | 5 | 100.00% |
/*
 * Complete a request: record the result, copy the reply into the
 * caller's buffer (unless the caller already abandoned the request),
 * publish RQ_DONE and wake all waiters.  Consumes the queue's
 * reference on @req.
 */
static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result)
{
	req->result = result;
	if (req->status != RQ_ABANDONED)
		memcpy(req->reply_buf, server->rxbuf, req->datalen);
	req->status = RQ_DONE;
	wake_up_all(&req->wq);
	ncp_req_put(req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 33 | 48.53% | 1 | 25.00% |
Petr Vandrovec | 30 | 44.12% | 1 | 25.00% |
Linus Torvalds (pre-git) | 5 | 7.35% | 2 | 50.00% |
Total | 68 | 100.00% | 4 | 100.00% |
/*
 * Tear down the connection: invalidate it, kill the retransmit timer,
 * and fail every queued and in-flight request with -EIO.  Caller holds
 * rcv.creq_mutex.
 */
static void __abort_ncp_connection(struct ncp_server *server)
{
	struct ncp_request_reply *req;

	ncp_invalidate_conn(server);
	del_timer(&server->timeout_tm);
	/* fail everything still parked in the transmit queue */
	while (!list_empty(&server->tx.requests)) {
		req = list_entry(server->tx.requests.next, struct ncp_request_reply, req);

		list_del_init(&req->req);
		ncp_finish_request(server, req, -EIO);
	}
	/* fail the request currently awaiting its reply */
	req = server->rcv.creq;
	if (req) {
		server->rcv.creq = NULL;
		ncp_finish_request(server, req, -EIO);
		server->rcv.ptr = NULL;
		server->rcv.state = 0;
	}
	/* fail the request currently being transmitted */
	req = server->tx.creq;
	if (req) {
		server->tx.creq = NULL;
		ncp_finish_request(server, req, -EIO);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 113 | 70.62% | 1 | 11.11% |
Linus Torvalds (pre-git) | 44 | 27.50% | 7 | 77.78% |
Pierre Ossman | 3 | 1.88% | 1 | 11.11% |
Total | 160 | 100.00% | 9 | 100.00% |
/* Reassemble the 16-bit connection number from its split header bytes. */
static inline int get_conn_number(struct ncp_reply_header *rp)
{
	return (rp->conn_high << 8) | rp->conn_low;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 22 | 88.00% | 1 | 50.00% |
Linus Torvalds (pre-git) | 3 | 12.00% | 1 | 50.00% |
Total | 25 | 100.00% | 2 | 100.00% |
/*
 * Abort a request on behalf of its initiator (e.g. on a signal).
 * Caller holds rcv.creq_mutex.  A request already on the wire cannot
 * simply be finished here - it is marked RQ_ABANDONED and completed
 * later by the receive path (which then skips the reply copy).
 */
static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	/* If req is done, we got signal, but we also received answer... */
	switch (req->status) {
	case RQ_IDLE:
	case RQ_DONE:
		break;
	case RQ_QUEUED:
		/* not started yet: unlink and finish immediately */
		list_del_init(&req->req);
		ncp_finish_request(server, req, err);
		break;
	case RQ_INPROGRESS:
		req->status = RQ_ABANDONED;
		break;
	case RQ_ABANDONED:
		break;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 48 | 67.61% | 1 | 33.33% |
Linus Torvalds (pre-git) | 13 | 18.31% | 1 | 33.33% |
Pierre Ossman | 10 | 14.08% | 1 | 33.33% |
Total | 71 | 100.00% | 3 | 100.00% |
/* Locked wrapper around __ncp_abort_request(). */
static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err)
{
	mutex_lock(&server->rcv.creq_mutex);
	__ncp_abort_request(server, req, err);
	mutex_unlock(&server->rcv.creq_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 36 | 73.47% | 1 | 33.33% |
Linus Torvalds (pre-git) | 9 | 18.37% | 1 | 33.33% |
Ingo Molnar | 4 | 8.16% | 1 | 33.33% |
Total | 49 | 100.00% | 3 | 100.00% |
/* TCP flavour of a fatal protocol error: drop the whole connection. */
static inline void __ncptcp_abort(struct ncp_server *server)
{
	__abort_ncp_connection(server);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 15 | 88.24% | 1 | 50.00% |
Linus Torvalds (pre-git) | 2 | 11.76% | 1 | 50.00% |
Total | 17 | 100.00% | 2 | 100.00% |
/*
 * Fire one UDP datagram carrying the request; non-blocking.  The iterator
 * is copied by value so req->from keeps its position for retransmits.
 */
static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

	msg.msg_iter = req->from;
	return sock_sendmsg(sock, &msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 23 | 53.49% | 2 | 50.00% |
Al Viro | 18 | 41.86% | 1 | 25.00% |
Linus Torvalds (pre-git) | 2 | 4.65% | 1 | 25.00% |
Total | 43 | 100.00% | 4 | 100.00% |
/*
 * Push as much of the current transmit request down the TCP socket as
 * it will take without blocking.  Caller holds rcv.creq_mutex.  Once the
 * whole request is on the wire it migrates from tx.creq to rcv.creq,
 * where the receive state machine will match its reply.
 */
static void __ncptcp_try_send(struct ncp_server *server)
{
	struct ncp_request_reply *rq;
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
	int result;

	rq = server->tx.creq;
	if (!rq)
		return;

	msg.msg_iter = rq->from;
	result = sock_sendmsg(server->ncp_sock, &msg);

	if (result == -EAGAIN)
		return;	/* socket full; write_space callback will retry */

	if (result < 0) {
		pr_err("tcp: Send failed: %d\n", result);
		__ncp_abort_request(server, rq, result);
		return;
	}
	if (!msg_data_left(&msg)) {
		/* fully sent: hand over to the receive side */
		server->rcv.creq = rq;
		server->tx.creq = NULL;
		return;
	}
	/* partial send: remember how far we got */
	rq->from = msg.msg_iter;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 77 | 57.04% | 2 | 40.00% |
Al Viro | 29 | 21.48% | 1 | 20.00% |
Linus Torvalds (pre-git) | 27 | 20.00% | 1 | 20.00% |
Joe Perches | 2 | 1.48% | 1 | 20.00% |
Total | 135 | 100.00% | 5 | 100.00% |
/*
 * Stamp the outgoing request header: mark the request in progress and
 * fill in the connection number (split low/high) and the next sequence
 * number.
 */
static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h)
{
	req->status = RQ_INPROGRESS;
	h->sequence = ++server->sequence;
	h->conn_low = server->connection;
	h->conn_high = server->connection >> 8;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 33 | 60.00% | 1 | 25.00% |
Linus Torvalds (pre-git) | 22 | 40.00% | 3 | 75.00% |
Total | 55 | 100.00% | 4 | 100.00% |
/*
 * Transmit a request over UDP and arm the retransmit timer.  Caller
 * holds rcv.creq_mutex.  With packet signing active the signature is
 * appended as an extra iovec segment.
 */
static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	/* sign everything after the type byte (hence the +/-1 offsets) */
	signlen = sign_packet(server,
			req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_le32(len), req->sign);
	if (signlen) {
		/* NCP over UDP appends signature */
		req->tx_iov[2].iov_base = req->sign;
		req->tx_iov[2].iov_len = signlen;
	}
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
			req->tx_iov + 1, signlen ? 2 : 1, len + signlen);
	server->rcv.creq = req;
	server->timeout_last = server->m.time_out;
	server->timeout_retries = server->m.retry_count;
	ncpdgram_send(server->ncp_sock, req);
	mod_timer(&server->timeout_tm, jiffies + server->m.time_out);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 133 | 63.94% | 1 | 33.33% |
Al Viro | 43 | 20.67% | 1 | 33.33% |
Linus Torvalds (pre-git) | 32 | 15.38% | 1 | 33.33% |
Total | 208 | 100.00% | 3 | 100.00% |
#define NCP_TCP_XMIT_MAGIC (0x446D6454)
#define NCP_TCP_XMIT_VERSION (1)
#define NCP_TCP_RCVD_MAGIC (0x744E6350)
/*
 * Frame a request for the TCP transport and start sending it.  Caller
 * holds rcv.creq_mutex.  The 16-byte record header (magic, total length,
 * version, expected reply size) plus any signature is prepended via
 * tx_iov[0]; sign[0..3] double as that header.
 */
static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	size_t signlen, len = req->tx_iov[1].iov_len;
	struct ncp_request_header *h = req->tx_iov[1].iov_base;

	ncp_init_header(server, req, h);
	/* signlen = 16-byte record header plus any signature bytes */
	signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1,
			len - sizeof(struct ncp_request_header) + 1,
			cpu_to_be32(len + 24), req->sign + 4) + 16;
	req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC);
	req->sign[1] = htonl(len + signlen);
	req->sign[2] = htonl(NCP_TCP_XMIT_VERSION);
	req->sign[3] = htonl(req->datalen + 8);
	/* NCP over TCP prepends signature */
	req->tx_iov[0].iov_base = req->sign;
	req->tx_iov[0].iov_len = signlen;
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC,
		req->tx_iov, 2, len + signlen);

	server->tx.creq = req;
	__ncptcp_try_send(server);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 101 | 46.76% | 1 | 25.00% |
Linus Torvalds (pre-git) | 80 | 37.04% | 1 | 25.00% |
Al Viro | 35 | 16.20% | 2 | 50.00% |
Total | 216 | 100.00% | 4 | 100.00% |
/*
 * Kick off transmission of @req on whichever transport the mount uses.
 * Caller holds rcv.creq_mutex.
 */
static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	/*
	 * Snapshot the caller's request bytes into the server-owned transmit
	 * buffer, so the caller's storage may go away while we (re)send.
	 */
	memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len);
	req->tx_iov[1].iov_base = server->txbuf;

	if (server->ncp_sock->type != SOCK_STREAM)
		ncpdgram_start_request(server, req);
	else
		ncptcp_start_request(server, req);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pierre Ossman | 39 | 48.15% | 1 | 33.33% |
Petr Vandrovec | 29 | 35.80% | 1 | 33.33% |
Linus Torvalds (pre-git) | 13 | 16.05% | 1 | 33.33% |
Total | 81 | 100.00% | 3 | 100.00% |
/*
 * Submit a request: start it immediately if the wire is idle, otherwise
 * queue it behind the current one.  Takes a reference on @req for the
 * queue/receive path.  Returns 0, or -EIO if the connection is dead.
 */
static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req)
{
	int busy;

	mutex_lock(&server->rcv.creq_mutex);
	if (!ncp_conn_valid(server)) {
		mutex_unlock(&server->rcv.creq_mutex);
		pr_err("tcp: Server died\n");
		return -EIO;
	}
	ncp_req_get(req);
	busy = server->tx.creq || server->rcv.creq;
	if (busy) {
		/* another request owns the wire; park this one */
		req->status = RQ_QUEUED;
		list_add_tail(&req->req, &server->tx.requests);
	} else {
		__ncp_start_request(server, req);
	}
	mutex_unlock(&server->rcv.creq_mutex);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 74 | 56.92% | 1 | 20.00% |
Linus Torvalds (pre-git) | 41 | 31.54% | 1 | 20.00% |
Ingo Molnar | 8 | 6.15% | 1 | 20.00% |
Pierre Ossman | 5 | 3.85% | 1 | 20.00% |
Joe Perches | 2 | 1.54% | 1 | 20.00% |
Total | 130 | 100.00% | 5 | 100.00% |
/*
 * The current request is done; clear rcv.creq and, if anything is
 * queued, start the next request.  Caller holds rcv.creq_mutex.
 */
static void __ncp_next_request(struct ncp_server *server)
{
	struct ncp_request_reply *next;

	server->rcv.creq = NULL;
	if (!list_empty(&server->tx.requests)) {
		next = list_entry(server->tx.requests.next, struct ncp_request_reply, req);
		list_del_init(&next->req);
		__ncp_start_request(server, next);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 64 | 88.89% | 1 | 50.00% |
Linus Torvalds (pre-git) | 8 | 11.11% | 1 | 50.00% |
Total | 72 | 100.00% | 2 | 100.00% |
/*
 * Forward an out-of-band packet to the userspace info socket, framed as
 * an 8-byte (length, id) big-endian header followed by the payload.
 * Silently does nothing when no info socket is attached.
 */
static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len)
{
	struct msghdr msg = { .msg_flags = MSG_NOSIGNAL };
	__be32 hdr[2];
	struct kvec iov[2];

	if (!server->info_sock)
		return;

	hdr[0] = cpu_to_be32(len + 8);
	hdr[1] = cpu_to_be32(id);
	iov[0].iov_base = hdr;
	iov[0].iov_len = 8;
	iov[1].iov_base = (void *) data;
	iov[1].iov_len = len;
	iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE, iov, 2, len + 8);
	sock_sendmsg(server->info_sock, &msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 77 | 59.69% | 1 | 25.00% |
Al Viro | 52 | 40.31% | 3 | 75.00% |
Total | 129 | 100.00% | 4 | 100.00% |
/*
 * Work-queue handler draining the UDP socket.  Each datagram's header is
 * peeked first so watchdogs and unexpected packets can be classified
 * without consuming a pending reply.
 */
void ncpdgram_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);
	struct socket* sock;

	sock = server->ncp_sock;

	while (1) {
		struct ncp_reply_header reply;
		int result;

		/* peek so the datagram stays queued until we know its type */
		result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT);
		if (result < 0) {
			break;
		}
		if (result >= sizeof(reply)) {
			struct ncp_request_reply *req;

			if (reply.type == NCP_WATCHDOG) {
				unsigned char buf[10];

				if (server->connection != get_conn_number(&reply)) {
					goto drop;
				}
				result = _recv(sock, buf, sizeof(buf), MSG_DONTWAIT);
				if (result < 0) {
					ncp_dbg(1, "recv failed with %d\n", result);
					continue;
				}
				if (result < 10) {
					ncp_dbg(1, "too short (%u) watchdog packet\n", result);
					continue;
				}
				if (buf[9] != '?') {
					ncp_dbg(1, "bad signature (%02X) in watchdog packet\n", buf[9]);
					continue;
				}
				/* answer the keepalive: flip '?' to 'Y' and echo it back */
				buf[9] = 'Y';
				_send(sock, buf, sizeof(buf));
				continue;
			}
			if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) {
				/* not ours: hand the raw packet to userspace via the info socket */
				result = _recv(sock, server->unexpected_packet.data, sizeof(server->unexpected_packet.data), MSG_DONTWAIT);
				if (result < 0) {
					continue;
				}
				info_server(server, 0, server->unexpected_packet.data, result);
				continue;
			}
			mutex_lock(&server->rcv.creq_mutex);
			req = server->rcv.creq;
			/* match on sequence + connection, except ALLOC_SLOT which predates both */
			if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence &&
					server->connection == get_conn_number(&reply)))) {
				if (reply.type == NCP_POSITIVE_ACK) {
					/* server is alive but busy; reset the retry budget */
					server->timeout_retries = server->m.retry_count;
					server->timeout_last = NCP_MAX_RPC_TIMEOUT;
					mod_timer(&server->timeout_tm, jiffies + NCP_MAX_RPC_TIMEOUT);
				} else if (reply.type == NCP_REPLY) {
					result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
					if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
						if (result < 8 + 8) {
							result = -EIO;
						} else {
							unsigned int hdrl;

							/* trailing 8 bytes are the signature */
							result -= 8;
							hdrl = sock->sk->sk_family == AF_INET ? 8 : 6;
							if (sign_verify_reply(server, server->rxbuf + hdrl, result - hdrl, cpu_to_le32(result), server->rxbuf + result)) {
								pr_info("Signature violation\n");
								result = -EIO;
							}
						}
					}
#endif
					del_timer(&server->timeout_tm);
					server->rcv.creq = NULL;
					ncp_finish_request(server, req, result);
					__ncp_next_request(server);
					mutex_unlock(&server->rcv.creq_mutex);
					continue;
				}
			}
			mutex_unlock(&server->rcv.creq_mutex);
		}
drop:;
		/* consume and discard the datagram we peeked at */
		_recv(sock, &reply, sizeof(reply), MSG_DONTWAIT);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 534 | 92.07% | 3 | 27.27% |
David Howells | 14 | 2.41% | 1 | 9.09% |
Joe Perches | 11 | 1.90% | 2 | 18.18% |
Pierre Ossman | 8 | 1.38% | 1 | 9.09% |
Ingo Molnar | 6 | 1.03% | 1 | 9.09% |
Linus Torvalds (pre-git) | 5 | 0.86% | 1 | 9.09% |
Al Viro | 1 | 0.17% | 1 | 9.09% |
Arnaldo Carvalho de Melo | 1 | 0.17% | 1 | 9.09% |
Total | 580 | 100.00% | 11 | 100.00% |
/*
 * UDP retransmit handler.  Caller holds rcv.creq_mutex.  A still-pending
 * timer means the receive path already rearmed it, so there is nothing
 * to do.  Soft mounts give up after m.retry_count attempts; the timeout
 * backs off exponentially up to NCP_MAX_RPC_TIMEOUT.
 */
static void __ncpdgram_timeout_proc(struct ncp_server *server)
{
	/* If timer is pending, we are processing another request... */
	if (!timer_pending(&server->timeout_tm)) {
		struct ncp_request_reply* req;

		req = server->rcv.creq;
		if (req) {
			int timeout;

			if (server->m.flags & NCP_MOUNT_SOFT) {
				if (server->timeout_retries-- == 0) {
					__ncp_abort_request(server, req, -ETIMEDOUT);
					return;
				}
			}
			/* Ignore errors */
			ncpdgram_send(server->ncp_sock, req);
			timeout = server->timeout_last << 1;
			if (timeout > NCP_MAX_RPC_TIMEOUT) {
				timeout = NCP_MAX_RPC_TIMEOUT;
			}
			server->timeout_last = timeout;
			mod_timer(&server->timeout_tm, jiffies + timeout);
		}
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 129 | 100.00% | 1 | 100.00% |
Total | 129 | 100.00% | 1 | 100.00% |
/* Work-queue wrapper: run the UDP timeout handler under creq_mutex. */
void ncpdgram_timeout_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, timeout_tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncpdgram_timeout_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 34 | 68.00% | 1 | 33.33% |
David Howells | 12 | 24.00% | 1 | 33.33% |
Ingo Molnar | 4 | 8.00% | 1 | 33.33% |
Total | 50 | 100.00% | 3 | 100.00% |
/*
 * Pull up to @len bytes from the server's TCP socket.  A NULL @buffer
 * discards the data into a scratch buffer (used to skip unwanted or
 * over-long records).  Returns the byte count, or a negative errno.
 */
static int do_tcp_rcv(struct ncp_server *server, void *buffer, size_t len)
{
	int result;

	if (buffer) {
		result = _recv(server->ncp_sock, buffer, len, MSG_DONTWAIT);
	} else {
		static unsigned char dummy[1024];

		if (len > sizeof(dummy)) {
			len = sizeof(dummy);
		}
		result = _recv(server->ncp_sock, dummy, len, MSG_DONTWAIT);
	}
	if (result < 0) {
		return result;
	}
	if (result > len) {
		/* %d, not %u: result is a signed int (non-negative here) */
		pr_err("tcp: bug in recvmsg (%d > %zu)\n", result, len);
		return -EIO;
	}
	return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 119 | 98.35% | 1 | 33.33% |
Joe Perches | 1 | 0.83% | 1 | 33.33% |
Alexey Dobriyan | 1 | 0.83% | 1 | 33.33% |
Total | 121 | 100.00% | 3 | 100.00% |
/*
 * TCP receive state machine.  Caller holds rcv.creq_mutex.  rcv.state:
 *   0 - a 10-byte record header is in rcv.buf; classify the record
 *   1 - a reply body is in rxbuf; verify sequence/connection/signature
 *   2 - unwanted data skipped; read the next record header
 *   3 - an over-long reply was skipped; fail the request, then as 2
 *   4 - (signing) extended header read; resume classification
 *   5 - unexpected packet body read; forward it to the info socket
 * Returns 0 when the socket is drained, -EIO on fatal protocol errors.
 */
static int __ncptcp_rcv_proc(struct ncp_server *server)
{
	/* We have to check the result, so store the complete header */
	while (1) {
		int result;
		struct ncp_request_reply *req;
		int datalen;
		int type;

		/* first fill the current read target; NULL rcv.ptr means "discard" */
		while (server->rcv.len) {
			result = do_tcp_rcv(server, server->rcv.ptr, server->rcv.len);
			if (result == -EAGAIN) {
				return 0;
			}
			if (result <= 0) {
				/* error or EOF: fail the in-flight request, or the connection */
				req = server->rcv.creq;
				if (req) {
					__ncp_abort_request(server, req, -EIO);
				} else {
					__ncptcp_abort(server);
				}
				if (result < 0) {
					pr_err("tcp: error in recvmsg: %d\n", result);
				} else {
					ncp_dbg(1, "tcp: EOF\n");
				}
				return -EIO;
			}
			if (server->rcv.ptr) {
				server->rcv.ptr += result;
			}
			server->rcv.len -= result;
		}
		switch (server->rcv.state) {
		case 0:
			if (server->rcv.buf.magic != htonl(NCP_TCP_RCVD_MAGIC)) {
				pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic));
				__ncptcp_abort(server);
				return -EIO;
			}
			datalen = ntohl(server->rcv.buf.len) & 0x0FFFFFFF;
			if (datalen < 10) {
				pr_err("tcp: Unexpected reply len %d\n", datalen);
				__ncptcp_abort(server);
				return -EIO;
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active) {
				if (datalen < 18) {
					pr_err("tcp: Unexpected reply len %d\n", datalen);
					__ncptcp_abort(server);
					return -EIO;
				}
				/* signed records carry 8 extra header bytes; fetch them (state 4) */
				server->rcv.buf.len = datalen - 8;
				server->rcv.ptr = (unsigned char*)&server->rcv.buf.p1;
				server->rcv.len = 8;
				server->rcv.state = 4;
				break;
			}
#endif
			type = ntohs(server->rcv.buf.type);
#ifdef CONFIG_NCPFS_PACKET_SIGNING
cont:;		/* re-entry point from state 4 with datalen/type recomputed */
#endif
			if (type != NCP_REPLY) {
				/* not a reply: capture it for userspace if it fits */
				if (datalen - 8 <= sizeof(server->unexpected_packet.data)) {
					*(__u16*)(server->unexpected_packet.data) = htons(type);
					server->unexpected_packet.len = datalen - 8;

					server->rcv.state = 5;
					server->rcv.ptr = server->unexpected_packet.data + 2;
					server->rcv.len = datalen - 10;
					break;
				}
				ncp_dbg(1, "tcp: Unexpected NCP type %02X\n", type);
skipdata2:;
				server->rcv.state = 2;
skipdata:;
				/* NULL ptr: discard datalen-10 remaining payload bytes */
				server->rcv.ptr = NULL;
				server->rcv.len = datalen - 10;
				break;
			}
			req = server->rcv.creq;
			if (!req) {
				ncp_dbg(1, "Reply without appropriate request\n");
				goto skipdata2;
			}
			if (datalen > req->datalen + 8) {
				pr_err("tcp: Unexpected reply len %d (expected at most %zd)\n", datalen, req->datalen + 8);
				server->rcv.state = 3;
				goto skipdata;
			}
			/* reconstruct the reply header: type word, then the wire bytes */
			req->datalen = datalen - 8;
			((struct ncp_reply_header*)server->rxbuf)->type = NCP_REPLY;
			server->rcv.ptr = server->rxbuf + 2;
			server->rcv.len = datalen - 10;
			server->rcv.state = 1;
			break;
#ifdef CONFIG_NCPFS_PACKET_SIGNING
		case 4:
			datalen = server->rcv.buf.len;
			type = ntohs(server->rcv.buf.type2);
			goto cont;
#endif
		case 1:
			req = server->rcv.creq;
			if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) {
				if (((struct ncp_reply_header*)server->rxbuf)->sequence != server->sequence) {
					pr_err("tcp: Bad sequence number\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
				if ((((struct ncp_reply_header*)server->rxbuf)->conn_low | (((struct ncp_reply_header*)server->rxbuf)->conn_high << 8)) != server->connection) {
					pr_err("tcp: Connection number mismatch\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#ifdef CONFIG_NCPFS_PACKET_SIGNING
			if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) {
				if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) {
					pr_err("tcp: Signature violation\n");
					__ncp_abort_request(server, req, -EIO);
					return -EIO;
				}
			}
#endif
			ncp_finish_request(server, req, req->datalen);
nextreq:;
			__ncp_next_request(server);
			/* fall through: set up for the next record header */
		case 2:
next:;
			server->rcv.ptr = (unsigned char*)&server->rcv.buf;
			server->rcv.len = 10;
			server->rcv.state = 0;
			break;
		case 3:
			/* oversized reply fully skipped: fail the request, then resync */
			ncp_finish_request(server, server->rcv.creq, -EIO);
			goto nextreq;
		case 5:
			info_server(server, 0, server->unexpected_packet.data, server->unexpected_packet.len);
			goto next;
		}
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 871 | 91.88% | 3 | 37.50% |
Pierre Ossman | 44 | 4.64% | 1 | 12.50% |
Joe Perches | 27 | 2.85% | 2 | 25.00% |
Bob Miller | 5 | 0.53% | 1 | 12.50% |
Alexey Dobriyan | 1 | 0.11% | 1 | 12.50% |
Total | 948 | 100.00% | 8 | 100.00% |
/* Work-queue wrapper: run the TCP receive state machine under creq_mutex. */
void ncp_tcp_rcv_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, rcv.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_rcv_proc(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 34 | 65.38% | 1 | 33.33% |
David Howells | 14 | 26.92% | 1 | 33.33% |
Ingo Molnar | 4 | 7.69% | 1 | 33.33% |
Total | 52 | 100.00% | 3 | 100.00% |
/* Work-queue wrapper: continue a partial TCP transmit under creq_mutex. */
void ncp_tcp_tx_proc(struct work_struct *work)
{
	struct ncp_server *server =
		container_of(work, struct ncp_server, tx.tq);

	mutex_lock(&server->rcv.creq_mutex);
	__ncptcp_try_send(server);
	mutex_unlock(&server->rcv.creq_mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 34 | 65.38% | 1 | 33.33% |
David Howells | 14 | 26.92% | 1 | 33.33% |
Ingo Molnar | 4 | 7.69% | 1 | 33.33% |
Total | 52 | 100.00% | 3 | 100.00% |
/*
 * Queue one RPC (already assembled in server->packet) and sleep until it
 * completes or a signal arrives.  On interruption the request is aborted
 * (or marked abandoned if already on the wire) before returning -EINTR.
 * Returns the reply length, or a negative errno.
 */
static int do_ncp_rpc_call(struct ncp_server *server, int size,
		unsigned char* reply_buf, int max_reply_size)
{
	int result;
	struct ncp_request_reply *req;

	req = ncp_alloc_req();
	if (!req)
		return -ENOMEM;

	req->reply_buf = reply_buf;
	req->datalen = max_reply_size;
	req->tx_iov[1].iov_base = server->packet;
	req->tx_iov[1].iov_len = size;
	/* remember the request type: ALLOC/DEALLOC_SLOT get special handling */
	req->tx_type = *(u_int16_t*)server->packet;

	result = ncp_add_request(server, req);
	if (result < 0)
		goto out;

	if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) {
		ncp_abort_request(server, req, -EINTR);
		result = -EINTR;
		goto out;
	}
	result = req->result;

out:
	ncp_req_put(req);
	return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 108 | 66.67% | 1 | 33.33% |
Pierre Ossman | 47 | 29.01% | 1 | 33.33% |
Linus Torvalds (pre-git) | 7 | 4.32% | 1 | 33.33% |
Total | 162 | 100.00% | 3 | 100.00% |
/*
* We need the server to be locked here, so check!
*/
/*
 * Issue one NCP RPC with SIGKILL (and, for interruptible mounts with
 * default handlers, SIGINT/SIGQUIT) as the only deliverable signals.
 * The server must be locked by the caller (ncp_lock_server()).
 * Returns the reply length, or a negative errno.
 *
 * Fix: the five '&current' expressions below had been mangled into the
 * HTML entity sequence '¤t' (a U+00A4 plus 't'), which is not valid C;
 * restored to '&current'.
 */
static int ncp_do_request(struct ncp_server *server, int size,
		void* reply, int max_reply_size)
{
	int result;

	if (server->lock == 0) {
		pr_err("Server not locked!\n");
		return -EIO;
	}
	if (!ncp_conn_valid(server)) {
		return -EIO;
	}
	{
		sigset_t old_set;
		unsigned long mask, flags;

		spin_lock_irqsave(&current->sighand->siglock, flags);
		old_set = current->blocked;
		if (current->flags & PF_EXITING)
			mask = 0;
		else
			mask = sigmask(SIGKILL);
		if (server->m.flags & NCP_MOUNT_INTR) {
			/* FIXME: This doesn't seem right at all.  So, like,
			   we can't handle SIGINT and get whatever to stop?
			   What if we've blocked it ourselves?  What about
			   alarms?  Why, in fact, are we mucking with the
			   sigmask at all? -- r~ */
			if (current->sighand->action[SIGINT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGINT);
			if (current->sighand->action[SIGQUIT - 1].sa.sa_handler == SIG_DFL)
				mask |= sigmask(SIGQUIT);
		}
		siginitsetinv(&current->blocked, mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);

		result = do_ncp_rpc_call(server, size, reply, max_reply_size);

		/* restore the caller's signal mask */
		spin_lock_irqsave(&current->sighand->siglock, flags);
		current->blocked = old_set;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	ncp_dbg(2, "do_ncp_rpc_call returned %d\n", result);
	return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 217 | 85.43% | 4 | 33.33% |
Linus Torvalds | 13 | 5.12% | 1 | 8.33% |
Ingo Molnar | 8 | 3.15% | 1 | 8.33% |
Joe Perches | 5 | 1.97% | 2 | 16.67% |
Chris Wedgwood | 4 | 1.57% | 1 | 8.33% |
Petr Vandrovec | 3 | 1.18% | 1 | 8.33% |
Andrew Morton | 2 | 0.79% | 1 | 8.33% |
David S. Miller | 2 | 0.79% | 1 | 8.33% |
Total | 254 | 100.00% | 12 | 100.00% |
/* ncp_do_request assures that at least a complete reply header is
* received. It assumes that server->current_size contains the ncp
* request size
*/
/*
 * Send a plain NCP request of type @function and copy its reply header
 * and completion/connection status into the server.  Returns the
 * completion code (0 on success) or a negative errno from transport.
 */
int ncp_request2(struct ncp_server *server, int function,
		void* rpl, int size)
{
	struct ncp_request_header *h;
	struct ncp_reply_header* reply = rpl;
	int result;

	h = (struct ncp_request_header *) (server->packet);
	if (server->has_subfunction != 0) {
		/* subfunction length word = payload size minus header and the word itself */
		*(__u16 *) & (h->data[0]) = htons(server->current_size - sizeof(*h) - 2);
	}
	h->type = NCP_REQUEST;
	/*
	 * The server shouldn't know or care what task is making a
	 * request, so we always use the same task number.
	 */
	h->task = 2; /* (current->pid) & 0xff; */
	h->function = function;

	result = ncp_do_request(server, server->current_size, reply, size);
	if (result < 0) {
		ncp_dbg(1, "ncp_request_error: %d\n", result);
		goto out;
	}
	server->completion = reply->completion_code;
	server->conn_status = reply->connection_state;
	server->reply_size = result;
	server->ncp_reply_size = result - sizeof(struct ncp_reply_header);

	result = reply->completion_code;
	if (result != 0)
		ncp_vdbg("completion code=%x\n", result);
out:
	return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 184 | 91.54% | 5 | 62.50% |
Petr Vandrovec | 12 | 5.97% | 1 | 12.50% |
Joe Perches | 5 | 2.49% | 2 | 25.00% |
Total | 201 | 100.00% | 8 | 100.00% |
/*
 * Allocate a connection slot on the server.  On success the assigned
 * connection number is stored in server->connection.  Returns 0 or a
 * negative errno.
 */
int ncp_connect(struct ncp_server *server)
{
	struct ncp_request_header *h;
	int result;

	server->connection = 0xFFFF;	/* no connection yet */
	server->sequence = 255;		/* wraps to 0 on the first request */
	h = (struct ncp_request_header *) (server->packet);
	h->type = NCP_ALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;

	result = ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
	if (result < 0)
		goto out;
	/* reassemble as in get_conn_number(): low byte | high byte << 8 */
	server->connection = h->conn_low | (h->conn_high << 8);
	result = 0;
out:
	return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 105 | 89.74% | 4 | 80.00% |
Petr Vandrovec | 12 | 10.26% | 1 | 20.00% |
Total | 117 | 100.00% | 5 | 100.00% |
/*
 * Release our connection slot on the server.  Returns the RPC result
 * from ncp_do_request().
 */
int ncp_disconnect(struct ncp_server *server)
{
	struct ncp_request_header *h = (struct ncp_request_header *) (server->packet);

	h->type = NCP_DEALLOC_SLOT_REQUEST;
	h->task = 2; /* see above */
	h->function = 0;
	return ncp_do_request(server, sizeof(*h), server->packet, server->packet_size);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 67 | 100.00% | 4 | 100.00% |
Total | 67 | 100.00% | 4 | 100.00% |
/*
 * Serialise use of server->packet and the request path; pairs with
 * ncp_unlock_server().  The lock flag is a sanity check only.
 */
void ncp_lock_server(struct ncp_server *server)
{
	mutex_lock(&server->mutex);
	if (server->lock)
		pr_warn("%s: was locked!\n", __func__);
	server->lock = 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 31 | 83.78% | 3 | 60.00% |
Joe Perches | 4 | 10.81% | 1 | 20.00% |
Ingo Molnar | 2 | 5.41% | 1 | 20.00% |
Total | 37 | 100.00% | 5 | 100.00% |
/* Release the server lock taken by ncp_lock_server(). */
void ncp_unlock_server(struct ncp_server *server)
{
	if (!server->lock) {
		pr_warn("%s: was not locked!\n", __func__);
		return;
	}
	server->lock = 0;
	mutex_unlock(&server->mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 35 | 85.37% | 3 | 60.00% |
Joe Perches | 4 | 9.76% | 1 | 20.00% |
Ingo Molnar | 2 | 4.88% | 1 | 20.00% |
Total | 41 | 100.00% | 5 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Petr Vandrovec | 2905 | 60.45% | 5 | 10.87% |
Linus Torvalds (pre-git) | 1115 | 23.20% | 16 | 34.78% |
Pierre Ossman | 310 | 6.45% | 1 | 2.17% |
Al Viro | 268 | 5.58% | 6 | 13.04% |
Joe Perches | 68 | 1.41% | 3 | 6.52% |
David Howells | 54 | 1.12% | 1 | 2.17% |
Ingo Molnar | 49 | 1.02% | 4 | 8.70% |
Linus Torvalds | 14 | 0.29% | 2 | 4.35% |
Bob Miller | 5 | 0.10% | 1 | 2.17% |
Chris Wedgwood | 4 | 0.08% | 1 | 2.17% |
Arnaldo Carvalho de Melo | 4 | 0.08% | 1 | 2.17% |
Tejun Heo | 3 | 0.06% | 1 | 2.17% |
David S. Miller | 2 | 0.04% | 1 | 2.17% |
Andrew Morton | 2 | 0.04% | 1 | 2.17% |
Alexey Dobriyan | 2 | 0.04% | 1 | 2.17% |
Dave Jones | 1 | 0.02% | 1 | 2.17% |
Total | 4806 | 100.00% | 46 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.