Release 4.11 net/iucv/af_iucv.c
/*
* IUCV protocol stack for Linux on zSeries
*
* Copyright IBM Corp. 2006, 2009
*
* Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
* Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* PM functions:
* Ursula Braun <ursula.braun@de.ibm.com>
*/
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>
#include <net/iucv/af_iucv.h>
#define VERSION "1.2"
static char iucv_userid[80];
static const struct proto_ops iucv_sock_ops;
static struct proto iucv_proto = {
.name = "AF_IUCV",
.owner = THIS_MODULE,
.obj_size = sizeof(struct iucv_sock),
};
static struct iucv_interface *pr_iucv;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
#define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class))
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
DEFINE_WAIT(__wait); \
long __timeo = timeo; \
ret = 0; \
prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
while (!(condition)) { \
if (!__timeo) { \
ret = -EAGAIN; \
break; \
} \
if (signal_pending(current)) { \
ret = sock_intr_errno(__timeo); \
break; \
} \
release_sock(sk); \
__timeo = schedule_timeout(__timeo); \
lock_sock(sk); \
ret = sock_error(sk); \
if (ret) \
break; \
} \
finish_wait(sk_sleep(sk), &__wait); \
} while (0)
#define iucv_sock_wait(sk, condition, timeo) \
({ \
int __ret = 0; \
if (!(condition)) \
__iucv_sock_wait(sk, condition, timeo, __ret); \
__ret; \
})
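For illustration, this macro pair is used later in this file (see iucv_sock_close()); a minimal usage sketch, where sk, timeo, and err are caller locals:
/* Example: wait until the socket reaches IUCV_CLOSED, giving up on
 * timeout expiry, a pending signal, or a socket error.
 */
err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CLOSED, 0), timeo);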
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);
static struct iucv_sock_list iucv_sk_list = {
.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
.autobind_name = ATOMIC_INIT(0)
};
static struct iucv_handler af_iucv_handler = {
.path_pending = iucv_callback_connreq,
.path_complete = iucv_callback_connack,
.path_severed = iucv_callback_connrej,
.message_pending = iucv_callback_rx,
.message_complete = iucv_callback_txdone,
.path_quiesced = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
memcpy(dst, src, 8);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jennifer Hunt | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static inline void low_nmcpy(unsigned char *dst, char *src)
{
memcpy(&dst[8], src, 8);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jennifer Hunt | 29 | 100.00% | 1 | 100.00% |
Total | 29 | 100.00% | 1 | 100.00% |
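A short sketch of how these two helpers combine (as iucv_sever_path() does below) to build the 16-byte IUCV user-data field:
/* Illustration: user_data[0..7] takes the remote name, user_data[8..15]
 * the local name; the result is converted to EBCDIC for the z/VM IUCV
 * interface.
 */
unsigned char user_data[16];

low_nmcpy(user_data, iucv->src_name);	/* bytes 8..15 */
high_nmcpy(user_data, iucv->dst_name);	/* bytes 0..7  */
ASCEBC(user_data, sizeof(user_data));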
static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 25 | 100.00% | 1 | 100.00% |
Total | 25 | 100.00% | 1 | 100.00% |
static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
/**
* afiucv_pm_freeze() - Freeze PM callback
* @dev: AFIUCV dummy device
*
* Sever all established IUCV communication paths
*/
static int afiucv_pm_freeze(struct device *dev)
{
struct iucv_sock *iucv;
struct sock *sk;
int err = 0;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, &iucv_sk_list.head) {
iucv = iucv_sk(sk);
switch (sk->sk_state) {
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_CONNECTED:
iucv_sever_path(sk, 0);
break;
case IUCV_OPEN:
case IUCV_BOUND:
case IUCV_LISTEN:
case IUCV_CLOSED:
default:
break;
}
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
}
read_unlock(&iucv_sk_list.lock);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 126 | 100.00% | 2 | 100.00% |
Total | 126 | 100.00% | 2 | 100.00% |
/**
* afiucv_pm_restore_thaw() - Thaw and restore PM callback
* @dev: AFIUCV dummy device
*
* Socket clean-up after freeze
*/
static int afiucv_pm_restore_thaw(struct device *dev)
{
struct sock *sk;
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
read_lock(&iucv_sk_list.lock);
sk_for_each(sk, &iucv_sk_list.head) {
switch (sk->sk_state) {
case IUCV_CONNECTED:
sk->sk_err = EPIPE;
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
break;
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_LISTEN:
case IUCV_BOUND:
case IUCV_OPEN:
default:
break;
}
}
read_unlock(&iucv_sk_list.lock);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 102 | 100.00% | 1 | 100.00% |
Total | 102 | 100.00% | 1 | 100.00% |
static const struct dev_pm_ops afiucv_pm_ops = {
.prepare = afiucv_pm_prepare,
.complete = afiucv_pm_complete,
.freeze = afiucv_pm_freeze,
.thaw = afiucv_pm_restore_thaw,
.restore = afiucv_pm_restore_thaw,
};
static struct device_driver af_iucv_driver = {
.owner = THIS_MODULE,
.name = "afiucv",
.bus = NULL,
.pm = &afiucv_pm_ops,
};
/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;
/**
* iucv_msg_length() - Returns the length of an iucv message.
* @msg: Pointer to struct iucv_message, MUST NOT be NULL
*
* The function returns the length of the specified IUCV message @msg,
* whether the message data is stored in a buffer or in the parameter
* list (PRMDATA).
*
* For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
* data:
* PRMDATA[0..6] socket data (max 7 bytes);
* PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
*
* The socket data length is computed by subtracting the socket data length
* value from 0xFF.
* If the socket data length is greater than 7, then PRMDATA can be used for
* special notifications (see iucv_sock_shutdown); in that case, the function
* returns 8.
*
* Use this function to allocate socket buffers to store iucv message data.
*/
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
size_t datalen;
if (msg->flags & IUCV_IPRMDATA) {
datalen = 0xff - msg->rmmsg[7];
return (datalen < 8) ? datalen : 8;
}
return msg->length;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hendrik Brueckner | 52 | 100.00% | 1 | 100.00% |
Total | 52 | 100.00% | 1 | 100.00% |
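A worked example of the length convention described above:
/* Sending 5 bytes of socket data in the parameter list stores
 * PRMDATA[7] = 0xff - 5 = 0xfa; iucv_msg_length() then computes
 * datalen = 0xff - 0xfa = 5 and returns 5 (datalen < 8).
 * The iprm_shutdown message defined above carries PRMDATA[7] = 0x01,
 * so datalen = 0xff - 0x01 = 0xfe >= 8 and the function returns 8,
 * flagging a special notification rather than socket data.
 */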
/**
* iucv_sock_in_state() - check for specific states
* @sk: sock structure
* @state: first iucv sk state
* @state2: second iucv sk state
*
* Returns true if the socket is in either the first or the second state.
*/
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
return (sk->sk_state == state || sk->sk_state == state2);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hendrik Brueckner | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
/**
* iucv_below_msglim() - function to check if messages can be sent
* @sk: sock structure
*
* Returns true if the send queue length is lower than the message limit.
* Always returns true if the socket is not connected (no iucv path for
* checking the message limit).
*/
static inline int iucv_below_msglim(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
if (sk->sk_state != IUCV_CONNECTED)
return 1;
if (iucv->transport == AF_IUCV_TRANS_IUCV)
return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
else
return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
(atomic_read(&iucv->pendings) <= 0));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hendrik Brueckner | 50 | 56.82% | 1 | 50.00% |
Ursula Braun | 38 | 43.18% | 1 | 50.00% |
Total | 88 | 100.00% | 2 | 100.00% |
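A minimal sketch of how this check typically pairs with iucv_sock_wait() in the send path (the sendmsg implementation is outside this excerpt; timeo and noblock are assumed caller locals):
/* Block until the peer's message limit admits another message. */
timeo = sock_sndtimeo(sk, noblock);
err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);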
/**
* iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
*/
static void iucv_sock_wake_msglim(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_all(&wq->wait);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hendrik Brueckner | 35 | 63.64% | 1 | 33.33% |
Eric Dumazet | 19 | 34.55% | 1 | 33.33% |
Herbert Xu | 1 | 1.82% | 1 | 33.33% |
Total | 55 | 100.00% | 3 | 100.00% |
/**
* afiucv_hs_send() - send a message through HiperSockets transport
*/
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
struct sk_buff *skb, u8 flags)
{
struct iucv_sock *iucv = iucv_sk(sock);
struct af_iucv_trans_hdr *phs_hdr;
struct sk_buff *nskb;
int err, confirm_recv = 0;
memset(skb->head, 0, ETH_HLEN);
phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
sizeof(struct af_iucv_trans_hdr));
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_push(skb, ETH_HLEN);
skb_reset_mac_header(skb);
memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));
phs_hdr->magic = ETH_P_AF_IUCV;
phs_hdr->version = 1;
phs_hdr->flags = flags;
if (flags == AF_IUCV_FLAG_SYN)
phs_hdr->window = iucv->msglimit;
else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
confirm_recv = atomic_read(&iucv->msg_recv);
phs_hdr->window = confirm_recv;
if (confirm_recv)
phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
}
memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
skb->dev = iucv->hs_dev;
if (!skb->dev)
return -ENODEV;
if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
return -ENETDOWN;
if (skb->len > skb->dev->mtu) {
if (sock->sk_type == SOCK_SEQPACKET)
return -EMSGSIZE;
else
skb_trim(skb, skb->dev->mtu);
}
skb->protocol = ETH_P_AF_IUCV;
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
skb_queue_tail(&iucv->send_skb_q, nskb);
err = dev_queue_xmit(skb);
if (net_xmit_eval(err)) {
skb_unlink(nskb, &iucv->send_skb_q);
kfree_skb(nskb);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
}
return net_xmit_eval(err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 361 | 72.49% | 2 | 33.33% |
Jennifer Hunt | 134 | 26.91% | 2 | 33.33% |
Hendrik Brueckner | 3 | 0.60% | 2 | 33.33% |
Total | 498 | 100.00% | 6 | 100.00% |
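For orientation, a sketch of the transport header fields populated above; the authoritative definition of struct af_iucv_trans_hdr lives in <net/iucv/af_iucv.h>, so the exact field widths are not shown here:
/*
 * struct af_iucv_trans_hdr (sketch of the fields used above):
 *	magic                   ETH_P_AF_IUCV
 *	version                 1
 *	flags                   AF_IUCV_FLAG_SYN / _WIN / _FIN / ...
 *	window                  receive-window confirmation count
 *	destUserID/destAppName  EBCDIC destination identifiers
 *	srcUserID/srcAppName    EBCDIC source identifiers
 *	iucv_hdr                embedded copy of struct iucv_message
 */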
static struct sock *__iucv_get_sock_by_name(char *nm)
{
struct sock *sk;
sk_for_each(sk, &iucv_sk_list.head)
if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
return sk;
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jennifer Hunt | 24 | 51.06% | 1 | 50.00% |
Ursula Braun | 23 | 48.94% | 1 | 50.00% |
Total | 47 | 100.00% | 2 | 100.00% |
static void iucv_sock_destruct(struct sock *sk)
{
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
sk_mem_reclaim(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive iucv socket %p\n", sk);
return;
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(sk->sk_wmem_queued);
WARN_ON(sk->sk_forward_alloc);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 76 | 86.36% | 2 | 50.00% |
Jennifer Hunt | 7 | 7.95% | 1 | 25.00% |
Ursula Braun-Krahl | 5 | 5.68% | 1 | 25.00% |
Total | 88 | 100.00% | 4 | 100.00% |
/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
/* Close non-accepted connections */
while ((sk = iucv_accept_dequeue(parent, NULL))) {
iucv_sock_close(sk);
iucv_sock_kill(sk);
}
parent->sk_state = IUCV_CLOSED;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 37 | 77.08% | 1 | 20.00% |
Ursula Braun-Krahl | 5 | 10.42% | 1 | 20.00% |
Jennifer Hunt | 4 | 8.33% | 2 | 40.00% |
Hendrik Brueckner | 2 | 4.17% | 1 | 20.00% |
Total | 48 | 100.00% | 5 | 100.00% |
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
iucv_sock_unlink(&iucv_sk_list, sk);
sock_set_flag(sk, SOCK_DEAD);
sock_put(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 26 | 56.52% | 1 | 33.33% |
Jennifer Hunt | 18 | 39.13% | 1 | 33.33% |
Pavel Emelyanov | 2 | 4.35% | 1 | 33.33% |
Total | 46 | 100.00% | 3 | 100.00% |
/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
unsigned char user_data[16];
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
if (iucv->path) {
iucv->path = NULL;
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
ASCEBC(user_data, sizeof(user_data));
pr_iucv->path_sever(path, user_data);
} else
pr_iucv->path_sever(path, NULL);
iucv_path_free(path);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 102 | 91.07% | 2 | 66.67% |
Jennifer Hunt | 10 | 8.93% | 1 | 33.33% |
Total | 112 | 100.00% | 3 | 100.00% |
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
int err = 0;
int blen;
struct sk_buff *skb;
u8 shutdown = 0;
blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
if (sk->sk_shutdown & SEND_SHUTDOWN) {
/* controlling flags should be sent anyway */
shutdown = sk->sk_shutdown;
sk->sk_shutdown &= RCV_SHUTDOWN;
}
skb = sock_alloc_send_skb(sk, blen, 1, &err);
if (skb) {
skb_reserve(skb, blen);
err = afiucv_hs_send(NULL, sk, skb, flags);
}
if (shutdown)
sk->sk_shutdown = shutdown;
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 118 | 100.00% | 2 | 100.00% |
Total | 118 | 100.00% | 2 | 100.00% |
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
unsigned long timeo;
int err = 0;
lock_sock(sk);
switch (sk->sk_state) {
case IUCV_LISTEN:
iucv_sock_cleanup_listen(sk);
break;
case IUCV_CONNECTED:
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
case IUCV_DISCONN: /* fall through */
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
timeo = sk->sk_lingertime;
else
timeo = IUCV_DISCONN_TIMEOUT;
iucv_sock_wait(sk,
iucv_sock_in_state(sk, IUCV_CLOSED, 0),
timeo);
}
case IUCV_CLOSING: /* fall through */
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
sk->sk_err = ECONNRESET;
sk->sk_state_change(sk);
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
default: /* fall through */
iucv_sever_path(sk, 1);
}
if (iucv->hs_dev) {
dev_put(iucv->hs_dev);
iucv->hs_dev = NULL;
sk->sk_bound_dev_if = 0;
}
/* mark socket for deletion by iucv_sock_kill() */
sock_set_flag(sk, SOCK_ZAPPED);
release_sock(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 184 | 72.16% | 5 | 55.56% |
Jennifer Hunt | 51 | 20.00% | 1 | 11.11% |
Hendrik Brueckner | 14 | 5.49% | 2 | 22.22% |
Ursula Braun-Krahl | 6 | 2.35% | 1 | 11.11% |
Total | 255 | 100.00% | 9 | 100.00% |
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
if (parent) {
sk->sk_type = parent->sk_type;
security_sk_clone(parent, sk);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 21 | 56.76% | 1 | 33.33% |
Paul Moore | 9 | 24.32% | 1 | 33.33% |
Jennifer Hunt | 7 | 18.92% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
{
struct sock *sk;
struct iucv_sock *iucv;
sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
if (!sk)
return NULL;
iucv = iucv_sk(sk);
sock_init_data(sock, sk);
INIT_LIST_HEAD(&iucv->accept_q);
spin_lock_init(&iucv->accept_q_lock);
skb_queue_head_init(&iucv->send_skb_q);
INIT_LIST_HEAD(&iucv->message_q.list);
spin_lock_init(&iucv->message_q.lock);
skb_queue_head_init(&iucv->backlog_skb_q);
iucv->send_tag = 0;
atomic_set(&iucv->pendings, 0);
iucv->flags = 0;
iucv->msglimit = 0;
atomic_set(&iucv->msg_sent, 0);
atomic_set(&iucv->msg_recv, 0);
iucv->path = NULL;
iucv->sk_txnotify = afiucv_hs_callback_txnotify;
memset(&iucv->src_user_id, 0, 32);
if (pr_iucv)
iucv->transport = AF_IUCV_TRANS_IUCV;
else
iucv->transport = AF_IUCV_TRANS_HIPER;
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
sk->sk_allocation = GFP_DMA;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = proto;
sk->sk_state = IUCV_OPEN;
iucv_sock_link(&iucv_sk_list, sk);
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 165 | 63.46% | 1 | 25.00% |
Jennifer Hunt | 89 | 34.23% | 1 | 25.00% |
Eric W. Biedermann | 5 | 1.92% | 1 | 25.00% |
Hendrik Brueckner | 1 | 0.38% | 1 | 25.00% |
Total | 260 | 100.00% | 4 | 100.00% |
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
if (protocol && protocol != PF_IUCV)
return -EPROTONOSUPPORT;
sock->state = SS_UNCONNECTED;
switch (sock->type) {
case SOCK_STREAM:
sock->ops = &iucv_sock_ops;
break;
case SOCK_SEQPACKET:
/* currently, proto ops can handle both sk types */
sock->ops = &iucv_sock_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
if (!sk)
return -ENOMEM;
iucv_sock_init(sk, NULL);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 85 | 75.22% | 1 | 33.33% |
Jennifer Hunt | 26 | 23.01% | 1 | 33.33% |
Eric W. Biedermann | 2 | 1.77% | 1 | 33.33% |
Total | 113 | 100.00% | 3 | 100.00% |
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_add_node(sk, &l->head);
write_unlock_bh(&l->lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 25 | 60.98% | 1 | 50.00% |
Jennifer Hunt | 16 | 39.02% | 1 | 50.00% |
Total | 41 | 100.00% | 2 | 100.00% |
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
write_lock_bh(&l->lock);
sk_del_node_init(sk);
write_unlock_bh(&l->lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 25 | 69.44% | 1 | 50.00% |
Jennifer Hunt | 11 | 30.56% | 1 | 50.00% |
Total | 36 | 100.00% | 2 | 100.00% |
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
unsigned long flags;
struct iucv_sock *par = iucv_sk(parent);
sock_hold(sk);
spin_lock_irqsave(&par->accept_q_lock, flags);
list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
spin_unlock_irqrestore(&par->accept_q_lock, flags);
iucv_sk(sk)->parent = parent;
sk_acceptq_added(parent);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 58 | 69.05% | 2 | 50.00% |
Jennifer Hunt | 25 | 29.76% | 1 | 25.00% |
Hendrik Brueckner | 1 | 1.19% | 1 | 25.00% |
Total | 84 | 100.00% | 4 | 100.00% |
void iucv_accept_unlink(struct sock *sk)
{
unsigned long flags;
struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
spin_lock_irqsave(&par->accept_q_lock, flags);
list_del_init(&iucv_sk(sk)->accept_q);
spin_unlock_irqrestore(&par->accept_q_lock, flags);
sk_acceptq_removed(iucv_sk(sk)->parent);
iucv_sk(sk)->parent = NULL;
sock_put(sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 55 | 65.48% | 1 | 33.33% |
Jennifer Hunt | 25 | 29.76% | 1 | 33.33% |
Ursula Braun-Krahl | 4 | 4.76% | 1 | 33.33% |
Total | 84 | 100.00% | 3 | 100.00% |
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
struct iucv_sock *isk, *n;
struct sock *sk;
list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
sk = (struct sock *) isk;
lock_sock(sk);
if (sk->sk_state == IUCV_CLOSED) {
iucv_accept_unlink(sk);
release_sock(sk);
continue;
}
if (sk->sk_state == IUCV_CONNECTED ||
sk->sk_state == IUCV_DISCONN ||
!newsock) {
iucv_accept_unlink(sk);
if (newsock)
sock_graft(sk, newsock);
release_sock(sk);
return sk;
}
release_sock(sk);
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Ursula Braun | 86 | 67.19% | 2 | 33.33% |
Jennifer Hunt | 37 | 28.91% | 1 | 16.67% |
Hendrik Brueckner | 5 | 3.91% | 3 | 50.00% |
Total | 128 | 100.00% | 6 | 100.00% |
static void __iucv_auto_name(struct iucv_sock *iucv)
{
char name[12];
sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
/* retry while the generated name is already in use */
while (__iucv_get_sock_by_name(name))
	sprintf(name, "%08x",
		atomic_inc_return(&iucv_sk_list.autobind_name));
memcpy(iucv->src_name, name, 8);
}