cregit-Linux: how code gets into the kernel

Release 4.11: net/iucv/af_iucv.c

/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):  Jennifer Hunt <jenhunt@us.ibm.com>
 *              Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *              Ursula Braun <ursula.braun@de.ibm.com>
 */


#define KMSG_COMPONENT "af_iucv"

#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>


#define VERSION "1.2"


static char iucv_userid[80];


static const struct proto_ops iucv_sock_ops;


static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};


static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */

static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};


#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))


#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
        long __timeo = timeo;                                           \
        ret = 0;                                                        \
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
        while (!(condition)) {                                          \
                if (!__timeo) {                                         \
                        ret = -EAGAIN;                                  \
                        break;                                          \
                }                                                       \
                if (signal_pending(current)) {                          \
                        ret = sock_intr_errno(__timeo);                 \
                        break;                                          \
                }                                                       \
                release_sock(sk);                                       \
                __timeo = schedule_timeout(__timeo);                    \
                lock_sock(sk);                                          \
                ret = sock_error(sk);                                   \
                if (ret)                                                \
                        break;                                          \
        }                                                               \
        finish_wait(sk_sleep(sk), &__wait);                             \
} while (0)


#define iucv_sock_wait(sk, condition, timeo)				\
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __iucv_sock_wait(sk, condition, timeo, __ret);          \
        __ret;                                                          \
})

static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Callback functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 *);
static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
static void iucv_callback_connrej(struct iucv_path *, u8 *);
static void iucv_callback_shutdown(struct iucv_path *, u8 *);


static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};


static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};


static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jennifer Hunt       25      100.00%  1        100.00%
Total               25      100.00%  1        100.00%


static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jennifer Hunt       29      100.00%  1        100.00%
Total               29      100.00%  1        100.00%


static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        25      100.00%  1        100.00%
Total               25      100.00%  1        100.00%


static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        22      100.00%  1        100.00%
Total               22      100.00%  1        100.00%

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        126     100.00%  2        100.00%
Total               126     100.00%  2        100.00%

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * socket clean up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        102     100.00%  1        100.00%
Total               102     100.00%  1        100.00%

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data len is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); and further,
 * if the socket data len is > 7, the function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Hendrik Brueckner   52      100.00%  1        100.00%
Total               52      100.00%  1        100.00%
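
A worked illustration of the PRMDATA convention documented above: the sender stores at most 7 bytes of socket data in PRMDATA[0..6] and writes 0xff minus the data length into PRMDATA[7], which is exactly what iucv_msg_length() reverses. This is a hypothetical sketch for illustration only, not part of the file; the in-kernel send path builds PRMDATA in the equivalent way.

/* Hypothetical helper: encode "len" bytes (len <= 7) of socket data into
 * an 8-byte IPRM parameter list.  For len = 5, PRMDATA[7] becomes
 * 0xff - 5 = 0xfa, and iucv_msg_length() recovers datalen = 5. */
static void example_encode_iprm(u8 prmdata[8], const void *data, u8 len)
{
	memcpy(prmdata, data, len);
	prmdata[7] = 0xff - len;
}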

/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Hendrik Brueckner   32      100.00%  1        100.00%
Total               32      100.00%  1        100.00%

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Hendrik Brueckner   50      56.82%   1        50.00%
Ursula Braun        38      43.18%   1        50.00%
Total               88      100.00%  2        100.00%
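
The send path combines this check with the iucv_sock_wait() macro defined above to block until the peer window opens again. Below is a minimal sketch of that pattern, assuming the caller holds the socket lock and has already computed a send timeout; the helper name is illustrative and not part of the file.

/* Illustrative helper: sleep (releasing the socket lock while waiting)
 * until iucv_below_msglim() is true again or the timeout expires. */
static int example_wait_below_msglim(struct sock *sk, long timeo)
{
	return iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
}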

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Hendrik Brueckner   35      63.64%   1        33.33%
Eric Dumazet        19      34.55%   1        33.33%
Herbert Xu          1       1.82%    1        33.33%
Total               55      100.00%  3        100.00%

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
			  struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        361     72.49%   2        33.33%
Jennifer Hunt       134     26.91%   2        33.33%
Hendrik Brueckner   3       0.60%    2        33.33%
Total               498     100.00%  6        100.00%


static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Jennifer Hunt       24      51.06%   1        50.00%
Ursula Braun        23      48.94%   1        50.00%
Total               47      100.00%  2        100.00%


static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        76      86.36%   2        50.00%
Jennifer Hunt       7       7.95%    1        25.00%
Ursula Braun-Krahl  5       5.68%    1        25.00%
Total               88      100.00%  4        100.00%

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        37      77.08%   1        20.00%
Ursula Braun-Krahl  5       10.42%   1        20.00%
Jennifer Hunt       4       8.33%    2        40.00%
Hendrik Brueckner   2       4.17%    1        20.00%
Total               48      100.00%  5        100.00%

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        26      56.52%   1        33.33%
Jennifer Hunt       18      39.13%   1        33.33%
Pavel Emelyanov     2       4.35%    1        33.33%
Total               46      100.00%  3        100.00%

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        102     91.07%   2        66.67%
Jennifer Hunt       10      8.93%    1        33.33%
Total               112     100.00%  3        100.00%

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        118     100.00%  2        100.00%
Total               118     100.00%  2        100.00%

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:	/* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
				       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
				       timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:	/* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        184     72.16%   5        55.56%
Jennifer Hunt       51      20.00%   1        11.11%
Hendrik Brueckner   14      5.49%    2        22.22%
Ursula Braun-Krahl  6       2.35%    1        11.11%
Total               255     100.00%  9        100.00%


static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        21      56.76%   1        33.33%
Paul Moore          9       24.32%   1        33.33%
Jennifer Hunt       7       18.92%   1        33.33%
Total               37      100.00%  3        100.00%


static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
				    int kern)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        165     63.46%   1        25.00%
Jennifer Hunt       89      34.23%   1        25.00%
Eric W. Biedermann  5       1.92%    1        25.00%
Hendrik Brueckner   1       0.38%    1        25.00%
Total               260     100.00%  4        100.00%

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        85      75.22%   1        33.33%
Jennifer Hunt       26      23.01%   1        33.33%
Eric W. Biedermann  2       1.77%    1        33.33%
Total               113     100.00%  3        100.00%


void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        25      60.98%   1        50.00%
Jennifer Hunt       16      39.02%   1        50.00%
Total               41      100.00%  2        100.00%


void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        25      69.44%   1        50.00%
Jennifer Hunt       11      30.56%   1        50.00%
Total               36      100.00%  2        100.00%


void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        58      69.05%   2        50.00%
Jennifer Hunt       25      29.76%   1        25.00%
Hendrik Brueckner   1       1.19%    1        25.00%
Total               84      100.00%  4        100.00%


void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        55      65.48%   1        33.33%
Jennifer Hunt       25      29.76%   1        33.33%
Ursula Braun-Krahl  4       4.76%    1        33.33%
Total               84      100.00%  3        100.00%


struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

Contributors

Person              Tokens  Prop     Commits  CommitProp
Ursula Braun        86      67.19%   2        33.33%
Jennifer Hunt       37      28.91%   1        16.67%
Hendrik Brueckner   5       3.91%    3        50.00%
Total               128     100.00%  6        100.00%


static void __iucv_auto_name(struct iucv_sock *iucv) { char name[12]; sprintf(name, "%08x", atomic_inc_return(&