cregit-Linux: how code gets into the kernel

Release 4.11: net/iucv/iucv.c
/*
 * IUCV base infrastructure.
 *
 * Copyright IBM Corp. 2001, 2009
 *
 * Author(s):
 *    Original source:
 *      Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
 *      Xenia Tkatschow (xenia@us.ibm.com)
 *    2Gb awareness and general cleanup:
 *      Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
 *    Rewritten for af_iucv:
 *      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *    PM functions:
 *      Ursula Braun (ursula.braun@de.ibm.com)
 *
 * Documentation used:
 *    The original source
 *    CP Programming Service, IBM document # SC24-5760
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#define KMSG_COMPONENT "iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/smp.h>

/*
 * FLAGS:
 * All flags are defined in the field IPFLAGS1 of each function
 * and can be found in CP Programming Services.
 * IPSRCCLS - Indicates you have specified a source class.
 * IPTRGCLS - Indicates you have specified a target class.
 * IPFGPID  - Indicates you have specified a pathid.
 * IPFGMID  - Indicates you have specified a message ID.
 * IPNORPY  - Indicates a one-way message. No reply expected.
 * IPALL    - Indicates that all paths are affected.
 */

#define IUCV_IPSRCCLS	0x01
#define IUCV_IPTRGCLS	0x01
#define IUCV_IPFGPID	0x02
#define IUCV_IPFGMID	0x04
#define IUCV_IPNORPY	0x10
#define IUCV_IPALL	0x80
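Editor's note: since IPFLAGS1 is a bit mask, these values combine by OR. A purely illustrative fragment (it does not appear in the file):

	/* Illustrative only: request a one-way message and flag that a
	 * source class is specified, in one IPFLAGS1 byte. */
	u8 ipflags1 = IUCV_IPNORPY | IUCV_IPSRCCLS;	/* == 0x11 */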


static int iucv_bus_match(struct device *dev, struct device_driver *drv)
{
	return 0;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              19  100.00%         1       100.00%
Total                           19  100.00%         1       100.00%

enum iucv_pm_states {
	IUCV_PM_INITIAL = 0,
	IUCV_PM_FREEZING = 1,
	IUCV_PM_THAWING = 2,
	IUCV_PM_RESTORING = 3,
};
static enum iucv_pm_states iucv_pm_state;

static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
static int iucv_pm_thaw(struct device *);
static int iucv_pm_restore(struct device *);

static const struct dev_pm_ops iucv_pm_ops = {
	.prepare = iucv_pm_prepare,
	.complete = iucv_pm_complete,
	.freeze = iucv_pm_freeze,
	.thaw = iucv_pm_thaw,
	.restore = iucv_pm_restore,
};

struct bus_type iucv_bus = {
	.name = "iucv",
	.match = iucv_bus_match,
	.pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);

struct device *iucv_root;
EXPORT_SYMBOL(iucv_root);

static int iucv_available;

/* General IUCV interrupt structure */
struct iucv_irq_data {
	u16 ippathid;
	u8  ipflags1;
	u8  iptype;
	u32 res2[8];
};

struct iucv_irq_list {
	struct list_head list;
	struct iucv_irq_data data;
};

static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };

/*
 * Queue of interrupt buffers for delivery via the tasklet
 * (fast but can't call smp_call_function).
 */
static LIST_HEAD(iucv_task_queue);

/*
 * The tasklet for fast delivery of iucv interrupts.
 */
static void iucv_tasklet_fn(unsigned long);
static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn, 0);

/*
 * Queue of interrupt buffers for delivery via a work queue
 * (slower but can call smp_call_function).
 */
static LIST_HEAD(iucv_work_queue);

/*
 * The work element to deliver path pending interrupts.
 */
static void iucv_work_fn(struct work_struct *work);
static DECLARE_WORK(iucv_work, iucv_work_fn);

/*
 * Spinlock protecting task and work queue.
 */
static DEFINE_SPINLOCK(iucv_queue_lock);

enum iucv_command_codes {
	IUCV_QUERY = 0,
	IUCV_RETRIEVE_BUFFER = 2,
	IUCV_SEND = 4,
	IUCV_RECEIVE = 5,
	IUCV_REPLY = 6,
	IUCV_REJECT = 8,
	IUCV_PURGE = 9,
	IUCV_ACCEPT = 10,
	IUCV_CONNECT = 11,
	IUCV_DECLARE_BUFFER = 12,
	IUCV_QUIESCE = 13,
	IUCV_RESUME = 14,
	IUCV_SEVER = 15,
	IUCV_SETMASK = 16,
	IUCV_SETCONTROLMASK = 17,
};

/*
 * Error messages that are used with the iucv_sever function. They get
 * converted to EBCDIC.
 */
static char iucv_error_no_listener[16] = "NO LISTENER";
static char iucv_error_no_memory[16] = "NO MEMORY";
static char iucv_error_pathid[16] = "INVALID PATHID";

/*
 * iucv_handler_list: List of registered handlers.
 */
static LIST_HEAD(iucv_handler_list);

/*
 * iucv_path_table: an array of pointers to iucv_path structures.
 */
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;

/*
 * iucv_table_lock: spinlock protecting iucv_handler_list and iucv_path_table
 */
static DEFINE_SPINLOCK(iucv_table_lock);

/*
 * iucv_active_cpu: contains the number of the cpu executing the tasklet
 * or the work handler. Needed for iucv_path_sever called from tasklet.
 */
static int iucv_active_cpu = -1;

/*
 * Mutex and wait queue for iucv_register/iucv_unregister.
 */
static DEFINE_MUTEX(iucv_register_mutex);

/*
 * Counter for number of non-smp capable handlers.
 */
static int iucv_nonsmp_handler;

/*
 * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
 * iucv_path_quiesce and iucv_path_sever.
 */
struct iucv_cmd_control {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u16 ipmsglim;
	u16 res1;
	u8  ipvmid[8];
	u8  ipuser[16];
	u8  iptarget[8];
} __attribute__ ((packed,aligned(8)));

/*
 * Data in parameter list iucv structure. Used by iucv_message_send,
 * iucv_message_send2way and iucv_message_reply.
 */
struct iucv_cmd_dpl {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u8  iprmmsg[8];
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Data in buffer iucv structure. Used by iucv_message_receive,
 * iucv_message_reject, iucv_message_send, iucv_message_send2way
 * and iucv_declare_cpu.
 */
struct iucv_cmd_db {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u32 iptrgcls;
	u32 ipbfadr1;
	u32 ipbfln1f;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 ipbfadr2;
	u32 ipbfln2f;
	u32 res;
} __attribute__ ((packed,aligned(8)));

/*
 * Purge message iucv structure. Used by iucv_message_purge.
 */
struct iucv_cmd_purge {
	u16 ippathid;
	u8  ipflags1;
	u8  iprcode;
	u32 ipmsgid;
	u8  ipaudit[3];
	u8  res1[5];
	u32 res2;
	u32 ipsrccls;
	u32 ipmsgtag;
	u32 res3[3];
} __attribute__ ((packed,aligned(8)));

/*
 * Set mask iucv structure. Used by iucv_enable_cpu.
 */
struct iucv_cmd_set_mask {
	u8  ipmask;
	u8  res1[2];
	u8  iprcode;
	u32 res2[9];
} __attribute__ ((packed,aligned(8)));

union iucv_param {
	struct iucv_cmd_control ctrl;
	struct iucv_cmd_dpl dpl;
	struct iucv_cmd_db db;
	struct iucv_cmd_purge purge;
	struct iucv_cmd_set_mask set_mask;
};

/*
 * Anchor for per-cpu IUCV command parameter block.
 */
static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];

/**
 * iucv_call_b2f0
 * @command: identifier of IUCV call to CP.
 * @parm: pointer to a union iucv_param block
 *
 * Calls CP to execute IUCV commands.
 *
 * Returns the result of the CP IUCV call.
 */
static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = command;
	reg1 = (unsigned long)parm;
	asm volatile(
		"	.long 0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
		:  "m" (*parm) : "cc");
	return ccode;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              46   85.19%         1        50.00%
Heiko Carstens                   8   14.81%         1        50.00%
Total                           54  100.00%         2       100.00%


static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
	int ccode;

	ccode = __iucv_call_b2f0(command, parm);
	return ccode == 1 ? parm->ctrl.iprcode : ccode;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Heiko Carstens                  27   67.50%         1        50.00%
Martin Schwidefsky              13   32.50%         1        50.00%
Total                           40  100.00%         2       100.00%
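Editor's note: CP signals the outcome in a condition code of 0 to 3; condition code 1 means the parameter list itself carries the reason byte, which is why the wrapper above substitutes parm->ctrl.iprcode. A hypothetical caller therefore checks a single value:

	/* Illustrative only: rc is 0 on success, otherwise either a raw
	 * condition code (2, 3) or the IPRCODE reason byte when cc == 1. */
	rc = iucv_call_b2f0(IUCV_SEVER, parm);
	if (rc)
		pr_debug("IUCV_SEVER failed, rc/iprcode %d\n", rc);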

/**
 * iucv_query_maxconn
 *
 * Determines the maximum number of connections that may be established.
 *
 * Returns the maximum number of connections or -EPERM if IUCV is not
 * available.
 */
static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
{
	register unsigned long reg0 asm ("0");
	register unsigned long reg1 asm ("1");
	int ccode;

	reg0 = IUCV_QUERY;
	reg1 = (unsigned long) param;
	asm volatile (
		"	.long	0xb2f01000\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
	*max_pathid = reg1;
	return ccode;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Heiko Carstens                  35   59.32%         1        50.00%
Martin Schwidefsky              24   40.68%         1        50.00%
Total                           59  100.00%         2       100.00%


static int iucv_query_maxconn(void)
{
	unsigned long max_pathid;
	void *param;
	int ccode;

	param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA);
	if (!param)
		return -ENOMEM;
	ccode = __iucv_query_maxconn(param, &max_pathid);
	if (ccode == 0)
		iucv_max_pathid = max_pathid;
	kfree(param);
	return ccode ? -EPERM : 0;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              57   75.00%         1        50.00%
Heiko Carstens                  19   25.00%         1        50.00%
Total                           76  100.00%         2       100.00%

/**
 * iucv_allow_cpu
 * @data: unused
 *
 * Allow iucv interrupts on this cpu.
 */
static void iucv_allow_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/*
	 * Enable all iucv interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow nonpriority message pending interrupts
	 *	0x40 - Flag to allow priority message pending interrupts
	 *	0x20 - Flag to allow nonpriority message completion interrupts
	 *	0x10 - Flag to allow priority message completion interrupts
	 *	0x08 - Flag to allow IUCV control interrupts
	 */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/*
	 * Enable all iucv control interrupts.
	 * ipmask contains bits for the different interrupts
	 *	0x80 - Flag to allow pending connections interrupts
	 *	0x40 - Flag to allow connection complete interrupts
	 *	0x20 - Flag to allow connection severed interrupts
	 *	0x10 - Flag to allow connection quiesced interrupts
	 *	0x08 - Flag to allow connection resumed interrupts
	 */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0xf8;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
	/* Set indication that iucv interrupts are allowed for this cpu. */
	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              60   63.16%         1        20.00%
Ursula Braun                    30   31.58%         2        40.00%
Christoph Lameter                3    3.16%         1        20.00%
Motohiro Kosaki                  2    2.11%         1        20.00%
Total                           95  100.00%         5       100.00%
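Editor's note: the 0xf8 mask above is exactly the OR of the five bits listed in the function's comments; a compile-time assertion (not in the original file, and only legal inside a function body) makes the arithmetic explicit:

	/* 0x80 | 0x40 | 0x20 | 0x10 | 0x08 == 0xf8: every documented
	 * interrupt class is enabled at once. */
	BUILD_BUG_ON((0x80 | 0x40 | 0x20 | 0x10 | 0x08) != 0xf8);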

/**
 * iucv_block_cpu
 * @data: unused
 *
 * Block iucv interrupts on this cpu.
 */
static void iucv_block_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Disable all iucv interrupts. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	iucv_call_b2f0(IUCV_SETMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              52   89.66%         1        25.00%
Christoph Lameter                3    5.17%         1        25.00%
Motohiro Kosaki                  2    3.45%         1        25.00%
Ursula Braun                     1    1.72%         1        25.00%
Total                           58  100.00%         4       100.00%

/**
 * iucv_block_cpu_almost
 * @data: unused
 *
 * Allow connection-severed interrupts only on this cpu.
 */
static void iucv_block_cpu_almost(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	/* Allow iucv control interrupts only */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x08;
	iucv_call_b2f0(IUCV_SETMASK, parm);
	/* Allow iucv-severed interrupt only */
	memset(parm, 0, sizeof(union iucv_param));
	parm->set_mask.ipmask = 0x20;
	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);

	/* Clear indication that iucv interrupts are allowed for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Ursula Braun                    93   97.89%         1        50.00%
Motohiro Kosaki                  2    2.11%         1        50.00%
Total                           95  100.00%         2       100.00%

/**
 * iucv_declare_cpu
 * @data: unused
 *
 * Declare an interrupt buffer on this cpu.
 */
static void iucv_declare_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;
	int rc;

	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Declare interrupt buffer. */
	parm = iucv_param_irq[cpu];
	memset(parm, 0, sizeof(union iucv_param));
	parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
	rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
	if (rc) {
		char *err = "Unknown";
		switch (rc) {
		case 0x03:
			err = "Directory error";
			break;
		case 0x0a:
			err = "Invalid length";
			break;
		case 0x13:
			err = "Buffer already exists";
			break;
		case 0x3e:
			err = "Buffer overlap";
			break;
		case 0x5c:
			err = "Paging or storage error";
			break;
		}
		pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
			cpu, rc, err);
		return;
	}

	/* Set indication that an iucv buffer exists for this cpu. */
	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);

	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
		/* Enable iucv interrupts on this cpu. */
		iucv_allow_cpu(NULL);
	else
		/* Disable iucv interrupts on this cpu. */
		iucv_block_cpu(NULL);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky             170   92.90%         1        20.00%
Motohiro Kosaki                  6    3.28%         1        20.00%
Christoph Lameter                4    2.19%         1        20.00%
Joe Perches                      2    1.09%         1        20.00%
Ursula Braun                     1    0.55%         1        20.00%
Total                          183  100.00%         5       100.00%

/**
 * iucv_retrieve_cpu
 * @data: unused
 *
 * Retrieve interrupt buffer on this cpu.
 */
static void iucv_retrieve_cpu(void *data)
{
	int cpu = smp_processor_id();
	union iucv_param *parm;

	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
		return;

	/* Block iucv interrupts. */
	iucv_block_cpu(NULL);

	/* Retrieve interrupt buffer. */
	parm = iucv_param_irq[cpu];
	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);

	/* Clear indication that an iucv buffer exists for this cpu. */
	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              56   88.89%         1        25.00%
Motohiro Kosaki                  4    6.35%         1        25.00%
Christoph Lameter                2    3.17%         1        25.00%
Ursula Braun                     1    1.59%         1        25.00%
Total                           63  100.00%         4       100.00%

/**
 * iucv_setmask_mp
 *
 * Allow iucv interrupts on all cpus.
 */
static void iucv_setmask_mp(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		/* Enable all cpus with a declared buffer. */
		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
			smp_call_function_single(cpu, iucv_allow_cpu,
						 NULL, 1);
	put_online_cpus();
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              43   82.69%         2        40.00%
Heiko Carstens                   5    9.62%         2        40.00%
Motohiro Kosaki                  4    7.69%         1        20.00%
Total                           52  100.00%         5       100.00%

/**
 * iucv_setmask_up
 *
 * Allow iucv interrupts on a single cpu.
 */
static void iucv_setmask_up(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all cpus but the first in iucv_irq_cpumask. */
	cpumask_copy(&cpumask, &iucv_irq_cpumask);
	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
	for_each_cpu(cpu, &cpumask)
		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              38   71.70%         1        33.33%
Motohiro Kosaki                 12   22.64%         1        33.33%
Heiko Carstens                   3    5.66%         1        33.33%
Total                           53  100.00%         3       100.00%

/**
 * iucv_enable
 *
 * This function makes iucv ready for use. It allocates the pathid
 * table, declares an iucv interrupt buffer and enables the iucv
 * interrupts. Called when the first user has registered an iucv
 * handler.
 */
static int iucv_enable(void)
{
	size_t alloc_size;
	int cpu, rc;

	get_online_cpus();
	rc = -ENOMEM;
	alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
	iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
	if (!iucv_path_table)
		goto out;
	/* Declare per cpu buffers. */
	rc = -EIO;
	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
	if (cpumask_empty(&iucv_buffer_cpumask))
		/* No cpu could declare an iucv buffer. */
		goto out;
	put_online_cpus();
	return 0;
out:
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
	return rc;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              87   81.31%         1        20.00%
Heiko Carstens                  18   16.82%         3        60.00%
Motohiro Kosaki                  2    1.87%         1        20.00%
Total                          107  100.00%         5       100.00%

/**
 * iucv_disable
 *
 * This function shuts down iucv. It disables iucv interrupts, retrieves
 * the iucv interrupt buffer and frees the pathid table. Called after the
 * last user has unregistered its iucv handler.
 */
static void iucv_disable(void)
{
	get_online_cpus();
	on_each_cpu(iucv_retrieve_cpu, NULL, 1);
	kfree(iucv_path_table);
	iucv_path_table = NULL;
	put_online_cpus();
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              22   68.75%         1        33.33%
Heiko Carstens                  10   31.25%         2        66.67%
Total                           32  100.00%         3       100.00%


static int iucv_cpu_dead(unsigned int cpu)
{
	kfree(iucv_param_irq[cpu]);
	iucv_param_irq[cpu] = NULL;
	kfree(iucv_param[cpu]);
	iucv_param[cpu] = NULL;
	kfree(iucv_irq_data[cpu]);
	iucv_irq_data[cpu] = NULL;
	return 0;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Srivatsa S. Bhat                48   82.76%         1        33.33%
Sebastian Andrzej Siewior        6   10.34%         1        33.33%
Martin Schwidefsky               4    6.90%         1        33.33%
Total                           58  100.00%         3       100.00%


static int iucv_cpu_prepare(unsigned int cpu)
{
	/* Note: GFP_DMA used to get memory below 2G */
	iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
				GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_irq_data[cpu])
		goto out_free;

	/* Allocate parameter blocks. */
	iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
				GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param[cpu])
		goto out_free;

	iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
				GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
	if (!iucv_param_irq[cpu])
		goto out_free;

	return 0;

out_free:
	iucv_cpu_dead(cpu);
	return -ENOMEM;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Ursula Braun                    35   27.34%         1        20.00%
Christoph Lameter               33   25.78%         1        20.00%
Martin Schwidefsky              30   23.44%         1        20.00%
Srivatsa S. Bhat                27   21.09%         1        20.00%
Sebastian Andrzej Siewior        3    2.34%         1        20.00%
Total                          128  100.00%         5       100.00%


static int iucv_cpu_online(unsigned int cpu)
{
	if (!iucv_path_table)
		return 0;
	iucv_declare_cpu(NULL);
	return 0;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Sebastian Andrzej Siewior       11   42.31%         1        20.00%
Heiko Carstens                   5   19.23%         1        20.00%
Srivatsa S. Bhat                 5   19.23%         1        20.00%
Martin Schwidefsky               4   15.38%         1        20.00%
Ursula Braun                     1    3.85%         1        20.00%
Total                           26  100.00%         5       100.00%


static int iucv_cpu_down_prep(unsigned int cpu)
{
	cpumask_t cpumask;

	if (!iucv_path_table)
		return 0;

	cpumask_copy(&cpumask, &iucv_buffer_cpumask);
	cpumask_clear_cpu(cpu, &cpumask);
	if (cpumask_empty(&cpumask))
		/* Can't offline last IUCV enabled cpu. */
		return -EINVAL;

	iucv_retrieve_cpu(NULL);
	if (!cpumask_empty(&iucv_irq_cpumask))
		return 0;
	smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
				 iucv_allow_cpu, NULL, 1);
	return 0;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              39   45.35%         1        16.67%
Sebastian Andrzej Siewior       21   24.42%         1        16.67%
Motohiro Kosaki                 14   16.28%         1        16.67%
Heiko Carstens                  10   11.63%         2        33.33%
Akinobu Mita                     2    2.33%         1        16.67%
Total                           86  100.00%         6       100.00%
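Editor's note: the four callbacks above are wired into the cpu hotplug state machine by iucv_init(), which lies outside this excerpt. A sketch of that registration, assuming the 4.11 cpuhp API (error handling omitted):

	/* Sketch: allocation callbacks run at the PREPARE stage for any cpu,
	 * buffer declaration/retrieval when a cpu actually comes up or down. */
	rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare",
			       iucv_cpu_prepare, iucv_cpu_dead);
	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online",
			       iucv_cpu_online, iucv_cpu_down_prep);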

/**
 * iucv_sever_pathid
 * @pathid: path identification number.
 * @userdata: 16 bytes of user data.
 *
 * Sever an iucv path to free up the pathid. Used internally.
 */
static int iucv_sever_pathid(u16 pathid, u8 *userdata)
{
	union iucv_param *parm;

	parm = iucv_param_irq[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ippathid = pathid;
	return iucv_call_b2f0(IUCV_SEVER, parm);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              75   94.94%         1        25.00%
Christoph Lameter                2    2.53%         1        25.00%
Ursula Braun                     2    2.53%         2        50.00%
Total                           79  100.00%         4       100.00%

/**
 * __iucv_cleanup_queue
 * @dummy: unused dummy argument
 *
 * Nop function called via smp_call_function to force work items from
 * pending external iucv interrupts to the work queue.
 */
static void __iucv_cleanup_queue(void *dummy)
{
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky               9  100.00%         2       100.00%
Total                            9  100.00%         2       100.00%

/**
 * iucv_cleanup_queue
 *
 * Function called after a path has been severed to find all remaining
 * work items for the now stale pathid. The caller needs to hold the
 * iucv_table_lock.
 */
static void iucv_cleanup_queue(void)
{
	struct iucv_irq_list *p, *n;

	/*
	 * When a path is severed, the pathid can be reused immediately
	 * on an iucv connect or a connection pending interrupt. Remove
	 * all entries from the task queue that refer to a stale pathid
	 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
	 * or deliver the connection pending interrupt. To get all the
	 * pending interrupts force them to the work queue by calling
	 * an empty function on all cpus.
	 */
	smp_call_function(__iucv_cleanup_queue, NULL, 1);
	spin_lock_irq(&iucv_queue_lock);
	list_for_each_entry_safe(p, n, &iucv_task_queue, list) {
		/* Remove stale work items from the task queue. */
		if (iucv_path_table[p->data.ippathid] == NULL) {
			list_del(&p->list);
			kfree(p);
		}
	}
	spin_unlock_irq(&iucv_queue_lock);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky              78   98.73%         2        66.67%
Lucas De Marchi                  1    1.27%         1        33.33%
Total                           79  100.00%         3       100.00%

/**
 * iucv_register:
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Registers a driver with IUCV.
 *
 * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
 * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
 */
int iucv_register(struct iucv_handler *handler, int smp)
{
	int rc;

	if (!iucv_available)
		return -ENOSYS;
	mutex_lock(&iucv_register_mutex);
	if (!smp)
		iucv_nonsmp_handler++;
	if (list_empty(&iucv_handler_list)) {
		rc = iucv_enable();
		if (rc)
			goto out_mutex;
	} else if (!smp && iucv_nonsmp_handler == 1)
		iucv_setmask_up();
	INIT_LIST_HEAD(&handler->paths);

	spin_lock_bh(&iucv_table_lock);
	list_add_tail(&handler->list, &iucv_handler_list);
	spin_unlock_bh(&iucv_table_lock);
	rc = 0;
out_mutex:
	mutex_unlock(&iucv_register_mutex);
	return rc;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky             118   98.33%         1        50.00%
Ursula Braun-Krahl               2    1.67%         1        50.00%
Total                          120  100.00%         2       100.00%

EXPORT_SYMBOL(iucv_register);

/**
 * iucv_unregister
 * @handler: address of iucv handler structure
 * @smp: != 0 indicates that the handler can deal with out of order messages
 *
 * Unregister driver from IUCV.
 */
void iucv_unregister(struct iucv_handler *handler, int smp)
{
	struct iucv_path *p, *n;

	mutex_lock(&iucv_register_mutex);
	spin_lock_bh(&iucv_table_lock);
	/* Remove handler from the iucv_handler_list. */
	list_del_init(&handler->list);
	/* Sever all pathids still referring to the handler. */
	list_for_each_entry_safe(p, n, &handler->paths, list) {
		iucv_sever_pathid(p->pathid, NULL);
		iucv_path_table[p->pathid] = NULL;
		list_del(&p->list);
		iucv_path_free(p);
	}
	spin_unlock_bh(&iucv_table_lock);
	if (!smp)
		iucv_nonsmp_handler--;
	if (list_empty(&iucv_handler_list))
		iucv_disable();
	else if (!smp && iucv_nonsmp_handler == 0)
		iucv_setmask_mp();
	mutex_unlock(&iucv_register_mutex);
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky             129   99.23%         1        50.00%
Lucas De Marchi                  1    0.77%         1        50.00%
Total                          130  100.00%         2       100.00%

EXPORT_SYMBOL(iucv_unregister);
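
Editor's note: a minimal sketch of the client side of this pair, assuming the callback signatures of struct iucv_handler from net/iucv/iucv.h; every my_* name is hypothetical:

	/* Hypothetical module using iucv_register()/iucv_unregister(). */
	static int my_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
	{
		return -EINVAL;		/* refuse incoming connections for now */
	}

	static struct iucv_handler my_handler = {
		.path_pending = my_path_pending,
	};

	static int __init my_init(void)
	{
		/* smp != 0: the handler copes with interrupts on any cpu */
		return iucv_register(&my_handler, 1);
	}

	static void __exit my_exit(void)
	{
		/* severs any paths still owned by the handler, see above */
		iucv_unregister(&my_handler, 1);
	}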
static int iucv_reboot_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	int i;

	if (cpumask_empty(&iucv_irq_cpumask))
		return NOTIFY_DONE;

	get_online_cpus();
	on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
	preempt_disable();
	for (i = 0; i < iucv_max_pathid; i++) {
		if (iucv_path_table[i])
			iucv_sever_pathid(i, NULL);
	}
	preempt_enable();
	put_online_cpus();
	iucv_disable();
	return NOTIFY_DONE;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Ursula Braun                    77   83.70%         1        50.00%
Hendrik Brueckner               15   16.30%         1        50.00%
Total                           92  100.00%         2       100.00%

static struct notifier_block iucv_reboot_notifier = {
	.notifier_call = iucv_reboot_event,
};

/**
 * iucv_path_accept
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function is issued after the user received a connection pending
 * external interrupt and now wishes to complete the IUCV communication path.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
		     u8 *userdata, void *private)
{
	union iucv_param *parm;
	int rc;

	local_bh_disable();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out;
	}
	/* Prepare parameter block. */
	parm = iucv_param[smp_processor_id()];
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = path->pathid;
	parm->ctrl.ipmsglim = path->msglim;
	if (userdata)
		memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
	parm->ctrl.ipflags1 = path->flags;

	rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
	if (!rc) {
		path->private = private;
		path->msglim = parm->ctrl.ipmsglim;
		path->flags = parm->ctrl.ipflags1;
	}
out:
	local_bh_enable();
	return rc;
}

Contributors

Person                      Tokens     Prop   Commits   Commit Prop
Martin Schwidefsky             155   87.08%         1        20.00%
Ursula Braun                    19   10.67%         2        40.00%
Christoph Lameter                2    1.12%         1        20.00%
Motohiro Kosaki                  2    1.12%         1        20.00%
Total                          178  100.00%         5       100.00%
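Editor's note: a typical handler completes the handshake described in the kernel-doc above from its path_pending callback, as af_iucv does. Continuing the hypothetical my_* sketch from earlier:

	/* Hypothetical: accept an incoming connection, optionally lowering
	 * the message limit first. */
	static int my_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser)
	{
		if (path->msglim > 8)
			path->msglim = 8;
		return iucv_path_accept(path, &my_handler, NULL, NULL);
	}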

EXPORT_SYMBOL(iucv_path_accept);

/**
 * iucv_path_connect
 * @path: address of iucv path structure
 * @handler: address of iucv handler structure
 * @userid: 8-byte user identification
 * @system: 8-byte target system identification
 * @userdata: 16 bytes of data reflected to the communication partner
 * @private: private data passed to interrupt handlers for this path
 *
 * This function establishes an IUCV path. Although the connect may complete
 * successfully, you are not able to use the path until you receive an IUCV
 * Connection Complete external interrupt.
 *
 * Returns the result of the CP IUCV call.
 */
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
		      u8 *userid, u8 *system, u8 *userdata,
		      void *private)
{
	union iucv_param *parm;
	int rc;

	spin_lock_bh(&iucv_table_lock);
	iucv_cleanup_queue();
	if (cpumask_empty(&iucv_buffer_cpumask)) {
		rc = -EIO;
		goto out