Diffstat (limited to 'drivers/s390/net')
-rw-r--r--  drivers/s390/net/Kconfig            |  112
-rw-r--r--  drivers/s390/net/Makefile           |   17
-rw-r--r--  drivers/s390/net/claw.c             | 3421
-rw-r--r--  drivers/s390/net/claw.h             |  354
-rw-r--r--  drivers/s390/net/ctcm_dbug.c        |   79
-rw-r--r--  drivers/s390/net/ctcm_dbug.h        |  143
-rw-r--r--  drivers/s390/net/ctcm_fsms.c        | 2295
-rw-r--r--  drivers/s390/net/ctcm_fsms.h        |  358
-rw-r--r--  drivers/s390/net/ctcm_main.c        | 1892
-rw-r--r--  drivers/s390/net/ctcm_main.h        |  323
-rw-r--r--  drivers/s390/net/ctcm_mpc.c         | 2179
-rw-r--r--  drivers/s390/net/ctcm_mpc.h         |  240
-rw-r--r--  drivers/s390/net/ctcm_sysfs.c       |  223
-rw-r--r--  drivers/s390/net/fsm.c              |  214
-rw-r--r--  drivers/s390/net/fsm.h              |  265
-rw-r--r--  drivers/s390/net/lcs.c              | 2510
-rw-r--r--  drivers/s390/net/lcs.h              |  345
-rw-r--r--  drivers/s390/net/netiucv.c          | 2299
-rw-r--r--  drivers/s390/net/qeth_core.h        |  941
-rw-r--r--  drivers/s390/net/qeth_core_main.c   | 5513
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c    |  268
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h    |  635
-rw-r--r--  drivers/s390/net/qeth_core_sys.c    |  766
-rw-r--r--  drivers/s390/net/qeth_l2_main.c     | 1348
-rw-r--r--  drivers/s390/net/qeth_l3.h          |   71
-rw-r--r--  drivers/s390/net/qeth_l3_main.c     | 3816
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c      | 1172
-rw-r--r--  drivers/s390/net/smsgiucv.c         |  259
-rw-r--r--  drivers/s390/net/smsgiucv.h         |   14
-rw-r--r--  drivers/s390/net/smsgiucv_app.c     |  218
30 files changed, 32290 insertions, 0 deletions
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 00000000000..9b66d2d1809
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,112 @@
+menu "S/390 network device drivers"
+ depends on NETDEVICES && S390
+
+config LCS
+ def_tristate m
+ prompt "Lan Channel Station Interface"
+ depends on CCW && NETDEVICES && (ETHERNET || TR || FDDI)
+ help
+ Select this option if you want to use LCS networking on IBM System z.
+ This device driver supports Token Ring (IEEE 802.5),
+ FDDI (ANSI X3T9.5) and Ethernet.
+ To compile as a module, choose M. The module name is lcs.
+ If you do not know what it is, it's safe to choose Y.
+
+config CTCM
+ def_tristate m
+ prompt "CTC and MPC SNA device support"
+ depends on CCW && NETDEVICES
+ help
+ Select this option if you want to use channel-to-channel
+ point-to-point networking on IBM System z.
+ This device driver supports real CTC coupling using ESCON.
+ It also supports virtual CTCs when running under VM.
+ This driver also supports channel-to-channel MPC SNA devices.
+ MPC is an SNA protocol device used by Communication Server for Linux.
+ To compile as a module, choose M. The module name is ctcm.
+ To compile into the kernel, choose Y.
+ If you do not need any channel-to-channel connection, choose N.
+
+config NETIUCV
+ def_tristate m
+ prompt "IUCV network device support (VM only)"
+ depends on IUCV && NETDEVICES
+ help
+ Select this option if you want to use inter-user communication
+ vehicle networking under VM or VIF. It enables a fast communication
+ link between VM guests. Using ifconfig, a point-to-point connection
+ can be established to the Linux instance on IBM System z
+ running in the other VM guest. To compile as a module, choose M.
+ The module name is netiucv. If unsure, choose Y.
+
+config SMSGIUCV
+ def_tristate m
+ prompt "IUCV special message support (VM only)"
+ depends on IUCV
+ help
+ Select this option if you want to be able to receive SMSG messages
+ from other VM guest systems.
+
+config SMSGIUCV_EVENT
+ def_tristate m
+ prompt "Deliver IUCV special messages as uevents (VM only)"
+ depends on SMSGIUCV
+ help
+ Select this option to deliver CP special messages (SMSGs) as
+ uevents. The driver handles only those special messages that
+ start with "APP".
+
+ To compile as a module, choose M. The module name is "smsgiucv_app".
+
+config CLAW
+ def_tristate m
+ prompt "CLAW device support"
+ depends on CCW && NETDEVICES
+ help
+ This driver supports channel attached CLAW devices.
+ CLAW is Common Link Access for Workstation. Common devices
+ that use CLAW are RS/6000s, Cisco Routers (CIP) and 3172 devices.
+ To compile as a module, choose M. The module name is claw.
+ To compile into the kernel, choose Y.
+
+config QETH
+ def_tristate y
+ prompt "Gigabit Ethernet device support"
+ depends on CCW && NETDEVICES && IP_MULTICAST && QDIO
+ help
+ This driver supports the IBM System z OSA Express adapters
+ in QDIO mode (all media types), HiperSockets interfaces and VM GuestLAN
+ interfaces in QDIO and HIPER mode.
+
+ For details please refer to the documentation provided by IBM at
+ <http://www.ibm.com/developerworks/linux/linux390>
+
+ To compile this driver as a module, choose M.
+ The module name is qeth.
+
+config QETH_L2
+ def_tristate y
+ prompt "qeth layer 2 device support"
+ depends on QETH
+ help
+ Select this option to be able to run qeth devices in layer 2 mode.
+ To compile as a module, choose M. The module name is qeth_l2.
+ If unsure, choose Y.
+
+config QETH_L3
+ def_tristate y
+ prompt "qeth layer 3 device support"
+ depends on QETH
+ help
+ Select this option to be able to run qeth devices in layer 3 mode.
+ To compile as a module, choose M. The module name is qeth_l3.
+ If unsure, choose Y.
+
+config QETH_IPV6
+ def_bool y if (QETH_L3 = IPV6) || (QETH_L3 && IPV6 = 'y')
+
+config CCWGROUP
+ tristate
+ default (LCS || CTCM || QETH || CLAW)
+
+endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 00000000000..4dfe8c1092d
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,17 @@
+#
+# S/390 network devices
+#
+
+ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
+obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
+obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
+obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
+obj-$(CONFIG_LCS) += lcs.o
+obj-$(CONFIG_CLAW) += claw.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
+obj-$(CONFIG_QETH) += qeth.o
+qeth_l2-y += qeth_l2_main.o
+obj-$(CONFIG_QETH_L2) += qeth_l2.o
+qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
+obj-$(CONFIG_QETH_L3) += qeth_l3.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
new file mode 100644
index 00000000000..b41fae37d3a
--- /dev/null
+++ b/drivers/s390/net/claw.c
@@ -0,0 +1,3421 @@
+/*
+ * drivers/s390/net/claw.c
+ * ESCON CLAW network driver
+ *
+ * Linux for zSeries version
+ * Copyright IBM Corp. 2002, 2009
+ * Author(s) Original code written by:
+ * Kazuo Iimura <iimura@jp.ibm.com>
+ * Rewritten by
+ * Andy Richter <richtera@us.ibm.com>
+ * Marc Price <mwprice@us.ibm.com>
+ *
+ * sysfs parms:
+ * group x.x.rrrr,x.x.wwww
+ * read_buffer nnnnnnn
+ * write_buffer nnnnnn
+ * host_name aaaaaaaa
+ * adapter_name aaaaaaaa
+ * api_type aaaaaaaa
+ *
+ * eg.
+ * group 0.0.0200 0.0.0201
+ * read_buffer 25
+ * write_buffer 20
+ * host_name LINUX390
+ * adapter_name RS6K
+ * api_type TCPIP
+ *
+ * where
+ *
+ * The device ID is determined by the order in which entries
+ * are added to the group: the first is claw0, the second claw1,
+ * up to CLAW_MAX_DEV
+ *
+ * rrrr - the first of 2 consecutive device addresses used for the
+ * CLAW protocol.
+ * The specified address is always used as the input (Read)
+ * channel and the next address is used as the output channel.
+ *
+ * wwww - the second of 2 consecutive device addresses used for
+ * the CLAW protocol.
+ * The specified address is always used as the output
+ * channel and the previous address is used as the input channel.
+ *
+ * read_buffer - specifies number of input buffers to allocate.
+ * write_buffer - specifies number of output buffers to allocate.
+ * host_name - host name
+ * adapter_name - adapter name
+ * api_type - API type TCPIP or API will be sent and expected
+ * as ws_name
+ *
+ * Note the following requirements:
+ * 1) host_name must match the configured adapter_name on the remote side
+ * 2) adapter_name must match the configured host_name on the remote side
+ *
+ * Change History
+ * 1.00 Initial release shipped
+ * 1.10 Changes for Buffer allocation
+ * 1.15 Changed for 2.6 kernel; no longer compiles on 2.4 or lower
+ * 1.25 Added Packing support
+ * 1.5
+ */
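+
+/*
+ * A minimal configuration sketch (device numbers, buffer counts and
+ * sysfs paths are illustrative only; the "group" attribute is created
+ * by claw_group_driver below, the per-device attributes by
+ * claw_add_files()):
+ *
+ * echo 0.0.0200,0.0.0201 > /sys/bus/ccwgroup/drivers/claw/group
+ * echo 25 > /sys/bus/ccwgroup/devices/0.0.0200/read_buffer
+ * echo 20 > /sys/bus/ccwgroup/devices/0.0.0200/write_buffer
+ * echo LINUX390 > /sys/bus/ccwgroup/devices/0.0.0200/host_name
+ * echo RS6K > /sys/bus/ccwgroup/devices/0.0.0200/adapter_name
+ * echo TCPIP > /sys/bus/ccwgroup/devices/0.0.0200/api_type
+ * echo 1 > /sys/bus/ccwgroup/devices/0.0.0200/online
+ */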
+
+#define KMSG_COMPONENT "claw"
+
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/io.h>
+#include <linux/bitops.h>
+#include <linux/ctype.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tcp.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+
+#include "claw.h"
+
+/*
+ CLAW uses the s390dbf file system; see claw_trace and claw_setup
+*/
+
+static char version[] __initdata = "CLAW driver";
+static char debug_buffer[255];
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *claw_dbf_setup;
+static debug_info_t *claw_dbf_trace;
+
+/**
+ * CLAW Debug Facility functions
+ */
+static void
+claw_unregister_debug_facility(void)
+{
+ if (claw_dbf_setup)
+ debug_unregister(claw_dbf_setup);
+ if (claw_dbf_trace)
+ debug_unregister(claw_dbf_trace);
+}
+
+static int
+claw_register_debug_facility(void)
+{
+ claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
+ claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
+ if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
+ claw_unregister_debug_facility();
+ return -ENOMEM;
+ }
+ debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(claw_dbf_setup, 2);
+ debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(claw_dbf_trace, 2);
+ return 0;
+}
+
+static inline void
+claw_set_busy(struct net_device *dev)
+{
+ ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
+}
+
+static inline void
+claw_clear_busy(struct net_device *dev)
+{
+ clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
+ netif_wake_queue(dev);
+}
+
+static inline int
+claw_check_busy(struct net_device *dev)
+{
+ return ((struct claw_privbk *) dev->ml_priv)->tbusy;
+}
+
+static inline void
+claw_setbit_busy(int nr,struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
+}
+
+static inline void
+claw_clearbit_busy(int nr,struct net_device *dev)
+{
+ clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
+ netif_wake_queue(dev);
+}
+
+static inline int
+claw_test_and_setbit_busy(int nr,struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return test_and_set_bit(nr,
+ (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
+}
+
+
+/* Functions for the DEV methods */
+
+static int claw_probe(struct ccwgroup_device *cgdev);
+static void claw_remove_device(struct ccwgroup_device *cgdev);
+static void claw_purge_skb_queue(struct sk_buff_head *q);
+static int claw_new_device(struct ccwgroup_device *cgdev);
+static int claw_shutdown_device(struct ccwgroup_device *cgdev);
+static int claw_tx(struct sk_buff *skb, struct net_device *dev);
+static int claw_change_mtu( struct net_device *dev, int new_mtu);
+static int claw_open(struct net_device *dev);
+static void claw_irq_handler(struct ccw_device *cdev,
+ unsigned long intparm, struct irb *irb);
+static void claw_irq_tasklet ( unsigned long data );
+static int claw_release(struct net_device *dev);
+static void claw_write_retry ( struct chbk * p_ch );
+static void claw_write_next ( struct chbk * p_ch );
+static void claw_timer ( struct chbk * p_ch );
+
+/* Functions */
+static int add_claw_reads(struct net_device *dev,
+ struct ccwbk* p_first, struct ccwbk* p_last);
+static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
+static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
+static int find_link(struct net_device *dev, char *host_name, char *ws_name );
+static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
+static int init_ccw_bk(struct net_device *dev);
+static void probe_error( struct ccwgroup_device *cgdev);
+static struct net_device_stats *claw_stats(struct net_device *dev);
+static int pages_to_order_of_mag(int num_of_pages);
+static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
+/* sysfs Functions */
+static ssize_t claw_hname_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_hname_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t claw_adname_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_adname_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t claw_apname_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_apname_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t claw_wbuff_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_wbuff_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static ssize_t claw_rbuff_show(struct device *dev,
+ struct device_attribute *attr, char *buf);
+static ssize_t claw_rbuff_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+static int claw_add_files(struct device *dev);
+static void claw_remove_files(struct device *dev);
+
+/* Functions for System Validate */
+static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
+static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
+ __u8 correlator, __u8 rc , char *local_name, char *remote_name);
+static int claw_snd_conn_req(struct net_device *dev, __u8 link);
+static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
+static int claw_snd_sys_validate_rsp(struct net_device *dev,
+ struct clawctl * p_ctl, __u32 return_code);
+static int claw_strt_conn_req(struct net_device *dev );
+static void claw_strt_read(struct net_device *dev, int lock);
+static void claw_strt_out_IO(struct net_device *dev);
+static void claw_free_wrt_buf(struct net_device *dev);
+
+/* Functions for unpack reads */
+static void unpack_read(struct net_device *dev);
+
+static int claw_pm_prepare(struct ccwgroup_device *gdev)
+{
+ return -EPERM;
+}
+
+/* the root device for claw group devices */
+static struct device *claw_root_dev;
+
+/* ccwgroup table */
+
+static struct ccwgroup_driver claw_group_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "claw",
+ },
+ .max_slaves = 2,
+ .driver_id = 0xC3D3C1E6,
+ .probe = claw_probe,
+ .remove = claw_remove_device,
+ .set_online = claw_new_device,
+ .set_offline = claw_shutdown_device,
+ .prepare = claw_pm_prepare,
+};
+
+static struct ccw_device_id claw_ids[] = {
+ {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, claw_ids);
+
+static struct ccw_driver claw_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "claw",
+ },
+ .ids = claw_ids,
+ .probe = ccwgroup_probe_ccwdev,
+ .remove = ccwgroup_remove_ccwdev,
+ .int_class = IOINT_CLW,
+};
+
+static ssize_t
+claw_driver_group_store(struct device_driver *ddrv, const char *buf,
+ size_t count)
+{
+ int err;
+ err = ccwgroup_create_from_string(claw_root_dev,
+ claw_group_driver.driver_id,
+ &claw_ccw_driver, 2, buf);
+ return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
+
+static struct attribute *claw_group_attrs[] = {
+ &driver_attr_group.attr,
+ NULL,
+};
+
+static struct attribute_group claw_group_attr_group = {
+ .attrs = claw_group_attrs,
+};
+
+static const struct attribute_group *claw_group_attr_groups[] = {
+ &claw_group_attr_group,
+ NULL,
+};
+
+/*
+* Key functions
+*/
+
+/*----------------------------------------------------------------*
+ * claw_probe *
+ * this function is called for each CLAW device. *
+ *----------------------------------------------------------------*/
+static int
+claw_probe(struct ccwgroup_device *cgdev)
+{
+ int rc;
+ struct claw_privbk *privptr=NULL;
+
+ CLAW_DBF_TEXT(2, setup, "probe");
+ if (!get_device(&cgdev->dev))
+ return -ENODEV;
+ privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
+ dev_set_drvdata(&cgdev->dev, privptr);
+ if (privptr == NULL) {
+ probe_error(cgdev);
+ put_device(&cgdev->dev);
+ CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
+ return -ENOMEM;
+ }
+ privptr->p_mtc_envelope= kzalloc( MAX_ENVELOPE_SIZE, GFP_KERNEL);
+ privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
+ if ((privptr->p_mtc_envelope==NULL) || (privptr->p_env==NULL)) {
+ probe_error(cgdev);
+ put_device(&cgdev->dev);
+ CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
+ return -ENOMEM;
+ }
+ memcpy(privptr->p_env->adapter_name,WS_NAME_NOT_DEF,8);
+ memcpy(privptr->p_env->host_name,WS_NAME_NOT_DEF,8);
+ memcpy(privptr->p_env->api_type,WS_NAME_NOT_DEF,8);
+ privptr->p_env->packing = 0;
+ privptr->p_env->write_buffers = 5;
+ privptr->p_env->read_buffers = 5;
+ privptr->p_env->read_size = CLAW_FRAME_SIZE;
+ privptr->p_env->write_size = CLAW_FRAME_SIZE;
+ rc = claw_add_files(&cgdev->dev);
+ if (rc) {
+ probe_error(cgdev);
+ put_device(&cgdev->dev);
+ dev_err(&cgdev->dev, "Creating the /proc files for a new"
+ " CLAW device failed\n");
+ CLAW_DBF_TEXT_(2, setup, "probex%d", rc);
+ return rc;
+ }
+ privptr->p_env->p_priv = privptr;
+ cgdev->cdev[0]->handler = claw_irq_handler;
+ cgdev->cdev[1]->handler = claw_irq_handler;
+ CLAW_DBF_TEXT(2, setup, "prbext 0");
+
+ return 0;
+} /* end of claw_probe */
+
+/*-------------------------------------------------------------------*
+ * claw_tx *
+ *-------------------------------------------------------------------*/
+
+static int
+claw_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int rc;
+ struct claw_privbk *privptr = dev->ml_priv;
+ unsigned long saveflags;
+ struct chbk *p_ch;
+
+ CLAW_DBF_TEXT(4, trace, "claw_tx");
+ p_ch = &privptr->channel[WRITE_CHANNEL];
+ spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
+ rc=claw_hw_tx( skb, dev, 1 );
+ spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
+ CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
+ if (rc)
+ rc = NETDEV_TX_BUSY;
+ else
+ rc = NETDEV_TX_OK;
+ return rc;
+} /* end of claw_tx */
+
+/*------------------------------------------------------------------*
+ * pack the collect queue into an skb and return it *
+ * If not packing just return the top skb from the queue *
+ *------------------------------------------------------------------*/
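+/*
+ * For example (sizes illustrative, assuming a 32 KB packing buffer):
+ * three queued 1400-byte skbs fit within write_size-8 and are copied
+ * back-to-back into one new skb, while a fourth skb that would
+ * overflow the buffer is pushed back to the head of the collect queue.
+ */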
+
+static struct sk_buff *
+claw_pack_skb(struct claw_privbk *privptr)
+{
+ struct sk_buff *new_skb,*held_skb;
+ struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
+ struct claw_env *p_env = privptr->p_env;
+ int pkt_cnt,pk_ind,so_far;
+
+ new_skb = NULL; /* assume no dice */
+ pkt_cnt = 0;
+ CLAW_DBF_TEXT(4, trace, "PackSKBe");
+ if (!skb_queue_empty(&p_ch->collect_queue)) {
+ /* some data */
+ held_skb = skb_dequeue(&p_ch->collect_queue);
+ if (held_skb)
+ atomic_dec(&held_skb->users); /* drop the extra ref taken at queue time */
+ else
+ return NULL;
+ if (p_env->packing != DO_PACKED)
+ return held_skb;
+ /* get a new SKB we will pack at least one */
+ new_skb = dev_alloc_skb(p_env->write_size);
+ if (new_skb == NULL) {
+ atomic_inc(&held_skb->users);
+ skb_queue_head(&p_ch->collect_queue,held_skb);
+ return NULL;
+ }
+ /* we have packed packet and a place to put it */
+ pk_ind = 1;
+ so_far = 0;
+ new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
+ while ((pk_ind) && (held_skb != NULL)) {
+ if (held_skb->len+so_far <= p_env->write_size-8) {
+ memcpy(skb_put(new_skb,held_skb->len),
+ held_skb->data,held_skb->len);
+ privptr->stats.tx_packets++;
+ so_far += held_skb->len;
+ pkt_cnt++;
+ dev_kfree_skb_any(held_skb);
+ held_skb = skb_dequeue(&p_ch->collect_queue);
+ if (held_skb)
+ atomic_dec(&held_skb->users);
+ } else {
+ pk_ind = 0;
+ atomic_inc(&held_skb->users);
+ skb_queue_head(&p_ch->collect_queue,held_skb);
+ }
+ }
+ }
+ CLAW_DBF_TEXT(4, trace, "PackSKBx");
+ return new_skb;
+}
+
+/*-------------------------------------------------------------------*
+ * claw_change_mtu *
+ * *
+ *-------------------------------------------------------------------*/
+
+static int
+claw_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct claw_privbk *privptr = dev->ml_priv;
+ int buff_size;
+ CLAW_DBF_TEXT(4, trace, "setmtu");
+ buff_size = privptr->p_env->write_size;
+ if ((new_mtu < 60) || (new_mtu > buff_size)) {
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+ return 0;
+} /* end of claw_change_mtu */
+
+
+/*-------------------------------------------------------------------*
+ * claw_open *
+ * *
+ *-------------------------------------------------------------------*/
+static int
+claw_open(struct net_device *dev)
+{
+
+ int rc;
+ int i;
+ unsigned long saveflags=0;
+ unsigned long parm;
+ struct claw_privbk *privptr;
+ DECLARE_WAITQUEUE(wait, current);
+ struct timer_list timer;
+ struct ccwbk *p_buf;
+
+ CLAW_DBF_TEXT(4, trace, "open");
+ privptr = (struct claw_privbk *)dev->ml_priv;
+ /* allocate and initialize CCW blocks */
+ if (privptr->buffs_alloc == 0) {
+ rc=init_ccw_bk(dev);
+ if (rc) {
+ CLAW_DBF_TEXT(2, trace, "openmem");
+ return -ENOMEM;
+ }
+ }
+ privptr->system_validate_comp=0;
+ privptr->release_pend=0;
+ if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
+ privptr->p_env->read_size=DEF_PACK_BUFSIZE;
+ privptr->p_env->write_size=DEF_PACK_BUFSIZE;
+ privptr->p_env->packing=PACKING_ASK;
+ } else {
+ privptr->p_env->packing=0;
+ privptr->p_env->read_size=CLAW_FRAME_SIZE;
+ privptr->p_env->write_size=CLAW_FRAME_SIZE;
+ }
+ claw_set_busy(dev);
+ tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
+ (unsigned long) &privptr->channel[READ_CHANNEL]);
+ for ( i = 0; i < 2; i++) {
+ CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
+ init_waitqueue_head(&privptr->channel[i].wait);
+ /* skb_queue_head_init(&p_ch->io_queue); */
+ if (i == WRITE_CHANNEL)
+ skb_queue_head_init(
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
+ privptr->channel[i].flag_a = 0;
+ privptr->channel[i].IO_active = 0;
+ privptr->channel[i].flag &= ~CLAW_TIMER;
+ init_timer(&timer);
+ timer.function = (void *)claw_timer;
+ timer.data = (unsigned long)(&privptr->channel[i]);
+ timer.expires = jiffies + 15*HZ;
+ add_timer(&timer);
+ spin_lock_irqsave(get_ccwdev_lock(
+ privptr->channel[i].cdev), saveflags);
+ parm = (unsigned long) &privptr->channel[i];
+ privptr->channel[i].claw_state = CLAW_START_HALT_IO;
+ rc = 0;
+ add_wait_queue(&privptr->channel[i].wait, &wait);
+ rc = ccw_device_halt(
+ (struct ccw_device *)privptr->channel[i].cdev,parm);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+ schedule();
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&privptr->channel[i].wait, &wait);
+ if(rc != 0)
+ ccw_check_return_code(privptr->channel[i].cdev, rc);
+ if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
+ del_timer(&timer);
+ }
+ if ((((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
+ ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
+ (((privptr->channel[READ_CHANNEL].flag |
+ privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
+ dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
+ "%s: remote side is not ready\n", dev->name);
+ CLAW_DBF_TEXT(2, trace, "notrdy");
+
+ for ( i = 0; i < 2; i++) {
+ spin_lock_irqsave(
+ get_ccwdev_lock(privptr->channel[i].cdev),
+ saveflags);
+ parm = (unsigned long) &privptr->channel[i];
+ privptr->channel[i].claw_state = CLAW_STOP;
+ rc = ccw_device_halt(
+ (struct ccw_device *)privptr->channel[i].cdev,
+ parm);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(privptr->channel[i].cdev),
+ saveflags);
+ if (rc != 0) {
+ ccw_check_return_code(
+ privptr->channel[i].cdev, rc);
+ }
+ }
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+ if (privptr->p_env->read_size < PAGE_SIZE) {
+ free_pages((unsigned long)privptr->p_buff_read,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_read_num));
+ }
+ else {
+ p_buf=privptr->p_read_active_first;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perread ));
+ p_buf=p_buf->next;
+ }
+ }
+ if (privptr->p_env->write_size < PAGE_SIZE ) {
+ free_pages((unsigned long)privptr->p_buff_write,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_write_num));
+ }
+ else {
+ p_buf=privptr->p_write_active_first;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite ));
+ p_buf=p_buf->next;
+ }
+ }
+ privptr->buffs_alloc = 0;
+ privptr->channel[READ_CHANNEL].flag = 0x00;
+ privptr->channel[WRITE_CHANNEL].flag = 0x00;
+ privptr->p_buff_ccw=NULL;
+ privptr->p_buff_read=NULL;
+ privptr->p_buff_write=NULL;
+ claw_clear_busy(dev);
+ CLAW_DBF_TEXT(2, trace, "open EIO");
+ return -EIO;
+ }
+
+ /* Send SystemValidate command */
+
+ claw_clear_busy(dev);
+ CLAW_DBF_TEXT(4, trace, "openok");
+ return 0;
+} /* end of claw_open */
+
+/*-------------------------------------------------------------------*
+* *
+* claw_irq_handler *
+* *
+*--------------------------------------------------------------------*/
+static void
+claw_irq_handler(struct ccw_device *cdev,
+ unsigned long intparm, struct irb *irb)
+{
+ struct chbk *p_ch = NULL;
+ struct claw_privbk *privptr = NULL;
+ struct net_device *dev = NULL;
+ struct claw_env *p_env;
+ struct chbk *p_ch_r=NULL;
+
+ CLAW_DBF_TEXT(4, trace, "clawirq");
+ /* Bypass all 'unsolicited interrupts' */
+ privptr = dev_get_drvdata(&cdev->dev);
+ if (!privptr) {
+ dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
+ " IRQ, c-%02x d-%02x\n",
+ irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+ CLAW_DBF_TEXT(2, trace, "badirq");
+ return;
+ }
+
+ /* Try to extract channel from driver data. */
+ if (privptr->channel[READ_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[READ_CHANNEL];
+ else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
+ p_ch = &privptr->channel[WRITE_CHANNEL];
+ else {
+ dev_warn(&cdev->dev, "The device is not a CLAW device\n");
+ CLAW_DBF_TEXT(2, trace, "badchan");
+ return;
+ }
+ CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
+
+ dev = (struct net_device *) (p_ch->ndev);
+ p_env=privptr->p_env;
+
+ /* Copy interruption response block. */
+ memcpy(p_ch->irb, irb, sizeof(struct irb));
+
+ /* Check for good subchannel return code, otherwise info message */
+ if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
+ dev_info(&cdev->dev,
+ "%s: subchannel check for device: %04x -"
+ " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
+ dev->name, p_ch->devno,
+ irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
+ irb->scsw.cmd.cpa);
+ CLAW_DBF_TEXT(2, trace, "chanchk");
+ /* return; */
+ }
+
+ /* Check the reason-code of a unit check */
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+ ccw_check_unit_check(p_ch, irb->ecw[0]);
+
+ /* State machine to bring the connection up, down and to restart */
+ p_ch->last_dstat = irb->scsw.cmd.dstat;
+
+ switch (p_ch->claw_state) {
+ case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
+ if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+ (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+ (p_ch->irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
+ return;
+ wake_up(&p_ch->wait); /* wake up claw_release */
+ CLAW_DBF_TEXT(4, trace, "stop");
+ return;
+ case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
+ if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+ (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+ (p_ch->irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+ CLAW_DBF_TEXT(4, trace, "haltio");
+ return;
+ }
+ if (p_ch->flag == CLAW_READ) {
+ p_ch->claw_state = CLAW_START_READ;
+ wake_up(&p_ch->wait); /* wake claw_open (READ)*/
+ } else if (p_ch->flag == CLAW_WRITE) {
+ p_ch->claw_state = CLAW_START_WRITE;
+ /* send SYSTEM_VALIDATE */
+ claw_strt_read(dev, LOCK_NO);
+ claw_send_control(dev,
+ SYSTEM_VALIDATE_REQUEST,
+ 0, 0, 0,
+ p_env->host_name,
+ p_env->adapter_name);
+ } else {
+ dev_warn(&cdev->dev, "The CLAW device received"
+ " an unexpected IRQ, "
+ "c-%02x d-%02x\n",
+ irb->scsw.cmd.cstat,
+ irb->scsw.cmd.dstat);
+ return;
+ }
+ CLAW_DBF_TEXT(4, trace, "haltio");
+ return;
+ case CLAW_START_READ:
+ CLAW_DBF_TEXT(4, trace, "ReadIRQ");
+ if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+ clear_bit(0, (void *)&p_ch->IO_active);
+ if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
+ (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
+ (p_ch->irb->ecw[0]) == 0) {
+ privptr->stats.rx_errors++;
+ dev_info(&cdev->dev,
+ "%s: Restart is required after remote "
+ "side recovers \n",
+ dev->name);
+ }
+ CLAW_DBF_TEXT(4, trace, "notrdy");
+ return;
+ }
+ if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
+ (p_ch->irb->scsw.cmd.dstat == 0)) {
+ if (test_and_set_bit(CLAW_BH_ACTIVE,
+ (void *)&p_ch->flag_a) == 0)
+ tasklet_schedule(&p_ch->tasklet);
+ else
+ CLAW_DBF_TEXT(4, trace, "PCINoBH");
+ CLAW_DBF_TEXT(4, trace, "PCI_read");
+ return;
+ }
+ if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+ (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+ (p_ch->irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+ CLAW_DBF_TEXT(4, trace, "SPend_rd");
+ return;
+ }
+ clear_bit(0, (void *)&p_ch->IO_active);
+ claw_clearbit_busy(TB_RETRY, dev);
+ if (test_and_set_bit(CLAW_BH_ACTIVE,
+ (void *)&p_ch->flag_a) == 0)
+ tasklet_schedule(&p_ch->tasklet);
+ else
+ CLAW_DBF_TEXT(4, trace, "RdBHAct");
+ CLAW_DBF_TEXT(4, trace, "RdIRQXit");
+ return;
+ case CLAW_START_WRITE:
+ if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+ dev_info(&cdev->dev,
+ "%s: Unit Check Occurred in "
+ "write channel\n", dev->name);
+ clear_bit(0, (void *)&p_ch->IO_active);
+ if (p_ch->irb->ecw[0] & 0x80) {
+ dev_info(&cdev->dev,
+ "%s: Resetting Event "
+ "occurred:\n", dev->name);
+ init_timer(&p_ch->timer);
+ p_ch->timer.function =
+ (void *)claw_write_retry;
+ p_ch->timer.data = (unsigned long)p_ch;
+ p_ch->timer.expires = jiffies + 10*HZ;
+ add_timer(&p_ch->timer);
+ dev_info(&cdev->dev,
+ "%s: write connection "
+ "restarting\n", dev->name);
+ }
+ CLAW_DBF_TEXT(4, trace, "rstrtwrt");
+ return;
+ }
+ if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
+ clear_bit(0, (void *)&p_ch->IO_active);
+ dev_info(&cdev->dev,
+ "%s: Unit Exception "
+ "occurred in write channel\n",
+ dev->name);
+ }
+ if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+ (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+ (p_ch->irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
+ CLAW_DBF_TEXT(4, trace, "writeUE");
+ return;
+ }
+ clear_bit(0, (void *)&p_ch->IO_active);
+ if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
+ claw_write_next(p_ch);
+ claw_clearbit_busy(TB_TX, dev);
+ claw_clear_busy(dev);
+ }
+ p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
+ if (test_and_set_bit(CLAW_BH_ACTIVE,
+ (void *)&p_ch_r->flag_a) == 0)
+ tasklet_schedule(&p_ch_r->tasklet);
+ CLAW_DBF_TEXT(4, trace, "StWtExit");
+ return;
+ default:
+ dev_warn(&cdev->dev,
+ "The CLAW device for %s received an unexpected IRQ\n",
+ dev->name);
+ CLAW_DBF_TEXT(2, trace, "badIRQ");
+ return;
+ }
+
+} /* end of claw_irq_handler */
+
+
+/*-------------------------------------------------------------------*
+* claw_irq_tasklet *
+* *
+*--------------------------------------------------------------------*/
+static void
+claw_irq_tasklet ( unsigned long data )
+{
+ struct chbk * p_ch;
+ struct net_device *dev;
+
+ p_ch = (struct chbk *) data;
+ dev = (struct net_device *)p_ch->ndev;
+ CLAW_DBF_TEXT(4, trace, "IRQtask");
+ unpack_read(dev);
+ clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
+ CLAW_DBF_TEXT(4, trace, "TskletXt");
+ return;
+} /* end of claw_irq_bh */
+
+/*-------------------------------------------------------------------*
+* claw_release *
+* *
+*--------------------------------------------------------------------*/
+static int
+claw_release(struct net_device *dev)
+{
+ int rc;
+ int i;
+ unsigned long saveflags;
+ unsigned long parm;
+ struct claw_privbk *privptr;
+ DECLARE_WAITQUEUE(wait, current);
+ struct ccwbk* p_this_ccw;
+ struct ccwbk* p_buf;
+
+ if (!dev)
+ return 0;
+ privptr = (struct claw_privbk *)dev->ml_priv;
+ if (!privptr)
+ return 0;
+ CLAW_DBF_TEXT(4, trace, "release");
+ privptr->release_pend=1;
+ claw_setbit_busy(TB_STOP,dev);
+ for ( i = 1; i >=0 ; i--) {
+ spin_lock_irqsave(
+ get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+ /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
+ privptr->channel[i].claw_state = CLAW_STOP;
+ privptr->channel[i].IO_active = 0;
+ parm = (unsigned long) &privptr->channel[i];
+ if (i == WRITE_CHANNEL)
+ claw_purge_skb_queue(
+ &privptr->channel[WRITE_CHANNEL].collect_queue);
+ rc = ccw_device_halt (privptr->channel[i].cdev, parm);
+ if (privptr->system_validate_comp==0x00) /* never opened? */
+ init_waitqueue_head(&privptr->channel[i].wait);
+ add_wait_queue(&privptr->channel[i].wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
+ schedule();
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&privptr->channel[i].wait, &wait);
+ if (rc != 0) {
+ ccw_check_return_code(privptr->channel[i].cdev, rc);
+ }
+ }
+ if (privptr->pk_skb != NULL) {
+ dev_kfree_skb_any(privptr->pk_skb);
+ privptr->pk_skb = NULL;
+ }
+ if(privptr->buffs_alloc != 1) {
+ CLAW_DBF_TEXT(4, trace, "none2fre");
+ return 0;
+ }
+ CLAW_DBF_TEXT(4, trace, "freebufs");
+ if (privptr->p_buff_ccw != NULL) {
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
+ }
+ CLAW_DBF_TEXT(4, trace, "freeread");
+ if (privptr->p_env->read_size < PAGE_SIZE) {
+ if (privptr->p_buff_read != NULL) {
+ free_pages((unsigned long)privptr->p_buff_read,
+ (int)pages_to_order_of_mag(privptr->p_buff_read_num));
+ }
+ }
+ else {
+ p_buf=privptr->p_read_active_first;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perread ));
+ p_buf=p_buf->next;
+ }
+ }
+ CLAW_DBF_TEXT(4, trace, "freewrit");
+ if (privptr->p_env->write_size < PAGE_SIZE ) {
+ free_pages((unsigned long)privptr->p_buff_write,
+ (int)pages_to_order_of_mag(privptr->p_buff_write_num));
+ }
+ else {
+ p_buf=privptr->p_write_active_first;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite ));
+ p_buf=p_buf->next;
+ }
+ }
+ CLAW_DBF_TEXT(4, trace, "clearptr");
+ privptr->buffs_alloc = 0;
+ privptr->p_buff_ccw=NULL;
+ privptr->p_buff_read=NULL;
+ privptr->p_buff_write=NULL;
+ privptr->system_validate_comp=0;
+ privptr->release_pend=0;
+ /* Remove any writes that were pending and reset all reads */
+ p_this_ccw=privptr->p_read_active_first;
+ while (p_this_ccw!=NULL) {
+ p_this_ccw->header.length=0xffff;
+ p_this_ccw->header.opcode=0xff;
+ p_this_ccw->header.flag=0x00;
+ p_this_ccw=p_this_ccw->next;
+ }
+
+ while (privptr->p_write_active_first!=NULL) {
+ p_this_ccw=privptr->p_write_active_first;
+ p_this_ccw->header.flag=CLAW_PENDING;
+ privptr->p_write_active_first=p_this_ccw->next;
+ p_this_ccw->next=privptr->p_write_free_chain;
+ privptr->p_write_free_chain=p_this_ccw;
+ ++privptr->write_free_count;
+ }
+ privptr->p_write_active_last=NULL;
+ privptr->mtc_logical_link = -1;
+ privptr->mtc_skipping = 1;
+ privptr->mtc_offset=0;
+
+ if (((privptr->channel[READ_CHANNEL].last_dstat |
+ privptr->channel[WRITE_CHANNEL].last_dstat) &
+ ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
+ dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
+ "Deactivating %s completed with incorrect"
+ " subchannel status "
+ "(read %02x, write %02x)\n",
+ dev->name,
+ privptr->channel[READ_CHANNEL].last_dstat,
+ privptr->channel[WRITE_CHANNEL].last_dstat);
+ CLAW_DBF_TEXT(2, trace, "badclose");
+ }
+ CLAW_DBF_TEXT(4, trace, "rlsexit");
+ return 0;
+} /* end of claw_release */
+
+/*-------------------------------------------------------------------*
+* claw_write_retry *
+* *
+*--------------------------------------------------------------------*/
+
+static void
+claw_write_retry ( struct chbk *p_ch )
+{
+
+ struct net_device *dev=p_ch->ndev;
+
+ CLAW_DBF_TEXT(4, trace, "w_retry");
+ if (p_ch->claw_state == CLAW_STOP) {
+ return;
+ }
+ claw_strt_out_IO( dev );
+ CLAW_DBF_TEXT(4, trace, "rtry_xit");
+ return;
+} /* end of claw_write_retry */
+
+
+/*-------------------------------------------------------------------*
+* claw_write_next *
+* *
+*--------------------------------------------------------------------*/
+
+static void
+claw_write_next ( struct chbk * p_ch )
+{
+
+ struct net_device *dev;
+ struct claw_privbk *privptr=NULL;
+ struct sk_buff *pk_skb;
+
+ CLAW_DBF_TEXT(4, trace, "claw_wrt");
+ if (p_ch->claw_state == CLAW_STOP)
+ return;
+ dev = (struct net_device *) p_ch->ndev;
+ privptr = (struct claw_privbk *) dev->ml_priv;
+ claw_free_wrt_buf( dev );
+ if ((privptr->write_free_count > 0) &&
+ !skb_queue_empty(&p_ch->collect_queue)) {
+ pk_skb = claw_pack_skb(privptr);
+ while (pk_skb != NULL) {
+ claw_hw_tx(pk_skb, dev, 1);
+ if (privptr->write_free_count > 0) {
+ pk_skb = claw_pack_skb(privptr);
+ } else
+ pk_skb = NULL;
+ }
+ }
+ if (privptr->p_write_active_first!=NULL) {
+ claw_strt_out_IO(dev);
+ }
+ return;
+} /* end of claw_write_next */
+
+/*-------------------------------------------------------------------*
+* *
+* claw_timer *
+*--------------------------------------------------------------------*/
+
+static void
+claw_timer ( struct chbk * p_ch )
+{
+ CLAW_DBF_TEXT(4, trace, "timer");
+ p_ch->flag |= CLAW_TIMER;
+ wake_up(&p_ch->wait);
+ return;
+} /* end of claw_timer */
+
+/*
+*
+* functions
+*/
+
+
+/*-------------------------------------------------------------------*
+* *
+* pages_to_order_of_mag *
+* *
+* takes a number of pages from 1 to 512 and returns *
+* ceil(log2(num_pages)); get_free_pages() needs a base-2 order *
+* of magnitude and has an upper limit of order 9 *
+*--------------------------------------------------------------------*/
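+/*
+ * Sample mappings (these follow directly from the loop below):
+ * pages_to_order_of_mag(1) -> 0 (2^0 = 1 page)
+ * pages_to_order_of_mag(3) -> 2 (rounded up to 2^2 = 4 pages)
+ * pages_to_order_of_mag(512) -> 9 (2^9 = 512 pages, the cap)
+ */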
+
+static int
+pages_to_order_of_mag(int num_of_pages)
+{
+ int order_of_mag=1; /* assume 2 pages */
+ int nump;
+
+ CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
+ if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
+ /* 512 pages = 2Meg on 4k page systems */
+ if (num_of_pages >= 512) {return 9; }
+ /* we have two or more pages order is at least 1 */
+ for (nump=2 ;nump <= 512;nump*=2) {
+ if (num_of_pages <= nump)
+ break;
+ order_of_mag +=1;
+ }
+ if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
+ CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
+ return order_of_mag;
+}
+
+/*-------------------------------------------------------------------*
+* *
+* add_claw_reads *
+* *
+*--------------------------------------------------------------------*/
+static int
+add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
+ struct ccwbk* p_last)
+{
+ struct claw_privbk *privptr;
+ struct ccw1 temp_ccw;
+ struct endccw * p_end;
+ CLAW_DBF_TEXT(4, trace, "addreads");
+ privptr = dev->ml_priv;
+ p_end = privptr->p_end_ccw;
+
+ /* first CCW and last CCW contain a new set of read channel programs
+ * to append to the running channel programs
+ */
+ if ( p_first==NULL) {
+ CLAW_DBF_TEXT(4, trace, "addexit");
+ return 0;
+ }
+
+ /* set up ending CCW sequence for this segment */
+ if (p_end->read1) {
+ p_end->read1=0x00; /* second ending CCW is now active */
+ /* reset ending CCWs and setup TIC CCWs */
+ p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
+ p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
+ p_end->read2_nop2.cda=0;
+ p_end->read2_nop2.count=1;
+ }
+ else {
+ p_end->read1=0x01; /* first ending CCW is now active */
+ /* reset ending CCWs and setup TIC CCWs */
+ p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
+ p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
+ p_end->read1_nop2.cda=0;
+ p_end->read1_nop2.count=1;
+ }
+
+ if ( privptr-> p_read_active_first ==NULL ) {
+ privptr->p_read_active_first = p_first; /* set new first */
+ privptr->p_read_active_last = p_last; /* set new last */
+ }
+ else {
+
+ /* set up TIC ccw */
+ temp_ccw.cda= (__u32)__pa(&p_first->read);
+ temp_ccw.count=0;
+ temp_ccw.flags=0;
+ temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
+
+
+ if (p_end->read1) {
+
+ /* The first set of CCWs is chained to the new read */
+ /* chain, so the second set is chained to the active chain. */
+ /* Therefore modify the second set to point to the new */
+ /* read chain, and set up the TIC CCWs. */
+ /* Make sure we update the CCW so the channel doesn't fetch it */
+ /* when it's only half done. */
+ memcpy( &p_end->read2_nop2, &temp_ccw ,
+ sizeof(struct ccw1));
+ privptr->p_read_active_last->r_TIC_1.cda=
+ (__u32)__pa(&p_first->read);
+ privptr->p_read_active_last->r_TIC_2.cda=
+ (__u32)__pa(&p_first->read);
+ }
+ else {
+ /* make sure we update the CCW so channel doesn't */
+ /* fetch it when it is only half done */
+ memcpy( &p_end->read1_nop2, &temp_ccw ,
+ sizeof(struct ccw1));
+ privptr->p_read_active_last->r_TIC_1.cda=
+ (__u32)__pa(&p_first->read);
+ privptr->p_read_active_last->r_TIC_2.cda=
+ (__u32)__pa(&p_first->read);
+ }
+ /* chain in new set of blocks */
+ privptr->p_read_active_last->next = p_first;
+ privptr->p_read_active_last=p_last;
+ } /* end of if ( privptr-> p_read_active_first ==NULL) */
+ CLAW_DBF_TEXT(4, trace, "addexit");
+ return 0;
+} /* end of add_claw_reads */
+
+/*-------------------------------------------------------------------*
+ * ccw_check_return_code *
+ * *
+ *-------------------------------------------------------------------*/
+
+static void
+ccw_check_return_code(struct ccw_device *cdev, int return_code)
+{
+ CLAW_DBF_TEXT(4, trace, "ccwret");
+ if (return_code != 0) {
+ switch (return_code) {
+ case -EBUSY: /* BUSY is a transient state no action needed */
+ break;
+ case -ENODEV:
+ dev_err(&cdev->dev, "The remote channel adapter is not"
+ " available\n");
+ break;
+ case -EINVAL:
+ dev_err(&cdev->dev,
+ "The status of the remote channel adapter"
+ " is not valid\n");
+ break;
+ default:
+ dev_err(&cdev->dev, "The common device layer"
+ " returned error code %d\n",
+ return_code);
+ }
+ }
+ CLAW_DBF_TEXT(4, trace, "ccwret");
+} /* end of ccw_check_return_code */
+
+/*-------------------------------------------------------------------*
+* ccw_check_unit_check *
+*--------------------------------------------------------------------*/
+
+static void
+ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
+{
+ struct net_device *ndev = p_ch->ndev;
+ struct device *dev = &p_ch->cdev->dev;
+
+ CLAW_DBF_TEXT(4, trace, "unitchek");
+ dev_warn(dev, "The communication peer of %s disconnected\n",
+ ndev->name);
+
+ if (sense & 0x40) {
+ if (sense & 0x01) {
+ dev_warn(dev, "The remote channel adapter for"
+ " %s has been reset\n",
+ ndev->name);
+ }
+ } else if (sense & 0x20) {
+ if (sense & 0x04) {
+ dev_warn(dev, "A data streaming timeout occurred"
+ " for %s\n",
+ ndev->name);
+ } else if (sense & 0x10) {
+ dev_warn(dev, "The remote channel adapter for %s"
+ " is faulty\n",
+ ndev->name);
+ } else {
+ dev_warn(dev, "A data transfer parity error occurred"
+ " for %s\n",
+ ndev->name);
+ }
+ } else if (sense & 0x10) {
+ dev_warn(dev, "A read data parity error occurred"
+ " for %s\n",
+ ndev->name);
+ }
+
+} /* end of ccw_check_unit_check */
+
+/*-------------------------------------------------------------------*
+* find_link *
+*--------------------------------------------------------------------*/
+static int
+find_link(struct net_device *dev, char *host_name, char *ws_name )
+{
+ struct claw_privbk *privptr;
+ struct claw_env *p_env;
+ int rc=0;
+
+ CLAW_DBF_TEXT(2, setup, "findlink");
+ privptr = dev->ml_priv;
+ p_env=privptr->p_env;
+ switch (p_env->packing)
+ {
+ case PACKING_ASK:
+ if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
+ (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
+ rc = EINVAL;
+ break;
+ case DO_PACKED:
+ case PACK_SEND:
+ if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
+ (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
+ rc = EINVAL;
+ break;
+ default:
+ if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
+ (memcmp(p_env->api_type , ws_name, 8)!=0))
+ rc = EINVAL;
+ break;
+ }
+
+ return rc;
+} /* end of find_link */
+
+/*-------------------------------------------------------------------*
+ * claw_hw_tx *
+ * *
+ * *
+ *-------------------------------------------------------------------*/
+
+static int
+claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
+{
+ int rc=0;
+ struct claw_privbk *privptr;
+ struct ccwbk *p_this_ccw;
+ struct ccwbk *p_first_ccw;
+ struct ccwbk *p_last_ccw;
+ __u32 numBuffers;
+ signed long len_of_data;
+ unsigned long bytesInThisBuffer;
+ unsigned char *pDataAddress;
+ struct endccw *pEnd;
+ struct ccw1 tempCCW;
+ struct claw_env *p_env;
+ struct clawph *pk_head;
+ struct chbk *ch;
+
+ CLAW_DBF_TEXT(4, trace, "hw_tx");
+ privptr = (struct claw_privbk *)(dev->ml_priv);
+ p_env =privptr->p_env;
+ claw_free_wrt_buf(dev); /* Clean up free chain if possible */
+ /* scan the write queue to free any completed write packets */
+ p_first_ccw=NULL;
+ p_last_ccw=NULL;
+ if ((p_env->packing >= PACK_SEND) &&
+ (skb->cb[1] != 'P')) {
+ skb_push(skb,sizeof(struct clawph));
+ pk_head=(struct clawph *)skb->data;
+ pk_head->len=skb->len-sizeof(struct clawph);
+ if (pk_head->len%4) {
+ pk_head->len+= 4-(pk_head->len%4);
+ skb_pad(skb,4-(pk_head->len%4));
+ skb_put(skb,4-(pk_head->len%4));
+ }
+ if (p_env->packing == DO_PACKED)
+ pk_head->link_num = linkid;
+ else
+ pk_head->link_num = 0;
+ pk_head->flag = 0x00;
+ skb_pad(skb,4);
+ skb->cb[1] = 'P';
+ }
+ if (linkid == 0) {
+ if (claw_check_busy(dev)) {
+ if (privptr->write_free_count!=0) {
+ claw_clear_busy(dev);
+ }
+ else {
+ claw_strt_out_IO(dev );
+ claw_free_wrt_buf( dev );
+ if (privptr->write_free_count==0) {
+ ch = &privptr->channel[WRITE_CHANNEL];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ goto Done;
+ }
+ else {
+ claw_clear_busy(dev);
+ }
+ }
+ }
+ /* tx lock */
+ if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
+ ch = &privptr->channel[WRITE_CHANNEL];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ claw_strt_out_IO(dev );
+ rc=-EBUSY;
+ goto Done2;
+ }
+ }
+ /* See how many write buffers are required to hold this data */
+ numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
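+ /* e.g. a 9000-byte skb with the default 4 KB write_size needs
+ * DIV_ROUND_UP(9000, 4096) = 3 buffers; the numbers are
+ * illustrative, the actual write_size comes from p_env. */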
+
+ /* If that number of buffers isn't available, give up for now */
+ if (privptr->write_free_count < numBuffers ||
+ privptr->p_write_free_chain == NULL ) {
+
+ claw_setbit_busy(TB_NOBUFFER,dev);
+ ch = &privptr->channel[WRITE_CHANNEL];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ CLAW_DBF_TEXT(2, trace, "clawbusy");
+ goto Done2;
+ }
+ pDataAddress=skb->data;
+ len_of_data=skb->len;
+
+ while (len_of_data > 0) {
+ p_this_ccw=privptr->p_write_free_chain; /* get a block */
+ if (p_this_ccw == NULL) { /* lost the race */
+ ch = &privptr->channel[WRITE_CHANNEL];
+ atomic_inc(&skb->users);
+ skb_queue_tail(&ch->collect_queue, skb);
+ goto Done2;
+ }
+ privptr->p_write_free_chain=p_this_ccw->next;
+ p_this_ccw->next=NULL;
+ --privptr->write_free_count; /* -1 */
+ if (len_of_data >= privptr->p_env->write_size)
+ bytesInThisBuffer = privptr->p_env->write_size;
+ else
+ bytesInThisBuffer = len_of_data;
+ memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
+ len_of_data-=bytesInThisBuffer;
+ pDataAddress+=(unsigned long)bytesInThisBuffer;
+ /* setup write CCW */
+ p_this_ccw->write.cmd_code = (linkid * 8) +1;
+ if (len_of_data>0) {
+ p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
+ }
+ p_this_ccw->write.count=bytesInThisBuffer;
+ /* now add to end of this chain */
+ if (p_first_ccw==NULL) {
+ p_first_ccw=p_this_ccw;
+ }
+ if (p_last_ccw!=NULL) {
+ p_last_ccw->next=p_this_ccw;
+ /* set up TIC ccws */
+ p_last_ccw->w_TIC_1.cda=
+ (__u32)__pa(&p_this_ccw->write);
+ }
+ p_last_ccw=p_this_ccw; /* save new last block */
+ }
+
+ /* FirstCCW and LastCCW now contain a new set of write channel
+ * programs to append to the running channel program
+ */
+
+ if (p_first_ccw!=NULL) {
+ /* setup ending ccw sequence for this segment */
+ pEnd=privptr->p_end_ccw;
+ if (pEnd->write1) {
+ pEnd->write1=0x00; /* second end ccw is now active */
+ /* set up Tic CCWs */
+ p_last_ccw->w_TIC_1.cda=
+ (__u32)__pa(&pEnd->write2_nop1);
+ pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ pEnd->write2_nop2.flags =
+ CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ pEnd->write2_nop2.cda=0;
+ pEnd->write2_nop2.count=1;
+ }
+ else { /* end of if (pEnd->write1)*/
+ pEnd->write1=0x01; /* first end ccw is now active */
+ /* set up Tic CCWs */
+ p_last_ccw->w_TIC_1.cda=
+ (__u32)__pa(&pEnd->write1_nop1);
+ pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ pEnd->write1_nop2.flags =
+ CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ pEnd->write1_nop2.cda=0;
+ pEnd->write1_nop2.count=1;
+ } /* end if if (pEnd->write1) */
+
+ if (privptr->p_write_active_first==NULL ) {
+ privptr->p_write_active_first=p_first_ccw;
+ privptr->p_write_active_last=p_last_ccw;
+ }
+ else {
+ /* set up Tic CCWs */
+
+ tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
+ tempCCW.count=0;
+ tempCCW.flags=0;
+ tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
+
+ if (pEnd->write1) {
+
+ /*
+ * The first set of ending CCWs is chained to the new write
+ * chain, so the second set is chained to the active chain.
+ * Therefore modify the second set to point to the new write chain.
+ * Make sure we update the CCW atomically
+ * so the channel does not fetch it when it's only half done.
+ */
+ memcpy( &pEnd->write2_nop2, &tempCCW ,
+ sizeof(struct ccw1));
+ privptr->p_write_active_last->w_TIC_1.cda=
+ (__u32)__pa(&p_first_ccw->write);
+ }
+ else {
+
+ /* make sure we update the CCW atomically
+ * so the channel does not fetch it when it's only half done
+ */
+ memcpy(&pEnd->write1_nop2, &tempCCW ,
+ sizeof(struct ccw1));
+ privptr->p_write_active_last->w_TIC_1.cda=
+ (__u32)__pa(&p_first_ccw->write);
+
+ } /* end if if (pEnd->write1) */
+
+ privptr->p_write_active_last->next=p_first_ccw;
+ privptr->p_write_active_last=p_last_ccw;
+ }
+
+ } /* endif (p_first_ccw!=NULL) */
+ dev_kfree_skb_any(skb);
+ claw_strt_out_IO(dev );
+ /* if write free count is zero , set NOBUFFER */
+ if (privptr->write_free_count==0) {
+ claw_setbit_busy(TB_NOBUFFER,dev);
+ }
+Done2:
+ claw_clearbit_busy(TB_TX,dev);
+Done:
+ return(rc);
+} /* end of claw_hw_tx */
+
+/*-------------------------------------------------------------------*
+* *
+* init_ccw_bk *
+* *
+*--------------------------------------------------------------------*/
+
+static int
+init_ccw_bk(struct net_device *dev)
+{
+
+ __u32 ccw_blocks_required;
+ __u32 ccw_blocks_perpage;
+ __u32 ccw_pages_required;
+ __u32 claw_reads_perpage=1;
+ __u32 claw_read_pages;
+ __u32 claw_writes_perpage=1;
+ __u32 claw_write_pages;
+ void *p_buff=NULL;
+ struct ccwbk*p_free_chain;
+ struct ccwbk*p_buf;
+ struct ccwbk*p_last_CCWB;
+ struct ccwbk*p_first_CCWB;
+ struct endccw *p_endccw=NULL;
+ addr_t real_address;
+ struct claw_privbk *privptr = dev->ml_priv;
+ struct clawh *pClawH=NULL;
+ addr_t real_TIC_address;
+ int i,j;
+ CLAW_DBF_TEXT(4, trace, "init_ccw");
+
+ /* initialize statistics field */
+ privptr->active_link_ID=0;
+ /* initialize ccwbk pointers */
+ privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
+ privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
+ privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
+ privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
+ privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
+ privptr->p_end_ccw=NULL; /* pointer to ending ccw */
+ privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
+ privptr->buffs_alloc = 0;
+ memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
+ memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
+ /* initialize free write ccwbk counter */
+ privptr->write_free_count=0; /* number of free bufs on write chain */
+ p_last_CCWB = NULL;
+ p_first_CCWB= NULL;
+ /*
+ * We need 1 CCW block for each read buffer, 1 for each
+ * write buffer, plus 1 for ClawSignalBlock
+ */
+ ccw_blocks_required =
+ privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
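+ /* e.g. with the default 5 read and 5 write buffers set in
+ * claw_probe(), ccw_blocks_required = 5 + 5 + 1 = 11 */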
+ /*
+ * compute number of CCW blocks that will fit in a page
+ */
+ ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
+ ccw_pages_required=
+ DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
+
+ /*
+ * read and write sizes are set by 2 constants in claw.h
+ * 4k and 32k. Unpacked values other than 4k are not going to
+ * provide good performance. With packing support, 32k buffers
+ * are used.
+ */
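+ /* e.g. a 4 KB read_size on a 4 KB-page system gives
+ * claw_reads_perpage = 1, so claw_read_pages equals read_buffers;
+ * a 32 KB packing buffer instead needs p_buff_pages_perread =
+ * 32k / 4k = 8 pages per buffer. (Illustrative arithmetic.) */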
+ if (privptr->p_env->read_size < PAGE_SIZE) {
+ claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
+ claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
+ claw_reads_perpage);
+ }
+ else { /* > or equal */
+ privptr->p_buff_pages_perread =
+ DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
+ claw_read_pages = privptr->p_env->read_buffers *
+ privptr->p_buff_pages_perread;
+ }
+ if (privptr->p_env->write_size < PAGE_SIZE) {
+ claw_writes_perpage =
+ PAGE_SIZE / privptr->p_env->write_size;
+ claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
+ claw_writes_perpage);
+
+ }
+ else { /* > or equal */
+ privptr->p_buff_pages_perwrite =
+ DIV_ROUND_UP(privptr->p_env->write_size, PAGE_SIZE);
+ claw_write_pages = privptr->p_env->write_buffers *
+ privptr->p_buff_pages_perwrite;
+ }
+ /*
+ * allocate ccw_pages_required
+ */
+ if (privptr->p_buff_ccw==NULL) {
+ privptr->p_buff_ccw=
+ (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(ccw_pages_required ));
+ if (privptr->p_buff_ccw==NULL) {
+ return -ENOMEM;
+ }
+ privptr->p_buff_ccw_num=ccw_pages_required;
+ }
+ memset(privptr->p_buff_ccw, 0x00,
+ privptr->p_buff_ccw_num * PAGE_SIZE);
+
+ /*
+ * obtain ending ccw block address
+ *
+ */
+ privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
+ real_address = (__u32)__pa(privptr->p_end_ccw);
+ /* Initialize ending CCW block */
+ p_endccw=privptr->p_end_ccw;
+ p_endccw->real=real_address;
+ p_endccw->write1=0x00;
+ p_endccw->read1=0x00;
+
+ /* write1_nop1 */
+ p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->write1_nop1.count = 1;
+ p_endccw->write1_nop1.cda = 0;
+
+ /* write1_nop2 */
+ p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->write1_nop2.count = 1;
+ p_endccw->write1_nop2.cda = 0;
+
+ /* write2_nop1 */
+ p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->write2_nop1.count = 1;
+ p_endccw->write2_nop1.cda = 0;
+
+ /* write2_nop2 */
+ p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->write2_nop2.count = 1;
+ p_endccw->write2_nop2.cda = 0;
+
+ /* read1_nop1 */
+ p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->read1_nop1.count = 1;
+ p_endccw->read1_nop1.cda = 0;
+
+ /* read1_nop2 */
+ p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->read1_nop2.count = 1;
+ p_endccw->read1_nop2.cda = 0;
+
+ /* read2_nop1 */
+ p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
+ p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_endccw->read2_nop1.count = 1;
+ p_endccw->read2_nop1.cda = 0;
+
+ /* read2_nop2 */
+ p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
+ p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
+ p_endccw->read2_nop2.count = 1;
+ p_endccw->read2_nop2.cda = 0;
+
+ /*
+ * Build a chain of CCWs
+ *
+ */
+ p_buff=privptr->p_buff_ccw;
+
+ p_free_chain=NULL;
+ for (i=0 ; i < ccw_pages_required; i++ ) {
+ real_address = (__u32)__pa(p_buff);
+ p_buf=p_buff;
+ for (j=0 ; j < ccw_blocks_perpage ; j++) {
+ p_buf->next = p_free_chain;
+ p_free_chain = p_buf;
+ p_buf->real=(__u32)__pa(p_buf);
+ ++p_buf;
+ }
+ p_buff+=PAGE_SIZE;
+ }
+ /*
+ * Initialize ClawSignalBlock
+ *
+ */
+ if (privptr->p_claw_signal_blk==NULL) {
+ privptr->p_claw_signal_blk=p_free_chain;
+ p_free_chain=p_free_chain->next;
+ pClawH=(struct clawh *)privptr->p_claw_signal_blk;
+ pClawH->length=0xffff;
+ pClawH->opcode=0xff;
+ pClawH->flag=CLAW_BUSY;
+ }
+
+ /*
+ * allocate write_pages_required and add to free chain
+ */
+ if (privptr->p_buff_write==NULL) {
+ if (privptr->p_env->write_size < PAGE_SIZE) {
+ privptr->p_buff_write=
+ (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(claw_write_pages ));
+ if (privptr->p_buff_write==NULL) {
+ privptr->p_buff_ccw=NULL;
+ return -ENOMEM;
+ }
+ /*
+ * Build CLAW write free chain
+ *
+ */
+
+		memset(privptr->p_buff_write, 0x00,
+			claw_write_pages * PAGE_SIZE);
+ privptr->p_write_free_chain=NULL;
+
+ p_buff=privptr->p_buff_write;
+
+ for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
+ p_buf = p_free_chain; /* get a CCW */
+ p_free_chain = p_buf->next;
+ p_buf->next =privptr->p_write_free_chain;
+ privptr->p_write_free_chain = p_buf;
+ p_buf-> p_buffer = (struct clawbuf *)p_buff;
+ p_buf-> write.cda = (__u32)__pa(p_buff);
+ p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.count = 1;
+ p_buf-> w_read_FF.cda =
+ (__u32)__pa(&p_buf-> header.flag);
+ p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> w_TIC_1.flags = 0;
+ p_buf-> w_TIC_1.count = 0;
+
+ if (((unsigned long)p_buff +
+ privptr->p_env->write_size) >=
+ ((unsigned long)(p_buff+2*
+ (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
+ p_buff = p_buff+privptr->p_env->write_size;
+ }
+ }
+ }
+	else	/* Buffers are >= PAGE_SIZE. 1 buff per get_free_pages */
+ {
+ privptr->p_write_free_chain=NULL;
+ for (i = 0; i< privptr->p_env->write_buffers ; i++) {
+ p_buff=(void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite) );
+ if (p_buff==NULL) {
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_ccw_num));
+ privptr->p_buff_ccw=NULL;
+ p_buf=privptr->p_buff_write;
+ while (p_buf!=NULL) {
+ free_pages((unsigned long)
+ p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite));
+ p_buf=p_buf->next;
+ }
+ return -ENOMEM;
+ } /* Error on get_pages */
+ memset(p_buff, 0x00, privptr->p_env->write_size );
+ p_buf = p_free_chain;
+ p_free_chain = p_buf->next;
+ p_buf->next = privptr->p_write_free_chain;
+ privptr->p_write_free_chain = p_buf;
+ privptr->p_buff_write = p_buf;
+ p_buf->p_buffer=(struct clawbuf *)p_buff;
+ p_buf-> write.cda = (__u32)__pa(p_buff);
+ p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> w_read_FF.count = 1;
+ p_buf-> w_read_FF.cda =
+ (__u32)__pa(&p_buf-> header.flag);
+ p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> w_TIC_1.flags = 0;
+ p_buf-> w_TIC_1.count = 0;
+ } /* for all write_buffers */
+
+ } /* else buffers are PAGE_SIZE or bigger */
+
+ }
+ privptr->p_buff_write_num=claw_write_pages;
+ privptr->write_free_count=privptr->p_env->write_buffers;
+
+
+ /*
+ * allocate read_pages_required and chain to free chain
+ */
+ if (privptr->p_buff_read==NULL) {
+ if (privptr->p_env->read_size < PAGE_SIZE) {
+ privptr->p_buff_read=
+ (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(claw_read_pages) );
+ if (privptr->p_buff_read==NULL) {
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_ccw_num));
+				/* free the write pages; size is < page size */
+ free_pages((unsigned long)privptr->p_buff_write,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_write_num));
+ privptr->p_buff_ccw=NULL;
+ privptr->p_buff_write=NULL;
+ return -ENOMEM;
+ }
+ memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
+ privptr->p_buff_read_num=claw_read_pages;
+ /*
+ * Build CLAW read free chain
+ *
+ */
+ p_buff=privptr->p_buff_read;
+ for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
+ p_buf = p_free_chain;
+ p_free_chain = p_buf->next;
+
+ if (p_last_CCWB==NULL) {
+ p_buf->next=NULL;
+ real_TIC_address=0;
+ p_last_CCWB=p_buf;
+ }
+ else {
+ p_buf->next=p_first_CCWB;
+ real_TIC_address=
+ (__u32)__pa(&p_first_CCWB -> read );
+ }
+
+ p_first_CCWB=p_buf;
+
+ p_buf->p_buffer=(struct clawbuf *)p_buff;
+ /* initialize read command */
+ p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
+ p_buf-> read.cda = (__u32)__pa(p_buff);
+ p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read.count = privptr->p_env->read_size;
+
+ /* initialize read_h command */
+ p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
+ p_buf-> read_h.cda =
+ (__u32)__pa(&(p_buf->header));
+ p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read_h.count = sizeof(struct clawh);
+
+ /* initialize Signal command */
+ p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
+ p_buf-> signal.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> signal.count = 1;
+
+ /* initialize r_TIC_1 command */
+ p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
+ p_buf-> r_TIC_1.flags = 0;
+ p_buf-> r_TIC_1.count = 0;
+
+ /* initialize r_read_FF command */
+ p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> r_read_FF.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> r_read_FF.flags =
+ CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
+ p_buf-> r_read_FF.count = 1;
+
+ /* initialize r_TIC_2 */
+ memcpy(&p_buf->r_TIC_2,
+ &p_buf->r_TIC_1, sizeof(struct ccw1));
+
+ /* initialize Header */
+ p_buf->header.length=0xffff;
+ p_buf->header.opcode=0xff;
+ p_buf->header.flag=CLAW_PENDING;
+
+ if (((unsigned long)p_buff+privptr->p_env->read_size) >=
+ ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
+ -1)
+ & PAGE_MASK)) {
+ p_buff= p_buff+privptr->p_env->read_size;
+ }
+ else {
+ p_buff=
+ (void *)((unsigned long)
+ (p_buff+2*(privptr->p_env->read_size)-1)
+ & PAGE_MASK) ;
+ }
+ } /* for read_buffers */
+ } /* read_size < PAGE_SIZE */
+ else { /* read Size >= PAGE_SIZE */
+ for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
+ p_buff = (void *)__get_free_pages(__GFP_DMA,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perread));
+ if (p_buff==NULL) {
+ free_pages((unsigned long)privptr->p_buff_ccw,
+ (int)pages_to_order_of_mag(privptr->
+ p_buff_ccw_num));
+ /* free the write pages */
+ p_buf=privptr->p_buff_write;
+ while (p_buf!=NULL) {
+ free_pages(
+ (unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perwrite));
+ p_buf=p_buf->next;
+ }
+ /* free any read pages already alloc */
+ p_buf=privptr->p_buff_read;
+ while (p_buf!=NULL) {
+ free_pages(
+ (unsigned long)p_buf->p_buffer,
+ (int)pages_to_order_of_mag(
+ privptr->p_buff_pages_perread));
+ p_buf=p_buf->next;
+ }
+ privptr->p_buff_ccw=NULL;
+ privptr->p_buff_write=NULL;
+ return -ENOMEM;
+ }
+ memset(p_buff, 0x00, privptr->p_env->read_size);
+ p_buf = p_free_chain;
+ privptr->p_buff_read = p_buf;
+ p_free_chain = p_buf->next;
+
+ if (p_last_CCWB==NULL) {
+ p_buf->next=NULL;
+ real_TIC_address=0;
+ p_last_CCWB=p_buf;
+ }
+ else {
+ p_buf->next=p_first_CCWB;
+ real_TIC_address=
+ (addr_t)__pa(
+ &p_first_CCWB -> read );
+ }
+
+ p_first_CCWB=p_buf;
+ /* save buff address */
+ p_buf->p_buffer=(struct clawbuf *)p_buff;
+ /* initialize read command */
+ p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
+ p_buf-> read.cda = (__u32)__pa(p_buff);
+ p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read.count = privptr->p_env->read_size;
+
+ /* initialize read_h command */
+ p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
+ p_buf-> read_h.cda =
+ (__u32)__pa(&(p_buf->header));
+ p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> read_h.count = sizeof(struct clawh);
+
+ /* initialize Signal command */
+ p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
+ p_buf-> signal.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ p_buf-> signal.count = 1;
+
+ /* initialize r_TIC_1 command */
+ p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
+ p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
+ p_buf-> r_TIC_1.flags = 0;
+ p_buf-> r_TIC_1.count = 0;
+
+ /* initialize r_read_FF command */
+ p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
+ p_buf-> r_read_FF.cda =
+ (__u32)__pa(&(pClawH->flag));
+ p_buf-> r_read_FF.flags =
+ CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
+ p_buf-> r_read_FF.count = 1;
+
+ /* initialize r_TIC_2 */
+ memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
+ sizeof(struct ccw1));
+
+ /* initialize Header */
+ p_buf->header.length=0xffff;
+ p_buf->header.opcode=0xff;
+ p_buf->header.flag=CLAW_PENDING;
+
+ } /* For read_buffers */
+ } /* read_size >= PAGE_SIZE */
+	}	/* p_buff_read == NULL */
+	add_claw_reads(dev, p_first_CCWB, p_last_CCWB);
+ privptr->buffs_alloc = 1;
+
+ return 0;
+} /* end of init_ccw_bk */
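+
+/*
+ * All of the CCW block bookkeeping in init_ccw_bk uses one simple
+ * LIFO pattern, sketched here for illustration only (target_list
+ * stands for, e.g., privptr->p_write_free_chain):
+ *
+ *	p_buf = p_free_chain;		pop a free ccwbk
+ *	p_free_chain = p_buf->next;
+ *	p_buf->next = target_list;	push it onto a target list
+ *	target_list = p_buf;
+ */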
+
+/*-------------------------------------------------------------------*
+* *
+* probe_error *
+* *
+*--------------------------------------------------------------------*/
+
+static void
+probe_error( struct ccwgroup_device *cgdev)
+{
+ struct claw_privbk *privptr;
+
+ CLAW_DBF_TEXT(4, trace, "proberr");
+ privptr = dev_get_drvdata(&cgdev->dev);
+ if (privptr != NULL) {
+ dev_set_drvdata(&cgdev->dev, NULL);
+ kfree(privptr->p_env);
+ kfree(privptr->p_mtc_envelope);
+ kfree(privptr);
+ }
+} /* probe_error */
+
+/*-------------------------------------------------------------------*
+* claw_process_control *
+* *
+* *
+*--------------------------------------------------------------------*/
+
+static int
+claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
+{
+
+ struct clawbuf *p_buf;
+ struct clawctl ctlbk;
+ struct clawctl *p_ctlbk;
+ char temp_host_name[8];
+ char temp_ws_name[8];
+ struct claw_privbk *privptr;
+ struct claw_env *p_env;
+ struct sysval *p_sysval;
+ struct conncmd *p_connect=NULL;
+ int rc;
+ struct chbk *p_ch = NULL;
+ struct device *tdev;
+ CLAW_DBF_TEXT(2, setup, "clw_cntl");
+	udelay(1000);	/* Wait a ms for the control packets to
+			 * catch up to each other */
+ privptr = dev->ml_priv;
+ p_env=privptr->p_env;
+ tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
+ memcpy( &temp_host_name, p_env->host_name, 8);
+ memcpy( &temp_ws_name, p_env->adapter_name , 8);
+ dev_info(tdev, "%s: CLAW device %.8s: "
+ "Received Control Packet\n",
+ dev->name, temp_ws_name);
+ if (privptr->release_pend==1) {
+ return 0;
+ }
+ p_buf=p_ccw->p_buffer;
+ p_ctlbk=&ctlbk;
+ if (p_env->packing == DO_PACKED) { /* packing in progress?*/
+ memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
+ } else {
+ memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
+ }
+ switch (p_ctlbk->command)
+ {
+ case SYSTEM_VALIDATE_REQUEST:
+ if (p_ctlbk->version != CLAW_VERSION_ID) {
+ claw_snd_sys_validate_rsp(dev, p_ctlbk,
+ CLAW_RC_WRONG_VERSION);
+ dev_warn(tdev, "The communication peer of %s"
+ " uses an incorrect API version %d\n",
+ dev->name, p_ctlbk->version);
+ }
+ p_sysval = (struct sysval *)&(p_ctlbk->data);
+ dev_info(tdev, "%s: Recv Sys Validate Request: "
+ "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
+ "Host name=%.8s\n",
+ dev->name, p_ctlbk->version,
+ p_ctlbk->linkid,
+ p_ctlbk->correlator,
+ p_sysval->WS_name,
+ p_sysval->host_name);
+ if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
+ claw_snd_sys_validate_rsp(dev, p_ctlbk,
+ CLAW_RC_NAME_MISMATCH);
+ CLAW_DBF_TEXT(2, setup, "HSTBAD");
+ CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
+ CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
+ dev_warn(tdev,
+ "Host name %s for %s does not match the"
+ " remote adapter name %s\n",
+ p_sysval->host_name,
+ dev->name,
+ temp_host_name);
+ }
+ if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
+ claw_snd_sys_validate_rsp(dev, p_ctlbk,
+ CLAW_RC_NAME_MISMATCH);
+ CLAW_DBF_TEXT(2, setup, "WSNBAD");
+ CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
+ CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
+ dev_warn(tdev, "Adapter name %s for %s does not match"
+ " the remote host name %s\n",
+ p_sysval->WS_name,
+ dev->name,
+ temp_ws_name);
+ }
+ if ((p_sysval->write_frame_size < p_env->write_size) &&
+ (p_env->packing == 0)) {
+ claw_snd_sys_validate_rsp(dev, p_ctlbk,
+ CLAW_RC_HOST_RCV_TOO_SMALL);
+ dev_warn(tdev,
+ "The local write buffer is smaller than the"
+ " remote read buffer\n");
+ CLAW_DBF_TEXT(2, setup, "wrtszbad");
+ }
+ if ((p_sysval->read_frame_size < p_env->read_size) &&
+ (p_env->packing == 0)) {
+ claw_snd_sys_validate_rsp(dev, p_ctlbk,
+ CLAW_RC_HOST_RCV_TOO_SMALL);
+ dev_warn(tdev,
+ "The local read buffer is smaller than the"
+ " remote write buffer\n");
+ CLAW_DBF_TEXT(2, setup, "rdsizbad");
+ }
+ claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
+ dev_info(tdev,
+ "CLAW device %.8s: System validate"
+ " completed.\n", temp_ws_name);
+ dev_info(tdev,
+ "%s: sys Validate Rsize:%d Wsize:%d\n",
+ dev->name, p_sysval->read_frame_size,
+ p_sysval->write_frame_size);
+ privptr->system_validate_comp = 1;
+ if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
+ p_env->packing = PACKING_ASK;
+ claw_strt_conn_req(dev);
+ break;
+ case SYSTEM_VALIDATE_RESPONSE:
+ p_sysval = (struct sysval *)&(p_ctlbk->data);
+ dev_info(tdev,
+ "Settings for %s validated (version=%d, "
+ "remote device=%d, rc=%d, adapter name=%.8s, "
+ "host name=%.8s)\n",
+ dev->name,
+ p_ctlbk->version,
+ p_ctlbk->correlator,
+ p_ctlbk->rc,
+ p_sysval->WS_name,
+ p_sysval->host_name);
+ switch (p_ctlbk->rc) {
+ case 0:
+ dev_info(tdev, "%s: CLAW device "
+ "%.8s: System validate completed.\n",
+ dev->name, temp_ws_name);
+ if (privptr->system_validate_comp == 0)
+ claw_strt_conn_req(dev);
+ privptr->system_validate_comp = 1;
+ break;
+ case CLAW_RC_NAME_MISMATCH:
+ dev_warn(tdev, "Validating %s failed because of"
+ " a host or adapter name mismatch\n",
+ dev->name);
+ break;
+ case CLAW_RC_WRONG_VERSION:
+ dev_warn(tdev, "Validating %s failed because of a"
+ " version conflict\n",
+ dev->name);
+ break;
+ case CLAW_RC_HOST_RCV_TOO_SMALL:
+ dev_warn(tdev, "Validating %s failed because of a"
+ " frame size conflict\n",
+ dev->name);
+ break;
+ default:
+ dev_warn(tdev, "The communication peer of %s rejected"
+ " the connection\n",
+ dev->name);
+ break;
+ }
+ break;
+
+ case CONNECTION_REQUEST:
+ p_connect = (struct conncmd *)&(p_ctlbk->data);
+ dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
+ "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
+ dev->name,
+ p_ctlbk->version,
+ p_ctlbk->linkid,
+ p_ctlbk->correlator,
+ p_connect->host_name,
+ p_connect->WS_name);
+ if (privptr->active_link_ID != 0) {
+ claw_snd_disc(dev, p_ctlbk);
+ dev_info(tdev, "%s rejected a connection request"
+ " because it is already active\n",
+ dev->name);
+ }
+ if (p_ctlbk->linkid != 1) {
+ claw_snd_disc(dev, p_ctlbk);
+ dev_info(tdev, "%s rejected a request to open multiple"
+ " connections\n",
+ dev->name);
+ }
+ rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
+ if (rc != 0) {
+ claw_snd_disc(dev, p_ctlbk);
+ dev_info(tdev, "%s rejected a connection request"
+ " because of a type mismatch\n",
+ dev->name);
+ }
+ claw_send_control(dev,
+ CONNECTION_CONFIRM, p_ctlbk->linkid,
+ p_ctlbk->correlator,
+ 0, p_connect->host_name,
+ p_connect->WS_name);
+ if (p_env->packing == PACKING_ASK) {
+ p_env->packing = PACK_SEND;
+ claw_snd_conn_req(dev, 0);
+ }
+ dev_info(tdev, "%s: CLAW device %.8s: Connection "
+ "completed link_id=%d.\n",
+ dev->name, temp_ws_name,
+ p_ctlbk->linkid);
+ privptr->active_link_ID = p_ctlbk->linkid;
+ p_ch = &privptr->channel[WRITE_CHANNEL];
+ wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
+ break;
+ case CONNECTION_RESPONSE:
+ p_connect = (struct conncmd *)&(p_ctlbk->data);
+ dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
+ "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
+ dev->name,
+ p_ctlbk->version,
+ p_ctlbk->linkid,
+ p_ctlbk->correlator,
+ p_ctlbk->rc,
+ p_connect->host_name,
+ p_connect->WS_name);
+
+ if (p_ctlbk->rc != 0) {
+ dev_warn(tdev, "The communication peer of %s rejected"
+ " a connection request\n",
+ dev->name);
+ return 1;
+ }
+ rc = find_link(dev,
+ p_connect->host_name, p_connect->WS_name);
+ if (rc != 0) {
+ claw_snd_disc(dev, p_ctlbk);
+ dev_warn(tdev, "The communication peer of %s"
+ " rejected a connection "
+ "request because of a type mismatch\n",
+ dev->name);
+ }
+		/* link ID stays negative until CONNECTION_CONFIRM arrives */
+ privptr->active_link_ID = -(p_ctlbk->linkid);
+ break;
+ case CONNECTION_CONFIRM:
+ p_connect = (struct conncmd *)&(p_ctlbk->data);
+ dev_info(tdev,
+ "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
+ "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
+ dev->name,
+ p_ctlbk->version,
+ p_ctlbk->linkid,
+ p_ctlbk->correlator,
+ p_connect->host_name,
+ p_connect->WS_name);
+ if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
+ privptr->active_link_ID = p_ctlbk->linkid;
+ if (p_env->packing > PACKING_ASK) {
+ dev_info(tdev,
+ "%s: Confirmed Now packing\n", dev->name);
+ p_env->packing = DO_PACKED;
+ }
+ p_ch = &privptr->channel[WRITE_CHANNEL];
+ wake_up(&p_ch->wait);
+ } else {
+ dev_warn(tdev, "Activating %s failed because of"
+ " an incorrect link ID=%d\n",
+ dev->name, p_ctlbk->linkid);
+ claw_snd_disc(dev, p_ctlbk);
+ }
+ break;
+ case DISCONNECT:
+ dev_info(tdev, "%s: Disconnect: "
+ "Vers=%d,link_id=%d,Corr=%d\n",
+ dev->name, p_ctlbk->version,
+ p_ctlbk->linkid, p_ctlbk->correlator);
+ if ((p_ctlbk->linkid == 2) &&
+ (p_env->packing == PACK_SEND)) {
+ privptr->active_link_ID = 1;
+ p_env->packing = DO_PACKED;
+ } else
+ privptr->active_link_ID = 0;
+ break;
+ case CLAW_ERROR:
+ dev_warn(tdev, "The communication peer of %s failed\n",
+ dev->name);
+ break;
+ default:
+ dev_warn(tdev, "The communication peer of %s sent"
+ " an unknown command code\n",
+ dev->name);
+ break;
+ }
+
+ return 0;
+} /* end of claw_process_control */
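+
+/*
+ * Summary of the handshake handled above, as implied by the switch
+ * cases (both peers run the same logic):
+ *
+ *	SYSTEM_VALIDATE_REQUEST  --->
+ *	                         <---  SYSTEM_VALIDATE_RESPONSE (rc)
+ *	CONNECTION_REQUEST       --->
+ *	                         <---  CONNECTION_RESPONSE
+ *	CONNECTION_CONFIRM       --->  link active, claw_open is woken
+ *
+ * The link ID is kept negative between CONNECTION_RESPONSE and
+ * CONNECTION_CONFIRM to mark the connection as pending.
+ */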
+
+
+/*-------------------------------------------------------------------*
+* claw_send_control *
+* *
+*--------------------------------------------------------------------*/
+
+static int
+claw_send_control(struct net_device *dev, __u8 type, __u8 link,
+ __u8 correlator, __u8 rc, char *local_name, char *remote_name)
+{
+ struct claw_privbk *privptr;
+ struct clawctl *p_ctl;
+ struct sysval *p_sysval;
+ struct conncmd *p_connect;
+ struct sk_buff *skb;
+
+ CLAW_DBF_TEXT(2, setup, "sndcntl");
+ privptr = dev->ml_priv;
+ p_ctl=(struct clawctl *)&privptr->ctl_bk;
+
+ p_ctl->command=type;
+ p_ctl->version=CLAW_VERSION_ID;
+ p_ctl->linkid=link;
+ p_ctl->correlator=correlator;
+ p_ctl->rc=rc;
+
+ p_sysval=(struct sysval *)&p_ctl->data;
+ p_connect=(struct conncmd *)&p_ctl->data;
+
+ switch (p_ctl->command) {
+ case SYSTEM_VALIDATE_REQUEST:
+ case SYSTEM_VALIDATE_RESPONSE:
+ memcpy(&p_sysval->host_name, local_name, 8);
+ memcpy(&p_sysval->WS_name, remote_name, 8);
+ if (privptr->p_env->packing > 0) {
+ p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
+ p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
+ } else {
+ /* how big is the biggest group of packets */
+ p_sysval->read_frame_size =
+ privptr->p_env->read_size;
+ p_sysval->write_frame_size =
+ privptr->p_env->write_size;
+ }
+ memset(&p_sysval->reserved, 0x00, 4);
+ break;
+ case CONNECTION_REQUEST:
+ case CONNECTION_RESPONSE:
+ case CONNECTION_CONFIRM:
+ case DISCONNECT:
+ memcpy(&p_sysval->host_name, local_name, 8);
+ memcpy(&p_sysval->WS_name, remote_name, 8);
+ if (privptr->p_env->packing > 0) {
+ /* How big is the biggest packet */
+ p_connect->reserved1[0]=CLAW_FRAME_SIZE;
+ p_connect->reserved1[1]=CLAW_FRAME_SIZE;
+ } else {
+ memset(&p_connect->reserved1, 0x00, 4);
+ memset(&p_connect->reserved2, 0x00, 4);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* write Control Record to the device */
+
+
+ skb = dev_alloc_skb(sizeof(struct clawctl));
+ if (!skb) {
+ return -ENOMEM;
+ }
+ memcpy(skb_put(skb, sizeof(struct clawctl)),
+ p_ctl, sizeof(struct clawctl));
+ if (privptr->p_env->packing >= PACK_SEND)
+ claw_hw_tx(skb, dev, 1);
+ else
+ claw_hw_tx(skb, dev, 0);
+ return 0;
+} /* end of claw_send_control */
+
+/*-------------------------------------------------------------------*
+* claw_snd_conn_req *
+* *
+*--------------------------------------------------------------------*/
+static int
+claw_snd_conn_req(struct net_device *dev, __u8 link)
+{
+ int rc;
+ struct claw_privbk *privptr = dev->ml_priv;
+ struct clawctl *p_ctl;
+
+ CLAW_DBF_TEXT(2, setup, "snd_conn");
+ rc = 1;
+ p_ctl=(struct clawctl *)&privptr->ctl_bk;
+ p_ctl->linkid = link;
+ if ( privptr->system_validate_comp==0x00 ) {
+ return rc;
+ }
+ if (privptr->p_env->packing == PACKING_ASK )
+ rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
+ WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
+ if (privptr->p_env->packing == PACK_SEND) {
+ rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
+ WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
+ }
+ if (privptr->p_env->packing == 0)
+ rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
+ HOST_APPL_NAME, privptr->p_env->api_type);
+ return rc;
+
+} /* end of claw_snd_conn_req */
+
+
+/*-------------------------------------------------------------------*
+* claw_snd_disc *
+* *
+*--------------------------------------------------------------------*/
+
+static int
+claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
+{
+ int rc;
+ struct conncmd * p_connect;
+
+ CLAW_DBF_TEXT(2, setup, "snd_dsc");
+ p_connect=(struct conncmd *)&p_ctl->data;
+
+ rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
+ p_ctl->correlator, 0,
+ p_connect->host_name, p_connect->WS_name);
+ return rc;
+} /* end of claw_snd_disc */
+
+
+/*-------------------------------------------------------------------*
+* claw_snd_sys_validate_rsp *
+* *
+*--------------------------------------------------------------------*/
+
+static int
+claw_snd_sys_validate_rsp(struct net_device *dev,
+ struct clawctl *p_ctl, __u32 return_code)
+{
+ struct claw_env * p_env;
+ struct claw_privbk *privptr;
+ int rc;
+
+ CLAW_DBF_TEXT(2, setup, "chkresp");
+ privptr = dev->ml_priv;
+ p_env=privptr->p_env;
+ rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
+ p_ctl->linkid,
+ p_ctl->correlator,
+ return_code,
+ p_env->host_name,
+ p_env->adapter_name );
+ return rc;
+} /* end of claw_snd_sys_validate_rsp */
+
+/*-------------------------------------------------------------------*
+* claw_strt_conn_req *
+* *
+*--------------------------------------------------------------------*/
+
+static int
+claw_strt_conn_req(struct net_device *dev )
+{
+ int rc;
+
+ CLAW_DBF_TEXT(2, setup, "conn_req");
+ rc=claw_snd_conn_req(dev, 1);
+ return rc;
+} /* end of claw_strt_conn_req */
+
+
+
+/*-------------------------------------------------------------------*
+ * claw_stats *
+ *-------------------------------------------------------------------*/
+
+static struct
+net_device_stats *claw_stats(struct net_device *dev)
+{
+ struct claw_privbk *privptr;
+
+ CLAW_DBF_TEXT(4, trace, "stats");
+ privptr = dev->ml_priv;
+ return &privptr->stats;
+} /* end of claw_stats */
+
+
+/*-------------------------------------------------------------------*
+* unpack_read *
+* *
+*--------------------------------------------------------------------*/
+static void
+unpack_read(struct net_device *dev )
+{
+ struct sk_buff *skb;
+ struct claw_privbk *privptr;
+ struct claw_env *p_env;
+ struct ccwbk *p_this_ccw;
+ struct ccwbk *p_first_ccw;
+ struct ccwbk *p_last_ccw;
+ struct clawph *p_packh;
+ void *p_packd;
+ struct clawctl *p_ctlrec=NULL;
+ struct device *p_dev;
+
+ __u32 len_of_data;
+ __u32 pack_off;
+ __u8 link_num;
+ __u8 mtc_this_frm=0;
+ __u32 bytes_to_mov;
+ int i=0;
+ int p=0;
+
+ CLAW_DBF_TEXT(4, trace, "unpkread");
+ p_first_ccw=NULL;
+ p_last_ccw=NULL;
+ p_packh=NULL;
+ p_packd=NULL;
+ privptr = dev->ml_priv;
+
+ p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
+ p_env = privptr->p_env;
+ p_this_ccw=privptr->p_read_active_first;
+ while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
+ pack_off = 0;
+ p = 0;
+ p_this_ccw->header.flag=CLAW_PENDING;
+ privptr->p_read_active_first=p_this_ccw->next;
+ p_this_ccw->next=NULL;
+ p_packh = (struct clawph *)p_this_ccw->p_buffer;
+ if ((p_env->packing == PACK_SEND) &&
+ (p_packh->len == 32) &&
+ (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
+ p_packh++; /* peek past pack header */
+ p_ctlrec = (struct clawctl *)p_packh;
+			p_packh--;	/* undo the peek */
+ if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
+ (p_ctlrec->command == CONNECTION_CONFIRM))
+ p_env->packing = DO_PACKED;
+ }
+ if (p_env->packing == DO_PACKED)
+ link_num=p_packh->link_num;
+ else
+ link_num=p_this_ccw->header.opcode / 8;
+ if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
+ mtc_this_frm=1;
+ if (p_this_ccw->header.length!=
+ privptr->p_env->read_size ) {
+ dev_warn(p_dev,
+ "The communication peer of %s"
+ " sent a faulty"
+ " frame of length %02x\n",
+ dev->name, p_this_ccw->header.length);
+ }
+ }
+
+ if (privptr->mtc_skipping) {
+ /*
+ * We're in the mode of skipping past a
+ * multi-frame message
+ * that we can't process for some reason or other.
+ * The first frame without the More-To-Come flag is
+ * the last frame of the skipped message.
+ */
+ /* in case of More-To-Come not set in this frame */
+ if (mtc_this_frm==0) {
+ privptr->mtc_skipping=0; /* Ok, the end */
+ privptr->mtc_logical_link=-1;
+ }
+ goto NextFrame;
+ }
+
+ if (link_num==0) {
+ claw_process_control(dev, p_this_ccw);
+ CLAW_DBF_TEXT(4, trace, "UnpkCntl");
+ goto NextFrame;
+ }
+unpack_next:
+ if (p_env->packing == DO_PACKED) {
+ if (pack_off > p_env->read_size)
+ goto NextFrame;
+ p_packd = p_this_ccw->p_buffer+pack_off;
+ p_packh = (struct clawph *) p_packd;
+ if ((p_packh->len == 0) || /* done with this frame? */
+ (p_packh->flag != 0))
+ goto NextFrame;
+ bytes_to_mov = p_packh->len;
+ pack_off += bytes_to_mov+sizeof(struct clawph);
+ p++;
+ } else {
+ bytes_to_mov=p_this_ccw->header.length;
+ }
+ if (privptr->mtc_logical_link<0) {
+		/*
+		 * We are starting a new envelope. If More-To-Come is set
+		 * in this frame then we don't know the length of the
+		 * entire message, and hence have to allocate a large
+		 * buffer.
+		 */
+ privptr->mtc_offset=0;
+ privptr->mtc_logical_link=link_num;
+ }
+
+ if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
+ /* error */
+ privptr->stats.rx_frame_errors++;
+ goto NextFrame;
+ }
+ if (p_env->packing == DO_PACKED) {
+ memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
+ p_packd+sizeof(struct clawph), bytes_to_mov);
+
+ } else {
+ memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
+ p_this_ccw->p_buffer, bytes_to_mov);
+ }
+ if (mtc_this_frm==0) {
+ len_of_data=privptr->mtc_offset+bytes_to_mov;
+ skb=dev_alloc_skb(len_of_data);
+ if (skb) {
+ memcpy(skb_put(skb,len_of_data),
+ privptr->p_mtc_envelope,
+ len_of_data);
+ skb->dev=dev;
+ skb_reset_mac_header(skb);
+ skb->protocol=htons(ETH_P_IP);
+ skb->ip_summed=CHECKSUM_UNNECESSARY;
+ privptr->stats.rx_packets++;
+ privptr->stats.rx_bytes+=len_of_data;
+ netif_rx(skb);
+ }
+ else {
+ dev_info(p_dev, "Allocating a buffer for"
+ " incoming data failed\n");
+ privptr->stats.rx_dropped++;
+ }
+ privptr->mtc_offset=0;
+ privptr->mtc_logical_link=-1;
+ }
+ else {
+ privptr->mtc_offset+=bytes_to_mov;
+ }
+ if (p_env->packing == DO_PACKED)
+ goto unpack_next;
+NextFrame:
+ /*
+ * Remove ThisCCWblock from active read queue, and add it
+ * to queue of free blocks to be reused.
+ */
+ i++;
+ p_this_ccw->header.length=0xffff;
+ p_this_ccw->header.opcode=0xff;
+ /*
+ * add this one to the free queue for later reuse
+ */
+ if (p_first_ccw==NULL) {
+ p_first_ccw = p_this_ccw;
+ }
+ else {
+ p_last_ccw->next = p_this_ccw;
+ }
+ p_last_ccw = p_this_ccw;
+ /*
+ * chain to next block on active read queue
+ */
+ p_this_ccw = privptr->p_read_active_first;
+ CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
+ } /* end of while */
+
+ /* check validity */
+
+ CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
+ add_claw_reads(dev, p_first_ccw, p_last_ccw);
+ claw_strt_read(dev, LOCK_YES);
+ return;
+} /* end of unpack_read */
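+
+/*
+ * Layout of a packed read buffer as walked by unpack_read above;
+ * pack_off advances by p_packh->len + sizeof(struct clawph) until a
+ * header with len == 0 (or a nonzero flag) ends the frame:
+ *
+ *	| clawph | len bytes of data | clawph | len bytes | ... | len=0 |
+ */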
+
+/*-------------------------------------------------------------------*
+* claw_strt_read *
+* *
+*--------------------------------------------------------------------*/
+static void
+claw_strt_read (struct net_device *dev, int lock )
+{
+ int rc = 0;
+ __u32 parm;
+ unsigned long saveflags = 0;
+ struct claw_privbk *privptr = dev->ml_priv;
+	struct ccwbk *p_ccwbk;
+ struct chbk *p_ch;
+ struct clawh *p_clawh;
+ p_ch = &privptr->channel[READ_CHANNEL];
+
+ CLAW_DBF_TEXT(4, trace, "StRdNter");
+ p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
+ p_clawh->flag=CLAW_IDLE; /* 0x00 */
+
+ if ((privptr->p_write_active_first!=NULL &&
+ privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
+ (privptr->p_read_active_first!=NULL &&
+ privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
+ p_clawh->flag=CLAW_BUSY; /* 0xff */
+ }
+ if (lock==LOCK_YES) {
+ spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
+ }
+ if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
+ CLAW_DBF_TEXT(4, trace, "HotRead");
+ p_ccwbk=privptr->p_read_active_first;
+ parm = (unsigned long) p_ch;
+ rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
+ 0xff, 0);
+ if (rc != 0) {
+ ccw_check_return_code(p_ch->cdev, rc);
+ }
+ }
+ else {
+ CLAW_DBF_TEXT(2, trace, "ReadAct");
+ }
+
+ if (lock==LOCK_YES) {
+ spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
+ }
+ CLAW_DBF_TEXT(4, trace, "StRdExit");
+ return;
+} /* end of claw_strt_read */
+
+/*-------------------------------------------------------------------*
+* claw_strt_out_IO *
+* *
+*--------------------------------------------------------------------*/
+
+static void
+claw_strt_out_IO( struct net_device *dev )
+{
+ int rc = 0;
+ unsigned long parm;
+ struct claw_privbk *privptr;
+ struct chbk *p_ch;
+ struct ccwbk *p_first_ccw;
+
+ if (!dev) {
+ return;
+ }
+ privptr = (struct claw_privbk *)dev->ml_priv;
+ p_ch = &privptr->channel[WRITE_CHANNEL];
+
+ CLAW_DBF_TEXT(4, trace, "strt_io");
+ p_first_ccw=privptr->p_write_active_first;
+
+ if (p_ch->claw_state == CLAW_STOP)
+ return;
+ if (p_first_ccw == NULL) {
+ return;
+ }
+ if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
+ parm = (unsigned long) p_ch;
+ CLAW_DBF_TEXT(2, trace, "StWrtIO");
+ rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
+ 0xff, 0);
+ if (rc != 0) {
+ ccw_check_return_code(p_ch->cdev, rc);
+ }
+ }
+ dev->trans_start = jiffies;
+ return;
+} /* end of claw_strt_out_IO */
+
+/*-------------------------------------------------------------------*
+* Free write buffers *
+* *
+*--------------------------------------------------------------------*/
+
+static void
+claw_free_wrt_buf( struct net_device *dev )
+{
+
+ struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
+	struct ccwbk *p_this_ccw;
+	struct ccwbk *p_next_ccw;
+
+ CLAW_DBF_TEXT(4, trace, "freewrtb");
+ /* scan the write queue to free any completed write packets */
+ p_this_ccw=privptr->p_write_active_first;
+ while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
+ {
+ p_next_ccw = p_this_ccw->next;
+ if (((p_next_ccw!=NULL) &&
+ (p_next_ccw->header.flag!=CLAW_PENDING)) ||
+ ((p_this_ccw == privptr->p_write_active_last) &&
+ (p_this_ccw->header.flag!=CLAW_PENDING))) {
+			/* The next CCW is OK or this is */
+			/* the last CCW... free it */
+ privptr->p_write_active_first=p_this_ccw->next;
+ p_this_ccw->header.flag=CLAW_PENDING;
+ p_this_ccw->next=privptr->p_write_free_chain;
+ privptr->p_write_free_chain=p_this_ccw;
+ ++privptr->write_free_count;
+ privptr->stats.tx_bytes+= p_this_ccw->write.count;
+ p_this_ccw=privptr->p_write_active_first;
+ privptr->stats.tx_packets++;
+ }
+ else {
+ break;
+ }
+ }
+ if (privptr->write_free_count!=0) {
+ claw_clearbit_busy(TB_NOBUFFER,dev);
+ }
+ /* whole chain removed? */
+ if (privptr->p_write_active_first==NULL) {
+ privptr->p_write_active_last=NULL;
+ }
+ CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
+ return;
+}
+
+/*-------------------------------------------------------------------*
+* claw free netdevice *
+* *
+*--------------------------------------------------------------------*/
+static void
+claw_free_netdevice(struct net_device * dev, int free_dev)
+{
+ struct claw_privbk *privptr;
+
+ CLAW_DBF_TEXT(2, setup, "free_dev");
+ if (!dev)
+ return;
+ CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
+ privptr = dev->ml_priv;
+ if (dev->flags & IFF_RUNNING)
+ claw_release(dev);
+ if (privptr) {
+ privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
+ }
+ dev->ml_priv = NULL;
+#ifdef MODULE
+ if (free_dev) {
+ free_netdev(dev);
+ }
+#endif
+ CLAW_DBF_TEXT(2, setup, "free_ok");
+}
+
+/**
+ * Claw init netdevice
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static const struct net_device_ops claw_netdev_ops = {
+ .ndo_open = claw_open,
+ .ndo_stop = claw_release,
+ .ndo_get_stats = claw_stats,
+ .ndo_start_xmit = claw_tx,
+ .ndo_change_mtu = claw_change_mtu,
+};
+
+static void
+claw_init_netdevice(struct net_device * dev)
+{
+ CLAW_DBF_TEXT(2, setup, "init_dev");
+ CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
+ dev->mtu = CLAW_DEFAULT_MTU_SIZE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->type = ARPHRD_SLIP;
+ dev->tx_queue_len = 1300;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->netdev_ops = &claw_netdev_ops;
+ CLAW_DBF_TEXT(2, setup, "initok");
+ return;
+}
+
+/**
+ * Init a new channel in the privptr->channel[i].
+ *
+ * @param cdev The ccw_device to be added.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int
+add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
+{
+ struct chbk *p_ch;
+ struct ccw_dev_id dev_id;
+
+ CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
+ privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
+ p_ch = &privptr->channel[i];
+ p_ch->cdev = cdev;
+ snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
+ ccw_device_get_id(cdev, &dev_id);
+ p_ch->devno = dev_id.devno;
+ if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+
+/**
+ *
+ * Setup an interface.
+ *
+ * @param cgdev Device to be setup.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+static int
+claw_new_device(struct ccwgroup_device *cgdev)
+{
+ struct claw_privbk *privptr;
+ struct claw_env *p_env;
+ struct net_device *dev;
+ int ret;
+ struct ccw_dev_id dev_id;
+
+ dev_info(&cgdev->dev, "add for %s\n",
+ dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
+ CLAW_DBF_TEXT(2, setup, "new_dev");
+ privptr = dev_get_drvdata(&cgdev->dev);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
+ if (!privptr)
+ return -ENODEV;
+ p_env = privptr->p_env;
+ ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
+ p_env->devno[READ_CHANNEL] = dev_id.devno;
+ ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
+ p_env->devno[WRITE_CHANNEL] = dev_id.devno;
+ ret = add_channel(cgdev->cdev[0],0,privptr);
+ if (ret == 0)
+ ret = add_channel(cgdev->cdev[1],1,privptr);
+ if (ret != 0) {
+ dev_warn(&cgdev->dev, "Creating a CLAW group device"
+ " failed with error code %d\n", ret);
+ goto out;
+ }
+ ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
+ if (ret != 0) {
+ dev_warn(&cgdev->dev,
+ "Setting the read subchannel online"
+ " failed with error code %d\n", ret);
+ goto out;
+ }
+ ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
+ if (ret != 0) {
+ dev_warn(&cgdev->dev,
+ "Setting the write subchannel online "
+ "failed with error code %d\n", ret);
+ goto out;
+ }
+ dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
+ if (!dev) {
+ dev_warn(&cgdev->dev,
+ "Activating the CLAW device failed\n");
+ goto out;
+ }
+ dev->ml_priv = privptr;
+ dev_set_drvdata(&cgdev->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
+ /* sysfs magic */
+ SET_NETDEV_DEV(dev, &cgdev->dev);
+ if (register_netdev(dev) != 0) {
+ claw_free_netdevice(dev, 1);
+ CLAW_DBF_TEXT(2, trace, "regfail");
+ goto out;
+ }
+ dev->flags &=~IFF_RUNNING;
+ if (privptr->buffs_alloc == 0) {
+ ret=init_ccw_bk(dev);
+ if (ret !=0) {
+ unregister_netdev(dev);
+ claw_free_netdevice(dev,1);
+ CLAW_DBF_TEXT(2, trace, "ccwmem");
+ goto out;
+ }
+ }
+ privptr->channel[READ_CHANNEL].ndev = dev;
+ privptr->channel[WRITE_CHANNEL].ndev = dev;
+ privptr->p_env->ndev = dev;
+
+ dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
+ "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
+ dev->name, p_env->read_size,
+ p_env->write_size, p_env->read_buffers,
+ p_env->write_buffers, p_env->devno[READ_CHANNEL],
+ p_env->devno[WRITE_CHANNEL]);
+ dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
+ ":%.8s api_type: %.8s\n",
+ dev->name, p_env->host_name,
+ p_env->adapter_name , p_env->api_type);
+ return 0;
+out:
+ ccw_device_set_offline(cgdev->cdev[1]);
+ ccw_device_set_offline(cgdev->cdev[0]);
+ return -ENODEV;
+}
+
+static void
+claw_purge_skb_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ CLAW_DBF_TEXT(4, trace, "purgque");
+ while ((skb = skb_dequeue(q))) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/**
+ * Shutdown an interface.
+ *
+ * @param cgdev Device to be shut down.
+ *
+ * @returns 0 on success, !0 on failure.
+ */
+static int
+claw_shutdown_device(struct ccwgroup_device *cgdev)
+{
+ struct claw_privbk *priv;
+ struct net_device *ndev;
+ int ret = 0;
+
+ CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
+ priv = dev_get_drvdata(&cgdev->dev);
+ if (!priv)
+ return -ENODEV;
+ ndev = priv->channel[READ_CHANNEL].ndev;
+ if (ndev) {
+ /* Close the device */
+ dev_info(&cgdev->dev, "%s: shutting down\n",
+ ndev->name);
+ if (ndev->flags & IFF_RUNNING)
+ ret = claw_release(ndev);
+ ndev->flags &=~IFF_RUNNING;
+ unregister_netdev(ndev);
+ ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
+ claw_free_netdevice(ndev, 1);
+ priv->channel[READ_CHANNEL].ndev = NULL;
+ priv->channel[WRITE_CHANNEL].ndev = NULL;
+ priv->p_env->ndev = NULL;
+ }
+ ccw_device_set_offline(cgdev->cdev[1]);
+ ccw_device_set_offline(cgdev->cdev[0]);
+ return ret;
+}
+
+static void
+claw_remove_device(struct ccwgroup_device *cgdev)
+{
+ struct claw_privbk *priv;
+
+ BUG_ON(!cgdev);
+ CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
+ priv = dev_get_drvdata(&cgdev->dev);
+ BUG_ON(!priv);
+ dev_info(&cgdev->dev, " will be removed.\n");
+ if (cgdev->state == CCWGROUP_ONLINE)
+ claw_shutdown_device(cgdev);
+ claw_remove_files(&cgdev->dev);
+ kfree(priv->p_mtc_envelope);
+ priv->p_mtc_envelope=NULL;
+ kfree(priv->p_env);
+ priv->p_env=NULL;
+ kfree(priv->channel[0].irb);
+ priv->channel[0].irb=NULL;
+ kfree(priv->channel[1].irb);
+ priv->channel[1].irb=NULL;
+ kfree(priv);
+ dev_set_drvdata(&cgdev->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
+ dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
+ put_device(&cgdev->dev);
+
+ return;
+}
+
+
+/*
+ * sysfs attributes
+ */
+static ssize_t
+claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ return sprintf(buf, "%s\n",p_env->host_name);
+}
+
+static ssize_t
+claw_hname_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ if (count > MAX_NAME_LEN+1)
+ return -EINVAL;
+ memset(p_env->host_name, 0x20, MAX_NAME_LEN);
+ strncpy(p_env->host_name,buf, count);
+ p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
+ p_env->host_name[MAX_NAME_LEN] = 0x00;
+ CLAW_DBF_TEXT(2, setup, "HstnSet");
+ CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
+
+ return count;
+}
+
+static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
+
+static ssize_t
+claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ return sprintf(buf, "%s\n", p_env->adapter_name);
+}
+
+static ssize_t
+claw_adname_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ if (count > MAX_NAME_LEN+1)
+ return -EINVAL;
+ memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
+ strncpy(p_env->adapter_name,buf, count);
+ p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
+ p_env->adapter_name[MAX_NAME_LEN] = 0x00;
+ CLAW_DBF_TEXT(2, setup, "AdnSet");
+ CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
+
+ return count;
+}
+
+static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
+
+static ssize_t
+claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ return sprintf(buf, "%s\n",
+ p_env->api_type);
+}
+
+static ssize_t
+claw_apname_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ if (count > MAX_NAME_LEN+1)
+ return -EINVAL;
+ memset(p_env->api_type, 0x20, MAX_NAME_LEN);
+ strncpy(p_env->api_type,buf, count);
+	p_env->api_type[count-1] = 0x20; /* clear extra 0x0a */
+ p_env->api_type[MAX_NAME_LEN] = 0x00;
+ if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
+ p_env->read_size=DEF_PACK_BUFSIZE;
+ p_env->write_size=DEF_PACK_BUFSIZE;
+ p_env->packing=PACKING_ASK;
+ CLAW_DBF_TEXT(2, setup, "PACKING");
+ }
+ else {
+ p_env->packing=0;
+ p_env->read_size=CLAW_FRAME_SIZE;
+ p_env->write_size=CLAW_FRAME_SIZE;
+ CLAW_DBF_TEXT(2, setup, "ApiSet");
+ }
+ CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
+ return count;
+}
+
+static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
+
+static ssize_t
+claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ return sprintf(buf, "%d\n", p_env->write_buffers);
+}
+
+static ssize_t
+claw_wbuff_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+ int nnn,max;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ sscanf(buf, "%i", &nnn);
+ if (p_env->packing) {
+ max = 64;
+ }
+ else {
+ max = 512;
+ }
+ if ((nnn > max ) || (nnn < 2))
+ return -EINVAL;
+ p_env->write_buffers = nnn;
+ CLAW_DBF_TEXT(2, setup, "Wbufset");
+ CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
+ return count;
+}
+
+static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
+
+static ssize_t
+claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct claw_privbk *priv;
+ struct claw_env * p_env;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ return sprintf(buf, "%d\n", p_env->read_buffers);
+}
+
+static ssize_t
+claw_rbuff_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct claw_privbk *priv;
+ struct claw_env *p_env;
+ int nnn,max;
+
+ priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ p_env = priv->p_env;
+ sscanf(buf, "%i", &nnn);
+ if (p_env->packing) {
+ max = 64;
+ }
+ else {
+ max = 512;
+ }
+ if ((nnn > max ) || (nnn < 2))
+ return -EINVAL;
+ p_env->read_buffers = nnn;
+ CLAW_DBF_TEXT(2, setup, "Rbufset");
+ CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
+ return count;
+}
+
+static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
+
+static struct attribute *claw_attr[] = {
+ &dev_attr_read_buffer.attr,
+ &dev_attr_write_buffer.attr,
+ &dev_attr_adapter_name.attr,
+ &dev_attr_api_type.attr,
+ &dev_attr_host_name.attr,
+ NULL,
+};
+
+static struct attribute_group claw_attr_group = {
+ .attrs = claw_attr,
+};
+
+static int
+claw_add_files(struct device *dev)
+{
+ CLAW_DBF_TEXT(2, setup, "add_file");
+ return sysfs_create_group(&dev->kobj, &claw_attr_group);
+}
+
+static void
+claw_remove_files(struct device *dev)
+{
+ CLAW_DBF_TEXT(2, setup, "rem_file");
+ sysfs_remove_group(&dev->kobj, &claw_attr_group);
+}
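+
+/*
+ * Example use of the attributes above from user space. The bus ID
+ * (0.0.0300) is only an illustration; the actual sysfs path depends
+ * on the grouped device:
+ *
+ *	echo HOSTNAME > /sys/bus/ccwgroup/drivers/claw/0.0.0300/host_name
+ *	echo PACKED   > /sys/bus/ccwgroup/drivers/claw/0.0.0300/api_type
+ *	echo 4        > /sys/bus/ccwgroup/drivers/claw/0.0.0300/read_buffer
+ */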
+
+/*--------------------------------------------------------------------*
+* claw_init and cleanup *
+*---------------------------------------------------------------------*/
+
+static void __exit
+claw_cleanup(void)
+{
+ driver_remove_file(&claw_group_driver.driver,
+ &driver_attr_group);
+ ccwgroup_driver_unregister(&claw_group_driver);
+ ccw_driver_unregister(&claw_ccw_driver);
+ root_device_unregister(claw_root_dev);
+ claw_unregister_debug_facility();
+ pr_info("Driver unloaded\n");
+
+}
+
+/**
+ * Initialize module.
+ * This is called just after the module is loaded.
+ *
+ * @return 0 on success, !0 on error.
+ */
+static int __init
+claw_init(void)
+{
+ int ret = 0;
+
+ pr_info("Loading %s\n", version);
+ ret = claw_register_debug_facility();
+ if (ret) {
+ pr_err("Registering with the S/390 debug feature"
+ " failed with error code %d\n", ret);
+ goto out_err;
+ }
+ CLAW_DBF_TEXT(2, setup, "init_mod");
+ claw_root_dev = root_device_register("claw");
+ ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
+ if (ret)
+ goto register_err;
+ ret = ccw_driver_register(&claw_ccw_driver);
+ if (ret)
+ goto ccw_err;
+ claw_group_driver.driver.groups = claw_group_attr_groups;
+ ret = ccwgroup_driver_register(&claw_group_driver);
+ if (ret)
+ goto ccwgroup_err;
+ return 0;
+
+ccwgroup_err:
+ ccw_driver_unregister(&claw_ccw_driver);
+ccw_err:
+ root_device_unregister(claw_root_dev);
+register_err:
+ CLAW_DBF_TEXT(2, setup, "init_bad");
+ claw_unregister_debug_facility();
+out_err:
+ pr_err("Initializing the claw device driver failed\n");
+ return ret;
+}
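+
+/*
+ * Typical bring-up after "modprobe claw" (a sketch; the bus IDs are
+ * examples only): the read and write subchannels are grouped through
+ * the ccwgroup "group" driver attribute that claw_cleanup() removes:
+ *
+ *	echo 0.0.0300,0.0.0301 > /sys/bus/ccwgroup/drivers/claw/group
+ */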
+
+module_init(claw_init);
+module_exit(claw_cleanup);
+
+MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
+MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
+ "Copyright 2000,2008 IBM Corporation\n");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
new file mode 100644
index 00000000000..1bc5904df19
--- /dev/null
+++ b/drivers/s390/net/claw.h
@@ -0,0 +1,354 @@
+/*******************************************************
+* Define constants *
+* *
+********************************************************/
+
+/*-----------------------------------------------------*
+* CCW command codes for CLAW protocol *
+*------------------------------------------------------*/
+
+#define CCW_CLAW_CMD_WRITE 0x01 /* write - not including link */
+#define CCW_CLAW_CMD_READ 0x02 /* read */
+#define CCW_CLAW_CMD_NOP 0x03 /* NOP */
+#define CCW_CLAW_CMD_SENSE 0x04 /* Sense */
+#define CCW_CLAW_CMD_SIGNAL_SMOD 0x05 /* Signal Status Modifier */
+#define CCW_CLAW_CMD_TIC 0x08 /* TIC */
+#define CCW_CLAW_CMD_READHEADER 0x12 /* read header data */
+#define CCW_CLAW_CMD_READFF 0x22 /* read an FF */
+#define CCW_CLAW_CMD_SENSEID 0xe4 /* Sense ID */
+
+
+/*-----------------------------------------------------*
+* CLAW Unique constants *
+*------------------------------------------------------*/
+
+#define MORE_to_COME_FLAG 0x04 /* OR with write CCW in case of m-t-c */
+#define CLAW_IDLE 0x00 /* flag to indicate CLAW is idle */
+#define CLAW_BUSY 0xff /* flag to indicate CLAW is busy */
+#define CLAW_PENDING 0x00 /* flag to indicate i/o is pending */
+#define CLAW_COMPLETE 0xff /* flag to indicate i/o completed */
+
+/*-----------------------------------------------------*
+* CLAW control command code *
+*------------------------------------------------------*/
+
+#define SYSTEM_VALIDATE_REQUEST 0x01 /* System Validate request */
+#define SYSTEM_VALIDATE_RESPONSE 0x02 /* System Validate response */
+#define CONNECTION_REQUEST 0x21 /* Connection request */
+#define CONNECTION_RESPONSE 0x22 /* Connection response */
+#define CONNECTION_CONFIRM 0x23 /* Connection confirm */
+#define DISCONNECT 0x24 /* Disconnect */
+#define CLAW_ERROR 0x41 /* CLAW error message */
+#define CLAW_VERSION_ID 2 /* CLAW version ID */
+
+/*-----------------------------------------------------*
+*               CLAW adapter sense bytes               *
+*------------------------------------------------------*/
+
+#define CLAW_ADAPTER_SENSE_BYTE 0x41 /* Stop command issued to adapter */
+
+/*-----------------------------------------------------*
+* CLAW control command return codes *
+*------------------------------------------------------*/
+
+#define CLAW_RC_NAME_MISMATCH 166 /* names do not match */
+#define CLAW_RC_WRONG_VERSION 167 /* wrong CLAW version number */
+#define CLAW_RC_HOST_RCV_TOO_SMALL 180 /* Host maximum receive is */
+ /* less than Linux on zSeries*/
+ /* transmit size */
+
+/*-----------------------------------------------------*
+* CLAW Constants application name *
+*------------------------------------------------------*/
+
+#define HOST_APPL_NAME "TCPIP "
+#define WS_APPL_NAME_IP_LINK "TCPIP "
+#define WS_APPL_NAME_IP_NAME "IP "
+#define WS_APPL_NAME_API_LINK "API "
+#define WS_APPL_NAME_PACKED "PACKED "
+#define WS_NAME_NOT_DEF "NOT_DEF "
+#define PACKING_ASK 1
+#define PACK_SEND 2
+#define DO_PACKED 3
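+
+/* Packing is negotiated in stages: PACKING_ASK (offer sent) ->
+ * PACK_SEND (peer agreed, start sending packed) -> DO_PACKED
+ * (packing in effect); see claw_process_control() in claw.c. */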
+
+#define MAX_ENVELOPE_SIZE 65536
+#define CLAW_DEFAULT_MTU_SIZE 4096
+#define DEF_PACK_BUFSIZE 32768
+#define READ_CHANNEL 0
+#define WRITE_CHANNEL 1
+
+#define TB_TX 0 /* sk buffer handling in process */
+#define TB_STOP 1 /* network device stop in process */
+#define TB_RETRY 2 /* retry in process */
+#define TB_NOBUFFER 3 /* no buffer on free queue */
+#define CLAW_MAX_LINK_ID 1
+#define CLAW_MAX_DEV 256 /* max claw devices */
+#define MAX_NAME_LEN 8 /* host name, adapter name length */
+#define CLAW_FRAME_SIZE 4096
+#define CLAW_ID_SIZE		(20 + 3)
+
+/* state machine codes used in claw_irq_handler */
+
+#define CLAW_STOP 0
+#define CLAW_START_HALT_IO 1
+#define CLAW_START_SENSEID 2
+#define CLAW_START_READ 3
+#define CLAW_START_WRITE 4
+
+/*-----------------------------------------------------*
+* Lock flag *
+*------------------------------------------------------*/
+#define LOCK_YES 0
+#define LOCK_NO 1
+
+/*-----------------------------------------------------*
+* DBF Debug macros *
+*------------------------------------------------------*/
+#define CLAW_DBF_TEXT(level, name, text) \
+ do { \
+ debug_text_event(claw_dbf_##name, level, text); \
+ } while (0)
+
+#define CLAW_DBF_HEX(level,name,addr,len) \
+do { \
+ debug_event(claw_dbf_##name,level,(void*)(addr),len); \
+} while (0)
+
+/* Allow sorting out low debug levels early to avoid wasted sprintfs */
+static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+ return (level <= dbf_grp->level);
+}
+
+#define CLAW_DBF_TEXT_(level,name,text...) \
+ do { \
+ if (claw_dbf_passes(claw_dbf_##name, level)) { \
+ sprintf(debug_buffer, text); \
+ debug_text_event(claw_dbf_##name, level, \
+ debug_buffer); \
+ } \
+ } while (0)
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum claw_channel_types {
+ /* Device is not a channel */
+ claw_channel_type_none,
+
+ /* Device is a CLAW channel device */
+ claw_channel_type_claw
+};
+
+
+/*******************************************************
+* Define Control Blocks *
+* *
+********************************************************/
+
+/*------------------------------------------------------*/
+/* CLAW header */
+/*------------------------------------------------------*/
+
+struct clawh {
+ __u16 length; /* length of data read by preceding read CCW */
+ __u8 opcode; /* equivalent read CCW */
+ __u8 flag; /* flag of FF to indicate read was completed */
+};
+
+/*------------------------------------------------------*/
+/* CLAW Packing header 4 bytes */
+/*------------------------------------------------------*/
+struct clawph {
+ __u16 len; /* Length of Packed Data Area */
+ __u8 flag; /* Reserved not used */
+ __u8 link_num; /* Link ID */
+};
+
+/*------------------------------------------------------*/
+/* CLAW Ending struct ccwbk */
+/*------------------------------------------------------*/
+struct endccw {
+ __u32 real; /* real address of this block */
+ __u8 write1; /* write 1 is active */
+ __u8 read1; /* read 1 is active */
+ __u16 reserved; /* reserved for future use */
+ struct ccw1 write1_nop1;
+ struct ccw1 write1_nop2;
+ struct ccw1 write2_nop1;
+ struct ccw1 write2_nop2;
+ struct ccw1 read1_nop1;
+ struct ccw1 read1_nop2;
+ struct ccw1 read2_nop1;
+ struct ccw1 read2_nop2;
+};
+
+/*------------------------------------------------------*/
+/* CLAW struct ccwbk */
+/*------------------------------------------------------*/
+struct ccwbk {
+ void *next; /* pointer to next ccw block */
+ __u32 real; /* real address of this ccw */
+ void *p_buffer; /* virtual address of data */
+ struct clawh header; /* claw header */
+ struct ccw1 write; /* write CCW */
+ struct ccw1 w_read_FF; /* read FF */
+ struct ccw1 w_TIC_1; /* TIC */
+ struct ccw1 read; /* read CCW */
+ struct ccw1 read_h; /* read header */
+ struct ccw1 signal; /* signal SMOD */
+ struct ccw1 r_TIC_1; /* TIC1 */
+ struct ccw1 r_read_FF; /* read FF */
+ struct ccw1 r_TIC_2; /* TIC2 */
+};
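+
+/*
+ * For reads, init_ccw_bk chains these blocks into a ring: each block
+ * runs read -> read_h -> signal, and the r_TIC_1/r_TIC_2 CCWs point
+ * at the next block's read CCW (see the real_TIC_address setup in
+ * init_ccw_bk; a sketch of the intent, not a full channel-program
+ * description).
+ */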
+
+/*------------------------------------------------------*/
+/* CLAW control block */
+/*------------------------------------------------------*/
+struct clawctl {
+ __u8 command; /* control command */
+ __u8 version; /* CLAW protocol version */
+ __u8 linkid; /* link ID */
+ __u8 correlator; /* correlator */
+ __u8 rc; /* return code */
+ __u8 reserved1; /* reserved */
+ __u8 reserved2; /* reserved */
+ __u8 reserved3; /* reserved */
+ __u8 data[24]; /* command specific fields */
+};
+
+/*------------------------------------------------------*/
+/* Data for SYSTEMVALIDATE command */
+/*------------------------------------------------------*/
+struct sysval {
+ char WS_name[8]; /* Workstation System name */
+ char host_name[8]; /* Host system name */
+ __u16 read_frame_size; /* read frame size */
+ __u16 write_frame_size; /* write frame size */
+ __u8 reserved[4]; /* reserved */
+};
+
+/*------------------------------------------------------*/
+/* Data for Connect command */
+/*------------------------------------------------------*/
+struct conncmd {
+ char WS_name[8]; /* Workstation application name */
+ char host_name[8]; /* Host application name */
+	__u16 reserved1[2]; /* read/write frame size (used when packing) */
+ __u8 reserved2[4]; /* reserved */
+};
+
+/*------------------------------------------------------*/
+/* Data for CLAW error */
+/*------------------------------------------------------*/
+struct clawwerror {
+ char reserved1[8]; /* reserved */
+ char reserved2[8]; /* reserved */
+ char reserved3[8]; /* reserved */
+};
+
+/*------------------------------------------------------*/
+/* Data buffer for CLAW */
+/*------------------------------------------------------*/
+struct clawbuf {
+ char buffer[MAX_ENVELOPE_SIZE]; /* data buffer */
+};
+
+/*------------------------------------------------------*/
+/* Channel control block for read and write channel */
+/*------------------------------------------------------*/
+
+struct chbk {
+ unsigned int devno;
+ int irq;
+ char id[CLAW_ID_SIZE];
+ __u32 IO_active;
+ __u8 claw_state;
+ struct irb *irb;
+ struct ccw_device *cdev; /* pointer to the channel device */
+ struct net_device *ndev;
+ wait_queue_head_t wait;
+ struct tasklet_struct tasklet;
+ struct timer_list timer;
+ unsigned long flag_a; /* atomic flags */
+#define CLAW_BH_ACTIVE 0
+ unsigned long flag_b; /* atomic flags */
+#define CLAW_WRITE_ACTIVE 0
+ __u8 last_dstat;
+ __u8 flag;
+ struct sk_buff_head collect_queue;
+ spinlock_t collect_lock;
+#define CLAW_WRITE 0x02 /* - Set if this is a write channel */
+#define CLAW_READ 0x01 /* - Set if this is a read channel */
+#define CLAW_TIMER 0x80 /* - Set if timer made the wake_up */
+};
+
+/*--------------------------------------------------------------*
+* CLAW environment block *
+*---------------------------------------------------------------*/
+
+struct claw_env {
+ unsigned int devno[2]; /* device number */
+ char host_name[9]; /* Host name */
+ char adapter_name[9]; /* adapter name */
+ char api_type[9]; /* TCPIP, API or PACKED */
+ void *p_priv; /* privptr */
+ __u16 read_buffers; /* read buffer number */
+ __u16 write_buffers; /* write buffer number */
+ __u16 read_size; /* read buffer size */
+ __u16 write_size; /* write buffer size */
+ __u16 dev_id; /* device ident */
+ __u8 packing; /* are we packing? */
+ __u8 in_use; /* device active flag */
+ struct net_device *ndev; /* backward ptr to the net dev*/
+};
+
+/*--------------------------------------------------------------*
+* CLAW main control block *
+*---------------------------------------------------------------*/
+
+struct claw_privbk {
+ void *p_buff_ccw;
+ __u32 p_buff_ccw_num;
+ void *p_buff_read;
+ __u32 p_buff_read_num;
+ __u32 p_buff_pages_perread;
+ void *p_buff_write;
+ __u32 p_buff_write_num;
+ __u32 p_buff_pages_perwrite;
+ long active_link_ID; /* Active logical link ID */
+ struct ccwbk *p_write_free_chain; /* pointer to free ccw chain */
+ struct ccwbk *p_write_active_first; /* ptr to the first write ccw */
+ struct ccwbk *p_write_active_last; /* ptr to the last write ccw */
+ struct ccwbk *p_read_active_first; /* ptr to the first read ccw */
+ struct ccwbk *p_read_active_last; /* ptr to the last read ccw */
+ struct endccw *p_end_ccw; /*ptr to ending ccw */
+ struct ccwbk *p_claw_signal_blk; /* ptr to signal block */
+ __u32 write_free_count; /* number of free bufs for write */
+ struct net_device_stats stats; /* device status */
+ struct chbk channel[2]; /* Channel control blocks */
+ __u8 mtc_skipping;
+ int mtc_offset;
+ int mtc_logical_link;
+ void *p_mtc_envelope;
+ struct sk_buff *pk_skb; /* packing buffer */
+ int pk_cnt;
+ struct clawctl ctl_bk;
+ struct claw_env *p_env;
+ __u8 system_validate_comp;
+ __u8 release_pend;
+ __u8 checksum_received_ip_pkts;
+ __u8 buffs_alloc;
+ struct endccw end_ccw;
+ unsigned long tbusy;
+
+};
+
+
+/************************************************************/
+/* define global constants */
+/************************************************************/
+
+#define CCWBK_SIZE sizeof(struct ccwbk)
+
+
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
new file mode 100644
index 00000000000..d962fd741a2
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -0,0 +1,79 @@
+/*
+ * drivers/s390/net/ctcm_dbug.c
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include "ctcm_dbug.h"
+
+/*
+ * Debug Facility Stuff
+ */
+
+struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
+ [CTCM_DBF_SETUP] = {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
+ [CTCM_DBF_ERROR] = {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
+ [CTCM_DBF_TRACE] = {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
+ [CTCM_DBF_MPC_SETUP] = {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
+ [CTCM_DBF_MPC_ERROR] = {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
+ [CTCM_DBF_MPC_TRACE] = {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
+};
+
+void ctcm_unregister_dbf_views(void)
+{
+ int x;
+ for (x = 0; x < CTCM_DBF_INFOS; x++) {
+ debug_unregister(ctcm_dbf[x].id);
+ ctcm_dbf[x].id = NULL;
+ }
+}
+
+int ctcm_register_dbf_views(void)
+{
+ int x;
+ for (x = 0; x < CTCM_DBF_INFOS; x++) {
+ /* register the areas */
+ ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
+ ctcm_dbf[x].pages,
+ ctcm_dbf[x].areas,
+ ctcm_dbf[x].len);
+ if (ctcm_dbf[x].id == NULL) {
+ ctcm_unregister_dbf_views();
+ return -ENOMEM;
+ }
+
+ /* register a view */
+ debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
+ /* set a passing level */
+ debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
+ }
+
+ return 0;
+}
+
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
+{
+ char dbf_txt_buf[64];
+ va_list args;
+
+ if (level > (ctcm_dbf[dbf_nix].id)->level)
+ return;
+ va_start(args, fmt);
+ vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+ va_end(args);
+
+ debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
+}
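+
+/*
+ * Illustrative sketch only (editorial addition, never called by the
+ * driver): shows how the register/unregister pair above is meant to
+ * be used from an init path. The function name is hypothetical.
+ */
+static int __maybe_unused ctcm_dbf_example_init(void)
+{
+ int rc;
+
+ rc = ctcm_register_dbf_views(); /* allocates all CTCM_DBF_INFOS areas */
+ if (rc)
+ return rc; /* -ENOMEM; all views already unregistered again */
+ ctcm_dbf_longtext(CTCM_DBF_SETUP, CTC_DBF_INFO, "%s", "dbf ready");
+ return 0;
+}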
+
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
new file mode 100644
index 00000000000..26966d0b9ab
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -0,0 +1,143 @@
+/*
+ * drivers/s390/net/ctcm_dbug.h
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#ifndef _CTCM_DBUG_H_
+#define _CTCM_DBUG_H_
+
+/*
+ * Debug Facility stuff
+ */
+
+#include <asm/debug.h>
+
+#ifdef DEBUG
+ #define do_debug 1
+#else
+ #define do_debug 0
+#endif
+#ifdef DEBUGCCW
+ #define do_debug_ccw 1
+ #define DEBUGDATA 1
+#else
+ #define do_debug_ccw 0
+#endif
+#ifdef DEBUGDATA
+ #define do_debug_data 1
+#else
+ #define do_debug_data 0
+#endif
+
+/* define dbf debug levels similar to kernel msg levels */
+#define CTC_DBF_ALWAYS 0 /* always print this */
+#define CTC_DBF_EMERG 0 /* system is unusable */
+#define CTC_DBF_ALERT 1 /* action must be taken immediately */
+#define CTC_DBF_CRIT 2 /* critical conditions */
+#define CTC_DBF_ERROR 3 /* error conditions */
+#define CTC_DBF_WARN 4 /* warning conditions */
+#define CTC_DBF_NOTICE 5 /* normal but significant condition */
+#define CTC_DBF_INFO 5 /* informational */
+#define CTC_DBF_DEBUG 6 /* debug-level messages */
+
+enum ctcm_dbf_names {
+ CTCM_DBF_SETUP,
+ CTCM_DBF_ERROR,
+ CTCM_DBF_TRACE,
+ CTCM_DBF_MPC_SETUP,
+ CTCM_DBF_MPC_ERROR,
+ CTCM_DBF_MPC_TRACE,
+ CTCM_DBF_INFOS /* must be last element */
+};
+
+struct ctcm_dbf_info {
+ char name[DEBUG_MAX_NAME_LEN];
+ int pages;
+ int areas;
+ int len;
+ int level;
+ debug_info_t *id;
+};
+
+extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS];
+
+int ctcm_register_dbf_views(void);
+void ctcm_unregister_dbf_views(void);
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...);
+
+static inline const char *strtail(const char *s, int n)
+{
+ int l = strlen(s);
+ return (l > n) ? s + (l - n) : s;
+}
+
+#define CTCM_FUNTAIL strtail((char *)__func__, 16)
+
+#define CTCM_DBF_TEXT(name, level, text) \
+ do { \
+ debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \
+ } while (0)
+
+#define CTCM_DBF_HEX(name, level, addr, len) \
+ do { \
+ debug_event(ctcm_dbf[CTCM_DBF_##name].id, \
+ level, (void *)(addr), len); \
+ } while (0)
+
+#define CTCM_DBF_TEXT_(name, level, text...) \
+ ctcm_dbf_longtext(CTCM_DBF_##name, level, text)
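+
+/*
+ * Example (illustrative only): a call such as
+ * CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s: rc=%d", CTCM_FUNTAIL, rc);
+ * formats into the "ctc_error" area via ctcm_dbf_longtext() and is
+ * recorded only if CTC_DBF_ERROR passes that area's current level.
+ */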
+
+/*
+ * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
+ * dev : netdevice with valid name field.
+ * text: any text string.
+ */
+#define CTCM_DBF_DEV_NAME(cat, dev, text) \
+ do { \
+ CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \
+ CTCM_FUNTAIL, dev->name, text); \
+ } while (0)
+
+#define MPC_DBF_DEV_NAME(cat, dev, text) \
+ do { \
+ CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \
+ CTCM_FUNTAIL, dev->name, text); \
+ } while (0)
+
+#define CTCMY_DBF_DEV_NAME(cat, dev, text) \
+ do { \
+ if (IS_MPCDEV(dev)) \
+ MPC_DBF_DEV_NAME(cat, dev, text); \
+ else \
+ CTCM_DBF_DEV_NAME(cat, dev, text); \
+ } while (0)
+
+/*
+ * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}.
+ * dev : netdevice.
+ * text: any text string.
+ */
+#define CTCM_DBF_DEV(cat, dev, text) \
+ do { \
+ CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \
+ CTCM_FUNTAIL, dev, text); \
+ } while (0)
+
+#define MPC_DBF_DEV(cat, dev, text) \
+ do { \
+ CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \
+ CTCM_FUNTAIL, dev, text); \
+ } while (0)
+
+#define CTCMY_DBF_DEV(cat, dev, text) \
+ do { \
+ if (IS_MPCDEV(dev)) \
+ MPC_DBF_DEV(cat, dev, text); \
+ else \
+ CTCM_DBF_DEV(cat, dev, text); \
+ } while (0)
+
+#endif
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
new file mode 100644
index 00000000000..2d602207541
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -0,0 +1,2295 @@
+/*
+ * drivers/s390/net/ctcm_fsms.c
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Fritz Elfert (felfert@millenux.com)
+ * Peter Tiedemann (ptiedem@de.ibm.com)
+ * MPC additions :
+ * Belinda Thompson (belindat@us.ibm.com)
+ * Andy Richter (richtera@us.ibm.com)
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "fsm.h"
+
+#include "ctcm_dbug.h"
+#include "ctcm_main.h"
+#include "ctcm_fsms.h"
+
+const char *dev_state_names[] = {
+ [DEV_STATE_STOPPED] = "Stopped",
+ [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
+ [DEV_STATE_STARTWAIT_RX] = "StartWait RX",
+ [DEV_STATE_STARTWAIT_TX] = "StartWait TX",
+ [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
+ [DEV_STATE_STOPWAIT_RX] = "StopWait RX",
+ [DEV_STATE_STOPWAIT_TX] = "StopWait TX",
+ [DEV_STATE_RUNNING] = "Running",
+};
+
+const char *dev_event_names[] = {
+ [DEV_EVENT_START] = "Start",
+ [DEV_EVENT_STOP] = "Stop",
+ [DEV_EVENT_RXUP] = "RX up",
+ [DEV_EVENT_TXUP] = "TX up",
+ [DEV_EVENT_RXDOWN] = "RX down",
+ [DEV_EVENT_TXDOWN] = "TX down",
+ [DEV_EVENT_RESTART] = "Restart",
+};
+
+const char *ctc_ch_event_names[] = {
+ [CTC_EVENT_IO_SUCCESS] = "ccw_device success",
+ [CTC_EVENT_IO_EBUSY] = "ccw_device busy",
+ [CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
+ [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
+ [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
+ [CTC_EVENT_ATTN] = "Status ATTN",
+ [CTC_EVENT_BUSY] = "Status BUSY",
+ [CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
+ [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
+ [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
+ [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
+ [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
+ [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
+ [CTC_EVENT_UC_ZERO] = "Unit check ZERO",
+ [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
+ [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
+ [CTC_EVENT_MC_FAIL] = "Machine check failure",
+ [CTC_EVENT_MC_GOOD] = "Machine check operational",
+ [CTC_EVENT_IRQ] = "IRQ normal",
+ [CTC_EVENT_FINSTAT] = "IRQ final",
+ [CTC_EVENT_TIMER] = "Timer",
+ [CTC_EVENT_START] = "Start",
+ [CTC_EVENT_STOP] = "Stop",
+ /*
+ * additional MPC events
+ */
+ [CTC_EVENT_SEND_XID] = "XID Exchange",
+ [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
+};
+
+const char *ctc_ch_state_names[] = {
+ [CTC_STATE_IDLE] = "Idle",
+ [CTC_STATE_STOPPED] = "Stopped",
+ [CTC_STATE_STARTWAIT] = "StartWait",
+ [CTC_STATE_STARTRETRY] = "StartRetry",
+ [CTC_STATE_SETUPWAIT] = "SetupWait",
+ [CTC_STATE_RXINIT] = "RX init",
+ [CTC_STATE_TXINIT] = "TX init",
+ [CTC_STATE_RX] = "RX",
+ [CTC_STATE_TX] = "TX",
+ [CTC_STATE_RXIDLE] = "RX idle",
+ [CTC_STATE_TXIDLE] = "TX idle",
+ [CTC_STATE_RXERR] = "RX error",
+ [CTC_STATE_TXERR] = "TX error",
+ [CTC_STATE_TERM] = "Terminating",
+ [CTC_STATE_DTERM] = "Restarting",
+ [CTC_STATE_NOTOP] = "Not operational",
+ /*
+ * additional MPC states
+ */
+ [CH_XID0_PENDING] = "Pending XID0 Start",
+ [CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
+ [CH_XID7_PENDING] = "Pending XID7 P1 Start",
+ [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
+ [CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
+ [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
+ [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
+};
+
+static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- static ctcm actions for channel statemachine -----
+ *
+ */
+static void chx_txdone(fsm_instance *fi, int event, void *arg);
+static void chx_rx(fsm_instance *fi, int event, void *arg);
+static void chx_rxidle(fsm_instance *fi, int event, void *arg);
+static void chx_firstio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- static ctcmpc actions for ctcmpc channel statemachine -----
+ *
+ */
+static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
+static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
+static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
+/* shared :
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
+*/
+static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
+static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
+static void ctcmpc_chx_resend(fsm_instance *, int, void *);
+static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
+
+/**
+ * Check the return code of a preceding ccw_device call
+ * (halt_IO etc.).
+ *
+ * ch : The channel the error belongs to.
+ * rc : The error code (!= 0) to inspect.
+ * msg : Text to log along with the error.
+ */
+void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
+{
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): %s: %04x\n",
+ CTCM_FUNTAIL, ch->id, msg, rc);
+ switch (rc) {
+ case -EBUSY:
+ pr_info("%s: The communication peer is busy\n",
+ ch->id);
+ fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
+ break;
+ case -ENODEV:
+ pr_err("%s: The specified target device is not valid\n",
+ ch->id);
+ fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
+ break;
+ default:
+ pr_err("An I/O operation resulted in error %04x\n",
+ rc);
+ fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
+ }
+}
+
+void ctcm_purge_skb_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
+
+ while ((skb = skb_dequeue(q))) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/**
+ * NOP action for statemachines
+ */
+static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Actions for channel - statemachines.
+ */
+
+/**
+ * Normal data has been sent. Free the corresponding
+ * skb (it's in io_queue), reset dev->tbusy and
+ * revert to idle state.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void chx_txdone(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct sk_buff *skb;
+ int first = 1;
+ int i;
+ unsigned long duration;
+ struct timespec done_stamp = current_kernel_time(); /* xtime */
+
+ CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
+
+ duration =
+ (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
+ (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+ if (duration > ch->prof.tx_time)
+ ch->prof.tx_time = duration;
+
+ if (ch->irb->scsw.cmd.count != 0)
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+ "%s(%s): TX not complete, remaining %d bytes",
+ CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
+ fsm_deltimer(&ch->timer);
+ while ((skb = skb_dequeue(&ch->io_queue))) {
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+ if (first) {
+ priv->stats.tx_bytes += 2;
+ first = 0;
+ }
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ }
+ spin_lock(&ch->collect_lock);
+ clear_normalized_cda(&ch->ccw[4]);
+ if (ch->collect_len > 0) {
+ int rc;
+
+ if (ctcm_checkalloc_buffer(ch)) {
+ spin_unlock(&ch->collect_lock);
+ return;
+ }
+ ch->trans_skb->data = ch->trans_skb_data;
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+ if (ch->prof.maxmulti < (ch->collect_len + 2))
+ ch->prof.maxmulti = ch->collect_len + 2;
+ if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
+ ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
+ *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
+ i = 0;
+ while ((skb = skb_dequeue(&ch->collect_queue))) {
+ skb_copy_from_linear_data(skb,
+ skb_put(ch->trans_skb, skb->len), skb->len);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ i++;
+ }
+ ch->collect_len = 0;
+ spin_unlock(&ch->collect_lock);
+ ch->ccw[1].count = ch->trans_skb->len;
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+ ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long)ch, 0xff, 0);
+ ch->prof.doios_multi++;
+ if (rc != 0) {
+ priv->stats.tx_dropped += i;
+ priv->stats.tx_errors += i;
+ fsm_deltimer(&ch->timer);
+ ctcm_ccw_check_rc(ch, rc, "chained TX");
+ }
+ } else {
+ spin_unlock(&ch->collect_lock);
+ fsm_newstate(fi, CTC_STATE_TXIDLE);
+ }
+ ctcm_clear_busy_do(dev);
+}
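+
+/*
+ * Editorial note (sketch, not driver code): the coalescing above
+ * produces the classic CTC block framing, a 16-bit total-length word
+ * that includes itself, followed by the queued LL frames:
+ *
+ * blklen = collect_len + 2;
+ * | blklen (2 bytes) | LL frame | LL frame | ...
+ *
+ * chx_rx() below validates exactly this length word against the
+ * received byte count.
+ */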
+
+/**
+ * Initial data is sent.
+ * Notify device statemachine that we are up and
+ * running.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
+
+ fsm_deltimer(&ch->timer);
+ fsm_newstate(fi, CTC_STATE_TXIDLE);
+ fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
+}
+
+/**
+ * Got normal data, check for sanity, queue it up, allocate a new
+ * buffer, trigger the bottom half, and initiate the next read.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void chx_rx(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
+ struct sk_buff *skb = ch->trans_skb;
+ __u16 block_len = *((__u16 *)skb->data);
+ int check_len;
+ int rc;
+
+ fsm_deltimer(&ch->timer);
+ if (len < 8) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s(%s): got packet with length %d < 8\n",
+ CTCM_FUNTAIL, dev->name, len);
+ priv->stats.rx_dropped++;
+ priv->stats.rx_length_errors++;
+ goto again;
+ }
+ if (len > ch->max_bufsize) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s(%s): got packet with length %d > %d\n",
+ CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
+ priv->stats.rx_dropped++;
+ priv->stats.rx_length_errors++;
+ goto again;
+ }
+
+ /*
+ * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
+ */
+ switch (ch->protocol) {
+ case CTCM_PROTO_S390:
+ case CTCM_PROTO_OS390:
+ check_len = block_len + 2;
+ break;
+ default:
+ check_len = block_len;
+ break;
+ }
+ if ((len < block_len) || (len > check_len)) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s(%s): got block length %d != rx length %d\n",
+ CTCM_FUNTAIL, dev->name, block_len, len);
+ if (do_debug)
+ ctcmpc_dump_skb(skb, 0);
+
+ *((__u16 *)skb->data) = len;
+ priv->stats.rx_dropped++;
+ priv->stats.rx_length_errors++;
+ goto again;
+ }
+ if (block_len > 2) {
+ *((__u16 *)skb->data) = block_len - 2;
+ ctcm_unpack_skb(ch, skb);
+ }
+ again:
+ skb->data = ch->trans_skb_data;
+ skb_reset_tail_pointer(skb);
+ skb->len = 0;
+ if (ctcm_checkalloc_buffer(ch))
+ return;
+ ch->ccw[1].count = ch->max_bufsize;
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long)ch, 0xff, 0);
+ if (rc != 0)
+ ctcm_ccw_check_rc(ch, rc, "normal RX");
+}
+
+/**
+ * Initialize connection by sending a __u16 of value 0.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void chx_firstio(fsm_instance *fi, int event, void *arg)
+{
+ int rc;
+ struct channel *ch = arg;
+ int fsmstate = fsm_getstate(fi);
+
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s(%s) : %02x",
+ CTCM_FUNTAIL, ch->id, fsmstate);
+
+ ch->sense_rc = 0; /* reset unit check report control */
+ if (fsmstate == CTC_STATE_TXIDLE)
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+ "%s(%s): remote side issued READ?, init.\n",
+ CTCM_FUNTAIL, ch->id);
+ fsm_deltimer(&ch->timer);
+ if (ctcm_checkalloc_buffer(ch))
+ return;
+ if ((fsmstate == CTC_STATE_SETUPWAIT) &&
+ (ch->protocol == CTCM_PROTO_OS390)) {
+ /* OS/390 resp. z/OS */
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+ *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
+ CTC_EVENT_TIMER, ch);
+ chx_rxidle(fi, event, arg);
+ } else {
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ fsm_newstate(fi, CTC_STATE_TXIDLE);
+ fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
+ }
+ return;
+ }
+ /*
+ * Don't set up a timer for receiving the initial RX frame
+ * if in compatibility mode, since VM TCP delays the initial
+ * frame until it has some data to send.
+ */
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
+ (ch->protocol != CTCM_PROTO_S390))
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+ *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
+ ch->ccw[1].count = 2; /* Transfer only length */
+
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+ ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long)ch, 0xff, 0);
+ if (rc != 0) {
+ fsm_deltimer(&ch->timer);
+ fsm_newstate(fi, CTC_STATE_SETUPWAIT);
+ ctcm_ccw_check_rc(ch, rc, "init IO");
+ }
+ /*
+ * Since we don't set up a timer in compatibility mode, we
+ * also signal the RX channel up immediately. This enables us
+ * to send packets early, which in turn usually triggers a
+ * reply from VM TCP that brings up the RX channel to its
+ * final state.
+ */
+ if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
+ (ch->protocol == CTCM_PROTO_S390)) {
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+ }
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void chx_rxidle(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ __u16 buflen;
+ int rc;
+
+ fsm_deltimer(&ch->timer);
+ buflen = *((__u16 *)ch->trans_skb->data);
+ CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
+ __func__, dev->name, buflen);
+
+ if (buflen >= CTCM_INITIAL_BLOCKLEN) {
+ if (ctcm_checkalloc_buffer(ch))
+ return;
+ ch->ccw[1].count = ch->max_bufsize;
+ fsm_newstate(fi, CTC_STATE_RXIDLE);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long)ch, 0xff, 0);
+ if (rc != 0) {
+ fsm_newstate(fi, CTC_STATE_RXINIT);
+ ctcm_ccw_check_rc(ch, rc, "initial RX");
+ } else
+ fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+ } else {
+ CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
+ __func__, dev->name,
+ buflen, CTCM_INITIAL_BLOCKLEN);
+ chx_firstio(fi, event, arg);
+ }
+}
+
+/**
+ * Set channel into extended mode.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ int rc;
+ unsigned long saveflags = 0;
+ int timeout = CTCM_TIME_5_SEC;
+
+ fsm_deltimer(&ch->timer);
+ if (IS_MPC(ch)) {
+ timeout = 1500;
+ CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
+ __func__, smp_processor_id(), ch, ch->id);
+ }
+ fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
+ fsm_newstate(fi, CTC_STATE_SETUPWAIT);
+ CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
+
+ if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ /* Such conditional locking is nondeterministic under static
+ * analysis, so sparse warnings are ignored here. */
+
+ rc = ccw_device_start(ch->cdev, &ch->ccw[6],
+ (unsigned long)ch, 0xff, 0);
+ if (event == CTC_EVENT_TIMER) /* see above comments */
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ if (rc != 0) {
+ fsm_deltimer(&ch->timer);
+ fsm_newstate(fi, CTC_STATE_STARTWAIT);
+ ctcm_ccw_check_rc(ch, rc, "set Mode");
+ } else
+ ch->retry = 0;
+}
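+
+/*
+ * Editorial note: the conditional locking above keys on the event
+ * because ccw interrupt handlers are invoked with the ccwdev lock
+ * already held, while the fsm timer callback runs without it; only
+ * the timer path must take the lock itself. The same pattern recurs
+ * in several actions below.
+ */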
+
+/**
+ * Set up the channel.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ unsigned long saveflags;
+ int rc;
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
+ CTCM_FUNTAIL, ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
+
+ if (ch->trans_skb != NULL) {
+ clear_normalized_cda(&ch->ccw[1]);
+ dev_kfree_skb(ch->trans_skb);
+ ch->trans_skb = NULL;
+ }
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+ ch->ccw[1].cmd_code = CCW_CMD_READ;
+ ch->ccw[1].flags = CCW_FLAG_SLI;
+ ch->ccw[1].count = 0;
+ } else {
+ ch->ccw[1].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[1].count = 0;
+ }
+ if (ctcm_checkalloc_buffer(ch)) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+ "%s(%s): %s trans_skb alloc delayed "
+ "until first transfer",
+ CTCM_FUNTAIL, ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
+ }
+ ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
+ ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[0].count = 0;
+ ch->ccw[0].cda = 0;
+ ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
+ ch->ccw[2].flags = CCW_FLAG_SLI;
+ ch->ccw[2].count = 0;
+ ch->ccw[2].cda = 0;
+ memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
+ ch->ccw[4].cda = 0;
+ ch->ccw[4].flags &= ~CCW_FLAG_IDA;
+
+ fsm_newstate(fi, CTC_STATE_STARTWAIT);
+ fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ if (rc != 0) {
+ if (rc != -EBUSY)
+ fsm_deltimer(&ch->timer);
+ ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
+ }
+}
+
+/**
+ * Shut down a channel.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ unsigned long saveflags = 0;
+ int rc;
+ int oldstate;
+
+ fsm_deltimer(&ch->timer);
+ if (IS_MPC(ch))
+ fsm_deltimer(&ch->sweep_timer);
+
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+ if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ /* Such conditional locking is nondeterministic under static
+ * analysis, so sparse warnings are ignored here. */
+ oldstate = fsm_getstate(fi);
+ fsm_newstate(fi, CTC_STATE_TERM);
+ rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+
+ if (event == CTC_EVENT_STOP)
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ /* see remark above about conditional locking */
+
+ if (rc != 0 && rc != -EBUSY) {
+ fsm_deltimer(&ch->timer);
+ if (event != CTC_EVENT_STOP) {
+ fsm_newstate(fi, oldstate);
+ ctcm_ccw_check_rc(ch, rc, (char *)__func__);
+ }
+ }
+}
+
+/**
+ * Cleanup helper for ctcm_chx_fail and ctcm_chx_stopped:
+ * clean up the channel's queues and notify the interface statemachine.
+ *
+ * fi An instance of a channel statemachine.
+ * state The next state (depending on caller).
+ * ch The channel to operate on.
+ */
+static void ctcm_chx_cleanup(fsm_instance *fi, int state,
+ struct channel *ch)
+{
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
+ "%s(%s): %s[%d]\n",
+ CTCM_FUNTAIL, dev->name, ch->id, state);
+
+ fsm_deltimer(&ch->timer);
+ if (IS_MPC(ch))
+ fsm_deltimer(&ch->sweep_timer);
+
+ fsm_newstate(fi, state);
+ if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
+ clear_normalized_cda(&ch->ccw[1]);
+ dev_kfree_skb_any(ch->trans_skb);
+ ch->trans_skb = NULL;
+ }
+
+ ch->th_seg = 0x00;
+ ch->th_seq_num = 0x00;
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+ skb_queue_purge(&ch->io_queue);
+ fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+ } else {
+ ctcm_purge_skb_queue(&ch->io_queue);
+ if (IS_MPC(ch))
+ ctcm_purge_skb_queue(&ch->sweep_queue);
+ spin_lock(&ch->collect_lock);
+ ctcm_purge_skb_queue(&ch->collect_queue);
+ ch->collect_len = 0;
+ spin_unlock(&ch->collect_lock);
+ fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+ }
+}
+
+/**
+ * A channel has successfully been halted.
+ * Clean up its queue and notify the interface statemachine.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
+{
+ ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
+}
+
+/**
+ * A stop command from the device statemachine arrived and we are
+ * in the not-operational state. Set the state to stopped.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
+{
+ fsm_newstate(fi, CTC_STATE_STOPPED);
+}
+
+/**
+ * A machine check for no path, not-operational status or a gone
+ * device has happened.
+ * Clean up the queue and notify the interface statemachine.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
+{
+ ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
+}
+
+/**
+ * Handle an error during channel setup.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ /*
+ * Special case: Got UC_RCRESET on setmode.
+ * This means that the remote side isn't set up yet. In this case,
+ * simply retry after some 10 secs...
+ */
+ if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
+ ((event == CTC_EVENT_UC_RCRESET) ||
+ (event == CTC_EVENT_UC_RSRESET))) {
+ fsm_newstate(fi, CTC_STATE_STARTRETRY);
+ fsm_deltimer(&ch->timer);
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+ if (!IS_MPC(ch) &&
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
+ int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+ if (rc != 0)
+ ctcm_ccw_check_rc(ch, rc,
+ "HaltIO in chx_setuperr");
+ }
+ return;
+ }
+
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
+ "%s(%s) : %s error during %s channel setup state=%s\n",
+ CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
+ fsm_getstate_str(fi));
+
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+ fsm_newstate(fi, CTC_STATE_RXERR);
+ fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+ } else {
+ fsm_newstate(fi, CTC_STATE_TXERR);
+ fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+ }
+}
+
+/**
+ * Restart a channel after an error.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ unsigned long saveflags = 0;
+ int oldstate;
+ int rc;
+
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s: %s[%d] of %s\n",
+ CTCM_FUNTAIL, ch->id, event, dev->name);
+
+ fsm_deltimer(&ch->timer);
+
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+ oldstate = fsm_getstate(fi);
+ fsm_newstate(fi, CTC_STATE_STARTWAIT);
+ if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ /* Such conditional locking is a known problem for sparse
+ * because it is nondeterministic under static analysis.
+ * Warnings should be ignored here. */
+ rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
+ if (event == CTC_EVENT_TIMER)
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ if (rc != 0) {
+ if (rc != -EBUSY) {
+ fsm_deltimer(&ch->timer);
+ fsm_newstate(fi, oldstate);
+ }
+ ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
+ }
+}
+
+/**
+ * Handle error during RX initial handshake (exchange of
+ * 0-length block header)
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ if (event == CTC_EVENT_TIMER) {
+ if (!IS_MPCDEV(dev))
+ /* TODO : check if MPC deletes timer somewhere */
+ fsm_deltimer(&ch->timer);
+ if (ch->retry++ < 3)
+ ctcm_chx_restart(fi, event, arg);
+ else {
+ fsm_newstate(fi, CTC_STATE_RXERR);
+ fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+ }
+ } else {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
+ ctc_ch_event_names[event], fsm_getstate_str(fi));
+
+ dev_warn(&dev->dev,
+ "Initialization failed with RX/TX init handshake "
+ "error %s\n", ctc_ch_event_names[event]);
+ }
+}
+
+/**
+ * Notify device statemachine if we gave up initialization
+ * of RX channel.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): RX %s busy, init. fail",
+ CTCM_FUNTAIL, dev->name, ch->id);
+ fsm_newstate(fi, CTC_STATE_RXERR);
+ fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+}
+
+/**
+ * Handle RX Unit check remote reset (remote disconnected)
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct channel *ch2;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s: %s: remote disconnect - re-init ...",
+ CTCM_FUNTAIL, dev->name);
+ fsm_deltimer(&ch->timer);
+ /*
+ * Notify device statemachine
+ */
+ fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+ fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+
+ fsm_newstate(fi, CTC_STATE_DTERM);
+ ch2 = priv->channel[CTCM_WRITE];
+ fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
+
+ ccw_device_halt(ch->cdev, (unsigned long)ch);
+ ccw_device_halt(ch2->cdev, (unsigned long)ch2);
+}
+
+/**
+ * Handle error during TX channel initialization.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ if (event == CTC_EVENT_TIMER) {
+ fsm_deltimer(&ch->timer);
+ if (ch->retry++ < 3)
+ ctcm_chx_restart(fi, event, arg);
+ else {
+ fsm_newstate(fi, CTC_STATE_TXERR);
+ fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+ }
+ } else {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
+ ctc_ch_event_names[event], fsm_getstate_str(fi));
+
+ dev_warn(&dev->dev,
+ "Initialization failed with RX/TX init handshake "
+ "error %s\n", ctc_ch_event_names[event]);
+ }
+}
+
+/**
+ * Handle TX timeout by retrying operation.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct sk_buff *skb;
+
+ CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
+ __func__, smp_processor_id(), ch, ch->id);
+
+ fsm_deltimer(&ch->timer);
+ if (ch->retry++ > 3) {
+ struct mpc_group *gptr = priv->mpcg;
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
+ "%s: %s: retries exceeded",
+ CTCM_FUNTAIL, ch->id);
+ fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+ /* Call restart if not MPC, or if MPC and the mpcg fsm is
+ * ready; use gptr as the MPC indicator. */
+ if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
+ ctcm_chx_restart(fi, event, arg);
+ goto done;
+ }
+
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+ "%s : %s: retry %d",
+ CTCM_FUNTAIL, ch->id, ch->retry);
+ skb = skb_peek(&ch->io_queue);
+ if (skb) {
+ int rc = 0;
+ unsigned long saveflags = 0;
+ clear_normalized_cda(&ch->ccw[4]);
+ ch->ccw[4].count = skb->len;
+ if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
+ "%s: %s: IDAL alloc failed",
+ CTCM_FUNTAIL, ch->id);
+ fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+ ctcm_chx_restart(fi, event, arg);
+ goto done;
+ }
+ fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
+ if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ /* Such conditional locking is a known problem for sparse
+ * because it is nondeterministic under static analysis.
+ * Warnings should be ignored here. */
+ if (do_debug_ccw)
+ ctcmpc_dumpit((char *)&ch->ccw[3],
+ sizeof(struct ccw1) * 3);
+
+ rc = ccw_device_start(ch->cdev, &ch->ccw[3],
+ (unsigned long)ch, 0xff, 0);
+ if (event == CTC_EVENT_TIMER)
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
+ saveflags);
+ if (rc != 0) {
+ fsm_deltimer(&ch->timer);
+ ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
+ ctcm_purge_skb_queue(&ch->io_queue);
+ }
+ }
+done:
+ return;
+}
+
+/**
+ * Handle fatal errors during an I/O command.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ int rd = CHANNEL_DIRECTION(ch->flags);
+
+ fsm_deltimer(&ch->timer);
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s: %s: %s unrecoverable channel error",
+ CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
+
+ if (IS_MPC(ch)) {
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ }
+ if (rd == CTCM_READ) {
+ fsm_newstate(fi, CTC_STATE_RXERR);
+ fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
+ } else {
+ fsm_newstate(fi, CTC_STATE_TXERR);
+ fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
+ }
+}
+
+/*
+ * The ctcm statemachine for a channel.
+ */
+const fsm_node ch_fsm[] = {
+ { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
+ { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
+ { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
+ { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
+
+ { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
+ { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
+ { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
+ { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
+
+ { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
+ { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
+ { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
+ { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
+ { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
+ { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
+ { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
+ { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
+ { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
+ { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
+ { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },
+
+ { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
+ { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
+ { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
+ { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
+ { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
+ { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
+ { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
+ { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
+ { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
+ { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
+ { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
+ { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
+ { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
+ { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
+ { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
+ { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
+ { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
+ { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
+ { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
+ { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
+ { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+};
+
+int ch_fsm_len = ARRAY_SIZE(ch_fsm);
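+
+/*
+ * Illustrative sketch (assumed usage via the generic fsm layer in
+ * fsm.h): a channel instantiates the table above roughly like
+ *
+ * ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
+ *   nr_states, nr_events, ch_fsm, ch_fsm_len, GFP_KERNEL);
+ * fsm_event(ch->fsm, CTC_EVENT_START, ch);
+ *
+ * fsm_event() looks up the (state, event) pair and runs the bound
+ * action, e.g. ctcm_chx_start() from CTC_STATE_STOPPED.
+ */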
+
+/*
+ * MPC actions for the mpc channel statemachine.
+ * Handling of the MPC protocol requires an extra statemachine and
+ * actions, which are prefixed ctcmpc_.
+ * The ctc_ch_states and ctc_ch_state_names,
+ * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
+ * which are expanded by some MPC-specific elements.
+ */
+
+/*
+ * Actions for mpc channel statemachine.
+ */
+
+/**
+ * Normal data has been sent. Free the corresponding
+ * skb (it's in io_queue), reset dev->tbusy and
+ * revert to idle state.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct sk_buff *skb;
+ int first = 1;
+ int i;
+ __u32 data_space;
+ unsigned long duration;
+ struct sk_buff *peekskb;
+ int rc;
+ struct th_header *header;
+ struct pdu *p_header;
+ struct timespec done_stamp = current_kernel_time(); /* xtime */
+
+ CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
+ __func__, dev->name, smp_processor_id());
+
+ duration =
+ (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
+ (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
+ if (duration > ch->prof.tx_time)
+ ch->prof.tx_time = duration;
+
+ if (ch->irb->scsw.cmd.count != 0)
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+ "%s(%s): TX not complete, remaining %d bytes",
+ CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
+ fsm_deltimer(&ch->timer);
+ while ((skb = skb_dequeue(&ch->io_queue))) {
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
+ if (first) {
+ priv->stats.tx_bytes += 2;
+ first = 0;
+ }
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ }
+ spin_lock(&ch->collect_lock);
+ clear_normalized_cda(&ch->ccw[4]);
+ if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
+ spin_unlock(&ch->collect_lock);
+ fsm_newstate(fi, CTC_STATE_TXIDLE);
+ goto done;
+ }
+
+ if (ctcm_checkalloc_buffer(ch)) {
+ spin_unlock(&ch->collect_lock);
+ goto done;
+ }
+ ch->trans_skb->data = ch->trans_skb_data;
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+ if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
+ ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
+ if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
+ ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
+ i = 0;
+ p_header = NULL;
+ data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
+
+ CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
+ " data_space:%04x\n",
+ __func__, data_space);
+
+ while ((skb = skb_dequeue(&ch->collect_queue))) {
+ memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
+ p_header = (struct pdu *)
+ (skb_tail_pointer(ch->trans_skb) - skb->len);
+ p_header->pdu_flag = 0x00;
+ if (skb->protocol == ntohs(ETH_P_SNAP))
+ p_header->pdu_flag |= 0x60;
+ else
+ p_header->pdu_flag |= 0x20;
+
+ CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
+ __func__, ch->trans_skb->len);
+ CTCM_PR_DBGDATA("%s: pdu header and data for up"
+ " to 32 bytes sent to vtam\n", __func__);
+ CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
+
+ ch->collect_len -= skb->len;
+ data_space -= skb->len;
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ peekskb = skb_peek(&ch->collect_queue);
+ /* stop when the queue is empty or the next skb will not fit */
+ if (!peekskb || peekskb->len > data_space)
+ break;
+ i++;
+ }
+ /* p_header points to the last one we handled */
+ if (p_header)
+ p_header->pdu_flag |= PDU_LAST; /* say it's the last one */
+ header = kzalloc(TH_HEADER_LENGTH, gfp_type());
+ if (!header) {
+ spin_unlock(&ch->collect_lock);
+ fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ }
+ header->th_ch_flag = TH_HAS_PDU; /* Normal data */
+ ch->th_seq_num++;
+ header->th_seq_num = ch->th_seq_num;
+
+ CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
+ __func__, ch->th_seq_num);
+
+ memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
+ TH_HEADER_LENGTH); /* put the TH on the packet */
+
+ kfree(header);
+
+ CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
+ __func__, ch->trans_skb->len);
+ CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
+ "data to vtam from collect_q\n", __func__);
+ CTCM_D3_DUMP((char *)ch->trans_skb->data,
+ min_t(int, ch->trans_skb->len, 50));
+
+ spin_unlock(&ch->collect_lock);
+ clear_normalized_cda(&ch->ccw[1]);
+ if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
+ dev_kfree_skb_any(ch->trans_skb);
+ ch->trans_skb = NULL;
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+ "%s: %s: IDAL alloc failed",
+ CTCM_FUNTAIL, ch->id);
+ fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+ return;
+ }
+ ch->ccw[1].count = ch->trans_skb->len;
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+ ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ if (do_debug_ccw)
+ ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long)ch, 0xff, 0);
+ ch->prof.doios_multi++;
+ if (rc != 0) {
+ priv->stats.tx_dropped += i;
+ priv->stats.tx_errors += i;
+ fsm_deltimer(&ch->timer);
+ ctcm_ccw_check_rc(ch, rc, "chained TX");
+ }
+done:
+ ctcm_clear_busy(dev);
+ return;
+}
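+
+/*
+ * Editorial note (sketch, not driver code): each MPC write built above
+ * is framed as
+ *
+ * | TH header (TH_HEADER_LENGTH) | PDU | PDU | ... |
+ *
+ * where each PDU carries a pdu_flag (0x60 for SNAP traffic, 0x20
+ * otherwise, plus PDU_LAST on the final one) and the TH carries
+ * th_ch_flag TH_HAS_PDU and the per-channel sequence number
+ * th_seq_num.
+ */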
+
+/**
+ * Got normal data, check for sanity, queue it up, allocate a new
+ * buffer, trigger the bottom half, and initiate the next read.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct sk_buff *skb = ch->trans_skb;
+ struct sk_buff *new_skb;
+ unsigned long saveflags = 0; /* avoids compiler warning */
+ int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
+
+ CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
+ CTCM_FUNTAIL, dev->name, smp_processor_id(),
+ ch->id, ch->max_bufsize, len);
+ fsm_deltimer(&ch->timer);
+
+ if (skb == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): TRANS_SKB = NULL",
+ CTCM_FUNTAIL, dev->name);
+ goto again;
+ }
+
+ if (len < TH_HEADER_LENGTH) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): packet length %d to short",
+ CTCM_FUNTAIL, dev->name, len);
+ priv->stats.rx_dropped++;
+ priv->stats.rx_length_errors++;
+ } else {
+ /* must have valid th header or game over */
+ __u32 block_len = len;
+ len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
+ new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
+
+ if (new_skb == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%d): skb allocation failed",
+ CTCM_FUNTAIL, dev->name);
+ fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+ goto again;
+ }
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_RESET:
+ case MPCG_STATE_INOP:
+ dev_kfree_skb_any(new_skb);
+ break;
+ case MPCG_STATE_FLOWC:
+ case MPCG_STATE_READY:
+ memcpy(skb_put(new_skb, block_len),
+ skb->data, block_len);
+ skb_queue_tail(&ch->io_queue, new_skb);
+ tasklet_schedule(&ch->ch_tasklet);
+ break;
+ default:
+ memcpy(skb_put(new_skb, len), skb->data, len);
+ skb_queue_tail(&ch->io_queue, new_skb);
+ tasklet_hi_schedule(&ch->ch_tasklet);
+ break;
+ }
+ }
+
+again:
+ switch (fsm_getstate(grp->fsm)) {
+ int rc, dolock;
+ case MPCG_STATE_FLOWC:
+ case MPCG_STATE_READY:
+ if (ctcm_checkalloc_buffer(ch))
+ break;
+ ch->trans_skb->data = ch->trans_skb_data;
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+ ch->ccw[1].count = ch->max_bufsize;
+ if (do_debug_ccw)
+ ctcmpc_dumpit((char *)&ch->ccw[0],
+ sizeof(struct ccw1) * 3);
+ dolock = !in_irq();
+ if (dolock)
+ spin_lock_irqsave(
+ get_ccwdev_lock(ch->cdev), saveflags);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long)ch, 0xff, 0);
+ if (dolock) /* see remark about conditional locking */
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(ch->cdev), saveflags);
+ if (rc != 0)
+ ctcm_ccw_check_rc(ch, rc, "normal RX");
+ default:
+ break;
+ }
+
+ CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
+ __func__, dev->name, ch, ch->id);
+
+}
+
+/**
+ * Initialize connection by sending a __u16 of value 0.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *gptr = priv->mpcg;
+
+ CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
+ __func__, ch->id, ch);
+
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
+ "%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
+ CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
+ fsm_getstate(gptr->fsm), ch->protocol);
+
+ if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
+ MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
+
+ fsm_deltimer(&ch->timer);
+ if (ctcm_checkalloc_buffer(ch))
+ goto done;
+
+ switch (fsm_getstate(fi)) {
+ case CTC_STATE_STARTRETRY:
+ case CTC_STATE_SETUPWAIT:
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
+ ctcmpc_chx_rxidle(fi, event, arg);
+ } else {
+ fsm_newstate(fi, CTC_STATE_TXIDLE);
+ fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
+ }
+ goto done;
+ default:
+ break;
+ }
+
+ fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+ ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
+
+done:
+ CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
+ __func__, ch->id, ch);
+ return;
+}
+
+/**
+ * Got initial data, check it. If OK,
+ * notify device statemachine that we are up and
+ * running.
+ *
+ * fi An instance of a channel statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from channel * upon call.
+ */
+void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ int rc;
+ unsigned long saveflags = 0; /* avoids compiler warning */
+
+ fsm_deltimer(&ch->timer);
+ CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
+ __func__, ch->id, dev->name, smp_processor_id(),
+ fsm_getstate(fi), fsm_getstate(grp->fsm));
+
+ fsm_newstate(fi, CTC_STATE_RXIDLE);
+ /* XID processing complete */
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_FLOWC:
+ case MPCG_STATE_READY:
+ if (ctcm_checkalloc_buffer(ch))
+ goto done;
+ ch->trans_skb->data = ch->trans_skb_data;
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+ ch->ccw[1].count = ch->max_bufsize;
+ CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
+ if (event == CTC_EVENT_START)
+ /* see remark about conditional locking */
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[0],
+ (unsigned long)ch, 0xff, 0);
+ if (event == CTC_EVENT_START)
+ spin_unlock_irqrestore(
+ get_ccwdev_lock(ch->cdev), saveflags);
+ if (rc != 0) {
+ fsm_newstate(fi, CTC_STATE_RXINIT);
+ ctcm_ccw_check_rc(ch, rc, "initial RX");
+ goto done;
+ }
+ break;
+ default:
+ break;
+ }
+
+ fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
+done:
+ return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
+ __func__, dev->name, ch->id, ch, smp_processor_id(),
+ fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_XID2INITW:
+		/* OK, start y-side xid exchanges */
+ if (!ch->in_mpcgroup)
+ break;
+ if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
+ fsm_deltimer(&grp->timer);
+ fsm_addtimer(&grp->timer,
+ MPC_XID_TIMEOUT_VALUE,
+ MPCG_EVENT_TIMER, dev);
+ fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+
+ } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
+ /* attn rcvd before xid0 processed via bh */
+ fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+ break;
+ case MPCG_STATE_XID2INITX:
+ case MPCG_STATE_XID0IOWAIT:
+ case MPCG_STATE_XID0IOWAIX:
+		/* attn rcvd before xid0 was processed on this channel,
+		   but the group is in mid-xid0 processing */
+ if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
+ fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+ break;
+ case MPCG_STATE_XID7INITW:
+ case MPCG_STATE_XID7INITX:
+ case MPCG_STATE_XID7INITI:
+ case MPCG_STATE_XID7INITZ:
+ switch (fsm_getstate(ch->fsm)) {
+ case CH_XID7_PENDING:
+ fsm_newstate(ch->fsm, CH_XID7_PENDING1);
+ break;
+ case CH_XID7_PENDING2:
+ fsm_newstate(ch->fsm, CH_XID7_PENDING3);
+ break;
+ }
+ fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
+ break;
+ }
+
+ return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from one point in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
+ __func__, dev->name, ch->id,
+ fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
+
+ fsm_deltimer(&ch->timer);
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_XID0IOWAIT:
+		/* VTAM wants to be primary. Start y-side xid exchanges. */
+		/* Only one attn-busy is received at a time, so the */
+		/* state must not be changed on each occurrence. */
+ grp->changed_side = 1;
+ fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
+ break;
+ case MPCG_STATE_XID2INITW:
+ if (grp->changed_side == 1) {
+ grp->changed_side = 2;
+ break;
+ }
+		/* process began via call to establish_conn, */
+		/* so failure must be reported instead of */
+		/* reverting to the ready-for-xid passive state */
+ if (grp->estconnfunc)
+ goto done;
+ /* this attnbusy is NOT the result of xside xid */
+ /* collisions so yside must have been triggered */
+ /* by an ATTN that was not intended to start XID */
+		/* processing. Revert to ready-for-xid and */
+ /* wait for ATTN interrupt to signal xid start */
+ if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
+			fsm_newstate(ch->fsm, CH_XID0_PENDING);
+ fsm_deltimer(&grp->timer);
+ goto done;
+ }
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ case MPCG_STATE_XID2INITX:
+		/* XID2 was received before ATTN Busy for the second
+		   channel. Send y-side xid for the second channel.
+		*/
+ if (grp->changed_side == 1) {
+ grp->changed_side = 2;
+ break;
+ }
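+		/* fall through */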
+ case MPCG_STATE_XID0IOWAIX:
+ case MPCG_STATE_XID7INITW:
+ case MPCG_STATE_XID7INITX:
+ case MPCG_STATE_XID7INITI:
+ case MPCG_STATE_XID7INITZ:
+ default:
+		/* multiple attn-busy indicates the sides are too far */
+		/* out of sync and these are certainly not being */
+		/* received as part of valid mpc group negotiations */
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ }
+
+ if (grp->changed_side == 1) {
+ fsm_deltimer(&grp->timer);
+ fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
+ MPCG_EVENT_TIMER, dev);
+ }
+ if (ch->in_mpcgroup)
+ fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+ else
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): channel %s not added to group",
+ CTCM_FUNTAIL, dev->name, ch->id);
+
+done:
+ return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
+static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
+ return;
+}
+
+/*
+ * ctcmpc channel FSM action
+ * called from several points in ctcmpc_ch_fsm
+ * ctcmpc only
+ */
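+/*
+ * Behaviour, derived from the code below: dequeue the next TH sweep
+ * frame from the write channel's sweep_queue and start the write I/O
+ * (ccw[3]); the pending-request/response counters are decremented per
+ * frame, and once both reach zero the group leaves sweep mode and both
+ * TH sequence counters are reset to zero.
+ */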
+static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
+{
+ struct channel *ach = arg;
+ struct net_device *dev = ach->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct channel *wch = priv->channel[CTCM_WRITE];
+ struct channel *rch = priv->channel[CTCM_READ];
+ struct sk_buff *skb;
+ struct th_sweep *header;
+ int rc = 0;
+ unsigned long saveflags = 0;
+
+ CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
+ __func__, smp_processor_id(), ach, ach->id);
+
+ if (grp->in_sweep == 0)
+ goto done;
+
+	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n",
+			__func__, wch->th_seq_num);
+	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n",
+			__func__, rch->th_seq_num);
+
+ if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
+ /* give the previous IO time to complete */
+ fsm_addtimer(&wch->sweep_timer,
+ 200, CTC_EVENT_RSWEEP_TIMER, wch);
+ goto done;
+ }
+
+ skb = skb_dequeue(&wch->sweep_queue);
+ if (!skb)
+ goto done;
+
+ if (set_normalized_cda(&wch->ccw[4], skb->data)) {
+ grp->in_sweep = 0;
+ ctcm_clear_busy_do(dev);
+ dev_kfree_skb_any(skb);
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ } else {
+ atomic_inc(&skb->users);
+ skb_queue_tail(&wch->io_queue, skb);
+ }
+
+ /* send out the sweep */
+ wch->ccw[4].count = skb->len;
+
+ header = (struct th_sweep *)skb->data;
+ switch (header->th.th_ch_flag) {
+ case TH_SWEEP_REQ:
+ grp->sweep_req_pend_num--;
+ break;
+ case TH_SWEEP_RESP:
+ grp->sweep_rsp_pend_num--;
+ break;
+ }
+
+ header->sw.th_last_seq = wch->th_seq_num;
+
+ CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
+ CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
+ CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
+
+ fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
+ fsm_newstate(wch->fsm, CTC_STATE_TX);
+
+ spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
+ wch->prof.send_stamp = current_kernel_time(); /* xtime */
+ rc = ccw_device_start(wch->cdev, &wch->ccw[3],
+ (unsigned long) wch, 0xff, 0);
+ spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
+
+ if ((grp->sweep_req_pend_num == 0) &&
+ (grp->sweep_rsp_pend_num == 0)) {
+ grp->in_sweep = 0;
+ rch->th_seq_num = 0x00;
+ wch->th_seq_num = 0x00;
+ ctcm_clear_busy_do(dev);
+ }
+
+	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n",
+ __func__, wch->th_seq_num, rch->th_seq_num);
+
+ if (rc != 0)
+ ctcm_ccw_check_rc(wch, rc, "send sweep");
+
+done:
+ return;
+}
+
+
+/*
+ * The ctcmpc statemachine for a channel.
+ */
+
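+/*
+ * Each fsm_node is a (state, event, action) triple. A simplified sketch
+ * of how the dispatcher (cf. fsm_event() in fsm.h) consumes such a table
+ * once init_fsm() has expanded it into a jump matrix:
+ *
+ *	fsm_function_t fn;
+ *	fn = fi->f->jumpmatrix[fi->f->nr_states * event +
+ *			       atomic_read(&fi->state)];
+ *	if (fn)
+ *		fn(fi, event, arg);	(e.g. ctcmpc_chx_attn)
+ *
+ * (state, event) pairs that have no entry are ignored silently.
+ */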
+const fsm_node ctcmpc_ch_fsm[] = {
+ { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
+ { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
+ { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
+ { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
+
+ { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
+ { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
+ { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
+ { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
+ { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
+ { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
+ { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+
+ { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
+ { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
+ { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
+ { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
+ { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
+ { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
+ { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
+ { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
+ { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+
+ { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
+ { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
+ { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
+ { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
+
+ { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
+ { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
+ { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
+ { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
+ { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
+ { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
+ { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+
+ { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
+ { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
+ { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
+ { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
+ { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
+ { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
+ { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+
+ { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
+ { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
+ { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
+ { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
+ { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
+ { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
+ { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+
+ { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
+ { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
+ { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
+ { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
+ { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
+ { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
+ { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+
+ { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
+ { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
+ { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
+ { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
+ { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
+ { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
+ { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+
+ { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
+ { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
+ { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
+ { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
+ { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
+ { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
+ { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
+ { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+
+ { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
+ { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
+ { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
+ { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
+
+ { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
+ { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
+ { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
+ { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
+ { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
+
+ { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
+ { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
+ { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
+ { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
+
+ { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
+ { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
+ { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
+ { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
+ { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
+ { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+ { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+
+ { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
+ { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
+ { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
+ { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
+ { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+
+ { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
+ { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
+ { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
+ { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
+ { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
+ { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
+ { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
+
+ { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
+ { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
+ { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+ { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
+};
+
+int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
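+
+/*
+ * A minimal sketch, following the init_fsm() signature from fsm.h, of
+ * how this table becomes a per-channel FSM instance (surrounding
+ * variable names are illustrative only):
+ *
+ *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
+ *			   CTC_MPC_NR_STATES, CTC_MPC_NR_EVENTS,
+ *			   ctcmpc_ch_fsm, mpc_ch_fsm_len, GFP_KERNEL);
+ */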
+
+/*
+ * Actions for interface - statemachine.
+ */
+
+/**
+ * Startup channels by sending CTC_EVENT_START to each channel.
+ *
+ * fi An instance of an interface statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_start(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv = dev->ml_priv;
+ int direction;
+
+ CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+ fsm_deltimer(&priv->restart_timer);
+ fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+ if (IS_MPC(priv))
+ priv->mpcg->channels_terminating = 0;
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+ struct channel *ch = priv->channel[direction];
+ fsm_event(ch->fsm, CTC_EVENT_START, ch);
+ }
+}
+
+/**
+ * Shutdown channels by sending CTC_EVENT_STOP to each channel.
+ *
+ * fi An instance of an interface statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_stop(fsm_instance *fi, int event, void *arg)
+{
+ int direction;
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+ fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+ struct channel *ch = priv->channel[direction];
+ fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
+ ch->th_seq_num = 0x00;
+ CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
+ __func__, ch->th_seq_num);
+ }
+ if (IS_MPC(priv))
+ fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
+}
+
+static void dev_action_restart(fsm_instance *fi, int event, void *arg)
+{
+ int restart_timer;
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCMY_DBF_DEV_NAME(TRACE, dev, "");
+
+ if (IS_MPC(priv)) {
+ restart_timer = CTCM_TIME_1_SEC;
+ } else {
+ restart_timer = CTCM_TIME_5_SEC;
+ }
+ dev_info(&dev->dev, "Restarting device\n");
+
+ dev_action_stop(fi, event, arg);
+ fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+ if (IS_MPC(priv))
+ fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
+
+ /* going back into start sequence too quickly can */
+ /* result in the other side becoming unreachable due */
+ /* to sense reported when IO is aborted */
+ fsm_addtimer(&priv->restart_timer, restart_timer,
+ DEV_EVENT_START, dev);
+}
+
+/**
+ * Called from channel statemachine
+ * when a channel is up and running.
+ *
+ * fi An instance of an interface statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_chup(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv = dev->ml_priv;
+ int dev_stat = fsm_getstate(fi);
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
+ "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
+ dev->name, dev->ml_priv, dev_stat, event);
+
+ switch (fsm_getstate(fi)) {
+ case DEV_STATE_STARTWAIT_RXTX:
+ if (event == DEV_EVENT_RXUP)
+ fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
+ else
+ fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
+ break;
+ case DEV_STATE_STARTWAIT_RX:
+ if (event == DEV_EVENT_RXUP) {
+ fsm_newstate(fi, DEV_STATE_RUNNING);
+ dev_info(&dev->dev,
+ "Connected with remote side\n");
+ ctcm_clear_busy(dev);
+ }
+ break;
+ case DEV_STATE_STARTWAIT_TX:
+ if (event == DEV_EVENT_TXUP) {
+ fsm_newstate(fi, DEV_STATE_RUNNING);
+ dev_info(&dev->dev,
+ "Connected with remote side\n");
+ ctcm_clear_busy(dev);
+ }
+ break;
+ case DEV_STATE_STOPWAIT_TX:
+ if (event == DEV_EVENT_RXUP)
+ fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+ break;
+ case DEV_STATE_STOPWAIT_RX:
+ if (event == DEV_EVENT_TXUP)
+ fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
+ break;
+ }
+
+ if (IS_MPC(priv)) {
+ if (event == DEV_EVENT_RXUP)
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_ADD);
+ else
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_ADD);
+ }
+}
+
+/**
+ * Called from device statemachine
+ * when a channel has been shut down.
+ *
+ * fi An instance of an interface statemachine.
+ * event The event that just happened.
+ * arg Generic pointer, cast from struct net_device * upon call.
+ */
+static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+
+ switch (fsm_getstate(fi)) {
+ case DEV_STATE_RUNNING:
+ if (event == DEV_EVENT_TXDOWN)
+ fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
+ else
+ fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
+ break;
+ case DEV_STATE_STARTWAIT_RX:
+ if (event == DEV_EVENT_TXDOWN)
+ fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+ break;
+ case DEV_STATE_STARTWAIT_TX:
+ if (event == DEV_EVENT_RXDOWN)
+ fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
+ break;
+ case DEV_STATE_STOPWAIT_RXTX:
+ if (event == DEV_EVENT_TXDOWN)
+ fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
+ else
+ fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
+ break;
+ case DEV_STATE_STOPWAIT_RX:
+ if (event == DEV_EVENT_RXDOWN)
+ fsm_newstate(fi, DEV_STATE_STOPPED);
+ break;
+ case DEV_STATE_STOPWAIT_TX:
+ if (event == DEV_EVENT_TXDOWN)
+ fsm_newstate(fi, DEV_STATE_STOPPED);
+ break;
+ }
+ if (IS_MPC(priv)) {
+ if (event == DEV_EVENT_RXDOWN)
+ mpc_channel_action(priv->channel[CTCM_READ],
+ CTCM_READ, MPC_CHANNEL_REMOVE);
+ else
+ mpc_channel_action(priv->channel[CTCM_WRITE],
+ CTCM_WRITE, MPC_CHANNEL_REMOVE);
+ }
+}
+
+const fsm_node dev_fsm[] = {
+ { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
+ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
+ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
+ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
+ { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
+ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
+ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
+ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
+ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
+ { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
+ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
+ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
+ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
+ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
+ { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
+ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
+ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
+ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
+ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
+ { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
+ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
+ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
+ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
+ { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
+ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
+ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
+ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
+ { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
+ { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
+ { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
+ { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
+ { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
+ { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
+};
+
+int dev_fsm_len = ARRAY_SIZE(dev_fsm);
+
+/* --- This is the END my friend --- */
+
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
new file mode 100644
index 00000000000..046d077fabb
--- /dev/null
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -0,0 +1,358 @@
+/*
+ * drivers/s390/net/ctcm_fsms.h
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Fritz Elfert (felfert@millenux.com)
+ * Peter Tiedemann (ptiedem@de.ibm.com)
+ * MPC additions :
+ * Belinda Thompson (belindat@us.ibm.com)
+ * Andy Richter (richtera@us.ibm.com)
+ */
+#ifndef _CTCM_FSMS_H_
+#define _CTCM_FSMS_H_
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "fsm.h"
+#include "ctcm_main.h"
+
+/*
+ * Definitions for the channel statemachine(s) for ctc and ctcmpc
+ *
+ * To allow better kerntyping, prefix-less definitions for channel states
+ * and channel events have been replaced:
+ * ch_event... -> ctc_ch_event...
+ * CH_EVENT... -> CTC_EVENT...
+ * ch_state... -> ctc_ch_state...
+ * CH_STATE... -> CTC_STATE...
+ */
+/*
+ * Events of the channel statemachine(s) for ctc and ctcmpc
+ */
+enum ctc_ch_events {
+ /*
+ * Events, representing return code of
+ * I/O operations (ccw_device_start, ccw_device_halt et al.)
+ */
+ CTC_EVENT_IO_SUCCESS,
+ CTC_EVENT_IO_EBUSY,
+ CTC_EVENT_IO_ENODEV,
+ CTC_EVENT_IO_UNKNOWN,
+
+ CTC_EVENT_ATTNBUSY,
+ CTC_EVENT_ATTN,
+ CTC_EVENT_BUSY,
+ /*
+ * Events, representing unit-check
+ */
+ CTC_EVENT_UC_RCRESET,
+ CTC_EVENT_UC_RSRESET,
+ CTC_EVENT_UC_TXTIMEOUT,
+ CTC_EVENT_UC_TXPARITY,
+ CTC_EVENT_UC_HWFAIL,
+ CTC_EVENT_UC_RXPARITY,
+ CTC_EVENT_UC_ZERO,
+ CTC_EVENT_UC_UNKNOWN,
+ /*
+ * Events, representing subchannel-check
+ */
+ CTC_EVENT_SC_UNKNOWN,
+ /*
+ * Events, representing machine checks
+ */
+ CTC_EVENT_MC_FAIL,
+ CTC_EVENT_MC_GOOD,
+ /*
+ * Event, representing normal IRQ
+ */
+ CTC_EVENT_IRQ,
+ CTC_EVENT_FINSTAT,
+ /*
+ * Event, representing timer expiry.
+ */
+ CTC_EVENT_TIMER,
+ /*
+ * Events, representing commands from upper levels.
+ */
+ CTC_EVENT_START,
+ CTC_EVENT_STOP,
+ CTC_NR_EVENTS,
+ /*
+ * additional MPC events
+ */
+ CTC_EVENT_SEND_XID = CTC_NR_EVENTS,
+ CTC_EVENT_RSWEEP_TIMER,
+ /*
+	 * MUST always be the last element!
+ */
+ CTC_MPC_NR_EVENTS,
+};
+
+/*
+ * States of the channel statemachine(s) for ctc and ctcmpc.
+ */
+enum ctc_ch_states {
+ /*
+ * Channel not assigned to any device,
+ * initial state, direction invalid
+ */
+ CTC_STATE_IDLE,
+ /*
+ * Channel assigned but not operating
+ */
+ CTC_STATE_STOPPED,
+ CTC_STATE_STARTWAIT,
+ CTC_STATE_STARTRETRY,
+ CTC_STATE_SETUPWAIT,
+ CTC_STATE_RXINIT,
+ CTC_STATE_TXINIT,
+ CTC_STATE_RX,
+ CTC_STATE_TX,
+ CTC_STATE_RXIDLE,
+ CTC_STATE_TXIDLE,
+ CTC_STATE_RXERR,
+ CTC_STATE_TXERR,
+ CTC_STATE_TERM,
+ CTC_STATE_DTERM,
+ CTC_STATE_NOTOP,
+ CTC_NR_STATES, /* MUST be the last element of non-expanded states */
+ /*
+ * additional MPC states
+ */
+ CH_XID0_PENDING = CTC_NR_STATES,
+ CH_XID0_INPROGRESS,
+ CH_XID7_PENDING,
+ CH_XID7_PENDING1,
+ CH_XID7_PENDING2,
+ CH_XID7_PENDING3,
+ CH_XID7_PENDING4,
+ CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */
+};
+
+extern const char *ctc_ch_event_names[];
+
+extern const char *ctc_ch_state_names[];
+
+void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
+void ctcm_purge_skb_queue(struct sk_buff_head *q);
+void fsm_action_nop(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- non-static actions for ctcm channel statemachine -----
+ *
+ */
+void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- FSM (state/event/action) of the ctcm channel statemachine -----
+ */
+extern const fsm_node ch_fsm[];
+extern int ch_fsm_len;
+
+
+/*
+ * ----- non-static actions for ctcmpc channel statemachine ----
+ *
+ */
+/* shared :
+void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg);
+ */
+void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg);
+
+/*
+ * ----- FSM (state/event/action) of the ctcmpc channel statemachine -----
+ */
+extern const fsm_node ctcmpc_ch_fsm[];
+extern int mpc_ch_fsm_len;
+
+/*
+ * Definitions for the device interface statemachine for ctc and mpc
+ */
+
+/*
+ * States of the device interface statemachine.
+ */
+enum dev_states {
+ DEV_STATE_STOPPED,
+ DEV_STATE_STARTWAIT_RXTX,
+ DEV_STATE_STARTWAIT_RX,
+ DEV_STATE_STARTWAIT_TX,
+ DEV_STATE_STOPWAIT_RXTX,
+ DEV_STATE_STOPWAIT_RX,
+ DEV_STATE_STOPWAIT_TX,
+ DEV_STATE_RUNNING,
+ /*
+	 * MUST always be the last element!
+ */
+ CTCM_NR_DEV_STATES
+};
+
+extern const char *dev_state_names[];
+
+/*
+ * Events of the device interface statemachine.
+ * ctcm and ctcmpc
+ */
+enum dev_events {
+ DEV_EVENT_START,
+ DEV_EVENT_STOP,
+ DEV_EVENT_RXUP,
+ DEV_EVENT_TXUP,
+ DEV_EVENT_RXDOWN,
+ DEV_EVENT_TXDOWN,
+ DEV_EVENT_RESTART,
+ /*
+	 * MUST always be the last element!
+ */
+ CTCM_NR_DEV_EVENTS
+};
+
+extern const char *dev_event_names[];
+
+/*
+ * Actions for the device interface statemachine.
+ * ctc and ctcmpc
+ */
+/*
+static void dev_action_start(fsm_instance * fi, int event, void *arg);
+static void dev_action_stop(fsm_instance * fi, int event, void *arg);
+static void dev_action_restart(fsm_instance *fi, int event, void *arg);
+static void dev_action_chup(fsm_instance * fi, int event, void *arg);
+static void dev_action_chdown(fsm_instance * fi, int event, void *arg);
+*/
+
+/*
+ * The (state/event/action) fsm table of the device interface statemachine.
+ * ctcm and ctcmpc
+ */
+extern const fsm_node dev_fsm[];
+extern int dev_fsm_len;
+
+
+/*
+ * Definitions for the MPC Group statemachine
+ */
+
+/*
+ * MPC Group Station FSM States
+
+State Name When In This State
+====================== =======================================
+MPCG_STATE_RESET Initial State When Driver Loaded
+ We receive and send NOTHING
+
+MPCG_STATE_INOP INOP Received.
+ Group level non-recoverable error
+
+MPCG_STATE_READY XID exchanges for at least 1 write and
+ 1 read channel have completed.
+ Group is ready for data transfer.
+
+States from ctc_mpc_alloc_channel
+==============================================================
+MPCG_STATE_XID2INITW Awaiting XID2(0) Initiation
+ ATTN from other side will start
+ XID negotiations.
+ Y-side protocol only.
+
+MPCG_STATE_XID2INITX XID2(0) negotiations are in progress.
+ At least 1, but not all, XID2(0)'s
+ have been received from partner.
+
+MPCG_STATE_XID7INITW XID2(0) complete
+ No XID2(7)'s have yet been received.
+ XID2(7) negotiations pending.
+
+MPCG_STATE_XID7INITX XID2(7) negotiations in progress.
+ At least 1, but not all, XID2(7)'s
+ have been received from partner.
+
+MPCG_STATE_XID7INITF XID2(7) negotiations complete.
+ Transitioning to READY.
+
+MPCG_STATE_READY Ready for Data Transfer.
+
+
+States from ctc_mpc_establish_connectivity call
+==============================================================
+MPCG_STATE_XID0IOWAIT Initiating XID2(0) negotiations.
+ X-side protocol only.
+ ATTN-BUSY from other side will convert
+ this to Y-side protocol and the
+ ctc_mpc_alloc_channel flow will begin.
+
+MPCG_STATE_XID0IOWAIX XID2(0) negotiations are in progress.
+ At least 1, but not all, XID2(0)'s
+ have been received from partner.
+
+MPCG_STATE_XID7INITI XID2(0) complete
+ No XID2(7)'s have yet been received.
+ XID2(7) negotiations pending.
+
+MPCG_STATE_XID7INITZ XID2(7) negotiations in progress.
+ At least 1, but not all, XID2(7)'s
+ have been received from partner.
+
+MPCG_STATE_XID7INITF XID2(7) negotiations complete.
+ Transitioning to READY.
+
+MPCG_STATE_READY Ready for Data Transfer.
+
+*/
+
+enum mpcg_events {
+ MPCG_EVENT_INOP,
+ MPCG_EVENT_DISCONC,
+ MPCG_EVENT_XID0DO,
+ MPCG_EVENT_XID2,
+ MPCG_EVENT_XID2DONE,
+ MPCG_EVENT_XID7DONE,
+ MPCG_EVENT_TIMER,
+ MPCG_EVENT_DOIO,
+ MPCG_NR_EVENTS,
+};
+
+enum mpcg_states {
+ MPCG_STATE_RESET,
+ MPCG_STATE_INOP,
+ MPCG_STATE_XID2INITW,
+ MPCG_STATE_XID2INITX,
+ MPCG_STATE_XID7INITW,
+ MPCG_STATE_XID7INITX,
+ MPCG_STATE_XID0IOWAIT,
+ MPCG_STATE_XID0IOWAIX,
+ MPCG_STATE_XID7INITI,
+ MPCG_STATE_XID7INITZ,
+ MPCG_STATE_XID7INITF,
+ MPCG_STATE_FLOWC,
+ MPCG_STATE_READY,
+ MPCG_NR_STATES,
+};
+
+#endif
+/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
new file mode 100644
index 00000000000..5cb93a8e340
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.c
@@ -0,0 +1,1892 @@
+/*
+ * drivers/s390/net/ctcm_main.c
+ *
+ * Copyright IBM Corp. 2001, 2009
+ * Author(s):
+ * Original CTC driver(s):
+ * Fritz Elfert (felfert@millenux.com)
+ * Dieter Wellerdiek (wel@de.ibm.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Denis Joseph Barrow (barrow_dj@yahoo.com)
+ * Jochen Roehrig (roehrig@de.ibm.com)
+ * Cornelia Huck <cornelia.huck@de.ibm.com>
+ * MPC additions:
+ * Belinda Thompson (belindat@us.ibm.com)
+ * Andy Richter (richtera@us.ibm.com)
+ * Revived by:
+ * Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <linux/io.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/uaccess.h>
+
+#include <asm/idals.h>
+
+#include "ctcm_fsms.h"
+#include "ctcm_main.h"
+
+/* Some common global variables */
+
+/**
+ * The root device for ctcm group devices
+ */
+static struct device *ctcm_root_dev;
+
+/*
+ * Linked list of all detected channels.
+ */
+struct channel *channels;
+
+/**
+ * Unpack a just-received skb and hand it over to
+ * the upper layers.
+ *
+ * ch The channel where this skb has been received.
+ * pskb The received skb.
+ */
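+/*
+ * Wire format handled below, reconstructed from the parsing code:
+ *
+ *	| block length (__u16) | ll_header | payload | ll_header | payload |
+ */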
+void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
+{
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ __u16 len = *((__u16 *) pskb->data);
+
+ skb_put(pskb, 2 + LL_HEADER_LENGTH);
+ skb_pull(pskb, 2);
+ pskb->dev = dev;
+ pskb->ip_summed = CHECKSUM_UNNECESSARY;
+ while (len > 0) {
+ struct sk_buff *skb;
+ int skblen;
+ struct ll_header *header = (struct ll_header *)pskb->data;
+
+ skb_pull(pskb, LL_HEADER_LENGTH);
+ if ((ch->protocol == CTCM_PROTO_S390) &&
+ (header->type != ETH_P_IP)) {
+ if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
+ ch->logflags |= LOG_FLAG_ILLEGALPKT;
+			/*
+			 * The packet type is only checked when we stick
+			 * strictly to the S/390 (OS/390) protocol, which
+			 * supports IP only. Otherwise any packet type
+			 * is allowed.
+			 */
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): Illegal packet type 0x%04x"
+ " - dropping",
+ CTCM_FUNTAIL, dev->name, header->type);
+ }
+ priv->stats.rx_dropped++;
+ priv->stats.rx_frame_errors++;
+ return;
+ }
+ pskb->protocol = ntohs(header->type);
+ if ((header->length <= LL_HEADER_LENGTH) ||
+ (len <= LL_HEADER_LENGTH)) {
+ if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): Illegal packet size %d(%d,%d)"
+ "- dropping",
+ CTCM_FUNTAIL, dev->name,
+ header->length, dev->mtu, len);
+ ch->logflags |= LOG_FLAG_ILLEGALSIZE;
+ }
+
+ priv->stats.rx_dropped++;
+ priv->stats.rx_length_errors++;
+ return;
+ }
+ header->length -= LL_HEADER_LENGTH;
+ len -= LL_HEADER_LENGTH;
+ if ((header->length > skb_tailroom(pskb)) ||
+ (header->length > len)) {
+ if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): Packet size %d (overrun)"
+ " - dropping", CTCM_FUNTAIL,
+ dev->name, header->length);
+ ch->logflags |= LOG_FLAG_OVERRUN;
+ }
+
+ priv->stats.rx_dropped++;
+ priv->stats.rx_length_errors++;
+ return;
+ }
+ skb_put(pskb, header->length);
+ skb_reset_mac_header(pskb);
+ len -= header->length;
+ skb = dev_alloc_skb(pskb->len);
+ if (!skb) {
+ if (!(ch->logflags & LOG_FLAG_NOMEM)) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): MEMORY allocation error",
+ CTCM_FUNTAIL, dev->name);
+ ch->logflags |= LOG_FLAG_NOMEM;
+ }
+ priv->stats.rx_dropped++;
+ return;
+ }
+ skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+ pskb->len);
+ skb_reset_mac_header(skb);
+ skb->dev = pskb->dev;
+ skb->protocol = pskb->protocol;
+ pskb->ip_summed = CHECKSUM_UNNECESSARY;
+ skblen = skb->len;
+ /*
+ * reset logflags
+ */
+ ch->logflags = 0;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += skblen;
+ netif_rx_ni(skb);
+ if (len > 0) {
+ skb_pull(pskb, header->length);
+ if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
+ CTCM_DBF_DEV_NAME(TRACE, dev,
+ "Overrun in ctcm_unpack_skb");
+ ch->logflags |= LOG_FLAG_OVERRUN;
+ return;
+ }
+ skb_put(pskb, LL_HEADER_LENGTH);
+ }
+ }
+}
+
+/**
+ * Release a specific channel in the channel list.
+ *
+ * ch Pointer to channel struct to be released.
+ */
+static void channel_free(struct channel *ch)
+{
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id);
+ ch->flags &= ~CHANNEL_FLAGS_INUSE;
+ fsm_newstate(ch->fsm, CTC_STATE_IDLE);
+}
+
+/**
+ * Remove a specific channel from the channel list.
+ *
+ * ch Pointer to channel struct to be released.
+ */
+static void channel_remove(struct channel *ch)
+{
+ struct channel **c = &channels;
+ char chid[CTCM_ID_SIZE+1];
+ int ok = 0;
+
+ if (ch == NULL)
+ return;
+ else
+ strncpy(chid, ch->id, CTCM_ID_SIZE);
+
+ channel_free(ch);
+ while (*c) {
+ if (*c == ch) {
+ *c = ch->next;
+ fsm_deltimer(&ch->timer);
+ if (IS_MPC(ch))
+ fsm_deltimer(&ch->sweep_timer);
+
+ kfree_fsm(ch->fsm);
+ clear_normalized_cda(&ch->ccw[4]);
+ if (ch->trans_skb != NULL) {
+ clear_normalized_cda(&ch->ccw[1]);
+ dev_kfree_skb_any(ch->trans_skb);
+ }
+ if (IS_MPC(ch)) {
+ tasklet_kill(&ch->ch_tasklet);
+ tasklet_kill(&ch->ch_disc_tasklet);
+ kfree(ch->discontact_th);
+ }
+ kfree(ch->ccw);
+ kfree(ch->irb);
+ kfree(ch);
+ ok = 1;
+ break;
+ }
+ c = &((*c)->next);
+ }
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL,
+ chid, ok ? "OK" : "failed");
+}
+
+/**
+ * Get a specific channel from the channel list.
+ *
+ * type Type of channel we are interested in.
+ * id Id of channel we are interested in.
+ * direction Direction we want to use this channel for.
+ *
+ * returns Pointer to a channel or NULL if no matching channel available.
+ */
+static struct channel *channel_get(enum ctcm_channel_types type,
+ char *id, int direction)
+{
+ struct channel *ch = channels;
+
+ while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type)))
+ ch = ch->next;
+ if (!ch) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%d, %s, %d) not found in channel list\n",
+ CTCM_FUNTAIL, type, id, direction);
+ } else {
+ if (ch->flags & CHANNEL_FLAGS_INUSE)
+ ch = NULL;
+ else {
+ ch->flags |= CHANNEL_FLAGS_INUSE;
+ ch->flags &= ~CHANNEL_FLAGS_RWMASK;
+ ch->flags |= (direction == CTCM_WRITE)
+ ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ;
+ fsm_newstate(ch->fsm, CTC_STATE_STOPPED);
+ }
+ }
+ return ch;
+}
+
+static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+ if (!IS_ERR(irb))
+ return 0;
+
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN,
+ "irb error %ld on device %s\n",
+ PTR_ERR(irb), dev_name(&cdev->dev));
+
+ switch (PTR_ERR(irb)) {
+ case -EIO:
+ dev_err(&cdev->dev,
+ "An I/O-error occurred on the CTCM device\n");
+ break;
+ case -ETIMEDOUT:
+ dev_err(&cdev->dev,
+ "An adapter hardware operation timed out\n");
+ break;
+ default:
+ dev_err(&cdev->dev,
+ "An error occurred on the adapter hardware\n");
+ }
+ return PTR_ERR(irb);
+}
+
+
+/**
+ * Check sense of a unit check.
+ *
+ * ch The channel, the sense code belongs to.
+ * sense The sense code to inspect.
+ */
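+/*
+ * Overview of the sense-bit handling implemented below (derived directly
+ * from the code; a command reject is only logged, no event is raised):
+ *
+ *	SNS0_INTERVENTION_REQ with 0x01	-> CTC_EVENT_UC_RCRESET
+ *	SNS0_INTERVENTION_REQ		-> CTC_EVENT_UC_RSRESET
+ *	SNS0_EQUIPMENT_CHECK with
+ *	    SNS0_BUS_OUT_CHECK		-> CTC_EVENT_UC_HWFAIL
+ *	SNS0_EQUIPMENT_CHECK		-> CTC_EVENT_UC_RXPARITY
+ *	SNS0_BUS_OUT_CHECK with 0x04	-> CTC_EVENT_UC_TXTIMEOUT
+ *	SNS0_BUS_OUT_CHECK		-> CTC_EVENT_UC_TXPARITY
+ *	sense == 0			-> CTC_EVENT_UC_ZERO
+ *	anything else			-> CTC_EVENT_UC_UNKNOWN
+ */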
+static inline void ccw_unit_check(struct channel *ch, __u8 sense)
+{
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+ "%s(%s): %02x",
+ CTCM_FUNTAIL, ch->id, sense);
+
+ if (sense & SNS0_INTERVENTION_REQ) {
+ if (sense & 0x01) {
+ if (ch->sense_rc != 0x01) {
+ pr_notice(
+ "%s: The communication peer has "
+ "disconnected\n", ch->id);
+ ch->sense_rc = 0x01;
+ }
+ fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch);
+ } else {
+ if (ch->sense_rc != SNS0_INTERVENTION_REQ) {
+ pr_notice(
+ "%s: The remote operating system is "
+ "not available\n", ch->id);
+ ch->sense_rc = SNS0_INTERVENTION_REQ;
+ }
+ fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch);
+ }
+ } else if (sense & SNS0_EQUIPMENT_CHECK) {
+ if (sense & SNS0_BUS_OUT_CHECK) {
+ if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): remote HW error %02x",
+ CTCM_FUNTAIL, ch->id, sense);
+ ch->sense_rc = SNS0_BUS_OUT_CHECK;
+ }
+ fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch);
+ } else {
+ if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): remote read parity error %02x",
+ CTCM_FUNTAIL, ch->id, sense);
+ ch->sense_rc = SNS0_EQUIPMENT_CHECK;
+ }
+ fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch);
+ }
+ } else if (sense & SNS0_BUS_OUT_CHECK) {
+ if (ch->sense_rc != SNS0_BUS_OUT_CHECK) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): BUS OUT error %02x",
+ CTCM_FUNTAIL, ch->id, sense);
+ ch->sense_rc = SNS0_BUS_OUT_CHECK;
+ }
+ if (sense & 0x04) /* data-streaming timeout */
+ fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch);
+ else /* Data-transfer parity error */
+ fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch);
+ } else if (sense & SNS0_CMD_REJECT) {
+ if (ch->sense_rc != SNS0_CMD_REJECT) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): Command rejected",
+ CTCM_FUNTAIL, ch->id);
+ ch->sense_rc = SNS0_CMD_REJECT;
+ }
+ } else if (sense == 0) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): Unit check ZERO",
+ CTCM_FUNTAIL, ch->id);
+ fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch);
+ } else {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): Unit check code %02x unknown",
+ CTCM_FUNTAIL, ch->id, sense);
+ fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch);
+ }
+}
+
+int ctcm_ch_alloc_buffer(struct channel *ch)
+{
+ clear_normalized_cda(&ch->ccw[1]);
+ ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA);
+ if (ch->trans_skb == NULL) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): %s trans_skb allocation error",
+ CTCM_FUNTAIL, ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
+ return -ENOMEM;
+ }
+
+ ch->ccw[1].count = ch->max_bufsize;
+ if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
+ dev_kfree_skb(ch->trans_skb);
+ ch->trans_skb = NULL;
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): %s set norm_cda failed",
+ CTCM_FUNTAIL, ch->id,
+ (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
+ "RX" : "TX");
+ return -ENOMEM;
+ }
+
+ ch->ccw[1].count = 0;
+ ch->trans_skb_data = ch->trans_skb->data;
+ ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ return 0;
+}
+
+/*
+ * Interface API for upper network layers
+ */
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ * dev Pointer to interface struct.
+ *
+ * returns 0 on success, -ERRNO on failure. (Never fails.)
+ */
+int ctcm_open(struct net_device *dev)
+{
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+ if (!IS_MPC(priv))
+ fsm_event(priv->fsm, DEV_EVENT_START, dev);
+ return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ * dev Pointer to interface struct.
+ *
+ * returns 0 on success, -ERRNO on failure. (Never fails.)
+ */
+int ctcm_close(struct net_device *dev)
+{
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ CTCMY_DBF_DEV_NAME(SETUP, dev, "");
+ if (!IS_MPC(priv))
+ fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+ return 0;
+}
+
+
+/**
+ * Transmit a packet.
+ * This is a helper function for ctcm_tx().
+ *
+ * ch Channel to be used for sending.
+ * skb Pointer to struct sk_buff of packet to send.
+ * The linklevel header has already been set up
+ * by ctcm_tx().
+ *
+ * returns 0 on success, a negative error code (e.g. -EBUSY, -ENOMEM)
+ * otherwise.
+ */
+static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+ unsigned long saveflags;
+ struct ll_header header;
+ int rc = 0;
+ __u16 block_len;
+ int ccw_idx;
+ struct sk_buff *nskb;
+ unsigned long hi;
+
+ /* we need to acquire the lock for testing the state
+ * otherwise we can have an IRQ changing the state to
+ * TXIDLE after the test but before acquiring the lock.
+ */
+ spin_lock_irqsave(&ch->collect_lock, saveflags);
+ if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
+ int l = skb->len + LL_HEADER_LENGTH;
+
+ if (ch->collect_len + l > ch->max_bufsize - 2) {
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+ return -EBUSY;
+ } else {
+ atomic_inc(&skb->users);
+ header.length = l;
+ header.type = skb->protocol;
+ header.unused = 0;
+ memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
+ LL_HEADER_LENGTH);
+ skb_queue_tail(&ch->collect_queue, skb);
+ ch->collect_len += l;
+ }
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+ /*
+	 * Protect skb against being freed by upper
+ * layers.
+ */
+ atomic_inc(&skb->users);
+ ch->prof.txlen += skb->len;
+ header.length = skb->len + LL_HEADER_LENGTH;
+ header.type = skb->protocol;
+ header.unused = 0;
+ memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
+ block_len = skb->len + 2;
+ *((__u16 *)skb_push(skb, 2)) = block_len;
+
+ /*
+ * IDAL support in CTCM is broken, so we have to
+ * care about skb's above 2G ourselves.
+ */
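+	/*
+	 * The shift below is non-zero when the buffer, including the
+	 * pushed link-level header, extends beyond the 2 GB line that a
+	 * 31-bit CCW data address can reach; the data is then copied
+	 * into a freshly allocated GFP_DMA buffer below 2 GB.
+	 */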
+ hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
+ if (hi) {
+ nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ if (!nskb) {
+ atomic_dec(&skb->users);
+ skb_pull(skb, LL_HEADER_LENGTH + 2);
+ ctcm_clear_busy(ch->netdev);
+ return -ENOMEM;
+ } else {
+ memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
+ atomic_inc(&nskb->users);
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ skb = nskb;
+ }
+ }
+
+ ch->ccw[4].count = block_len;
+ if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+ /*
+ * idal allocation failed, try via copying to
+ * trans_skb. trans_skb usually has a pre-allocated
+ * idal.
+ */
+ if (ctcm_checkalloc_buffer(ch)) {
+ /*
+ * Remove our header. It gets added
+ * again on retransmit.
+ */
+ atomic_dec(&skb->users);
+ skb_pull(skb, LL_HEADER_LENGTH + 2);
+ ctcm_clear_busy(ch->netdev);
+ return -ENOMEM;
+ }
+
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+ ch->ccw[1].count = skb->len;
+ skb_copy_from_linear_data(skb,
+ skb_put(ch->trans_skb, skb->len), skb->len);
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ ccw_idx = 0;
+ } else {
+ skb_queue_tail(&ch->io_queue, skb);
+ ccw_idx = 3;
+ }
+ ch->retry = 0;
+ fsm_newstate(ch->fsm, CTC_STATE_TX);
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+ (unsigned long)ch, 0xff, 0);
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ if (ccw_idx == 3)
+ ch->prof.doios_single++;
+ if (rc != 0) {
+ fsm_deltimer(&ch->timer);
+ ctcm_ccw_check_rc(ch, rc, "single skb TX");
+ if (ccw_idx == 3)
+ skb_dequeue_tail(&ch->io_queue);
+ /*
+ * Remove our header. It gets added
+ * again on retransmit.
+ */
+ skb_pull(skb, LL_HEADER_LENGTH + 2);
+ } else if (ccw_idx == 0) {
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
+ }
+done:
+ ctcm_clear_busy(ch->netdev);
+ return rc;
+}
+
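+/*
+ * Queue a TH sweep request on the write channel (description derived
+ * from the code): a sweep carries the last TH sequence number sent, and
+ * the sweep exchange must complete for all read channels of the group
+ * before the sequence counters may be reset; the actual transmission is
+ * done by ctcmpc_chx_send_sweep() in ctcm_fsms.c.
+ */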
+static void ctcmpc_send_sweep_req(struct channel *rch)
+{
+ struct net_device *dev = rch->netdev;
+ struct ctcm_priv *priv;
+ struct mpc_group *grp;
+ struct th_sweep *header;
+ struct sk_buff *sweep_skb;
+ struct channel *ch;
+ /* int rc = 0; */
+
+ priv = dev->ml_priv;
+ grp = priv->mpcg;
+ ch = priv->channel[CTCM_WRITE];
+
+ /* sweep processing is not complete until response and request */
+ /* has completed for all read channels in group */
+ if (grp->in_sweep == 0) {
+ grp->in_sweep = 1;
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+ }
+
+ sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
+
+ if (sweep_skb == NULL) {
+ /* rc = -ENOMEM; */
+ goto nomem;
+ }
+
+ header = kmalloc(TH_SWEEP_LENGTH, gfp_type());
+
+ if (!header) {
+ dev_kfree_skb_any(sweep_skb);
+ /* rc = -ENOMEM; */
+ goto nomem;
+ }
+
+	header->th.th_seg = 0x00;
+ header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */
+ header->th.th_blk_flag = 0x00;
+ header->th.th_is_xid = 0x00;
+ header->th.th_seq_num = 0x00;
+ header->sw.th_last_seq = ch->th_seq_num;
+
+ memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
+
+ kfree(header);
+
+ dev->trans_start = jiffies;
+ skb_queue_tail(&ch->sweep_queue, sweep_skb);
+
+ fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
+
+ return;
+
+nomem:
+ grp->in_sweep = 0;
+ ctcm_clear_busy(dev);
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+ return;
+}
+
+/*
+ * MPC mode version of transmit_skb
+ */
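+/*
+ * Outbound frame layout, reconstructed from the header handling below
+ * (the TH header carrying the sequence number is prepended last, and
+ * pdu_flag marks the first and last PDU of a frame):
+ *
+ *	| th_header | pdu_header | data | ... | pdu_header | data |
+ */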
+static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
+{
+ struct pdu *p_header;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct th_header *header;
+ struct sk_buff *nskb;
+ int rc = 0;
+ int ccw_idx;
+ unsigned long hi;
+ unsigned long saveflags = 0; /* avoids compiler warning */
+
+ CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n",
+ __func__, dev->name, smp_processor_id(), ch,
+ ch->id, fsm_getstate_str(ch->fsm));
+
+ if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
+ spin_lock_irqsave(&ch->collect_lock, saveflags);
+ atomic_inc(&skb->users);
+ p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
+
+ if (!p_header) {
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+ goto nomem_exit;
+ }
+
+ p_header->pdu_offset = skb->len;
+ p_header->pdu_proto = 0x01;
+ p_header->pdu_flag = 0x00;
+ if (skb->protocol == ntohs(ETH_P_SNAP)) {
+ p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+ } else {
+ p_header->pdu_flag |= PDU_FIRST;
+ }
+ p_header->pdu_seq = 0;
+ memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header,
+ PDU_HEADER_LENGTH);
+
+	CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x\n"
+ "pdu header and data for up to 32 bytes:\n",
+ __func__, dev->name, skb->len);
+ CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+ skb_queue_tail(&ch->collect_queue, skb);
+ ch->collect_len += skb->len;
+ kfree(p_header);
+
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+ goto done;
+ }
+
+ /*
+	 * Protect skb against being freed by upper
+ * layers.
+ */
+ atomic_inc(&skb->users);
+
+ /*
+ * IDAL support in CTCM is broken, so we have to
+ * care about skb's above 2G ourselves.
+ */
+ hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31;
+ if (hi) {
+ nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ if (!nskb) {
+ goto nomem_exit;
+ } else {
+ memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
+ atomic_inc(&nskb->users);
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ skb = nskb;
+ }
+ }
+
+ p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
+
+ if (!p_header)
+ goto nomem_exit;
+
+ p_header->pdu_offset = skb->len;
+ p_header->pdu_proto = 0x01;
+ p_header->pdu_flag = 0x00;
+ p_header->pdu_seq = 0;
+ if (skb->protocol == ntohs(ETH_P_SNAP)) {
+ p_header->pdu_flag |= PDU_FIRST | PDU_CNTL;
+ } else {
+ p_header->pdu_flag |= PDU_FIRST;
+ }
+ memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH);
+
+ kfree(p_header);
+
+ if (ch->collect_len > 0) {
+ spin_lock_irqsave(&ch->collect_lock, saveflags);
+ skb_queue_tail(&ch->collect_queue, skb);
+ ch->collect_len += skb->len;
+ skb = skb_dequeue(&ch->collect_queue);
+ ch->collect_len -= skb->len;
+ spin_unlock_irqrestore(&ch->collect_lock, saveflags);
+ }
+
+ p_header = (struct pdu *)skb->data;
+ p_header->pdu_flag |= PDU_LAST;
+
+ ch->prof.txlen += skb->len - PDU_HEADER_LENGTH;
+
+ header = kmalloc(TH_HEADER_LENGTH, gfp_type());
+ if (!header)
+ goto nomem_exit;
+
+ header->th_seg = 0x00;
+ header->th_ch_flag = TH_HAS_PDU; /* Normal data */
+ header->th_blk_flag = 0x00;
+ header->th_is_xid = 0x00; /* Just data here */
+ ch->th_seq_num++;
+ header->th_seq_num = ch->th_seq_num;
+
+ CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" ,
+ __func__, dev->name, ch->th_seq_num);
+
+ /* put the TH on the packet */
+ memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH);
+
+ kfree(header);
+
+	CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n"
+			"- pdu header and data for up to 32 bytes "
+			"sent to vtam:\n",
+ __func__, dev->name, skb->len);
+ CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+ ch->ccw[4].count = skb->len;
+ if (set_normalized_cda(&ch->ccw[4], skb->data)) {
+ /*
+ * idal allocation failed, try via copying to trans_skb.
+ * trans_skb usually has a pre-allocated idal.
+ */
+ if (ctcm_checkalloc_buffer(ch)) {
+ /*
+ * Remove our header.
+ * It gets added again on retransmit.
+ */
+ goto nomem_exit;
+ }
+
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+ ch->ccw[1].count = skb->len;
+ memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
+ atomic_dec(&skb->users);
+ dev_kfree_skb_irq(skb);
+ ccw_idx = 0;
+ CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
+ "up to 32 bytes sent to vtam:\n",
+ __func__, dev->name, ch->trans_skb->len);
+ CTCM_D3_DUMP((char *)ch->trans_skb->data,
+ min_t(int, 32, ch->trans_skb->len));
+ } else {
+ skb_queue_tail(&ch->io_queue, skb);
+ ccw_idx = 3;
+ }
+ ch->retry = 0;
+ fsm_newstate(ch->fsm, CTC_STATE_TX);
+ fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
+
+ if (do_debug_ccw)
+ ctcmpc_dumpit((char *)&ch->ccw[ccw_idx],
+ sizeof(struct ccw1) * 3);
+
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ ch->prof.send_stamp = current_kernel_time(); /* xtime */
+ rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
+ (unsigned long)ch, 0xff, 0);
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+ if (ccw_idx == 3)
+ ch->prof.doios_single++;
+ if (rc != 0) {
+ fsm_deltimer(&ch->timer);
+ ctcm_ccw_check_rc(ch, rc, "single skb TX");
+ if (ccw_idx == 3)
+ skb_dequeue_tail(&ch->io_queue);
+ } else if (ccw_idx == 0) {
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
+ }
+	if (ch->th_seq_num > 0xf0000000)	/* threshold chosen arbitrarily */
+ ctcmpc_send_sweep_req(ch);
+
+ goto done;
+nomem_exit:
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT,
+ "%s(%s): MEMORY allocation ERROR\n",
+ CTCM_FUNTAIL, ch->id);
+ rc = -ENOMEM;
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
+done:
+ CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name);
+ return rc;
+}
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ * skb Pointer to buffer containing the packet.
+ * dev Pointer to interface struct.
+ *
+ * returns NETDEV_TX_OK if the packet was consumed or dropped,
+ *         NETDEV_TX_BUSY if it could not be handled now.
+ * Note: If we return NETDEV_TX_BUSY, the packet is requeued
+ *       by the generic network layer and retried, not freed.
+ */
+/* first merge version - leaving both functions separated */
+static int ctcm_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ctcm_priv *priv = dev->ml_priv;
+
+ if (skb == NULL) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): NULL sk_buff passed",
+ CTCM_FUNTAIL, dev->name);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s(%s): Got sk_buff with head room < %ld bytes",
+ CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2);
+ dev_kfree_skb(skb);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ /*
+ * If channels are not running, try to restart them
+ * and throw away packet.
+ */
+ if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) {
+ fsm_event(priv->fsm, DEV_EVENT_START, dev);
+ dev_kfree_skb(skb);
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ priv->stats.tx_carrier_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ if (ctcm_test_and_set_busy(dev))
+ return NETDEV_TX_BUSY;
+
+ dev->trans_start = jiffies;
+ if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0)
+ return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
+}
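+
+/*
+ * Illustrative sketch, not part of the driver: this is roughly how the
+ * qdisc layer treats the two return values of an ndo_start_xmit handler
+ * such as ctcm_tx above (the sketch names nothing real):
+ *
+ *	rc = ops->ndo_start_xmit(skb, dev);
+ *	if (rc == NETDEV_TX_OK)
+ *		the driver owns the skb now and frees it itself;
+ *	if (rc == NETDEV_TX_BUSY)
+ *		the skb is requeued and transmission is retried later.
+ */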
+
+/* unmerged MPC variant of ctcm_tx */
+static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int len = 0;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct sk_buff *newskb = NULL;
+
+ /*
+ * Some sanity checks ...
+ */
+ if (skb == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): NULL sk_buff passed",
+ CTCM_FUNTAIL, dev->name);
+ priv->stats.tx_dropped++;
+ goto done;
+ }
+ if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+ "%s(%s): Got sk_buff with head room < %ld bytes",
+ CTCM_FUNTAIL, dev->name,
+ TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
+
+ CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len));
+
+ len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+ newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA);
+
+ if (!newskb) {
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
+ "%s: %s: __dev_alloc_skb failed",
+ __func__, dev->name);
+
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ priv->stats.tx_carrier_errors++;
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ }
+ newskb->protocol = skb->protocol;
+ skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
+ memcpy(skb_put(newskb, skb->len), skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ skb = newskb;
+ }
+
+ /*
+ * If channels are not running,
+ * notify anybody about a link failure and throw
+ * away packet.
+ */
+ if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) ||
+ (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
+ dev_kfree_skb_any(skb);
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): inactive MPCGROUP - dropped",
+ CTCM_FUNTAIL, dev->name);
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ priv->stats.tx_carrier_errors++;
+ goto done;
+ }
+
+ if (ctcm_test_and_set_busy(dev)) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): device busy - dropped",
+ CTCM_FUNTAIL, dev->name);
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ priv->stats.tx_carrier_errors++;
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ }
+
+ dev->trans_start = jiffies;
+ if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): device error - dropped",
+ CTCM_FUNTAIL, dev->name);
+ dev_kfree_skb_any(skb);
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ priv->stats.tx_carrier_errors++;
+ ctcm_clear_busy(dev);
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ }
+ ctcm_clear_busy(dev);
+done:
+ if (do_debug)
+ MPC_DBF_DEV_NAME(TRACE, dev, "exit");
+
+	return NETDEV_TX_OK;	/* skb freeing is handled in this function */
+}
+
+/**
+ * Sets MTU of an interface.
+ *
+ * dev Pointer to interface struct.
+ * new_mtu The new MTU to use for this interface.
+ *
+ * returns 0 on success, -EINVAL if MTU is out of valid range.
+ *		(valid range is 576 .. 65527). If VM is on the
+ *		remote side, the maximum MTU is 32760; this is
+ *		not checked here.
+ */
+static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct ctcm_priv *priv;
+ int max_bufsize;
+
+ if (new_mtu < 576 || new_mtu > 65527)
+ return -EINVAL;
+
+ priv = dev->ml_priv;
+ max_bufsize = priv->channel[CTCM_READ]->max_bufsize;
+
+ if (IS_MPC(priv)) {
+ if (new_mtu > max_bufsize - TH_HEADER_LENGTH)
+ return -EINVAL;
+ dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+ } else {
+ if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2)
+ return -EINVAL;
+ dev->hard_header_len = LL_HEADER_LENGTH + 2;
+ }
+ dev->mtu = new_mtu;
+ return 0;
+}
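+
+/*
+ * Worked example (illustrative): for a non-MPC device with the default
+ * buffer size CTCM_BUFSIZE_DEFAULT = 32768 and LL_HEADER_LENGTH =
+ * sizeof(struct ll_header) = 6, the largest MTU accepted above is
+ *
+ *	32768 - 6 - 2 = 32760
+ *
+ * which is exactly the VM limit mentioned in the comment.
+ */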
+
+/**
+ * Returns interface statistics of a device.
+ *
+ * dev Pointer to interface struct.
+ *
+ * returns Pointer to stats struct of this interface.
+ */
+static struct net_device_stats *ctcm_stats(struct net_device *dev)
+{
+ return &((struct ctcm_priv *)dev->ml_priv)->stats;
+}
+
+static void ctcm_free_netdevice(struct net_device *dev)
+{
+ struct ctcm_priv *priv;
+ struct mpc_group *grp;
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+ "%s(%s)", CTCM_FUNTAIL, dev->name);
+ priv = dev->ml_priv;
+ if (priv) {
+ grp = priv->mpcg;
+ if (grp) {
+ if (grp->fsm)
+ kfree_fsm(grp->fsm);
+ if (grp->xid_skb)
+ dev_kfree_skb(grp->xid_skb);
+ if (grp->rcvd_xid_skb)
+ dev_kfree_skb(grp->rcvd_xid_skb);
+ tasklet_kill(&grp->mpc_tasklet2);
+ kfree(grp);
+ priv->mpcg = NULL;
+ }
+ if (priv->fsm) {
+ kfree_fsm(priv->fsm);
+ priv->fsm = NULL;
+ }
+ kfree(priv->xid);
+ priv->xid = NULL;
+ /*
+ * Note: kfree(priv); is done in "opposite" function of
+ * allocator function probe_device which is remove_device.
+ */
+ }
+#ifdef MODULE
+ free_netdev(dev);
+#endif
+}
+
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
+
+static const struct net_device_ops ctcm_netdev_ops = {
+ .ndo_open = ctcm_open,
+ .ndo_stop = ctcm_close,
+ .ndo_get_stats = ctcm_stats,
+ .ndo_change_mtu = ctcm_change_mtu,
+ .ndo_start_xmit = ctcm_tx,
+};
+
+static const struct net_device_ops ctcm_mpc_netdev_ops = {
+ .ndo_open = ctcm_open,
+ .ndo_stop = ctcm_close,
+ .ndo_get_stats = ctcm_stats,
+ .ndo_change_mtu = ctcm_change_mtu,
+ .ndo_start_xmit = ctcmpc_tx,
+};
+
+static void ctcm_dev_setup(struct net_device *dev)
+{
+ dev->type = ARPHRD_SLIP;
+ dev->tx_queue_len = 100;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+}
+
+/*
+ * Initialize everything of the net device except the name and the
+ * channel structs.
+ */
+static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
+{
+ struct net_device *dev;
+ struct mpc_group *grp;
+ if (!priv)
+ return NULL;
+
+ if (IS_MPC(priv))
+ dev = alloc_netdev(0, MPC_DEVICE_GENE, ctcm_dev_setup);
+ else
+ dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);
+
+ if (!dev) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
+ "%s: MEMORY allocation ERROR",
+ CTCM_FUNTAIL);
+ return NULL;
+ }
+ dev->ml_priv = priv;
+ priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
+ CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS,
+ dev_fsm, dev_fsm_len, GFP_KERNEL);
+ if (priv->fsm == NULL) {
+ CTCMY_DBF_DEV(SETUP, dev, "init_fsm error");
+ free_netdev(dev);
+ return NULL;
+ }
+ fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
+ fsm_settimer(priv->fsm, &priv->restart_timer);
+
+ if (IS_MPC(priv)) {
+ /* MPC Group Initializations */
+ grp = ctcmpc_init_mpc_group(priv);
+ if (grp == NULL) {
+ MPC_DBF_DEV(SETUP, dev, "init_mpc_group error");
+ free_netdev(dev);
+ return NULL;
+ }
+ tasklet_init(&grp->mpc_tasklet2,
+ mpc_group_ready, (unsigned long)dev);
+ dev->mtu = MPC_BUFSIZE_DEFAULT -
+ TH_HEADER_LENGTH - PDU_HEADER_LENGTH;
+
+ dev->netdev_ops = &ctcm_mpc_netdev_ops;
+ dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+ priv->buffer_size = MPC_BUFSIZE_DEFAULT;
+ } else {
+ dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2;
+ dev->netdev_ops = &ctcm_netdev_ops;
+ dev->hard_header_len = LL_HEADER_LENGTH + 2;
+ }
+
+ CTCMY_DBF_DEV(SETUP, dev, "finished");
+
+ return dev;
+}
+
+/**
+ * Main IRQ handler.
+ *
+ * cdev The ccw_device the interrupt is for.
+ * intparm interruption parameter.
+ * irb interruption response block.
+ */
+static void ctcm_irq_handler(struct ccw_device *cdev,
+ unsigned long intparm, struct irb *irb)
+{
+ struct channel *ch;
+ struct net_device *dev;
+ struct ctcm_priv *priv;
+ struct ccwgroup_device *cgdev;
+ int cstat;
+ int dstat;
+
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
+ "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev));
+
+ if (ctcm_check_irb_error(cdev, irb))
+ return;
+
+ cgdev = dev_get_drvdata(&cdev->dev);
+
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
+ /* Check for unsolicited interrupts. */
+ if (cgdev == NULL) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR,
+ "%s(%s) unsolicited irq: c-%02x d-%02x\n",
+ CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat);
+ dev_warn(&cdev->dev,
+ "The adapter received a non-specific IRQ\n");
+ return;
+ }
+
+ priv = dev_get_drvdata(&cgdev->dev);
+
+ /* Try to extract channel from driver data. */
+ if (priv->channel[CTCM_READ]->cdev == cdev)
+ ch = priv->channel[CTCM_READ];
+ else if (priv->channel[CTCM_WRITE]->cdev == cdev)
+ ch = priv->channel[CTCM_WRITE];
+ else {
+ dev_err(&cdev->dev,
+ "%s: Internal error: Can't determine channel for "
+ "interrupt device %s\n",
+ __func__, dev_name(&cdev->dev));
+ /* Explain: inconsistent internal structures */
+ return;
+ }
+
+ dev = ch->netdev;
+ if (dev == NULL) {
+ dev_err(&cdev->dev,
+ "%s Internal error: net_device is NULL, ch = 0x%p\n",
+ __func__, ch);
+ /* Explain: inconsistent internal structures */
+ return;
+ }
+
+ /* Copy interruption response block. */
+ memcpy(ch->irb, irb, sizeof(struct irb));
+
+ /* Issue error message and return on subchannel error code */
+ if (irb->scsw.cmd.cstat) {
+ fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch);
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): sub-ch check %s: cs=%02x ds=%02x",
+ CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat);
+ dev_warn(&cdev->dev,
+ "A check occurred on the subchannel\n");
+ return;
+ }
+
+ /* Check the reason-code of a unit check */
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
+ if ((irb->ecw[0] & ch->sense_rc) == 0)
+ /* print it only once */
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN,
+ "%s(%s): sense=%02x, ds=%02x",
+ CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat);
+ ccw_unit_check(ch, irb->ecw[0]);
+ return;
+ }
+ if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) {
+ if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
+ fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch);
+ else
+ fsm_event(ch->fsm, CTC_EVENT_BUSY, ch);
+ return;
+ }
+ if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+ fsm_event(ch->fsm, CTC_EVENT_ATTN, ch);
+ return;
+ }
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
+ (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
+ (irb->scsw.cmd.stctl ==
+ (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))
+ fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
+ else
+ fsm_event(ch->fsm, CTC_EVENT_IRQ, ch);
+
+}
+
+/**
+ * Add ctcm specific attributes.
+ * Add ctcm private data.
+ *
+ * cgdev pointer to ccwgroup_device just added
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_probe_device(struct ccwgroup_device *cgdev)
+{
+ struct ctcm_priv *priv;
+ int rc;
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+ "%s %p",
+ __func__, cgdev);
+
+ if (!get_device(&cgdev->dev))
+ return -ENODEV;
+
+ priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL);
+ if (!priv) {
+ CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
+ "%s: memory allocation failure",
+ CTCM_FUNTAIL);
+ put_device(&cgdev->dev);
+ return -ENOMEM;
+ }
+
+ rc = ctcm_add_files(&cgdev->dev);
+ if (rc) {
+ kfree(priv);
+ put_device(&cgdev->dev);
+ return rc;
+ }
+ priv->buffer_size = CTCM_BUFSIZE_DEFAULT;
+ cgdev->cdev[0]->handler = ctcm_irq_handler;
+ cgdev->cdev[1]->handler = ctcm_irq_handler;
+ dev_set_drvdata(&cgdev->dev, priv);
+
+ return 0;
+}
+
+/**
+ * Add a new channel to the list of channels.
+ * Keeps the channel list sorted.
+ *
+ * cdev The ccw_device to be added.
+ * type The type class of the new channel.
+ * priv Points to the private data of the ccwgroup_device.
+ *
+ * returns 0 on success, !0 on error.
+ */
+static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
+ struct ctcm_priv *priv)
+{
+ struct channel **c = &channels;
+ struct channel *ch;
+ int ccw_num;
+ int rc = 0;
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+ "%s(%s), type %d, proto %d",
+ __func__, dev_name(&cdev->dev), type, priv->protocol);
+
+ ch = kzalloc(sizeof(struct channel), GFP_KERNEL);
+ if (ch == NULL)
+ return -ENOMEM;
+
+ ch->protocol = priv->protocol;
+ if (IS_MPC(priv)) {
+ ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type());
+ if (ch->discontact_th == NULL)
+ goto nomem_return;
+
+ ch->discontact_th->th_blk_flag = TH_DISCONTACT;
+ tasklet_init(&ch->ch_disc_tasklet,
+ mpc_action_send_discontact, (unsigned long)ch);
+
+ tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch);
+ ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35);
+ ccw_num = 17;
+ } else
+ ccw_num = 8;
+
+ ch->ccw = kzalloc(ccw_num * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
+ if (ch->ccw == NULL)
+ goto nomem_return;
+
+ ch->cdev = cdev;
+ snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
+ ch->type = type;
+
+ /**
+ * "static" ccws are used in the following way:
+ *
+ * ccw[0..2] (Channel program for generic I/O):
+ * 0: prepare
+ * 1: read or write (depending on direction) with fixed
+ * buffer (idal allocated once when buffer is allocated)
+ * 2: nop
+ * ccw[3..5] (Channel program for direct write of packets)
+ * 3: prepare
+ * 4: write (idal allocated on every write).
+ * 5: nop
+ * ccw[6..7] (Channel program for initial channel setup):
+ * 6: set extended mode
+ * 7: nop
+ *
+ * ch->ccw[0..5] are initialized in ch_action_start because
+ * the channel's direction is yet unknown here.
+ *
+ * ccws used for xid2 negotiations
+	 * ch->ccw[8..14] are used for the XID exchange, either
+ * X side XID2 Processing
+ * 8: write control
+ * 9: write th
+ * 10: write XID
+ * 11: read th from secondary
+ * 12: read XID from secondary
+ * 13: read 4 byte ID
+ * 14: nop
+ * Y side XID Processing
+ * 8: sense
+ * 9: read th
+ * 10: read XID
+ * 11: write th
+ * 12: write XID
+ * 13: write 4 byte ID
+ * 14: nop
+ *
+ * ccws used for double noop due to VM timing issues
+ * which result in unrecoverable Busy on channel
+ * 15: nop
+ * 16: nop
+ */
+ ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED;
+ ch->ccw[6].flags = CCW_FLAG_SLI;
+
+ ch->ccw[7].cmd_code = CCW_CMD_NOOP;
+ ch->ccw[7].flags = CCW_FLAG_SLI;
+
+ if (IS_MPC(priv)) {
+ ch->ccw[15].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[15].count = TH_HEADER_LENGTH;
+ ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
+
+ ch->ccw[16].cmd_code = CCW_CMD_NOOP;
+ ch->ccw[16].flags = CCW_FLAG_SLI;
+
+ ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
+ ctc_ch_event_names, CTC_MPC_NR_STATES,
+ CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
+ mpc_ch_fsm_len, GFP_KERNEL);
+ } else {
+ ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
+ ctc_ch_event_names, CTC_NR_STATES,
+ CTC_NR_EVENTS, ch_fsm,
+ ch_fsm_len, GFP_KERNEL);
+ }
+ if (ch->fsm == NULL)
+ goto free_return;
+
+ fsm_newstate(ch->fsm, CTC_STATE_IDLE);
+
+ ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL);
+ if (ch->irb == NULL)
+ goto nomem_return;
+
+ while (*c && ctcm_less_than((*c)->id, ch->id))
+ c = &(*c)->next;
+
+ if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) {
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+ "%s (%s) already in list, using old entry",
+ __func__, (*c)->id);
+
+ goto free_return;
+ }
+
+ spin_lock_init(&ch->collect_lock);
+
+ fsm_settimer(ch->fsm, &ch->timer);
+ skb_queue_head_init(&ch->io_queue);
+ skb_queue_head_init(&ch->collect_queue);
+
+ if (IS_MPC(priv)) {
+ fsm_settimer(ch->fsm, &ch->sweep_timer);
+ skb_queue_head_init(&ch->sweep_queue);
+ }
+ ch->next = *c;
+ *c = ch;
+ return 0;
+
+nomem_return:
+ rc = -ENOMEM;
+
+free_return: /* note that all channel pointers are 0 or valid */
+ kfree(ch->ccw);
+ kfree(ch->discontact_th);
+ kfree_fsm(ch->fsm);
+ kfree(ch->irb);
+ kfree(ch);
+ return rc;
+}
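+
+/*
+ * Illustrative note, not additional driver code: the sorted insert in
+ * add_channel() uses the pointer-to-pointer idiom, which needs no
+ * special case for an empty list or an insert at the head:
+ *
+ *	struct channel **c = &channels;
+ *
+ *	while (*c && ctcm_less_than((*c)->id, ch->id))
+ *		c = &(*c)->next;
+ *	ch->next = *c;
+ *	*c = ch;
+ */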
+
+/*
+ * Return type of a detected device.
+ */
+static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
+{
+ enum ctcm_channel_types type;
+ type = (enum ctcm_channel_types)id->driver_info;
+
+ if (type == ctcm_channel_type_ficon)
+ type = ctcm_channel_type_escon;
+
+ return type;
+}
+
+/**
+ * Setup an interface.
+ *
+ * cgdev Device to be setup.
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_new_device(struct ccwgroup_device *cgdev)
+{
+ char read_id[CTCM_ID_SIZE];
+ char write_id[CTCM_ID_SIZE];
+ int direction;
+ enum ctcm_channel_types type;
+ struct ctcm_priv *priv;
+ struct net_device *dev;
+ struct ccw_device *cdev0;
+ struct ccw_device *cdev1;
+ struct channel *readc;
+ struct channel *writec;
+ int ret;
+ int result;
+
+ priv = dev_get_drvdata(&cgdev->dev);
+ if (!priv) {
+ result = -ENODEV;
+ goto out_err_result;
+ }
+
+ cdev0 = cgdev->cdev[0];
+ cdev1 = cgdev->cdev[1];
+
+ type = get_channel_type(&cdev0->id);
+
+ snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev));
+ snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
+
+ ret = add_channel(cdev0, type, priv);
+ if (ret) {
+ result = ret;
+ goto out_err_result;
+ }
+ ret = add_channel(cdev1, type, priv);
+ if (ret) {
+ result = ret;
+ goto out_remove_channel1;
+ }
+
+ ret = ccw_device_set_online(cdev0);
+ if (ret != 0) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s(%s) set_online rc=%d",
+ CTCM_FUNTAIL, read_id, ret);
+ result = -EIO;
+ goto out_remove_channel2;
+ }
+
+ ret = ccw_device_set_online(cdev1);
+ if (ret != 0) {
+ CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
+ "%s(%s) set_online rc=%d",
+ CTCM_FUNTAIL, write_id, ret);
+
+ result = -EIO;
+ goto out_ccw1;
+ }
+
+ dev = ctcm_init_netdevice(priv);
+ if (dev == NULL) {
+ result = -ENODEV;
+ goto out_ccw2;
+ }
+
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+ priv->channel[direction] =
+ channel_get(type, direction == CTCM_READ ?
+ read_id : write_id, direction);
+ if (priv->channel[direction] == NULL) {
+ if (direction == CTCM_WRITE)
+ channel_free(priv->channel[CTCM_READ]);
+ goto out_dev;
+ }
+ priv->channel[direction]->netdev = dev;
+ priv->channel[direction]->protocol = priv->protocol;
+ priv->channel[direction]->max_bufsize = priv->buffer_size;
+ }
+ /* sysfs magic */
+ SET_NETDEV_DEV(dev, &cgdev->dev);
+
+ if (register_netdev(dev)) {
+ result = -ENODEV;
+ goto out_dev;
+ }
+
+ if (ctcm_add_attributes(&cgdev->dev)) {
+ result = -ENODEV;
+ goto out_unregister;
+ }
+
+ strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
+
+ dev_info(&dev->dev,
+ "setup OK : r/w = %s/%s, protocol : %d\n",
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+ "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name,
+ priv->channel[CTCM_READ]->id,
+ priv->channel[CTCM_WRITE]->id, priv->protocol);
+
+ return 0;
+out_unregister:
+ unregister_netdev(dev);
+out_dev:
+ ctcm_free_netdevice(dev);
+out_ccw2:
+ ccw_device_set_offline(cgdev->cdev[1]);
+out_ccw1:
+ ccw_device_set_offline(cgdev->cdev[0]);
+out_remove_channel2:
+ readc = channel_get(type, read_id, CTCM_READ);
+ channel_remove(readc);
+out_remove_channel1:
+ writec = channel_get(type, write_id, CTCM_WRITE);
+ channel_remove(writec);
+out_err_result:
+ return result;
+}
+
+/**
+ * Shutdown an interface.
+ *
+ * cgdev Device to be shut down.
+ *
+ * returns 0 on success, !0 on failure.
+ */
+static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
+{
+ struct ctcm_priv *priv;
+ struct net_device *dev;
+
+ priv = dev_get_drvdata(&cgdev->dev);
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->channel[CTCM_READ]) {
+ dev = priv->channel[CTCM_READ]->netdev;
+ CTCM_DBF_DEV(SETUP, dev, "");
+ /* Close the device */
+ ctcm_close(dev);
+ dev->flags &= ~IFF_RUNNING;
+ ctcm_remove_attributes(&cgdev->dev);
+ channel_free(priv->channel[CTCM_READ]);
+ } else
+ dev = NULL;
+
+ if (priv->channel[CTCM_WRITE])
+ channel_free(priv->channel[CTCM_WRITE]);
+
+ if (dev) {
+ unregister_netdev(dev);
+ ctcm_free_netdevice(dev);
+ }
+
+ if (priv->fsm)
+ kfree_fsm(priv->fsm);
+
+ ccw_device_set_offline(cgdev->cdev[1]);
+ ccw_device_set_offline(cgdev->cdev[0]);
+
+ if (priv->channel[CTCM_READ])
+ channel_remove(priv->channel[CTCM_READ]);
+ if (priv->channel[CTCM_WRITE])
+ channel_remove(priv->channel[CTCM_WRITE]);
+ priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
+
+ return 0;
+}
+
+static void ctcm_remove_device(struct ccwgroup_device *cgdev)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev);
+
+ BUG_ON(priv == NULL);
+
+ CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
+ "removing device %p, proto : %d",
+ cgdev, priv->protocol);
+
+ if (cgdev->state == CCWGROUP_ONLINE)
+ ctcm_shutdown_device(cgdev);
+ ctcm_remove_files(&cgdev->dev);
+ dev_set_drvdata(&cgdev->dev, NULL);
+ kfree(priv);
+ put_device(&cgdev->dev);
+}
+
+static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+ netif_device_detach(priv->channel[CTCM_READ]->netdev);
+ ctcm_close(priv->channel[CTCM_READ]->netdev);
+ if (!wait_event_timeout(priv->fsm->wait_q,
+ fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
+ return -EBUSY;
+ }
+ ccw_device_set_offline(gdev->cdev[1]);
+ ccw_device_set_offline(gdev->cdev[0]);
+ return 0;
+}
+
+static int ctcm_pm_resume(struct ccwgroup_device *gdev)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev);
+ int rc;
+
+ if (gdev->state == CCWGROUP_OFFLINE)
+ return 0;
+ rc = ccw_device_set_online(gdev->cdev[1]);
+ if (rc)
+ goto err_out;
+ rc = ccw_device_set_online(gdev->cdev[0]);
+ if (rc)
+ goto err_out;
+ ctcm_open(priv->channel[CTCM_READ]->netdev);
+err_out:
+ netif_device_attach(priv->channel[CTCM_READ]->netdev);
+ return rc;
+}
+
+static struct ccw_device_id ctcm_ids[] = {
+ {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
+ {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
+ {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, ctcm_ids);
+
+static struct ccw_driver ctcm_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ctcm",
+ },
+ .ids = ctcm_ids,
+ .probe = ccwgroup_probe_ccwdev,
+ .remove = ccwgroup_remove_ccwdev,
+ .int_class = IOINT_CTC,
+};
+
+static struct ccwgroup_driver ctcm_group_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = CTC_DRIVER_NAME,
+ },
+ .max_slaves = 2,
+ .driver_id = 0xC3E3C3D4, /* CTCM */
+ .probe = ctcm_probe_device,
+ .remove = ctcm_remove_device,
+ .set_online = ctcm_new_device,
+ .set_offline = ctcm_shutdown_device,
+ .freeze = ctcm_pm_suspend,
+ .thaw = ctcm_pm_resume,
+ .restore = ctcm_pm_resume,
+};
+
+static ssize_t
+ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
+ size_t count)
+{
+ int err;
+
+ err = ccwgroup_create_from_string(ctcm_root_dev,
+ ctcm_group_driver.driver_id,
+ &ctcm_ccw_driver, 2, buf);
+ return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
+
+static struct attribute *ctcm_group_attrs[] = {
+ &driver_attr_group.attr,
+ NULL,
+};
+
+static struct attribute_group ctcm_group_attr_group = {
+ .attrs = ctcm_group_attrs,
+};
+
+static const struct attribute_group *ctcm_group_attr_groups[] = {
+ &ctcm_group_attr_group,
+ NULL,
+};
+
+/*
+ * Module related routines
+ */
+
+/*
+ * Prepare to be unloaded. Free IRQ's and release all resources.
+ * This is called just before this module is unloaded. It is
+ * not called, if the usage count is !0, so we don't need to check
+ * for that.
+ */
+static void __exit ctcm_exit(void)
+{
+ driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
+ ccwgroup_driver_unregister(&ctcm_group_driver);
+ ccw_driver_unregister(&ctcm_ccw_driver);
+ root_device_unregister(ctcm_root_dev);
+ ctcm_unregister_dbf_views();
+ pr_info("CTCM driver unloaded\n");
+}
+
+/*
+ * Print Banner.
+ */
+static void print_banner(void)
+{
+ pr_info("CTCM driver initialized\n");
+}
+
+/**
+ * Initialize module.
+ * This is called just after the module is loaded.
+ *
+ * returns 0 on success, !0 on error.
+ */
+static int __init ctcm_init(void)
+{
+ int ret;
+
+ channels = NULL;
+
+ ret = ctcm_register_dbf_views();
+ if (ret)
+ goto out_err;
+ ctcm_root_dev = root_device_register("ctcm");
+ ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
+ if (ret)
+ goto register_err;
+ ret = ccw_driver_register(&ctcm_ccw_driver);
+ if (ret)
+ goto ccw_err;
+ ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
+ ret = ccwgroup_driver_register(&ctcm_group_driver);
+ if (ret)
+ goto ccwgroup_err;
+ print_banner();
+ return 0;
+
+ccwgroup_err:
+ ccw_driver_unregister(&ctcm_ccw_driver);
+ccw_err:
+ root_device_unregister(ctcm_root_dev);
+register_err:
+ ctcm_unregister_dbf_views();
+out_err:
+ pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
+ __func__, ret);
+ return ret;
+}
+
+module_init(ctcm_init);
+module_exit(ctcm_exit);
+
+MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>");
+MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
new file mode 100644
index 00000000000..24d5215eb0c
--- /dev/null
+++ b/drivers/s390/net/ctcm_main.h
@@ -0,0 +1,323 @@
+/*
+ * drivers/s390/net/ctcm_main.h
+ *
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Fritz Elfert (felfert@millenux.com)
+ * Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+#ifndef _CTCM_MAIN_H_
+#define _CTCM_MAIN_H_
+
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include "fsm.h"
+#include "ctcm_dbug.h"
+#include "ctcm_mpc.h"
+
+#define CTC_DRIVER_NAME "ctcm"
+#define CTC_DEVICE_NAME "ctc"
+#define MPC_DEVICE_NAME "mpc"
+#define CTC_DEVICE_GENE CTC_DEVICE_NAME "%d"
+#define MPC_DEVICE_GENE MPC_DEVICE_NAME "%d"
+
+#define CHANNEL_FLAGS_READ 0
+#define CHANNEL_FLAGS_WRITE 1
+#define CHANNEL_FLAGS_INUSE 2
+#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4
+#define CHANNEL_FLAGS_FAILED 8
+#define CHANNEL_FLAGS_WAITIRQ 16
+#define CHANNEL_FLAGS_RWMASK 1
+#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK)
+
+#define LOG_FLAG_ILLEGALPKT 1
+#define LOG_FLAG_ILLEGALSIZE 2
+#define LOG_FLAG_OVERRUN 4
+#define LOG_FLAG_NOMEM 8
+
+#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
+
+#define CTCM_PR_DEBUG(fmt, arg...) \
+ do { \
+ if (do_debug) \
+ printk(KERN_DEBUG fmt, ##arg); \
+ } while (0)
+
+#define CTCM_PR_DBGDATA(fmt, arg...) \
+ do { \
+ if (do_debug_data) \
+ printk(KERN_DEBUG fmt, ##arg); \
+ } while (0)
+
+#define CTCM_D3_DUMP(buf, len) \
+ do { \
+ if (do_debug_data) \
+ ctcmpc_dumpit(buf, len); \
+ } while (0)
+
+#define CTCM_CCW_DUMP(buf, len) \
+ do { \
+ if (do_debug_ccw) \
+ ctcmpc_dumpit(buf, len); \
+ } while (0)
+
+/**
+ * Enum for classifying detected devices
+ */
+enum ctcm_channel_types {
+ /* Device is not a channel */
+ ctcm_channel_type_none,
+
+ /* Device is a CTC/A */
+ ctcm_channel_type_parallel,
+
+ /* Device is a FICON channel */
+ ctcm_channel_type_ficon,
+
+	/* Device is an ESCON channel */
+ ctcm_channel_type_escon
+};
+
+/*
+ * CCW commands, used in this driver.
+ */
+#define CCW_CMD_WRITE 0x01
+#define CCW_CMD_READ 0x02
+#define CCW_CMD_NOOP 0x03
+#define CCW_CMD_TIC 0x08
+#define CCW_CMD_SENSE_CMD 0x14
+#define CCW_CMD_WRITE_CTL 0x17
+#define CCW_CMD_SET_EXTENDED 0xc3
+#define CCW_CMD_PREPARE 0xe3
+
+#define CTCM_PROTO_S390 0
+#define CTCM_PROTO_LINUX 1
+#define CTCM_PROTO_LINUX_TTY 2
+#define CTCM_PROTO_OS390 3
+#define CTCM_PROTO_MPC 4
+#define CTCM_PROTO_MAX 4
+
+#define CTCM_BUFSIZE_LIMIT 65535
+#define CTCM_BUFSIZE_DEFAULT 32768
+#define MPC_BUFSIZE_DEFAULT CTCM_BUFSIZE_LIMIT
+
+#define CTCM_TIME_1_SEC 1000
+#define CTCM_TIME_5_SEC 5000
+#define CTCM_TIME_10_SEC 10000
+
+#define CTCM_INITIAL_BLOCKLEN 2
+
+#define CTCM_READ 0
+#define CTCM_WRITE 1
+
+#define CTCM_ID_SIZE	(20 + 3)
+
+struct ctcm_profile {
+ unsigned long maxmulti;
+ unsigned long maxcqueue;
+ unsigned long doios_single;
+ unsigned long doios_multi;
+ unsigned long txlen;
+ unsigned long tx_time;
+ struct timespec send_stamp;
+};
+
+/*
+ * Definition of one channel
+ */
+struct channel {
+ struct channel *next;
+ char id[CTCM_ID_SIZE];
+ struct ccw_device *cdev;
+ /*
+ * Type of this channel.
+ * CTC/A or Escon for valid channels.
+ */
+ enum ctcm_channel_types type;
+ /*
+ * Misc. flags. See CHANNEL_FLAGS_... below
+ */
+ __u32 flags;
+ __u16 protocol; /* protocol of this channel (4 = MPC) */
+ /*
+ * I/O and irq related stuff
+ */
+ struct ccw1 *ccw;
+ struct irb *irb;
+ /*
+ * RX/TX buffer size
+ */
+ int max_bufsize;
+ struct sk_buff *trans_skb; /* transmit/receive buffer */
+ struct sk_buff_head io_queue; /* universal I/O queue */
+ struct tasklet_struct ch_tasklet; /* MPC ONLY */
+ /*
+ * TX queue for collecting skb's during busy.
+ */
+ struct sk_buff_head collect_queue;
+ /*
+ * Amount of data in collect_queue.
+ */
+ int collect_len;
+ /*
+ * spinlock for collect_queue and collect_len
+ */
+ spinlock_t collect_lock;
+ /*
+	 * Timer for detecting unresponsive
+ * I/O operations.
+ */
+ fsm_timer timer;
+ /* MPC ONLY section begin */
+ __u32 th_seq_num; /* SNA TH seq number */
+ __u8 th_seg;
+ __u32 pdu_seq;
+ struct sk_buff *xid_skb;
+ char *xid_skb_data;
+ struct th_header *xid_th;
+ struct xid2 *xid;
+ char *xid_id;
+ struct th_header *rcvd_xid_th;
+ struct xid2 *rcvd_xid;
+ char *rcvd_xid_id;
+ __u8 in_mpcgroup;
+ fsm_timer sweep_timer;
+ struct sk_buff_head sweep_queue;
+ struct th_header *discontact_th;
+ struct tasklet_struct ch_disc_tasklet;
+ /* MPC ONLY section end */
+
+ int retry; /* retry counter for misc. operations */
+ fsm_instance *fsm; /* finite state machine of this channel */
+ struct net_device *netdev; /* corresponding net_device */
+ struct ctcm_profile prof;
+ __u8 *trans_skb_data;
+ __u16 logflags;
+ __u8 sense_rc; /* last unit check sense code report control */
+};
+
+struct ctcm_priv {
+ struct net_device_stats stats;
+ unsigned long tbusy;
+
+ /* The MPC group struct of this interface */
+ struct mpc_group *mpcg; /* MPC only */
+ struct xid2 *xid; /* MPC only */
+
+ /* The finite state machine of this interface */
+ fsm_instance *fsm;
+
+ /* The protocol of this device */
+ __u16 protocol;
+
+ /* Timer for restarting after I/O Errors */
+ fsm_timer restart_timer;
+
+ int buffer_size; /* ctc only */
+
+ struct channel *channel[2];
+};
+
+int ctcm_open(struct net_device *dev);
+int ctcm_close(struct net_device *dev);
+
+/*
+ * prototypes for non-static sysfs functions
+ */
+int ctcm_add_attributes(struct device *dev);
+void ctcm_remove_attributes(struct device *dev);
+int ctcm_add_files(struct device *dev);
+void ctcm_remove_files(struct device *dev);
+
+/*
+ * Compatibility macros for busy handling
+ * of network devices.
+ */
+static inline void ctcm_clear_busy_do(struct net_device *dev)
+{
+ clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
+ netif_wake_queue(dev);
+}
+
+static inline void ctcm_clear_busy(struct net_device *dev)
+{
+ struct mpc_group *grp;
+ grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg;
+
+ if (!(grp && grp->in_sweep))
+ ctcm_clear_busy_do(dev);
+}
+
+
+static inline int ctcm_test_and_set_busy(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return test_and_set_bit(0,
+ &(((struct ctcm_priv *)dev->ml_priv)->tbusy));
+}
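+
+/*
+ * Typical usage (illustrative sketch of the transmit path):
+ * transmission is bracketed by the busy bit, and the queue stays
+ * stopped until the bit is cleared again:
+ *
+ *	if (ctcm_test_and_set_busy(dev))
+ *		return NETDEV_TX_BUSY;
+ *	...start the channel I/O...
+ *	ctcm_clear_busy(dev);
+ *
+ * Note that ctcm_clear_busy() deliberately keeps the queue stopped
+ * while an MPC sweep is in progress.
+ */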
+
+extern int loglevel;
+extern struct channel *channels;
+
+void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb);
+
+/*
+ * Functions related to setup and device detection.
+ */
+
+static inline int ctcm_less_than(char *id1, char *id2)
+{
+ unsigned long dev1, dev2;
+
+ id1 = id1 + 5;
+ id2 = id2 + 5;
+
+ dev1 = simple_strtoul(id1, &id1, 16);
+ dev2 = simple_strtoul(id2, &id2, 16);
+
+ return (dev1 < dev2);
+}
+
+int ctcm_ch_alloc_buffer(struct channel *ch);
+
+static inline int ctcm_checkalloc_buffer(struct channel *ch)
+{
+ if (ch->trans_skb == NULL)
+ return ctcm_ch_alloc_buffer(ch);
+ if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) {
+ dev_kfree_skb(ch->trans_skb);
+ return ctcm_ch_alloc_buffer(ch);
+ }
+ return 0;
+}
+
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv);
+
+/* test if protocol attribute (of struct ctcm_priv or struct channel)
+ * has MPC protocol setting. Type is not checked
+ */
+#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC)
+
+/* test if struct ctcm_priv of struct net_device has MPC protocol setting */
+#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv)
+
+static inline gfp_t gfp_type(void)
+{
+ return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
+}
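+
+/*
+ * Illustrative usage: allocations on paths that may run in IRQ or
+ * tasklet context pass gfp_type() so that GFP_ATOMIC is picked
+ * automatically whenever sleeping is not allowed, e.g.
+ *
+ *	header = kmalloc(TH_HEADER_LENGTH, gfp_type());
+ */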
+
+/*
+ * Definition of our link level header.
+ */
+struct ll_header {
+ __u16 length;
+ __u16 type;
+ __u16 unused;
+};
+#define LL_HEADER_LENGTH (sizeof(struct ll_header))
+
+#endif
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
new file mode 100644
index 00000000000..da4c747335e
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -0,0 +1,2179 @@
+/*
+ * drivers/s390/net/ctcm_mpc.c
+ *
+ * Copyright IBM Corp. 2004, 2007
+ * Authors: Belinda Thompson (belindat@us.ibm.com)
+ * Andy Richter (richtera@us.ibm.com)
+ * Peter Tiedemann (ptiedem@de.ibm.com)
+ */
+
+/*
+ This module exports functions to be used by CCS:
+ EXPORT_SYMBOL(ctc_mpc_alloc_channel);
+ EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
+ EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
+ EXPORT_SYMBOL(ctc_mpc_flow_control);
+*/
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <linux/netdevice.h>
+#include <net/dst.h>
+
+#include <linux/io.h> /* instead of <asm/io.h> ok ? */
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */
+#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */
+#include <linux/wait.h>
+#include <linux/moduleparam.h>
+#include <asm/idals.h>
+
+#include "ctcm_mpc.h"
+#include "ctcm_main.h"
+#include "ctcm_fsms.h"
+
+static const struct xid2 init_xid = {
+ .xid2_type_id = XID_FM2,
+ .xid2_len = 0x45,
+ .xid2_adj_id = 0,
+ .xid2_rlen = 0x31,
+ .xid2_resv1 = 0,
+ .xid2_flag1 = 0,
+ .xid2_fmtt = 0,
+ .xid2_flag4 = 0x80,
+ .xid2_resv2 = 0,
+ .xid2_tgnum = 0,
+ .xid2_sender_id = 0,
+ .xid2_flag2 = 0,
+ .xid2_option = XID2_0,
+ .xid2_resv3 = "\x00",
+ .xid2_resv4 = 0,
+ .xid2_dlc_type = XID2_READ_SIDE,
+ .xid2_resv5 = 0,
+ .xid2_mpc_flag = 0,
+ .xid2_resv6 = 0,
+ .xid2_buf_len = (MPC_BUFSIZE_DEFAULT - 35),
+};
+
+static const struct th_header thnorm = {
+ .th_seg = 0x00,
+ .th_ch_flag = TH_IS_XID,
+ .th_blk_flag = TH_DATA_IS_XID,
+ .th_is_xid = 0x01,
+ .th_seq_num = 0x00000000,
+};
+
+static const struct th_header thdummy = {
+ .th_seg = 0x00,
+ .th_ch_flag = 0x00,
+ .th_blk_flag = TH_DATA_IS_XID,
+ .th_is_xid = 0x01,
+ .th_seq_num = 0x00000000,
+};
+
+static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb);
+
+/*
+ * MPC Group state machine actions (static prototypes)
+ */
+static void mpc_action_nop(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg);
+static void mpc_action_timeout(fsm_instance *fi, int event, void *arg);
+static int mpc_validate_xid(struct mpcg_info *mpcginfo);
+static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg);
+static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg);
+
+#ifdef DEBUGDATA
+/*-------------------------------------------------------------------*
+* Dump buffer format *
+* *
+*--------------------------------------------------------------------*/
+void ctcmpc_dumpit(char *buf, int len)
+{
+ __u32 ct, sw, rm, dup;
+ char *ptr, *rptr;
+ char tbuf[82], tdup[82];
+	#ifdef CONFIG_64BIT
+	char addr[22];
+	#else
+	char addr[12];
+	#endif
+ char boff[12];
+ char bhex[82], duphex[82];
+ char basc[40];
+
+ sw = 0;
+ rptr = ptr = buf;
+ rm = 16;
+ duphex[0] = 0x00;
+ dup = 0;
+
+ for (ct = 0; ct < len; ct++, ptr++, rptr++) {
+ if (sw == 0) {
+			#ifdef CONFIG_64BIT
+			sprintf(addr, "%16.16lx", (__u64)rptr);
+			#else
+			sprintf(addr, "%8.8X", (__u32)rptr);
+			#endif
+
+ sprintf(boff, "%4.4X", (__u32)ct);
+ bhex[0] = '\0';
+ basc[0] = '\0';
+ }
+ if ((sw == 4) || (sw == 12))
+ strcat(bhex, " ");
+ if (sw == 8)
+ strcat(bhex, " ");
+
+		#ifdef CONFIG_64BIT
+		sprintf(tbuf, "%2.2lX", (__u64)*ptr);
+		#else
+		sprintf(tbuf, "%2.2X", (__u32)*ptr);
+		#endif
+
+ tbuf[2] = '\0';
+ strcat(bhex, tbuf);
+ if ((0 != isprint(*ptr)) && (*ptr >= 0x20))
+ basc[sw] = *ptr;
+ else
+ basc[sw] = '.';
+
+ basc[sw+1] = '\0';
+ sw++;
+ rm--;
+ if (sw != 16)
+ continue;
+ if ((strcmp(duphex, bhex)) != 0) {
+ if (dup != 0) {
+ sprintf(tdup,
+ "Duplicate as above to %s", addr);
+ ctcm_pr_debug(" --- %s ---\n",
+ tdup);
+ }
+ ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
+ addr, boff, bhex, basc);
+ dup = 0;
+ strcpy(duphex, bhex);
+ } else
+ dup++;
+
+ sw = 0;
+ rm = 16;
+ } /* endfor */
+
+ if (sw != 0) {
+ for ( ; rm > 0; rm--, sw++) {
+ if ((sw == 4) || (sw == 12))
+ strcat(bhex, " ");
+ if (sw == 8)
+ strcat(bhex, " ");
+ strcat(bhex, " ");
+ strcat(basc, " ");
+ }
+ if (dup != 0) {
+ sprintf(tdup, "Duplicate as above to %s", addr);
+ ctcm_pr_debug(" --- %s ---\n", tdup);
+ }
+ ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
+ addr, boff, bhex, basc);
+ } else {
+ if (dup >= 1) {
+ sprintf(tdup, "Duplicate as above to %s", addr);
+ ctcm_pr_debug(" --- %s ---\n", tdup);
+ }
+ if (dup != 0) {
+ ctcm_pr_debug(" %s (+%s) : %s [%s]\n",
+ addr, boff, bhex, basc);
+ }
+ }
+
+ return;
+
+} /* end of ctcmpc_dumpit */
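+
+/*
+ * Example of one resulting dump line (illustrative, the address and
+ * data values are made up): 16 bytes per line as address, offset,
+ * hex grouped 4-4-4-4, then the printable characters:
+ *
+ *   000000003f2a1000 (+0010) : 00e30000 00000001  c3e3c3d4 40404040 [................]
+ */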
+#endif
+
+#ifdef DEBUGDATA
+/*
+ * Dump header and first 16 bytes of an sk_buff for debugging purposes.
+ *
+ * skb The sk_buff to dump.
+ * offset Offset relative to skb-data, where to start the dump.
+ */
+void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
+{
+ __u8 *p = skb->data;
+ struct th_header *header;
+ struct pdu *pheader;
+ int bl = skb->len;
+ int i;
+
+ if (p == NULL)
+ return;
+
+ p += offset;
+ header = (struct th_header *)p;
+
+ ctcm_pr_debug("dump:\n");
+ ctcm_pr_debug("skb len=%d \n", skb->len);
+ if (skb->len > 2) {
+ switch (header->th_ch_flag) {
+ case TH_HAS_PDU:
+ break;
+ case 0x00:
+ case TH_IS_XID:
+ if ((header->th_blk_flag == TH_DATA_IS_XID) &&
+ (header->th_is_xid == 0x01))
+ goto dumpth;
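+			/* fall through if the block is not XID data */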
+ case TH_SWEEP_REQ:
+ goto dumpth;
+ case TH_SWEEP_RESP:
+ goto dumpth;
+ default:
+ break;
+ }
+
+ pheader = (struct pdu *)p;
+ ctcm_pr_debug("pdu->offset: %d hex: %04x\n",
+ pheader->pdu_offset, pheader->pdu_offset);
+ ctcm_pr_debug("pdu->flag : %02x\n", pheader->pdu_flag);
+ ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto);
+ ctcm_pr_debug("pdu->seq : %02x\n", pheader->pdu_seq);
+ goto dumpdata;
+
+dumpth:
+ ctcm_pr_debug("th->seg : %02x\n", header->th_seg);
+ ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag);
+ ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag);
+ ctcm_pr_debug("th->type : %s\n",
+ (header->th_is_xid) ? "DATA" : "XID");
+ ctcm_pr_debug("th->seqnum : %04x\n", header->th_seq_num);
+
+ }
+dumpdata:
+ if (bl > 32)
+ bl = 32;
+ ctcm_pr_debug("data: ");
+ for (i = 0; i < bl; i++)
+ ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n");
+ ctcm_pr_debug("\n");
+}
+#endif
+
+static struct net_device *ctcmpc_get_dev(int port_num)
+{
+ char device[20];
+ struct net_device *dev;
+ struct ctcm_priv *priv;
+
+ sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num);
+
+ dev = __dev_get_by_name(&init_net, device);
+
+ if (dev == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s: Device not found by name: %s",
+ CTCM_FUNTAIL, device);
+ return NULL;
+ }
+ priv = dev->ml_priv;
+ if (priv == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): dev->ml_priv is NULL",
+ CTCM_FUNTAIL, device);
+ return NULL;
+ }
+ if (priv->mpcg == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): priv->mpcg is NULL",
+ CTCM_FUNTAIL, device);
+ return NULL;
+ }
+ return dev;
+}
+
+/*
+ * ctc_mpc_alloc_channel
+ * (exported interface)
+ *
+ * Device Initialization :
+ * ACTPATH driven IO operations
+ */
+int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int))
+{
+ struct net_device *dev;
+ struct mpc_group *grp;
+ struct ctcm_priv *priv;
+
+ dev = ctcmpc_get_dev(port_num);
+ if (dev == NULL)
+ return 1;
+ priv = dev->ml_priv;
+ grp = priv->mpcg;
+
+ grp->allochanfunc = callback;
+ grp->port_num = port_num;
+ grp->port_persist = 1;
+
+ CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+ "%s(%s): state=%s",
+ CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_INOP:
+ /* Group is in the process of terminating */
+ grp->alloc_called = 1;
+ break;
+ case MPCG_STATE_RESET:
+ /* MPC Group will transition to state */
+ /* MPCG_STATE_XID2INITW iff the minimum number */
+ /* of 1 read and 1 write channel have successfully*/
+ /* activated */
+ /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/
+ if (callback)
+ grp->send_qllc_disc = 1;
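+		/* fall through */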
+ case MPCG_STATE_XID0IOWAIT:
+ fsm_deltimer(&grp->timer);
+ grp->outstanding_xid2 = 0;
+ grp->outstanding_xid7 = 0;
+ grp->outstanding_xid7_p2 = 0;
+ grp->saved_xid2 = NULL;
+ if (callback)
+ ctcm_open(dev);
+ fsm_event(priv->fsm, DEV_EVENT_START, dev);
+ break;
+ case MPCG_STATE_READY:
+ /* XID exchanges completed after PORT was activated */
+ /* Link station already active */
+ /* Maybe timing issue...retry callback */
+ grp->allocchan_callback_retries++;
+ if (grp->allocchan_callback_retries < 4) {
+ if (grp->allochanfunc)
+ grp->allochanfunc(grp->port_num,
+ grp->group_max_buflen);
+ } else {
+ /* there are problems...bail out */
+ /* there may be a state mismatch so restart */
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ grp->allocchan_callback_retries = 0;
+ }
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ctc_mpc_alloc_channel);
+
+/*
+ * ctc_mpc_establish_connectivity
+ * (exported interface)
+ */
+void ctc_mpc_establish_connectivity(int port_num,
+ void (*callback)(int, int, int))
+{
+ struct net_device *dev;
+ struct mpc_group *grp;
+ struct ctcm_priv *priv;
+ struct channel *rch, *wch;
+
+ dev = ctcmpc_get_dev(port_num);
+ if (dev == NULL)
+ return;
+ priv = dev->ml_priv;
+ grp = priv->mpcg;
+ rch = priv->channel[CTCM_READ];
+ wch = priv->channel[CTCM_WRITE];
+
+ CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+ "%s(%s): state=%s",
+ CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm));
+
+ grp->estconnfunc = callback;
+ grp->port_num = port_num;
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_READY:
+ /* XID exchanges completed after PORT was activated */
+ /* Link station already active */
+ /* Maybe timing issue...retry callback */
+ fsm_deltimer(&grp->timer);
+ grp->estconn_callback_retries++;
+ if (grp->estconn_callback_retries < 4) {
+ if (grp->estconnfunc) {
+ grp->estconnfunc(grp->port_num, 0,
+ grp->group_max_buflen);
+ grp->estconnfunc = NULL;
+ }
+ } else {
+ /* there are problems...bail out */
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ grp->estconn_callback_retries = 0;
+ }
+ break;
+ case MPCG_STATE_INOP:
+ case MPCG_STATE_RESET:
+ /* MPC Group is not ready to start XID - min num of */
+ /* 1 read and 1 write channel have not been acquired*/
+
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): REJECTED - inactive channels",
+ CTCM_FUNTAIL, dev->name);
+ if (grp->estconnfunc) {
+ grp->estconnfunc(grp->port_num, -1, 0);
+ grp->estconnfunc = NULL;
+ }
+ break;
+ case MPCG_STATE_XID2INITW:
+ /* alloc channel was called but no XID exchange */
+ /* has occurred. initiate xside XID exchange */
+ /* make sure yside XID0 processing has not started */
+
+ if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) ||
+ (fsm_getstate(wch->fsm) > CH_XID0_PENDING)) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): ABORT - PASSIVE XID",
+ CTCM_FUNTAIL, dev->name);
+ break;
+ }
+ grp->send_qllc_disc = 1;
+ fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT);
+ fsm_deltimer(&grp->timer);
+ fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
+ MPCG_EVENT_TIMER, dev);
+ grp->outstanding_xid7 = 0;
+ grp->outstanding_xid7_p2 = 0;
+ grp->saved_xid2 = NULL;
+ if ((rch->in_mpcgroup) &&
+ (fsm_getstate(rch->fsm) == CH_XID0_PENDING))
+ fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch);
+ else {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): RX-%s not ready for ACTIVE XID0",
+ CTCM_FUNTAIL, dev->name, rch->id);
+ if (grp->estconnfunc) {
+ grp->estconnfunc(grp->port_num, -1, 0);
+ grp->estconnfunc = NULL;
+ }
+ fsm_deltimer(&grp->timer);
+ goto done;
+ }
+ if ((wch->in_mpcgroup) &&
+ (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
+ fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch);
+ else {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): WX-%s not ready for ACTIVE XID0",
+ CTCM_FUNTAIL, dev->name, wch->id);
+ if (grp->estconnfunc) {
+ grp->estconnfunc(grp->port_num, -1, 0);
+ grp->estconnfunc = NULL;
+ }
+ fsm_deltimer(&grp->timer);
+ goto done;
+ }
+ break;
+ case MPCG_STATE_XID0IOWAIT:
+ /* already in active XID negotiations */
+ default:
+ break;
+ }
+
+done:
+ CTCM_PR_DEBUG("Exit %s()\n", __func__);
+ return;
+}
+EXPORT_SYMBOL(ctc_mpc_establish_connectivity);
+
+/*
+ * ctc_mpc_dealloc_ch
+ * (exported interface)
+ */
+void ctc_mpc_dealloc_ch(int port_num)
+{
+ struct net_device *dev;
+ struct ctcm_priv *priv;
+ struct mpc_group *grp;
+
+ dev = ctcmpc_get_dev(port_num);
+ if (dev == NULL)
+ return;
+ priv = dev->ml_priv;
+ grp = priv->mpcg;
+
+ CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG,
+ "%s: %s: refcount = %d\n",
+ CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev));
+
+ fsm_deltimer(&priv->restart_timer);
+ grp->channels_terminating = 0;
+ fsm_deltimer(&grp->timer);
+ grp->allochanfunc = NULL;
+ grp->estconnfunc = NULL;
+ grp->port_persist = 0;
+ grp->send_qllc_disc = 0;
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+ ctcm_close(dev);
+ return;
+}
+EXPORT_SYMBOL(ctc_mpc_dealloc_ch);
+
+/*
+ * ctc_mpc_flow_control
+ * (exported interface)
+ */
+void ctc_mpc_flow_control(int port_num, int flowc)
+{
+ struct ctcm_priv *priv;
+ struct mpc_group *grp;
+ struct net_device *dev;
+ struct channel *rch;
+ int mpcg_state;
+
+ dev = ctcmpc_get_dev(port_num);
+ if (dev == NULL)
+ return;
+ priv = dev->ml_priv;
+ grp = priv->mpcg;
+
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+ "%s: %s: flowc = %d",
+ CTCM_FUNTAIL, dev->name, flowc);
+
+ rch = priv->channel[CTCM_READ];
+
+ mpcg_state = fsm_getstate(grp->fsm);
+ switch (flowc) {
+ case 1:
+ if (mpcg_state == MPCG_STATE_FLOWC)
+ break;
+ if (mpcg_state == MPCG_STATE_READY) {
+ if (grp->flow_off_called == 1)
+ grp->flow_off_called = 0;
+ else
+ fsm_newstate(grp->fsm, MPCG_STATE_FLOWC);
+ break;
+ }
+ break;
+ case 0:
+ if (mpcg_state == MPCG_STATE_FLOWC) {
+ fsm_newstate(grp->fsm, MPCG_STATE_READY);
+ /* ensure any data that has accumulated */
+			/* on the io_queue will now be sent */
+ tasklet_schedule(&rch->ch_tasklet);
+ }
+ /* possible race condition */
+ if (mpcg_state == MPCG_STATE_READY) {
+ grp->flow_off_called = 1;
+ break;
+ }
+ break;
+ }
+
+}
+EXPORT_SYMBOL(ctc_mpc_flow_control);
+
+static int mpc_send_qllc_discontact(struct net_device *);
+
+/*
+ * helper function of ctcmpc_unpack_skb
+*/
+static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo)
+{
+ struct channel *rch = mpcginfo->ch;
+ struct net_device *dev = rch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct channel *ch = priv->channel[CTCM_WRITE];
+
+ CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+ CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
+
+ grp->sweep_rsp_pend_num--;
+
+ if ((grp->sweep_req_pend_num == 0) &&
+ (grp->sweep_rsp_pend_num == 0)) {
+ fsm_deltimer(&ch->sweep_timer);
+ grp->in_sweep = 0;
+ rch->th_seq_num = 0x00;
+ ch->th_seq_num = 0x00;
+ ctcm_clear_busy_do(dev);
+ }
+
+ kfree(mpcginfo);
+
+ return;
+
+}
+
+/*
+ * helper function of mpc_rcvd_sweep_req
+ * which is a helper of ctcmpc_unpack_skb
+ */
+static void ctcmpc_send_sweep_resp(struct channel *rch)
+{
+ struct net_device *dev = rch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct th_sweep *header;
+ struct sk_buff *sweep_skb;
+ struct channel *ch = priv->channel[CTCM_WRITE];
+
+ CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id);
+
+ sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
+ if (sweep_skb == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): sweep_skb allocation ERROR\n",
+ CTCM_FUNTAIL, rch->id);
+ goto done;
+ }
+
+ header = kmalloc(sizeof(struct th_sweep), gfp_type());
+
+ if (!header) {
+ dev_kfree_skb_any(sweep_skb);
+ goto done;
+ }
+
+	header->th.th_seg = 0x00;
+ header->th.th_ch_flag = TH_SWEEP_RESP;
+ header->th.th_blk_flag = 0x00;
+ header->th.th_is_xid = 0x00;
+ header->th.th_seq_num = 0x00;
+ header->sw.th_last_seq = ch->th_seq_num;
+
+ memcpy(skb_put(sweep_skb, TH_SWEEP_LENGTH), header, TH_SWEEP_LENGTH);
+
+ kfree(header);
+
+ dev->trans_start = jiffies;
+ skb_queue_tail(&ch->sweep_queue, sweep_skb);
+
+ fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch);
+
+ return;
+
+done:
+ grp->in_sweep = 0;
+ ctcm_clear_busy_do(dev);
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+
+ return;
+}
+
+/*
+ * helper function of ctcmpc_unpack_skb
+ */
+static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo)
+{
+ struct channel *rch = mpcginfo->ch;
+ struct net_device *dev = rch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct channel *ch = priv->channel[CTCM_WRITE];
+
+ if (do_debug)
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+ " %s(): ch=0x%p id=%s\n", __func__, ch, ch->id);
+
+ if (grp->in_sweep == 0) {
+ grp->in_sweep = 1;
+ ctcm_test_and_set_busy(dev);
+ grp->sweep_req_pend_num = grp->active_channels[CTCM_READ];
+ grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ];
+ }
+
+ CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH);
+
+ grp->sweep_req_pend_num--;
+ ctcmpc_send_sweep_resp(ch);
+ kfree(mpcginfo);
+ return;
+}
+
+/*
+ * MPC Group Station FSM definitions
+ */
+static const char *mpcg_event_names[] = {
+ [MPCG_EVENT_INOP] = "INOP Condition",
+ [MPCG_EVENT_DISCONC] = "Discontact Received",
+ [MPCG_EVENT_XID0DO] = "Channel Active - Start XID",
+ [MPCG_EVENT_XID2] = "XID2 Received",
+ [MPCG_EVENT_XID2DONE] = "XID0 Complete",
+ [MPCG_EVENT_XID7DONE] = "XID7 Complete",
+ [MPCG_EVENT_TIMER] = "XID Setup Timer",
+ [MPCG_EVENT_DOIO] = "XID DoIO",
+};
+
+static const char *mpcg_state_names[] = {
+ [MPCG_STATE_RESET] = "Reset",
+ [MPCG_STATE_INOP] = "INOP",
+ [MPCG_STATE_XID2INITW] = "Passive XID- XID0 Pending Start",
+ [MPCG_STATE_XID2INITX] = "Passive XID- XID0 Pending Complete",
+ [MPCG_STATE_XID7INITW] = "Passive XID- XID7 Pending P1 Start",
+ [MPCG_STATE_XID7INITX] = "Passive XID- XID7 Pending P2 Complete",
+ [MPCG_STATE_XID0IOWAIT] = "Active XID- XID0 Pending Start",
+ [MPCG_STATE_XID0IOWAIX] = "Active XID- XID0 Pending Complete",
+ [MPCG_STATE_XID7INITI] = "Active XID- XID7 Pending Start",
+	[MPCG_STATE_XID7INITZ]	= "Active XID- XID7 Pending Complete",
+	[MPCG_STATE_XID7INITF]	= "XID        - XID7 Complete",
+ [MPCG_STATE_FLOWC] = "FLOW CONTROL ON",
+ [MPCG_STATE_READY] = "READY",
+};
+
+/*
+ * The MPC Group Station FSM
+ * state/event transition table
+ */
+static const fsm_node mpcg_fsm[] = {
+ { MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop },
+ { MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop },
+
+ { MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact },
+ { MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop },
+
+ { MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
+ { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
+ { MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
+
+ { MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
+ { MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
+ { MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
+
+ { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
+ { MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact },
+ { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
+ { MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
+ { MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid },
+
+ { MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact },
+ { MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
+ { MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
+ { MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid },
+
+ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
+ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact },
+ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
+ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid },
+
+ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 },
+ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact },
+ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 },
+ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid },
+
+ { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 },
+ { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
+ { MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact },
+ { MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
+ { MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid },
+
+ { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 },
+ { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 },
+ { MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact },
+ { MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout },
+ { MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid },
+
+ { MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop },
+ { MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready },
+};
+
+static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm);
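+
+/*
+ * Reader's note (a sketch of the dispatch mechanics, not driver logic):
+ * init_fsm() flattens the table above into a jumpmatrix indexed by
+ * (nr_states * event + state), and fsm_event() dispatches through it.
+ * For example, with the group FSM in MPCG_STATE_XID2INITW,
+ *
+ *	fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
+ *
+ * matches the { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, ... } node and
+ * calls mpc_action_rcvd_xid0(). A state/event pair without a node is
+ * ignored: fsm_event() returns non-zero without calling any action.
+ */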
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ if (grp == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): No MPC group",
+ CTCM_FUNTAIL, dev->name);
+ return;
+ }
+
+ fsm_deltimer(&grp->timer);
+
+ if (grp->saved_xid2->xid2_flag2 == 0x40) {
+ priv->xid->xid2_flag2 = 0x00;
+ if (grp->estconnfunc) {
+ grp->estconnfunc(grp->port_num, 1,
+ grp->group_max_buflen);
+ grp->estconnfunc = NULL;
+ } else if (grp->allochanfunc)
+ grp->send_qllc_disc = 1;
+
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): fails",
+ CTCM_FUNTAIL, dev->name);
+ return;
+ }
+
+ grp->port_persist = 1;
+ grp->out_of_sequence = 0;
+ grp->estconn_called = 0;
+
+ tasklet_hi_schedule(&grp->mpc_tasklet2);
+
+ return;
+}
+
+/*
+ * helper of ctcm_init_netdevice
+ * CTCM_PROTO_MPC only
+ */
+void mpc_group_ready(unsigned long adev)
+{
+ struct net_device *dev = (struct net_device *)adev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct channel *ch = NULL;
+
+ if (grp == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): No MPC group",
+ CTCM_FUNTAIL, dev->name);
+ return;
+ }
+
+ CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
+ "%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n",
+ CTCM_FUNTAIL, dev->name, grp->group_max_buflen);
+
+ fsm_newstate(grp->fsm, MPCG_STATE_READY);
+
+ /* Put up a read on the channel */
+ ch = priv->channel[CTCM_READ];
+ ch->pdu_seq = 0;
+ CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" ,
+ __func__, ch->pdu_seq);
+
+ ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch);
+ /* Put the write channel in idle state */
+ ch = priv->channel[CTCM_WRITE];
+ if (ch->collect_len > 0) {
+ spin_lock(&ch->collect_lock);
+ ctcm_purge_skb_queue(&ch->collect_queue);
+ ch->collect_len = 0;
+ spin_unlock(&ch->collect_lock);
+ }
+ ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch);
+ ctcm_clear_busy(dev);
+
+ if (grp->estconnfunc) {
+ grp->estconnfunc(grp->port_num, 0,
+ grp->group_max_buflen);
+ grp->estconnfunc = NULL;
+ } else if (grp->allochanfunc)
+ grp->allochanfunc(grp->port_num, grp->group_max_buflen);
+
+ grp->send_qllc_disc = 1;
+ grp->changed_side = 0;
+
+ return;
+
+}
+
+/*
+ * Adjust (increment or decrement) the MPC Group Active Channel Counts
+ * helper of dev_action (called from channel fsm)
+ */
+void mpc_channel_action(struct channel *ch, int direction, int action)
+{
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ if (grp == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): No MPC group",
+ CTCM_FUNTAIL, dev->name);
+ return;
+ }
+
+ CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+ "%s: %i / Grp:%s total_channels=%i, active_channels: "
+ "read=%i, write=%i\n", __func__, action,
+ fsm_getstate_str(grp->fsm), grp->num_channel_paths,
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
+
+ if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) {
+ grp->num_channel_paths++;
+ grp->active_channels[direction]++;
+ grp->outstanding_xid2++;
+ ch->in_mpcgroup = 1;
+
+ if (ch->xid_skb != NULL)
+ dev_kfree_skb_any(ch->xid_skb);
+
+ ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT,
+ GFP_ATOMIC | GFP_DMA);
+ if (ch->xid_skb == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): Couldn't alloc ch xid_skb\n",
+ CTCM_FUNTAIL, dev->name);
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ return;
+ }
+ ch->xid_skb_data = ch->xid_skb->data;
+ ch->xid_th = (struct th_header *)ch->xid_skb->data;
+ skb_put(ch->xid_skb, TH_HEADER_LENGTH);
+ ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb);
+ skb_put(ch->xid_skb, XID2_LENGTH);
+ ch->xid_id = skb_tail_pointer(ch->xid_skb);
+ ch->xid_skb->data = ch->xid_skb_data;
+ skb_reset_tail_pointer(ch->xid_skb);
+ ch->xid_skb->len = 0;
+
+ memcpy(skb_put(ch->xid_skb, grp->xid_skb->len),
+ grp->xid_skb->data,
+ grp->xid_skb->len);
+
+ ch->xid->xid2_dlc_type =
+ ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
+ ? XID2_READ_SIDE : XID2_WRITE_SIDE);
+
+ if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE)
+ ch->xid->xid2_buf_len = 0x00;
+
+ ch->xid_skb->data = ch->xid_skb_data;
+ skb_reset_tail_pointer(ch->xid_skb);
+ ch->xid_skb->len = 0;
+
+ fsm_newstate(ch->fsm, CH_XID0_PENDING);
+
+ if ((grp->active_channels[CTCM_READ] > 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0) &&
+ (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) {
+ fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
+ CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE,
+ "%s: %s: MPC GROUP CHANNELS ACTIVE\n",
+ __func__, dev->name);
+ }
+ } else if ((action == MPC_CHANNEL_REMOVE) &&
+ (ch->in_mpcgroup == 1)) {
+ ch->in_mpcgroup = 0;
+ grp->num_channel_paths--;
+ grp->active_channels[direction]--;
+
+ if (ch->xid_skb != NULL)
+ dev_kfree_skb_any(ch->xid_skb);
+ ch->xid_skb = NULL;
+
+ if (grp->channels_terminating)
+ goto done;
+
+ if (((grp->active_channels[CTCM_READ] == 0) &&
+ (grp->active_channels[CTCM_WRITE] > 0))
+ || ((grp->active_channels[CTCM_WRITE] == 0) &&
+ (grp->active_channels[CTCM_READ] > 0)))
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ }
+done:
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+ "exit %s: %i / Grp:%s total_channels=%i, active_channels: "
+ "read=%i, write=%i\n", __func__, action,
+ fsm_getstate_str(grp->fsm), grp->num_channel_paths,
+ grp->active_channels[CTCM_READ],
+ grp->active_channels[CTCM_WRITE]);
+
+ CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
+}
+
+/**
+ * Unpack a just-received skb and hand it over to the
+ * upper layers.
+ * Special MPC version of unpack_skb.
+ *
+ * ch The channel where this skb has been received.
+ * pskb The received skb.
+ */
+static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
+{
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct pdu *curr_pdu;
+ struct mpcg_info *mpcginfo;
+ struct th_header *header = NULL;
+ struct th_sweep *sweep = NULL;
+ int pdu_last_seen = 0;
+ __u32 new_len;
+ struct sk_buff *skb;
+ int skblen;
+ int sendrc = 0;
+
+ CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n",
+ __func__, dev->name, smp_processor_id(), ch->id);
+
+ header = (struct th_header *)pskb->data;
+ if ((header->th_seg == 0) &&
+ (header->th_ch_flag == 0) &&
+ (header->th_blk_flag == 0) &&
+ (header->th_seq_num == 0))
+ /* nothing for us */ goto done;
+
+ CTCM_PR_DBGDATA("%s: th_header\n", __func__);
+ CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH);
+ CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len);
+
+ pskb->dev = dev;
+ pskb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_pull(pskb, TH_HEADER_LENGTH);
+
+ if (likely(header->th_ch_flag == TH_HAS_PDU)) {
+ CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__);
+ if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) ||
+ ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) &&
+ (header->th_seq_num != ch->th_seq_num + 1) &&
+ (ch->th_seq_num != 0))) {
+ /* This is NOT the next segment *
+ * we are not the correct race winner *
+ * go away and let someone else win *
+ * BUT..this only applies if xid negot *
+ * is done *
+ */
+ grp->out_of_sequence += 1;
+ __skb_push(pskb, TH_HEADER_LENGTH);
+ skb_queue_tail(&ch->io_queue, pskb);
+ CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x "
+ "got:%08x\n", __func__,
+ ch->th_seq_num + 1, header->th_seq_num);
+
+ return;
+ }
+ grp->out_of_sequence = 0;
+ ch->th_seq_num = header->th_seq_num;
+
+ CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n",
+ __func__, ch->th_seq_num);
+
+ if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY))
+ goto done;
+ while ((pskb->len > 0) && !pdu_last_seen) {
+ curr_pdu = (struct pdu *)pskb->data;
+
+ CTCM_PR_DBGDATA("%s: pdu_header\n", __func__);
+ CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH);
+ CTCM_PR_DBGDATA("%s: pskb len: %04x \n",
+ __func__, pskb->len);
+
+ skb_pull(pskb, PDU_HEADER_LENGTH);
+
+ if (curr_pdu->pdu_flag & PDU_LAST)
+ pdu_last_seen = 1;
+ if (curr_pdu->pdu_flag & PDU_CNTL)
+ pskb->protocol = htons(ETH_P_SNAP);
+ else
+ pskb->protocol = htons(ETH_P_SNA_DIX);
+
+ if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): Dropping packet with "
+ "illegal siize %d",
+ CTCM_FUNTAIL, dev->name, pskb->len);
+
+ priv->stats.rx_dropped++;
+ priv->stats.rx_length_errors++;
+ goto done;
+ }
+ skb_reset_mac_header(pskb);
+ new_len = curr_pdu->pdu_offset;
+ CTCM_PR_DBGDATA("%s: new_len: %04x \n",
+ __func__, new_len);
+ if ((new_len == 0) || (new_len > pskb->len)) {
+ /* should never happen */
+ /* pskb len must be hosed...bail out */
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): non valid pdu_offset: %04x",
+ /* "data may be lost", */
+ CTCM_FUNTAIL, dev->name, new_len);
+ goto done;
+ }
+ skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC);
+
+ if (!skb) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): MEMORY allocation error",
+ CTCM_FUNTAIL, dev->name);
+ priv->stats.rx_dropped++;
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ goto done;
+ }
+ memcpy(skb_put(skb, new_len), pskb->data, new_len);
+
+ skb_reset_mac_header(skb);
+ skb->dev = pskb->dev;
+ skb->protocol = pskb->protocol;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq;
+ ch->pdu_seq++;
+
+ if (do_debug_data) {
+ ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n",
+ __func__, ch->pdu_seq);
+ ctcm_pr_debug("%s: skb:%0lx "
+ "skb len: %d \n", __func__,
+ (unsigned long)skb, skb->len);
+ ctcm_pr_debug("%s: up to 32 bytes "
+ "of pdu_data sent\n", __func__);
+ ctcmpc_dump32((char *)skb->data, skb->len);
+ }
+
+ skblen = skb->len;
+ sendrc = netif_rx(skb);
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += skblen;
+ skb_pull(pskb, new_len); /* point to next PDU */
+ }
+ } else {
+ mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type());
+ if (mpcginfo == NULL)
+ goto done;
+
+ mpcginfo->ch = ch;
+ mpcginfo->th = header;
+ mpcginfo->skb = pskb;
+ CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n",
+ __func__);
+ /* is it a sweep? */
+ sweep = (struct th_sweep *)pskb->data;
+ mpcginfo->sweep = sweep;
+ if (header->th_ch_flag == TH_SWEEP_REQ)
+ mpc_rcvd_sweep_req(mpcginfo);
+ else if (header->th_ch_flag == TH_SWEEP_RESP)
+ mpc_rcvd_sweep_resp(mpcginfo);
+ else if (header->th_blk_flag == TH_DATA_IS_XID) {
+ struct xid2 *thisxid = (struct xid2 *)pskb->data;
+ skb_pull(pskb, XID2_LENGTH);
+ mpcginfo->xid = thisxid;
+ fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo);
+ } else if (header->th_blk_flag == TH_DISCONTACT)
+ fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo);
+ else if (header->th_seq_num != 0) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): control pkt expected\n",
+ CTCM_FUNTAIL, dev->name);
+ priv->stats.rx_dropped++;
+ /* mpcginfo only used for non-data transfers */
+ kfree(mpcginfo);
+ if (do_debug_data)
+ ctcmpc_dump_skb(pskb, -8);
+ }
+ }
+done:
+
+ dev_kfree_skb_any(pskb);
+ if (sendrc == NET_RX_DROP) {
+ dev_warn(&dev->dev,
+ "The network backlog for %s is exceeded, "
+ "package dropped\n", __func__);
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ }
+
+ CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
+ __func__, dev->name, ch, ch->id);
+}
+
+/**
+ * Tasklet helper for mpc's skb unpacking.
+ *
+ * ch The channel to work on.
+ * Allow flow-control back pressure to occur here.
+ * Throttling the channel back can result in excessive
+ * channel inactivity and system deactivation of the channel.
+ */
+void ctcmpc_bh(unsigned long thischan)
+{
+ struct channel *ch = (struct channel *)thischan;
+ struct sk_buff *skb;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n",
+ dev->name, smp_processor_id(), __func__, ch->id);
+ /* caller has requested driver to throttle back */
+ while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) &&
+ (skb = skb_dequeue(&ch->io_queue))) {
+ ctcmpc_unpack_skb(ch, skb);
+ if (grp->out_of_sequence > 20) {
+ /* assume data loss has occurred if */
+ /* missing seq_num for extended */
+ /* period of time */
+ grp->out_of_sequence = 0;
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ break;
+ }
+ if (skb == skb_peek(&ch->io_queue))
+ break;
+ }
+ CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n",
+ __func__, dev->name, ch, ch->id);
+ return;
+}
+
+/*
+ * MPC Group Initializations
+ */
+struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
+{
+ struct mpc_group *grp;
+
+ CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO,
+ "Enter %s(%p)", CTCM_FUNTAIL, priv);
+
+ grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL);
+ if (grp == NULL)
+ return NULL;
+
+ grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names,
+ MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm,
+ mpcg_fsm_len, GFP_KERNEL);
+ if (grp->fsm == NULL) {
+ kfree(grp);
+ return NULL;
+ }
+
+ fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+ fsm_settimer(grp->fsm, &grp->timer);
+
+ grp->xid_skb =
+ __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA);
+ if (grp->xid_skb == NULL) {
+ kfree_fsm(grp->fsm);
+ kfree(grp);
+ return NULL;
+ }
+ /* base xid for all channels in group */
+ grp->xid_skb_data = grp->xid_skb->data;
+ grp->xid_th = (struct th_header *)grp->xid_skb->data;
+ memcpy(skb_put(grp->xid_skb, TH_HEADER_LENGTH),
+ &thnorm, TH_HEADER_LENGTH);
+
+ grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb);
+ memcpy(skb_put(grp->xid_skb, XID2_LENGTH), &init_xid, XID2_LENGTH);
+ grp->xid->xid2_adj_id = jiffies | 0xfff00000;
+ grp->xid->xid2_sender_id = jiffies;
+
+ grp->xid_id = skb_tail_pointer(grp->xid_skb);
+ memcpy(skb_put(grp->xid_skb, 4), "VTAM", 4);
+
+ grp->rcvd_xid_skb =
+ __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA);
+ if (grp->rcvd_xid_skb == NULL) {
+ kfree_fsm(grp->fsm);
+ dev_kfree_skb(grp->xid_skb);
+ kfree(grp);
+ return NULL;
+ }
+ grp->rcvd_xid_data = grp->rcvd_xid_skb->data;
+ grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
+ memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH),
+ &thnorm, TH_HEADER_LENGTH);
+ grp->saved_xid2 = NULL;
+ priv->xid = grp->xid;
+ priv->mpcg = grp;
+ return grp;
+}
+
+/*
+ * The MPC Group Station FSM
+ */
+
+/*
+ * MPC Group Station FSM actions
+ * CTCM_PROTO_MPC only
+ */
+
+/**
+ * NOP action for state machines
+ */
+static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Invoked when the device transitions to dev_stopped.
+ * MPC will stop each individual channel if a single XID failure
+ * occurs, or will initiate stopping of all channels if a
+ * GROUP-level failure occurs.
+ */
+static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv;
+ struct mpc_group *grp;
+ struct channel *wch;
+
+ BUG_ON(dev == NULL);
+ CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name);
+
+ priv = dev->ml_priv;
+ grp = priv->mpcg;
+ grp->flow_off_called = 0;
+ fsm_deltimer(&grp->timer);
+ if (grp->channels_terminating)
+ return;
+
+ grp->channels_terminating = 1;
+ grp->saved_state = fsm_getstate(grp->fsm);
+ fsm_newstate(grp->fsm, MPCG_STATE_INOP);
+ if (grp->saved_state > MPCG_STATE_XID7INITF)
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+ "%s(%s): MPC GROUP INOPERATIVE",
+ CTCM_FUNTAIL, dev->name);
+ if ((grp->saved_state != MPCG_STATE_RESET) ||
+ /* dealloc_channel has been called */
+ (grp->port_persist == 0))
+ fsm_deltimer(&priv->restart_timer);
+
+ wch = priv->channel[CTCM_WRITE];
+
+ switch (grp->saved_state) {
+ case MPCG_STATE_RESET:
+ case MPCG_STATE_INOP:
+ case MPCG_STATE_XID2INITW:
+ case MPCG_STATE_XID0IOWAIT:
+ case MPCG_STATE_XID2INITX:
+ case MPCG_STATE_XID7INITW:
+ case MPCG_STATE_XID7INITX:
+ case MPCG_STATE_XID0IOWAIX:
+ case MPCG_STATE_XID7INITI:
+ case MPCG_STATE_XID7INITZ:
+ case MPCG_STATE_XID7INITF:
+ break;
+ case MPCG_STATE_FLOWC:
+ case MPCG_STATE_READY:
+ default:
+ tasklet_hi_schedule(&wch->ch_disc_tasklet);
+ }
+
+ grp->xid2_tgnum = 0;
+ grp->group_max_buflen = 0; /* min of all received */
+ grp->outstanding_xid2 = 0;
+ grp->outstanding_xid7 = 0;
+ grp->outstanding_xid7_p2 = 0;
+ grp->saved_xid2 = NULL;
+ grp->xidnogood = 0;
+ grp->changed_side = 0;
+
+ grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
+ skb_reset_tail_pointer(grp->rcvd_xid_skb);
+ grp->rcvd_xid_skb->len = 0;
+ grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data;
+ memcpy(skb_put(grp->rcvd_xid_skb, TH_HEADER_LENGTH), &thnorm,
+ TH_HEADER_LENGTH);
+
+ if (grp->send_qllc_disc == 1) {
+ grp->send_qllc_disc = 0;
+ mpc_send_qllc_discontact(dev);
+ }
+
+ /* DO NOT issue DEV_EVENT_STOP directly out of this code */
+ /* This can result in INOP of VTAM PU due to halting of */
+ /* outstanding IO which causes a sense to be returned */
+ /* Only about 3 senses are allowed and then IOS/VTAM will*/
+ /* become unreachable without manual intervention */
+ if ((grp->port_persist == 1) || (grp->alloc_called)) {
+ grp->alloc_called = 0;
+ fsm_deltimer(&priv->restart_timer);
+ fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev);
+ fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+ if (grp->saved_state > MPCG_STATE_XID7INITF)
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
+ "%s(%s): MPC GROUP RECOVERY SCHEDULED",
+ CTCM_FUNTAIL, dev->name);
+ } else {
+ fsm_deltimer(&priv->restart_timer);
+ fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev);
+ fsm_newstate(grp->fsm, MPCG_STATE_RESET);
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS,
+ "%s(%s): NO MPC GROUP RECOVERY ATTEMPTED",
+ CTCM_FUNTAIL, dev->name);
+ }
+}
+
+/**
+ * Handle mpc group action timeout.
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ *
+ * fi An instance of an mpc_group fsm.
+ * event The event that just happened.
+ * arg Generic pointer, cast from net_device * upon call.
+ */
+static void mpc_action_timeout(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv;
+ struct mpc_group *grp;
+ struct channel *wch;
+ struct channel *rch;
+
+ BUG_ON(dev == NULL);
+
+ priv = dev->ml_priv;
+ grp = priv->mpcg;
+ wch = priv->channel[CTCM_WRITE];
+ rch = priv->channel[CTCM_READ];
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_XID2INITW:
+ /* Unless there is outstanding IO on the */
+ /* channel just return and wait for ATTN */
+ /* interrupt to begin XID negotiations */
+ if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) &&
+ (fsm_getstate(wch->fsm) == CH_XID0_PENDING))
+ break;
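+ /* fall through: otherwise take the whole group inoperative */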
+ default:
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ }
+
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
+ "%s: dev=%s exit",
+ CTCM_FUNTAIL, dev->name);
+ return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+void mpc_action_discontact(fsm_instance *fi, int event, void *arg)
+{
+ struct mpcg_info *mpcginfo = arg;
+ struct channel *ch = mpcginfo->ch;
+ struct net_device *dev;
+ struct ctcm_priv *priv;
+ struct mpc_group *grp;
+
+ if (ch) {
+ dev = ch->netdev;
+ if (dev) {
+ priv = dev->ml_priv;
+ if (priv) {
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+ "%s: %s: %s\n",
+ CTCM_FUNTAIL, dev->name, ch->id);
+ grp = priv->mpcg;
+ grp->send_qllc_disc = 1;
+ fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
+ }
+ }
+ }
+
+ return;
+}
+
+/*
+ * MPC Group Station - not part of FSM
+ * CTCM_PROTO_MPC only
+ * called from add_channel in ctcm_main.c
+ */
+void mpc_action_send_discontact(unsigned long thischan)
+{
+ int rc;
+ struct channel *ch = (struct channel *)thischan;
+ unsigned long saveflags = 0;
+
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[15],
+ (unsigned long)ch, 0xff, 0);
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+
+ if (rc != 0) {
+ ctcm_ccw_check_rc(ch, rc, (char *)__func__);
+ }
+
+ return;
+}
+
+/*
+ * helper function of mpc FSM
+ * CTCM_PROTO_MPC only
+ * mpc_action_rcvd_xid7
+*/
+static int mpc_validate_xid(struct mpcg_info *mpcginfo)
+{
+ struct channel *ch = mpcginfo->ch;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+ struct xid2 *xid = mpcginfo->xid;
+ int rc = 0;
+ __u64 our_id = 0;
+ __u64 their_id = 0;
+ int len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH;
+
+ CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid);
+
+ if (xid == NULL) {
+ rc = 1;
+ /* XID REJECTED: xid == NULL */
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): xid = NULL",
+ CTCM_FUNTAIL, ch->id);
+ goto done;
+ }
+
+ CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
+
+ /* the received direction should be the opposite of ours */
+ if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? XID2_WRITE_SIDE :
+ XID2_READ_SIDE) != xid->xid2_dlc_type) {
+ rc = 2;
+ /* XID REJECTED: r/w channel pairing mismatch */
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): r/w channel pairing mismatch",
+ CTCM_FUNTAIL, ch->id);
+ goto done;
+ }
+
+ if (xid->xid2_dlc_type == XID2_READ_SIDE) {
+ CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__,
+ grp->group_max_buflen, xid->xid2_buf_len);
+
+ if (grp->group_max_buflen == 0 || grp->group_max_buflen >
+ xid->xid2_buf_len - len)
+ grp->group_max_buflen = xid->xid2_buf_len - len;
+ }
+
+ if (grp->saved_xid2 == NULL) {
+ grp->saved_xid2 =
+ (struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb);
+
+ memcpy(skb_put(grp->rcvd_xid_skb,
+ XID2_LENGTH), xid, XID2_LENGTH);
+ grp->rcvd_xid_skb->data = grp->rcvd_xid_data;
+
+ skb_reset_tail_pointer(grp->rcvd_xid_skb);
+ grp->rcvd_xid_skb->len = 0;
+
+ /* convert two 32 bit numbers into 1 64 bit for id compare */
+ our_id = (__u64)priv->xid->xid2_adj_id;
+ our_id = our_id << 32;
+ our_id = our_id + priv->xid->xid2_sender_id;
+ their_id = (__u64)xid->xid2_adj_id;
+ their_id = their_id << 32;
+ their_id = their_id + xid->xid2_sender_id;
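+ /*
+ * Example with made-up values: adj_id 0xfff12345 and
+ * sender_id 0x0000abcd combine to 0xfff123450000abcd, so the
+ * comparison below orders on adj_id first, sender_id second.
+ */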
+ /* lower id assume the xside role */
+ if (our_id < their_id) {
+ grp->roll = XSIDE;
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+ "%s(%s): WE HAVE LOW ID - TAKE XSIDE",
+ CTCM_FUNTAIL, ch->id);
+ } else {
+ grp->roll = YSIDE;
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE,
+ "%s(%s): WE HAVE HIGH ID - TAKE YSIDE",
+ CTCM_FUNTAIL, ch->id);
+ }
+
+ } else {
+ if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) {
+ rc = 3;
+ /* XID REJECTED: xid flag byte4 mismatch */
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): xid flag byte4 mismatch",
+ CTCM_FUNTAIL, ch->id);
+ }
+ if (xid->xid2_flag2 == 0x40) {
+ rc = 4;
+ /* XID REJECTED - xid NOGOOD */
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): xid NOGOOD",
+ CTCM_FUNTAIL, ch->id);
+ }
+ if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) {
+ rc = 5;
+ /* XID REJECTED - Adjacent Station ID Mismatch */
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): Adjacent Station ID Mismatch",
+ CTCM_FUNTAIL, ch->id);
+ }
+ if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) {
+ rc = 6;
+ /* XID REJECTED - Sender Address Mismatch */
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): Sender Address Mismatch",
+ CTCM_FUNTAIL, ch->id);
+ }
+ }
+done:
+ if (rc) {
+ dev_warn(&dev->dev,
+ "The XID used in the MPC protocol is not valid, "
+ "rc = %d\n", rc);
+ priv->xid->xid2_flag2 = 0x40;
+ grp->saved_xid2->xid2_flag2 = 0x40;
+ }
+
+ return rc;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
+{
+ struct channel *ch = arg;
+ int rc = 0;
+ int gotlock = 0;
+ unsigned long saveflags = 0; /* avoids compiler warning with
+ spin_unlock_irqrestore */
+
+ CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+ __func__, smp_processor_id(), ch, ch->id);
+
+ if (ctcm_checkalloc_buffer(ch))
+ goto done;
+
+ /*
+ * skb data-buffer referencing:
+ */
+ ch->trans_skb->data = ch->trans_skb_data;
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+ /* result of the previous 3 statements is NOT always
+ * already set after ctcm_checkalloc_buffer
+ * because of possible reuse of the trans_skb
+ */
+ memset(ch->trans_skb->data, 0, 16);
+ ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
+ /* check is main purpose here: */
+ skb_put(ch->trans_skb, TH_HEADER_LENGTH);
+ ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb);
+ /* check is main purpose here: */
+ skb_put(ch->trans_skb, XID2_LENGTH);
+ ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb);
+ /* cleanup back to startpoint */
+ ch->trans_skb->data = ch->trans_skb_data;
+ skb_reset_tail_pointer(ch->trans_skb);
+ ch->trans_skb->len = 0;
+
+ /* non-checking rewrite of above skb data-buffer referencing: */
+ /*
+ memset(ch->trans_skb->data, 0, 16);
+ ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data;
+ ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH);
+ ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH;
+ */
+
+ ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[8].count = 0;
+ ch->ccw[8].cda = 0x00;
+
+ if (!(ch->xid_th && ch->xid && ch->xid_id))
+ CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
+ "%s(%s): xid_th=%p, xid=%p, xid_id=%p",
+ CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id);
+
+ if (side == XSIDE) {
+ /* mpc_action_xside_xid */
+ if (ch->xid_th == NULL)
+ goto done;
+ ch->ccw[9].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[9].count = TH_HEADER_LENGTH;
+ ch->ccw[9].cda = virt_to_phys(ch->xid_th);
+
+ if (ch->xid == NULL)
+ goto done;
+ ch->ccw[10].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[10].count = XID2_LENGTH;
+ ch->ccw[10].cda = virt_to_phys(ch->xid);
+
+ ch->ccw[11].cmd_code = CCW_CMD_READ;
+ ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[11].count = TH_HEADER_LENGTH;
+ ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
+
+ ch->ccw[12].cmd_code = CCW_CMD_READ;
+ ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[12].count = XID2_LENGTH;
+ ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
+
+ ch->ccw[13].cmd_code = CCW_CMD_READ;
+ ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
+
+ } else { /* side == YSIDE : mpc_action_yside_xid */
+ ch->ccw[9].cmd_code = CCW_CMD_READ;
+ ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[9].count = TH_HEADER_LENGTH;
+ ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
+
+ ch->ccw[10].cmd_code = CCW_CMD_READ;
+ ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[10].count = XID2_LENGTH;
+ ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
+
+ if (ch->xid_th == NULL)
+ goto done;
+ ch->ccw[11].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[11].count = TH_HEADER_LENGTH;
+ ch->ccw[11].cda = virt_to_phys(ch->xid_th);
+
+ if (ch->xid == NULL)
+ goto done;
+ ch->ccw[12].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[12].count = XID2_LENGTH;
+ ch->ccw[12].cda = virt_to_phys(ch->xid);
+
+ if (ch->xid_id == NULL)
+ goto done;
+ ch->ccw[13].cmd_code = CCW_CMD_WRITE;
+ ch->ccw[13].cda = virt_to_phys(ch->xid_id);
+
+ }
+ ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
+ ch->ccw[13].count = 4;
+
+ ch->ccw[14].cmd_code = CCW_CMD_NOOP;
+ ch->ccw[14].flags = CCW_FLAG_SLI;
+ ch->ccw[14].count = 0;
+ ch->ccw[14].cda = 0;
+
+ CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7);
+ CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH);
+ CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH);
+ CTCM_D3_DUMP((char *)ch->xid_id, 4);
+
+ if (!in_irq()) {
+ /* Such conditional locking is a known problem for
+ * sparse because it is statically nondeterministic.
+ * Warnings should be ignored here. */
+ spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
+ gotlock = 1;
+ }
+
+ fsm_addtimer(&ch->timer, 5000, CTC_EVENT_TIMER, ch);
+ rc = ccw_device_start(ch->cdev, &ch->ccw[8],
+ (unsigned long)ch, 0xff, 0);
+
+ if (gotlock) /* see remark above about conditional locking */
+ spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
+
+ if (rc != 0) {
+ ctcm_ccw_check_rc(ch, rc,
+ (side == XSIDE) ? "x-side XID" : "y-side XID");
+ }
+
+done:
+ CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n",
+ __func__, ch, ch->id);
+ return;
+
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg)
+{
+ mpc_action_side_xid(fsm, arg, XSIDE);
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg)
+{
+ mpc_action_side_xid(fsm, arg, YSIDE);
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg)
+{
+ struct channel *ch = arg;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+ __func__, smp_processor_id(), ch, ch->id);
+
+ if (ch->xid == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): ch->xid == NULL",
+ CTCM_FUNTAIL, dev->name);
+ return;
+ }
+
+ fsm_newstate(ch->fsm, CH_XID0_INPROGRESS);
+
+ ch->xid->xid2_option = XID2_0;
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_XID2INITW:
+ case MPCG_STATE_XID2INITX:
+ ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+ break;
+ case MPCG_STATE_XID0IOWAIT:
+ case MPCG_STATE_XID0IOWAIX:
+ ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+ break;
+ }
+
+ fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
+
+ return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+*/
+static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = NULL;
+ int direction;
+ int send = 0;
+
+ if (priv)
+ grp = priv->mpcg;
+ if (grp == NULL)
+ return;
+
+ for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
+ struct channel *ch = priv->channel[direction];
+ struct xid2 *thisxid = ch->xid;
+ ch->xid_skb->data = ch->xid_skb_data;
+ skb_reset_tail_pointer(ch->xid_skb);
+ ch->xid_skb->len = 0;
+ thisxid->xid2_option = XID2_7;
+ send = 0;
+
+ /* xid7 phase 1 */
+ if (grp->outstanding_xid7_p2 > 0) {
+ if (grp->roll == YSIDE) {
+ if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) {
+ fsm_newstate(ch->fsm, CH_XID7_PENDING2);
+ ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+ memcpy(skb_put(ch->xid_skb,
+ TH_HEADER_LENGTH),
+ &thdummy, TH_HEADER_LENGTH);
+ send = 1;
+ }
+ } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) {
+ fsm_newstate(ch->fsm, CH_XID7_PENDING2);
+ ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+ memcpy(skb_put(ch->xid_skb,
+ TH_HEADER_LENGTH),
+ &thnorm, TH_HEADER_LENGTH);
+ send = 1;
+ }
+ } else {
+ /* xid7 phase 2 */
+ if (grp->roll == YSIDE) {
+ if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) {
+ fsm_newstate(ch->fsm, CH_XID7_PENDING4);
+ memcpy(skb_put(ch->xid_skb,
+ TH_HEADER_LENGTH),
+ &thnorm, TH_HEADER_LENGTH);
+ ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL;
+ send = 1;
+ }
+ } else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) {
+ fsm_newstate(ch->fsm, CH_XID7_PENDING4);
+ ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD;
+ memcpy(skb_put(ch->xid_skb, TH_HEADER_LENGTH),
+ &thdummy, TH_HEADER_LENGTH);
+ send = 1;
+ }
+ }
+
+ if (send)
+ fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch);
+ }
+
+ return;
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg)
+{
+ struct mpcg_info *mpcginfo = arg;
+ struct channel *ch = mpcginfo->ch;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n",
+ __func__, ch->id, grp->outstanding_xid2,
+ grp->outstanding_xid7, grp->outstanding_xid7_p2);
+
+ if (fsm_getstate(ch->fsm) < CH_XID7_PENDING)
+ fsm_newstate(ch->fsm, CH_XID7_PENDING);
+
+ grp->outstanding_xid2--;
+ grp->outstanding_xid7++;
+ grp->outstanding_xid7_p2++;
+
+ /* must change state before validating xid to */
+ /* properly handle interim interrupts received*/
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_XID2INITW:
+ fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX);
+ mpc_validate_xid(mpcginfo);
+ break;
+ case MPCG_STATE_XID0IOWAIT:
+ fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX);
+ mpc_validate_xid(mpcginfo);
+ break;
+ case MPCG_STATE_XID2INITX:
+ if (grp->outstanding_xid2 == 0) {
+ fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW);
+ mpc_validate_xid(mpcginfo);
+ fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
+ }
+ break;
+ case MPCG_STATE_XID0IOWAIX:
+ if (grp->outstanding_xid2 == 0) {
+ fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI);
+ mpc_validate_xid(mpcginfo);
+ fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev);
+ }
+ break;
+ }
+ kfree(mpcginfo);
+
+ CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n",
+ __func__, ch->id, grp->outstanding_xid2,
+ grp->outstanding_xid7, grp->outstanding_xid7_p2);
+ CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n",
+ __func__, ch->id,
+ fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm));
+ return;
+
+}
+
+/*
+ * MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg)
+{
+ struct mpcg_info *mpcginfo = arg;
+ struct channel *ch = mpcginfo->ch;
+ struct net_device *dev = ch->netdev;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n",
+ __func__, smp_processor_id(), ch, ch->id);
+ CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n",
+ __func__, grp->outstanding_xid7, grp->outstanding_xid7_p2);
+
+ grp->outstanding_xid7--;
+ ch->xid_skb->data = ch->xid_skb_data;
+ skb_reset_tail_pointer(ch->xid_skb);
+ ch->xid_skb->len = 0;
+
+ switch (fsm_getstate(grp->fsm)) {
+ case MPCG_STATE_XID7INITI:
+ fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ);
+ mpc_validate_xid(mpcginfo);
+ break;
+ case MPCG_STATE_XID7INITW:
+ fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX);
+ mpc_validate_xid(mpcginfo);
+ break;
+ case MPCG_STATE_XID7INITZ:
+ case MPCG_STATE_XID7INITX:
+ if (grp->outstanding_xid7 == 0) {
+ if (grp->outstanding_xid7_p2 > 0) {
+ grp->outstanding_xid7 =
+ grp->outstanding_xid7_p2;
+ grp->outstanding_xid7_p2 = 0;
+ } else
+ fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF);
+
+ mpc_validate_xid(mpcginfo);
+ fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
+ break;
+ }
+ mpc_validate_xid(mpcginfo);
+ break;
+ }
+ kfree(mpcginfo);
+ return;
+}
+
+/*
+ * mpc_action helper of an MPC Group Station FSM action
+ * CTCM_PROTO_MPC only
+ */
+static int mpc_send_qllc_discontact(struct net_device *dev)
+{
+ __u32 new_len = 0;
+ struct sk_buff *skb;
+ struct qllc *qllcptr;
+ struct ctcm_priv *priv = dev->ml_priv;
+ struct mpc_group *grp = priv->mpcg;
+
+ CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
+ __func__, mpcg_state_names[grp->saved_state]);
+
+ switch (grp->saved_state) {
+ /*
+ * establish conn callback function is
+ * preferred method to report failure
+ */
+ case MPCG_STATE_XID0IOWAIT:
+ case MPCG_STATE_XID0IOWAIX:
+ case MPCG_STATE_XID7INITI:
+ case MPCG_STATE_XID7INITZ:
+ case MPCG_STATE_XID2INITW:
+ case MPCG_STATE_XID2INITX:
+ case MPCG_STATE_XID7INITW:
+ case MPCG_STATE_XID7INITX:
+ if (grp->estconnfunc) {
+ grp->estconnfunc(grp->port_num, -1, 0);
+ grp->estconnfunc = NULL;
+ break;
+ }
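+ /* fall through - no callback registered; send QLLC discontact instead */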
+ case MPCG_STATE_FLOWC:
+ case MPCG_STATE_READY:
+ grp->send_qllc_disc = 2;
+ new_len = sizeof(struct qllc);
+ qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA);
+ if (qllcptr == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): qllcptr allocation error",
+ CTCM_FUNTAIL, dev->name);
+ return -ENOMEM;
+ }
+
+ qllcptr->qllc_address = 0xcc;
+ qllcptr->qllc_commands = 0x03;
+
+ skb = __dev_alloc_skb(new_len, GFP_ATOMIC);
+
+ if (skb == NULL) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): skb allocation error",
+ CTCM_FUNTAIL, dev->name);
+ priv->stats.rx_dropped++;
+ kfree(qllcptr);
+ return -ENOMEM;
+ }
+
+ memcpy(skb_put(skb, new_len), qllcptr, new_len);
+ kfree(qllcptr);
+
+ if (skb_headroom(skb) < 4) {
+ CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
+ "%s(%s): skb_headroom error",
+ CTCM_FUNTAIL, dev->name);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ *((__u32 *)skb_push(skb, 4)) =
+ priv->channel[CTCM_READ]->pdu_seq;
+ priv->channel[CTCM_READ]->pdu_seq++;
+ CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n",
+ __func__, priv->channel[CTCM_READ]->pdu_seq);
+
+ /* receipt of CC03 resets the anticipated sequence number
+ * on the receiving side */
+ priv->channel[CTCM_READ]->pdu_seq = 0x00;
+ skb_reset_mac_header(skb);
+ skb->dev = dev;
+ skb->protocol = htons(ETH_P_SNAP);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4));
+
+ netif_rx(skb);
+ break;
+ default:
+ break;
+
+ }
+
+ return 0;
+}
+/* --- This is the END my friend --- */
+
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
new file mode 100644
index 00000000000..1fa07b0c11c
--- /dev/null
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -0,0 +1,240 @@
+/*
+ * drivers/s390/net/ctcm_mpc.h
+ *
+ * Copyright IBM Corp. 2007
+ * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ * MPC additions:
+ * Belinda Thompson (belindat@us.ibm.com)
+ * Andy Richter (richtera@us.ibm.com)
+ */
+
+#ifndef _CTC_MPC_H_
+#define _CTC_MPC_H_
+
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include "fsm.h"
+
+/*
+ * MPC external interface
+ * Note that ctc_mpc_xyz are called with a lock on ................
+ */
+
+/* port_number is the MPC device number (0, 1, 2, ...); e.g. mpc2 is port_number 2 */
+
+/* passive open: just wait for XID2 exchange */
+extern int ctc_mpc_alloc_channel(int port,
+ void (*callback)(int port_num, int max_write_size));
+/* active open: alloc then send XID2 */
+extern void ctc_mpc_establish_connectivity(int port,
+ void (*callback)(int port_num, int rc, int max_write_size));
+
+extern void ctc_mpc_dealloc_ch(int port);
+extern void ctc_mpc_flow_control(int port, int flowc);
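+
+/*
+ * Usage sketch for the interface above (hypothetical upper-layer
+ * caller; the callback names are invented for illustration):
+ *
+ *	static void my_alloc_done(int port_num, int max_write_size)
+ *	{
+ *		pr_info("mpc%d: channels allocated, max %d\n",
+ *			port_num, max_write_size);
+ *	}
+ *
+ *	static void my_conn_done(int port_num, int rc, int max_write_size)
+ *	{
+ *		if (rc == 0)
+ *			pr_info("mpc%d: group READY, max write %d\n",
+ *				port_num, max_write_size);
+ *	}
+ *
+ *	ctc_mpc_alloc_channel(2, my_alloc_done);	(port 2 == "mpc2")
+ *	ctc_mpc_establish_connectivity(2, my_conn_done);
+ *	...
+ *	ctc_mpc_dealloc_ch(2);
+ */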
+
+/*
+ * other MPC Group prototypes and structures
+ */
+
+#define ETH_P_SNA_DIX 0x80D5
+
+/*
+ * Declaration of an XID2
+ */
+#define ALLZEROS 0x0000000000000000
+
+#define XID_FM2 0x20
+#define XID2_0 0x00
+#define XID2_7 0x07
+#define XID2_WRITE_SIDE 0x04
+#define XID2_READ_SIDE 0x05
+
+struct xid2 {
+ __u8 xid2_type_id;
+ __u8 xid2_len;
+ __u32 xid2_adj_id;
+ __u8 xid2_rlen;
+ __u8 xid2_resv1;
+ __u8 xid2_flag1;
+ __u8 xid2_fmtt;
+ __u8 xid2_flag4;
+ __u16 xid2_resv2;
+ __u8 xid2_tgnum;
+ __u32 xid2_sender_id;
+ __u8 xid2_flag2;
+ __u8 xid2_option;
+ char xid2_resv3[8];
+ __u16 xid2_resv4;
+ __u8 xid2_dlc_type;
+ __u16 xid2_resv5;
+ __u8 xid2_mpc_flag;
+ __u8 xid2_resv6;
+ __u16 xid2_buf_len;
+ char xid2_buffer[255 - (13 * sizeof(__u8) +
+ 2 * sizeof(__u32) +
+ 4 * sizeof(__u16) +
+ 8 * sizeof(char))];
+} __attribute__ ((packed));
+
+#define XID2_LENGTH (sizeof(struct xid2))
+
+struct th_header {
+ __u8 th_seg;
+ __u8 th_ch_flag;
+#define TH_HAS_PDU 0xf0
+#define TH_IS_XID 0x01
+#define TH_SWEEP_REQ 0xfe
+#define TH_SWEEP_RESP 0xff
+ __u8 th_blk_flag;
+#define TH_DATA_IS_XID 0x80
+#define TH_RETRY 0x40
+#define TH_DISCONTACT 0xc0
+#define TH_SEG_BLK 0x20
+#define TH_LAST_SEG 0x10
+#define TH_PDU_PART 0x08
+ __u8 th_is_xid; /* is 0x01 if this is XID */
+ __u32 th_seq_num;
+} __attribute__ ((packed));
+
+struct th_addon {
+ __u32 th_last_seq;
+ __u32 th_resvd;
+} __attribute__ ((packed));
+
+struct th_sweep {
+ struct th_header th;
+ struct th_addon sw;
+} __attribute__ ((packed));
+
+#define TH_HEADER_LENGTH (sizeof(struct th_header))
+#define TH_SWEEP_LENGTH (sizeof(struct th_sweep))
+
+#define PDU_LAST 0x80
+#define PDU_CNTL 0x40
+#define PDU_FIRST 0x20
+
+struct pdu {
+ __u32 pdu_offset;
+ __u8 pdu_flag;
+ __u8 pdu_proto; /* 0x01 is APPN SNA */
+ __u16 pdu_seq;
+} __attribute__ ((packed));
+
+#define PDU_HEADER_LENGTH (sizeof(struct pdu))
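+
+/*
+ * On-the-wire layout as consumed by ctcmpc_unpack_skb() (sketch):
+ *
+ *	| th_header | pdu | data | pdu | data | ... |
+ *
+ * One th_header is followed by one or more PDUs; pdu_offset holds the
+ * length of the data following each PDU header, and the last PDU of a
+ * block carries PDU_LAST in pdu_flag.
+ */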
+
+struct qllc {
+ __u8 qllc_address;
+#define QLLC_REQ 0xFF
+#define QLLC_RESP 0x00
+ __u8 qllc_commands;
+#define QLLC_DISCONNECT 0x53
+#define QLLC_UNSEQACK 0x73
+#define QLLC_SETMODE 0x93
+#define QLLC_EXCHID 0xBF
+} __attribute__ ((packed));
+
+
+/*
+ * Definition of one MPC group
+ */
+
+#define MAX_MPCGCHAN 10
+#define MPC_XID_TIMEOUT_VALUE 10000
+#define MPC_CHANNEL_ADD 0
+#define MPC_CHANNEL_REMOVE 1
+#define MPC_CHANNEL_ATTN 2
+#define XSIDE 1
+#define YSIDE 0
+
+struct mpcg_info {
+ struct sk_buff *skb;
+ struct channel *ch;
+ struct xid2 *xid;
+ struct th_sweep *sweep;
+ struct th_header *th;
+};
+
+struct mpc_group {
+ struct tasklet_struct mpc_tasklet;
+ struct tasklet_struct mpc_tasklet2;
+ int changed_side;
+ int saved_state;
+ int channels_terminating;
+ int out_of_sequence;
+ int flow_off_called;
+ int port_num;
+ int port_persist;
+ int alloc_called;
+ __u32 xid2_adj_id;
+ __u8 xid2_tgnum;
+ __u32 xid2_sender_id;
+ int num_channel_paths;
+ int active_channels[2];
+ __u16 group_max_buflen;
+ int outstanding_xid2;
+ int outstanding_xid7;
+ int outstanding_xid7_p2;
+ int sweep_req_pend_num;
+ int sweep_rsp_pend_num;
+ struct sk_buff *xid_skb;
+ char *xid_skb_data;
+ struct th_header *xid_th;
+ struct xid2 *xid;
+ char *xid_id;
+ struct th_header *rcvd_xid_th;
+ struct sk_buff *rcvd_xid_skb;
+ char *rcvd_xid_data;
+ __u8 in_sweep;
+ __u8 roll;
+ struct xid2 *saved_xid2;
+ void (*allochanfunc)(int, int);
+ int allocchan_callback_retries;
+ void (*estconnfunc)(int, int, int);
+ int estconn_callback_retries;
+ int estconn_called;
+ int xidnogood;
+ int send_qllc_disc;
+ fsm_timer timer;
+ fsm_instance *fsm; /* group xid fsm */
+};
+
+#ifdef DEBUGDATA
+void ctcmpc_dumpit(char *buf, int len);
+#else
+static inline void ctcmpc_dumpit(char *buf, int len)
+{
+}
+#endif
+
+#ifdef DEBUGDATA
+/*
+ * Dump header and first 16 bytes of an sk_buff for debugging purposes.
+ *
+ * skb The struct sk_buff to dump.
+ * offset Offset relative to skb->data, where to start the dump.
+ */
+void ctcmpc_dump_skb(struct sk_buff *skb, int offset);
+#else
+static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset)
+{}
+#endif
+
+static inline void ctcmpc_dump32(char *buf, int len)
+{
+ if (len < 32)
+ ctcmpc_dumpit(buf, len);
+ else
+ ctcmpc_dumpit(buf, 32);
+}
+
+int ctcmpc_open(struct net_device *);
+void ctcm_ccw_check_rc(struct channel *, int, char *);
+void mpc_group_ready(unsigned long adev);
+void mpc_channel_action(struct channel *ch, int direction, int action);
+void mpc_action_send_discontact(unsigned long thischan);
+void mpc_action_discontact(fsm_instance *fi, int event, void *arg);
+void ctcmpc_bh(unsigned long thischan);
+#endif
+/* --- This is the END my friend --- */
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
new file mode 100644
index 00000000000..650aec1839e
--- /dev/null
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -0,0 +1,223 @@
+/*
+ * drivers/s390/net/ctcm_sysfs.c
+ *
+ * Copyright IBM Corp. 2007, 2007
+ * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#undef DEBUG
+#undef DEBUGDATA
+#undef DEBUGCCW
+
+#define KMSG_COMPONENT "ctcm"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/sysfs.h>
+#include <linux/slab.h>
+#include "ctcm_main.h"
+
+/*
+ * sysfs attributes
+ */
+
+static ssize_t ctcm_buffer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", priv->buffer_size);
+}
+
+static ssize_t ctcm_buffer_write(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev;
+ unsigned int bs1;
+ struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+ /* check priv before using it to look up ndev */
+ if (!(priv && priv->channel[CTCM_READ] &&
+ priv->channel[CTCM_READ]->netdev)) {
+ CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev");
+ return -ENODEV;
+ }
+ ndev = priv->channel[CTCM_READ]->netdev;
+
+ if (sscanf(buf, "%u", &bs1) != 1)
+ goto einval;
+ if (bs1 > CTCM_BUFSIZE_LIMIT)
+ goto einval;
+ if (bs1 < (576 + LL_HEADER_LENGTH + 2))
+ goto einval;
+ priv->buffer_size = bs1; /* just to overwrite the default */
+
+ if ((ndev->flags & IFF_RUNNING) &&
+ (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2)))
+ goto einval;
+
+ priv->channel[CTCM_READ]->max_bufsize = bs1;
+ priv->channel[CTCM_WRITE]->max_bufsize = bs1;
+ if (!(ndev->flags & IFF_RUNNING))
+ ndev->mtu = bs1 - LL_HEADER_LENGTH - 2;
+ priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+ priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED;
+
+ CTCM_DBF_DEV(SETUP, ndev, buf);
+ return count;
+
+einval:
+ CTCM_DBF_DEV(SETUP, ndev, "buff_err");
+ return -EINVAL;
+}
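+
+/*
+ * Example (illustrative; the exact sysfs path depends on the ccwgroup
+ * bus id of the device):
+ *
+ *	echo 32768 > /sys/devices/.../buffer
+ *
+ * Accepted values range from 576 + LL_HEADER_LENGTH + 2 up to
+ * CTCM_BUFSIZE_LIMIT; while the interface is down, the MTU is adjusted
+ * to bs1 - LL_HEADER_LENGTH - 2 as a side effect.
+ */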
+
+static void ctcm_print_statistics(struct ctcm_priv *priv)
+{
+ char *sbuf;
+ char *p;
+
+ if (!priv)
+ return;
+ sbuf = kmalloc(2048, GFP_KERNEL);
+ if (sbuf == NULL)
+ return;
+ p = sbuf;
+
+ p += sprintf(p, " Device FSM state: %s\n",
+ fsm_getstate_str(priv->fsm));
+ p += sprintf(p, " RX channel FSM state: %s\n",
+ fsm_getstate_str(priv->channel[CTCM_READ]->fsm));
+ p += sprintf(p, " TX channel FSM state: %s\n",
+ fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm));
+ p += sprintf(p, " Max. TX buffer used: %ld\n",
+ priv->channel[WRITE]->prof.maxmulti);
+ p += sprintf(p, " Max. chained SKBs: %ld\n",
+ priv->channel[WRITE]->prof.maxcqueue);
+ p += sprintf(p, " TX single write ops: %ld\n",
+ priv->channel[WRITE]->prof.doios_single);
+ p += sprintf(p, " TX multi write ops: %ld\n",
+ priv->channel[WRITE]->prof.doios_multi);
+ p += sprintf(p, " Netto bytes written: %ld\n",
+ priv->channel[WRITE]->prof.txlen);
+ p += sprintf(p, " Max. TX IO-time: %ld\n",
+ priv->channel[WRITE]->prof.tx_time);
+
+ printk(KERN_INFO "Statistics for %s:\n%s",
+ priv->channel[CTCM_WRITE]->netdev->name, sbuf);
+ kfree(sbuf);
+ return;
+}
+
+static ssize_t stats_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ ctcm_print_statistics(priv);
+ return sprintf(buf, "0\n");
+}
+
+static ssize_t stats_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+ /* Reset statistics */
+ memset(&priv->channel[CTCM_WRITE]->prof, 0,
+ sizeof(priv->channel[CTCM_WRITE]->prof));
+ return count;
+}
+
+static ssize_t ctcm_proto_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ctcm_priv *priv = dev_get_drvdata(dev);
+ if (!priv)
+ return -ENODEV;
+
+ return sprintf(buf, "%d\n", priv->protocol);
+}
+
+static ssize_t ctcm_proto_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int value;
+ struct ctcm_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+ sscanf(buf, "%u", &value);
+ if (!((value == CTCM_PROTO_S390) ||
+ (value == CTCM_PROTO_LINUX) ||
+ (value == CTCM_PROTO_MPC) ||
+ (value == CTCM_PROTO_OS390)))
+ return -EINVAL;
+ priv->protocol = value;
+ CTCM_DBF_DEV(SETUP, dev, buf);
+
+ return count;
+}
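+
+/*
+ * Example (illustrative): the attribute accepts the numeric value of
+ * one of the CTCM_PROTO_* constants from ctcm_main.h, e.g. for MPC:
+ *
+ *	echo <value of CTCM_PROTO_MPC> > /sys/devices/.../protocol
+ */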
+
+static const char *ctcm_type[] = {
+ "not a channel",
+ "CTC/A",
+ "FICON channel",
+ "ESCON channel",
+ "unknown channel type",
+ "unsupported channel type",
+};
+
+static ssize_t ctcm_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ccwgroup_device *cgdev;
+
+ cgdev = to_ccwgroupdev(dev);
+ if (!cgdev)
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n",
+ ctcm_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
+static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
+static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
+static DEVICE_ATTR(stats, 0644, stats_show, stats_write);
+
+static struct attribute *ctcm_attr[] = {
+ &dev_attr_protocol.attr,
+ &dev_attr_type.attr,
+ &dev_attr_buffer.attr,
+ NULL,
+};
+
+static struct attribute_group ctcm_attr_group = {
+ .attrs = ctcm_attr,
+};
+
+int ctcm_add_attributes(struct device *dev)
+{
+ int rc;
+
+ rc = device_create_file(dev, &dev_attr_stats);
+
+ return rc;
+}
+
+void ctcm_remove_attributes(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_stats);
+}
+
+int ctcm_add_files(struct device *dev)
+{
+ return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
+}
+
+void ctcm_remove_files(struct device *dev)
+{
+ sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
+}
+
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
new file mode 100644
index 00000000000..e5dea67f902
--- /dev/null
+++ b/drivers/s390/net/fsm.c
@@ -0,0 +1,214 @@
+/**
+ * A generic FSM based on the FSM used in isdn4linux
+ */
+
+#include "fsm.h"
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Finite state machine helper functions");
+MODULE_LICENSE("GPL");
+
+fsm_instance *
+init_fsm(char *name, const char **state_names, const char **event_names,
+ int nr_states, int nr_events, const fsm_node *tmpl, int tmpl_len,
+ gfp_t order)
+{
+ int i;
+ fsm_instance *this;
+ fsm_function_t *m;
+ fsm *f;
+
+ this = kzalloc(sizeof(fsm_instance), order);
+ if (this == NULL) {
+ printk(KERN_WARNING
+ "fsm(%s): init_fsm: Couldn't alloc instance\n", name);
+ return NULL;
+ }
+ strlcpy(this->name, name, sizeof(this->name));
+ init_waitqueue_head(&this->wait_q);
+
+ f = kzalloc(sizeof(fsm), order);
+ if (f == NULL) {
+ printk(KERN_WARNING
+ "fsm(%s): init_fsm: Couldn't alloc fsm\n", name);
+ kfree_fsm(this);
+ return NULL;
+ }
+ f->nr_events = nr_events;
+ f->nr_states = nr_states;
+ f->event_names = event_names;
+ f->state_names = state_names;
+ this->f = f;
+
+ m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order);
+ if (m == NULL) {
+ printk(KERN_WARNING
+ "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name);
+ kfree_fsm(this);
+ return NULL;
+ }
+ f->jumpmatrix = m;
+
+ for (i = 0; i < tmpl_len; i++) {
+ if ((tmpl[i].cond_state >= nr_states) ||
+ (tmpl[i].cond_event >= nr_events)) {
+ printk(KERN_ERR
+ "fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n",
+ name, i, (long)tmpl[i].cond_state, (long)f->nr_states,
+ (long)tmpl[i].cond_event, (long)f->nr_events);
+ kfree_fsm(this);
+ return NULL;
+ } else
+ m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] =
+ tmpl[i].function;
+ }
+ return this;
+}
+
+void
+kfree_fsm(fsm_instance *this)
+{
+ if (this) {
+ if (this->f) {
+ kfree(this->f->jumpmatrix);
+ kfree(this->f);
+ }
+ kfree(this);
+ } else
+ printk(KERN_WARNING
+ "fsm: kfree_fsm called with NULL argument\n");
+}
+
+#if FSM_DEBUG_HISTORY
+void
+fsm_print_history(fsm_instance *fi)
+{
+ int idx = 0;
+ int i;
+
+ if (fi->history_size >= FSM_HISTORY_SIZE)
+ idx = fi->history_index;
+
+ printk(KERN_DEBUG "fsm(%s): History:\n", fi->name);
+ for (i = 0; i < fi->history_size; i++) {
+ int e = fi->history[idx].event;
+ int s = fi->history[idx++].state;
+ idx %= FSM_HISTORY_SIZE;
+ if (e == -1)
+ printk(KERN_DEBUG " S=%s\n",
+ fi->f->state_names[s]);
+ else
+ printk(KERN_DEBUG " S=%s E=%s\n",
+ fi->f->state_names[s],
+ fi->f->event_names[e]);
+ }
+ fi->history_size = fi->history_index = 0;
+}
+
+void
+fsm_record_history(fsm_instance *fi, int state, int event)
+{
+ fi->history[fi->history_index].state = state;
+ fi->history[fi->history_index++].event = event;
+ fi->history_index %= FSM_HISTORY_SIZE;
+ if (fi->history_size < FSM_HISTORY_SIZE)
+ fi->history_size++;
+}
+#endif
+
+const char *
+fsm_getstate_str(fsm_instance *fi)
+{
+ int st = atomic_read(&fi->state);
+ if (st >= fi->f->nr_states)
+ return "Invalid";
+ return fi->f->state_names[st];
+}
+
+static void
+fsm_expire_timer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+ printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
+ this->fi->name, this);
+#endif
+ fsm_event(this->fi, this->expire_event, this->event_arg);
+}
+
+void
+fsm_settimer(fsm_instance *fi, fsm_timer *this)
+{
+ this->fi = fi;
+ this->tl.function = (void *)fsm_expire_timer;
+ this->tl.data = (long)this;
+#if FSM_TIMER_DEBUG
+ printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
+ this);
+#endif
+ init_timer(&this->tl);
+}
+
+void
+fsm_deltimer(fsm_timer *this)
+{
+#if FSM_TIMER_DEBUG
+ printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name,
+ this);
+#endif
+ del_timer(&this->tl);
+}
+
+int
+fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+ printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n",
+ this->fi->name, this, millisec);
+#endif
+
+ init_timer(&this->tl);
+ this->tl.function = (void *)fsm_expire_timer;
+ this->tl.data = (long)this;
+ this->expire_event = event;
+ this->event_arg = arg;
+ this->tl.expires = jiffies + (millisec * HZ) / 1000;
+ add_timer(&this->tl);
+ return 0;
+}
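+
+/*
+ * Note: the expiry above is (millisec * HZ) / 1000 jiffies, rounded
+ * down; e.g. the 5000 ms XID setup timer armed by mpc_action_side_xid()
+ * always maps to exactly 5 * HZ jiffies.
+ */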
+
+/* FIXME: this function is never used - why? */
+void
+fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg)
+{
+
+#if FSM_TIMER_DEBUG
+ printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n",
+ this->fi->name, this, millisec);
+#endif
+
+ del_timer(&this->tl);
+ init_timer(&this->tl);
+ this->tl.function = (void *)fsm_expire_timer;
+ this->tl.data = (long)this;
+ this->expire_event = event;
+ this->event_arg = arg;
+ this->tl.expires = jiffies + (millisec * HZ) / 1000;
+ add_timer(&this->tl);
+}
+
+EXPORT_SYMBOL(init_fsm);
+EXPORT_SYMBOL(kfree_fsm);
+EXPORT_SYMBOL(fsm_settimer);
+EXPORT_SYMBOL(fsm_deltimer);
+EXPORT_SYMBOL(fsm_addtimer);
+EXPORT_SYMBOL(fsm_modtimer);
+EXPORT_SYMBOL(fsm_getstate_str);
+
+#if FSM_DEBUG_HISTORY
+EXPORT_SYMBOL(fsm_print_history);
+EXPORT_SYMBOL(fsm_record_history);
+#endif
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
new file mode 100644
index 00000000000..a4510cf5903
--- /dev/null
+++ b/drivers/s390/net/fsm.h
@@ -0,0 +1,265 @@
+#ifndef _FSM_H_
+#define _FSM_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+
+/**
+ * Define this to get debugging messages.
+ */
+#define FSM_DEBUG 0
+
+/**
+ * Define this to get debugging messages for
+ * timer handling.
+ */
+#define FSM_TIMER_DEBUG 0
+
+/**
+ * Define these to record a history of
+ * events/state changes and print it if an
+ * action function is not found.
+ */
+#define FSM_DEBUG_HISTORY 0
+#define FSM_HISTORY_SIZE 40
+
+struct fsm_instance_t;
+
+/**
+ * Definition of an action function, called by an FSM
+ */
+typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *);
+
+/**
+ * Internal jump table for an FSM
+ */
+typedef struct {
+ fsm_function_t *jumpmatrix;
+ int nr_events;
+ int nr_states;
+ const char **event_names;
+ const char **state_names;
+} fsm;
+
+#if FSM_DEBUG_HISTORY
+/**
+ * Element of State/Event history used for debugging.
+ */
+typedef struct {
+ int state;
+ int event;
+} fsm_history;
+#endif
+
+/**
+ * Representation of an FSM
+ */
+typedef struct fsm_instance_t {
+ fsm *f;
+ atomic_t state;
+ char name[16];
+ void *userdata;
+ int userint;
+ wait_queue_head_t wait_q;
+#if FSM_DEBUG_HISTORY
+ int history_index;
+ int history_size;
+ fsm_history history[FSM_HISTORY_SIZE];
+#endif
+} fsm_instance;
+
+/**
+ * Description of a state-event combination
+ */
+typedef struct {
+ int cond_state;
+ int cond_event;
+ fsm_function_t function;
+} fsm_node;
+
+/**
+ * Description of an FSM timer.
+ */
+typedef struct {
+ fsm_instance *fi;
+ struct timer_list tl;
+ int expire_event;
+ void *event_arg;
+} fsm_timer;
+
+/**
+ * Creates an FSM
+ *
+ * @param name Name of this instance for logging purposes.
+ * @param state_names An array of names for all states for logging purposes.
+ * @param event_names An array of names for all events for logging purposes.
+ * @param nr_states Number of states for this instance.
+ * @param nr_events Number of events for this instance.
+ * @param tmpl An array of fsm_nodes, describing this FSM.
+ * @param tmpl_len Length of the describing array.
+ * @param order Parameter for allocation of the FSM data structs.
+ */
+extern fsm_instance *
+init_fsm(char *name, const char **state_names,
+ const char **event_names,
+ int nr_states, int nr_events, const fsm_node *tmpl,
+ int tmpl_len, gfp_t order);
+
+/**
+ * Releases an FSM
+ *
+ * @param fi Pointer to an FSM, previously created with init_fsm.
+ */
+extern void kfree_fsm(fsm_instance *fi);
+
+#if FSM_DEBUG_HISTORY
+extern void
+fsm_print_history(fsm_instance *fi);
+
+extern void
+fsm_record_history(fsm_instance *fi, int state, int event);
+#endif
+
+/**
+ * Emits an event to an FSM.
+ * If an action function is defined for the current state/event combination,
+ * this function is called.
+ *
+ * @param fi Pointer to FSM which should receive the event.
+ * @param event The event to be delivered.
+ * @param arg A generic argument, handed to the action function.
+ *
+ * @return 0 on success,
+ *         1 if current state or event is out of range,
+ *         !0 if state and event are in range, but no action is defined.
+ */
+static inline int
+fsm_event(fsm_instance *fi, int event, void *arg)
+{
+ fsm_function_t r;
+ int state = atomic_read(&fi->state);
+
+ if ((state >= fi->f->nr_states) ||
+ (event >= fi->f->nr_events) ) {
+ printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n",
+ fi->name, (long)state, (long)fi->f->nr_states, event,
+ (long)fi->f->nr_events);
+#if FSM_DEBUG_HISTORY
+ fsm_print_history(fi);
+#endif
+ return 1;
+ }
+ r = fi->f->jumpmatrix[fi->f->nr_states * event + state];
+ if (r) {
+#if FSM_DEBUG
+ printk(KERN_DEBUG "fsm(%s): state %s event %s\n",
+ fi->name, fi->f->state_names[state],
+ fi->f->event_names[event]);
+#endif
+#if FSM_DEBUG_HISTORY
+ fsm_record_history(fi, state, event);
+#endif
+ r(fi, event, arg);
+ return 0;
+ } else {
+#if FSM_DEBUG || FSM_DEBUG_HISTORY
+ printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n",
+ fi->name, fi->f->event_names[event],
+ fi->f->state_names[state]);
+#endif
+#if FSM_DEBUG_HISTORY
+ fsm_print_history(fi);
+#endif
+ return !0;
+ }
+}
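+
+/*
+ * Usage sketch (illustrative only, not part of this header): a driver
+ * builds its jump table from fsm_node entries and feeds events into
+ * it. The state/event numbers and the action_* handlers below are
+ * hypothetical.
+ *
+ *   static const char *st_names[] = { "Idle", "Running" };
+ *   static const char *ev_names[] = { "Start", "Stop" };
+ *   static const fsm_node nodes[] = {
+ *       { 0, 0, action_start },   -- in Idle on Start
+ *       { 1, 1, action_stop },    -- in Running on Stop
+ *   };
+ *   fi = init_fsm("demo", st_names, ev_names, 2, 2, nodes,
+ *                 ARRAY_SIZE(nodes), GFP_KERNEL);
+ *   fsm_event(fi, 0, NULL);       -- dispatches action_start(fi, 0, NULL)
+ */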
+
+/**
+ * Modifies the state of an FSM.
+ * This does <em>not</em> trigger an event, nor does it call an action function.
+ *
+ * @param fi Pointer to FSM
+ * @param newstate The new state for this FSM.
+ */
+static inline void
+fsm_newstate(fsm_instance *fi, int newstate)
+{
+ atomic_set(&fi->state,newstate);
+#if FSM_DEBUG_HISTORY
+ fsm_record_history(fi, newstate, -1);
+#endif
+#if FSM_DEBUG
+ printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
+ fi->f->state_names[newstate]);
+#endif
+ wake_up(&fi->wait_q);
+}
+
+/**
+ * Retrieves the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM.
+ */
+static inline int
+fsm_getstate(fsm_instance *fi)
+{
+ return atomic_read(&fi->state);
+}
+
+/**
+ * Retrieves the name of the state of an FSM
+ *
+ * @param fi Pointer to FSM
+ *
+ * @return The current state of the FSM in a human-readable form.
+ */
+extern const char *fsm_getstate_str(fsm_instance *fi);
+
+/**
+ * Initializes a timer for an FSM.
+ * This prepares an fsm_timer for usage with fsm_addtimer.
+ *
+ * @param fi Pointer to FSM
+ * @param timer The timer to be initialized.
+ */
+extern void fsm_settimer(fsm_instance *fi, fsm_timer *);
+
+/**
+ * Clears a pending timer of an FSM instance.
+ *
+ * @param timer The timer to clear.
+ */
+extern void fsm_deltimer(fsm_timer *timer);
+
+/**
+ * Adds and starts a timer to an FSM instance.
+ *
+ * @param timer The timer to be added. The field fi of that timer
+ * must have been set to point to the instance.
+ * @param millisec Duration, after which the timer should expire.
+ * @param event Event, to trigger if timer expires.
+ * @param arg Generic argument, provided to expiry function.
+ *
+ * @return 0 on success, -1 if timer is already active.
+ */
+extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg);
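+
+/*
+ * Lifecycle sketch (illustrative; 'ch' and EV_TIMEOUT are hypothetical
+ * names): bind the timer to its instance once with fsm_settimer, then
+ * arm it; on expiry fsm_expire_timer delivers the stored event back to
+ * the instance.
+ *
+ *   fsm_settimer(fi, &ch->timer);
+ *   fsm_addtimer(&ch->timer, 5000, EV_TIMEOUT, ch);   -- 5 seconds
+ *   ...
+ *   fsm_deltimer(&ch->timer);   -- cancel while still pending
+ */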
+
+/**
+ * Modifies a timer of an FSM.
+ *
+ * @param timer The timer to modify.
+ * @param millisec Duration, after which the timer should expire.
+ * @param event Event, to trigger if timer expires.
+ * @param arg Generic argument, provided to expiry function.
+ */
+extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg);
+
+#endif /* _FSM_H_ */
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
new file mode 100644
index 00000000000..863fc219715
--- /dev/null
+++ b/drivers/s390/net/lcs.c
@@ -0,0 +1,2510 @@
+/*
+ * Linux for S/390 Lan Channel Station Network Driver
+ *
+ * Copyright IBM Corp. 1999, 2009
+ * Author(s): Original Code written by
+ * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com>
+ * Rewritten by
+ * Frank Pavlic <fpavlic@de.ibm.com> and
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define KMSG_COMPONENT "lcs"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/trdevice.h>
+#include <linux/fddidevice.h>
+#include <linux/inetdevice.h>
+#include <linux/in.h>
+#include <linux/igmp.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/arp.h>
+#include <net/ip.h>
+
+#include <asm/debug.h>
+#include <asm/idals.h>
+#include <asm/timex.h>
+#include <linux/device.h>
+#include <asm/ccwgroup.h>
+
+#include "lcs.h"
+
+#if !defined(CONFIG_ETHERNET) && \
+ !defined(CONFIG_TR) && !defined(CONFIG_FDDI)
+#error Cannot compile lcs.c without some net devices switched on.
+#endif
+
+/**
+ * initialization string for output
+ */
+
+static char version[] __initdata = "LCS driver";
+
+/**
+ * the root device for lcs group devices
+ */
+static struct device *lcs_root_dev;
+
+/**
+ * Some prototypes.
+ */
+static void lcs_tasklet(unsigned long);
+static void lcs_start_kernel_thread(struct work_struct *);
+static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *);
+#ifdef CONFIG_IP_MULTICAST
+static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
+#endif /* CONFIG_IP_MULTICAST */
+static int lcs_recovery(void *ptr);
+
+/**
+ * Debug Facility Stuff
+ */
+static char debug_buffer[255];
+static debug_info_t *lcs_dbf_setup;
+static debug_info_t *lcs_dbf_trace;
+
+/**
+ * LCS Debug Facility functions
+ */
+static void
+lcs_unregister_debug_facility(void)
+{
+ if (lcs_dbf_setup)
+ debug_unregister(lcs_dbf_setup);
+ if (lcs_dbf_trace)
+ debug_unregister(lcs_dbf_trace);
+}
+
+static int
+lcs_register_debug_facility(void)
+{
+ lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8);
+ lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8);
+ if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) {
+ pr_err("Not enough memory for debug facility.\n");
+ lcs_unregister_debug_facility();
+ return -ENOMEM;
+ }
+ debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(lcs_dbf_setup, 2);
+ debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(lcs_dbf_trace, 2);
+ return 0;
+}
+
+/**
+ * Allocate io buffers.
+ */
+static int
+lcs_alloc_channel(struct lcs_channel *channel)
+{
+ int cnt;
+
+ LCS_DBF_TEXT(2, setup, "ichalloc");
+ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+ /* Allocate memory for the io buffer. */
+ channel->iob[cnt].data =
+ kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL);
+ if (channel->iob[cnt].data == NULL)
+ break;
+ channel->iob[cnt].state = LCS_BUF_STATE_EMPTY;
+ }
+ if (cnt < LCS_NUM_BUFFS) {
+ /* Not all io buffers could be allocated. */
+ LCS_DBF_TEXT(2, setup, "echalloc");
+ while (cnt-- > 0)
+ kfree(channel->iob[cnt].data);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * Free io buffers.
+ */
+static void
+lcs_free_channel(struct lcs_channel *channel)
+{
+ int cnt;
+
+ LCS_DBF_TEXT(2, setup, "ichfree");
+ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+ kfree(channel->iob[cnt].data);
+ channel->iob[cnt].data = NULL;
+ }
+}
+
+/*
+ * Cleanup channel.
+ */
+static void
+lcs_cleanup_channel(struct lcs_channel *channel)
+{
+ LCS_DBF_TEXT(3, setup, "cleanch");
+ /* Kill write channel tasklets. */
+ tasklet_kill(&channel->irq_tasklet);
+ /* Free channel buffers. */
+ lcs_free_channel(channel);
+}
+
+/**
+ * LCS free memory for card and channels.
+ */
+static void
+lcs_free_card(struct lcs_card *card)
+{
+ LCS_DBF_TEXT(2, setup, "remcard");
+ LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+ kfree(card);
+}
+
+/**
+ * LCS alloc memory for card and channels
+ */
+static struct lcs_card *
+lcs_alloc_card(void)
+{
+ struct lcs_card *card;
+ int rc;
+
+ LCS_DBF_TEXT(2, setup, "alloclcs");
+
+ card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA);
+ if (card == NULL)
+ return NULL;
+ card->lan_type = LCS_FRAME_TYPE_AUTO;
+ card->pkt_seq = 0;
+ card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT;
+ /* Allocate io buffers for the read channel. */
+ rc = lcs_alloc_channel(&card->read);
+ if (rc) {
+ LCS_DBF_TEXT(2, setup, "iccwerr");
+ lcs_free_card(card);
+ return NULL;
+ }
+ /* Allocate io buffers for the write channel. */
+ rc = lcs_alloc_channel(&card->write);
+ if (rc) {
+ LCS_DBF_TEXT(2, setup, "iccwerr");
+ lcs_cleanup_channel(&card->read);
+ lcs_free_card(card);
+ return NULL;
+ }
+
+#ifdef CONFIG_IP_MULTICAST
+ INIT_LIST_HEAD(&card->ipm_list);
+#endif
+ LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+ return card;
+}
+
+/*
+ * Setup read channel.
+ */
+static void
+lcs_setup_read_ccws(struct lcs_card *card)
+{
+ int cnt;
+
+ LCS_DBF_TEXT(2, setup, "ireadccw");
+ /* Setup read ccws. */
+ memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1));
+ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+ card->read.ccws[cnt].cmd_code = LCS_CCW_READ;
+ card->read.ccws[cnt].count = LCS_IOBUFFERSIZE;
+ card->read.ccws[cnt].flags =
+ CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI;
+ /*
+ * Note: we have allocated the buffer with GFP_DMA, so
+ * we do not need to do set_normalized_cda.
+ */
+ card->read.ccws[cnt].cda =
+ (__u32) __pa(card->read.iob[cnt].data);
+ ((struct lcs_header *)
+ card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
+ card->read.iob[cnt].callback = lcs_get_frames_cb;
+ card->read.iob[cnt].state = LCS_BUF_STATE_READY;
+ card->read.iob[cnt].count = LCS_IOBUFFERSIZE;
+ }
+ card->read.ccws[0].flags &= ~CCW_FLAG_PCI;
+ card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI;
+ card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
+ /* Last ccw is a tic (transfer in channel). */
+ card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+ card->read.ccws[LCS_NUM_BUFFS].cda =
+ (__u32) __pa(card->read.ccws);
+ /* Set initial state of the read channel. */
+ card->read.state = LCS_CH_STATE_INIT;
+
+ card->read.io_idx = 0;
+ card->read.buf_idx = 0;
+}
+
+static void
+lcs_setup_read(struct lcs_card *card)
+{
+ LCS_DBF_TEXT(3, setup, "initread");
+
+ lcs_setup_read_ccws(card);
+ /* Initialize read channel tasklet. */
+ card->read.irq_tasklet.data = (unsigned long) &card->read;
+ card->read.irq_tasklet.func = lcs_tasklet;
+ /* Initialize waitqueue. */
+ init_waitqueue_head(&card->read.wait_q);
+}
+
+/*
+ * Setup write channel.
+ */
+static void
+lcs_setup_write_ccws(struct lcs_card *card)
+{
+ int cnt;
+
+ LCS_DBF_TEXT(3, setup, "iwritccw");
+ /* Setup write ccws. */
+ memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1));
+ for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) {
+ card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE;
+ card->write.ccws[cnt].count = 0;
+ card->write.ccws[cnt].flags =
+ CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI;
+ /*
+ * Note: we have allocated the buffer with GFP_DMA, so
+ * we do not need to do set_normalized_cda.
+ */
+ card->write.ccws[cnt].cda =
+ (__u32) __pa(card->write.iob[cnt].data);
+ }
+ /* Last ccw is a tic (transfer in channel). */
+ card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
+ card->write.ccws[LCS_NUM_BUFFS].cda =
+ (__u32) __pa(card->write.ccws);
+ /* Set initial state of the write channel. */
+ card->write.state = LCS_CH_STATE_INIT;
+
+ card->write.io_idx = 0;
+ card->write.buf_idx = 0;
+}
+
+static void
+lcs_setup_write(struct lcs_card *card)
+{
+ LCS_DBF_TEXT(3, setup, "initwrit");
+
+ lcs_setup_write_ccws(card);
+ /* Initialize write channel tasklet. */
+ card->write.irq_tasklet.data = (unsigned long) &card->write;
+ card->write.irq_tasklet.func = lcs_tasklet;
+ /* Initialize waitqueue. */
+ init_waitqueue_head(&card->write.wait_q);
+}
+
+static void
+lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->mask_lock, flags);
+ card->thread_allowed_mask = threads;
+ spin_unlock_irqrestore(&card->mask_lock, flags);
+ wake_up(&card->wait_q);
+}
+
+static inline int
+lcs_threads_running(struct lcs_card *card, unsigned long threads)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&card->mask_lock, flags);
+ rc = (card->thread_running_mask & threads);
+ spin_unlock_irqrestore(&card->mask_lock, flags);
+ return rc;
+}
+
+static int
+lcs_wait_for_threads(struct lcs_card *card, unsigned long threads)
+{
+ return wait_event_interruptible(card->wait_q,
+ lcs_threads_running(card, threads) == 0);
+}
+
+static inline int
+lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->mask_lock, flags);
+ if (!(card->thread_allowed_mask & thread) ||
+     (card->thread_start_mask & thread)) {
+ spin_unlock_irqrestore(&card->mask_lock, flags);
+ return -EPERM;
+ }
+ card->thread_start_mask |= thread;
+ spin_unlock_irqrestore(&card->mask_lock, flags);
+ return 0;
+}
+
+static void
+lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->mask_lock, flags);
+ card->thread_running_mask &= ~thread;
+ spin_unlock_irqrestore(&card->mask_lock, flags);
+ wake_up(&card->wait_q);
+}
+
+static inline int
+__lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&card->mask_lock, flags);
+ if (card->thread_start_mask & thread){
+ if ((card->thread_allowed_mask & thread) &&
+ !(card->thread_running_mask & thread)){
+ rc = 1;
+ card->thread_start_mask &= ~thread;
+ card->thread_running_mask |= thread;
+ } else
+ rc = -EPERM;
+ }
+ spin_unlock_irqrestore(&card->mask_lock, flags);
+ return rc;
+}
+
+static int
+lcs_do_run_thread(struct lcs_card *card, unsigned long thread)
+{
+ int rc = 0;
+ wait_event(card->wait_q,
+ (rc = __lcs_do_run_thread(card, thread)) >= 0);
+ return rc;
+}
+
+static int
+lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&card->mask_lock, flags);
+ LCS_DBF_TEXT_(4, trace, " %02x%02x%02x",
+ (u8) card->thread_start_mask,
+ (u8) card->thread_allowed_mask,
+ (u8) card->thread_running_mask);
+ rc = (card->thread_start_mask & thread);
+ spin_unlock_irqrestore(&card->mask_lock, flags);
+ return rc;
+}
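+
+/*
+ * Note on the thread bookkeeping above: thread_allowed_mask gates
+ * which helper threads may run at all, thread_start_mask records a
+ * pending start request (lcs_set_thread_start_bit), and
+ * thread_running_mask marks a thread that has claimed its request in
+ * __lcs_do_run_thread. All three masks are protected by
+ * card->mask_lock.
+ */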
+
+/**
+ * Initialize channels, card and state machines.
+ */
+static void
+lcs_setup_card(struct lcs_card *card)
+{
+ LCS_DBF_TEXT(2, setup, "initcard");
+ LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+
+ lcs_setup_read(card);
+ lcs_setup_write(card);
+ /* Set cards initial state. */
+ card->state = DEV_STATE_DOWN;
+ card->tx_buffer = NULL;
+ card->tx_emitted = 0;
+
+ init_waitqueue_head(&card->wait_q);
+ spin_lock_init(&card->lock);
+ spin_lock_init(&card->ipm_lock);
+ spin_lock_init(&card->mask_lock);
+#ifdef CONFIG_IP_MULTICAST
+ INIT_LIST_HEAD(&card->ipm_list);
+#endif
+ INIT_LIST_HEAD(&card->lancmd_waiters);
+}
+
+
+static inline void
+lcs_clear_multicast_list(struct lcs_card *card)
+{
+#ifdef CONFIG_IP_MULTICAST
+ struct lcs_ipm_list *ipm;
+ unsigned long flags;
+
+ /* Free multicast list. */
+ LCS_DBF_TEXT(3, setup, "clmclist");
+ spin_lock_irqsave(&card->ipm_lock, flags);
+ while (!list_empty(&card->ipm_list)){
+ ipm = list_entry(card->ipm_list.next,
+ struct lcs_ipm_list, list);
+ list_del(&ipm->list);
+ if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+ lcs_send_delipm(card, ipm);
+ spin_lock_irqsave(&card->ipm_lock, flags);
+ }
+ kfree(ipm);
+ }
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+#endif
+}
+
+/**
+ * Cleanup channels, card and state machines.
+ */
+static void
+lcs_cleanup_card(struct lcs_card *card)
+{
+
+ LCS_DBF_TEXT(3, setup, "cleancrd");
+ LCS_DBF_HEX(2, setup, &card, sizeof(void*));
+
+ if (card->dev != NULL)
+ free_netdev(card->dev);
+ /* Cleanup channels. */
+ lcs_cleanup_channel(&card->write);
+ lcs_cleanup_channel(&card->read);
+}
+
+/**
+ * Start channel.
+ */
+static int
+lcs_start_channel(struct lcs_channel *channel)
+{
+ unsigned long flags;
+ int rc;
+
+ LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev));
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_start(channel->ccwdev,
+ channel->ccws + channel->io_idx, 0, 0,
+ DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND);
+ if (rc == 0)
+ channel->state = LCS_CH_STATE_RUNNING;
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ if (rc) {
+ LCS_DBF_TEXT_(4,trace,"essh%s",
+ dev_name(&channel->ccwdev->dev));
+ dev_err(&channel->ccwdev->dev,
+ "Starting an LCS device resulted in an error,"
+ " rc=%d!\n", rc);
+ }
+ return rc;
+}
+
+static int
+lcs_clear_channel(struct lcs_channel *channel)
+{
+ unsigned long flags;
+ int rc;
+
+ LCS_DBF_TEXT(4,trace,"clearch");
+ LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_clear(channel->ccwdev, (addr_t) channel);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ if (rc) {
+ LCS_DBF_TEXT_(4, trace, "ecsc%s",
+ dev_name(&channel->ccwdev->dev));
+ return rc;
+ }
+ wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED));
+ channel->state = LCS_CH_STATE_STOPPED;
+ return rc;
+}
+
+/**
+ * Stop channel.
+ */
+static int
+lcs_stop_channel(struct lcs_channel *channel)
+{
+ unsigned long flags;
+ int rc;
+
+ if (channel->state == LCS_CH_STATE_STOPPED)
+ return 0;
+ LCS_DBF_TEXT(4,trace,"haltsch");
+ LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev));
+ channel->state = LCS_CH_STATE_INIT;
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ rc = ccw_device_halt(channel->ccwdev, (addr_t) channel);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ if (rc) {
+ LCS_DBF_TEXT_(4, trace, "ehsc%s",
+ dev_name(&channel->ccwdev->dev));
+ return rc;
+ }
+ /* Asynchronous halt initiated. Wait for its completion. */
+ wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED));
+ lcs_clear_channel(channel);
+ return 0;
+}
+
+/**
+ * start read and write channel
+ */
+static int
+lcs_start_channels(struct lcs_card *card)
+{
+ int rc;
+
+ LCS_DBF_TEXT(2, trace, "chstart");
+ /* start read channel */
+ rc = lcs_start_channel(&card->read);
+ if (rc)
+ return rc;
+ /* start write channel */
+ rc = lcs_start_channel(&card->write);
+ if (rc)
+ lcs_stop_channel(&card->read);
+ return rc;
+}
+
+/**
+ * stop read and write channel
+ */
+static int
+lcs_stop_channels(struct lcs_card *card)
+{
+ LCS_DBF_TEXT(2, trace, "chhalt");
+ lcs_stop_channel(&card->read);
+ lcs_stop_channel(&card->write);
+ return 0;
+}
+
+/**
+ * Get empty buffer.
+ */
+static struct lcs_buffer *
+__lcs_get_buffer(struct lcs_channel *channel)
+{
+ int index;
+
+ LCS_DBF_TEXT(5, trace, "_getbuff");
+ index = channel->io_idx;
+ do {
+ if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) {
+ channel->iob[index].state = LCS_BUF_STATE_LOCKED;
+ return channel->iob + index;
+ }
+ index = (index + 1) & (LCS_NUM_BUFFS - 1);
+ } while (index != channel->io_idx);
+ return NULL;
+}
+
+static struct lcs_buffer *
+lcs_get_buffer(struct lcs_channel *channel)
+{
+ struct lcs_buffer *buffer;
+ unsigned long flags;
+
+ LCS_DBF_TEXT(5, trace, "getbuff");
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ buffer = __lcs_get_buffer(channel);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ return buffer;
+}
+
+/**
+ * Resume channel program if the channel is suspended.
+ */
+static int
+__lcs_resume_channel(struct lcs_channel *channel)
+{
+ int rc;
+
+ if (channel->state != LCS_CH_STATE_SUSPENDED)
+ return 0;
+ if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND)
+ return 0;
+ LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev));
+ rc = ccw_device_resume(channel->ccwdev);
+ if (rc) {
+ LCS_DBF_TEXT_(4, trace, "ersc%s",
+ dev_name(&channel->ccwdev->dev));
+ dev_err(&channel->ccwdev->dev,
+ "Sending data from the LCS device to the LAN failed"
+ " with rc=%d\n",rc);
+ } else
+ channel->state = LCS_CH_STATE_RUNNING;
+ return rc;
+}
+
+/**
+ * Make a buffer ready for processing.
+ */
+static inline void
+__lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
+{
+ int prev, next;
+
+ LCS_DBF_TEXT(5, trace, "rdybits");
+ prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+ next = (index + 1) & (LCS_NUM_BUFFS - 1);
+ /* Check if we may clear the suspend bit of this buffer. */
+ if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) {
+ /* Check if we have to set the PCI bit. */
+ if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND))
+ /* Suspend bit of the previous buffer is not set. */
+ channel->ccws[index].flags |= CCW_FLAG_PCI;
+ /* Suspend bit of the next buffer is set. */
+ channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND;
+ }
+}
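+
+/*
+ * Informal note: the buffers form a ring (the index arithmetic above
+ * relies on LCS_NUM_BUFFS being a power of two). A set suspend flag
+ * marks where the channel program will stop; the PCI flag requests an
+ * intermediate interrupt so the driver can run behind the channel.
+ * Clearing this buffer's suspend bit is only safe while the next
+ * buffer still carries one, otherwise the trailing tic ccw would let
+ * the channel cycle over buffers that have not been refilled.
+ */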
+
+static int
+lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+ unsigned long flags;
+ int index, rc;
+
+ LCS_DBF_TEXT(5, trace, "rdybuff");
+ BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+ buffer->state != LCS_BUF_STATE_PROCESSED);
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ buffer->state = LCS_BUF_STATE_READY;
+ index = buffer - channel->iob;
+ /* Set length. */
+ channel->ccws[index].count = buffer->count;
+ /* Check relevant PCI/suspend bits. */
+ __lcs_ready_buffer_bits(channel, index);
+ rc = __lcs_resume_channel(channel);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+ return rc;
+}
+
+/**
+ * Mark the buffer as processed. Take care of the suspend bit
+ * of the previous buffer. This function is called from
+ * interrupt context, so the lock must not be taken.
+ */
+static int
+__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+ int index, prev, next;
+
+ LCS_DBF_TEXT(5, trace, "prcsbuff");
+ BUG_ON(buffer->state != LCS_BUF_STATE_READY);
+ buffer->state = LCS_BUF_STATE_PROCESSED;
+ index = buffer - channel->iob;
+ prev = (index - 1) & (LCS_NUM_BUFFS - 1);
+ next = (index + 1) & (LCS_NUM_BUFFS - 1);
+ /* Set the suspend bit and clear the PCI bit of this buffer. */
+ channel->ccws[index].flags |= CCW_FLAG_SUSPEND;
+ channel->ccws[index].flags &= ~CCW_FLAG_PCI;
+ /* Check the suspend bit of the previous buffer. */
+ if (channel->iob[prev].state == LCS_BUF_STATE_READY) {
+ /*
+ * Previous buffer is in state ready. It might have
+ * happened in lcs_ready_buffer that the suspend bit
+ * has not been cleared to avoid an endless loop.
+ * Do it now.
+ */
+ __lcs_ready_buffer_bits(channel, prev);
+ }
+ /* Clear PCI bit of next buffer. */
+ channel->ccws[next].flags &= ~CCW_FLAG_PCI;
+ return __lcs_resume_channel(channel);
+}
+
+/**
+ * Put a processed buffer back to state empty.
+ */
+static void
+lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+ unsigned long flags;
+
+ LCS_DBF_TEXT(5, trace, "relbuff");
+ BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED &&
+ buffer->state != LCS_BUF_STATE_PROCESSED);
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ buffer->state = LCS_BUF_STATE_EMPTY;
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+}
+
+/**
+ * Get buffer for a lan command.
+ */
+static struct lcs_buffer *
+lcs_get_lancmd(struct lcs_card *card, int count)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(4, trace, "getlncmd");
+ /* Get buffer and wait if none is available. */
+ wait_event(card->write.wait_q,
+ ((buffer = lcs_get_buffer(&card->write)) != NULL));
+ count += sizeof(struct lcs_header);
+ *(__u16 *)(buffer->data + count) = 0;
+ buffer->count = count + sizeof(__u16);
+ buffer->callback = lcs_release_buffer;
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->offset = count;
+ cmd->type = LCS_FRAME_TYPE_CONTROL;
+ cmd->slot = 0;
+ return buffer;
+}
+
+static void
+lcs_get_reply(struct lcs_reply *reply)
+{
+ WARN_ON(atomic_read(&reply->refcnt) <= 0);
+ atomic_inc(&reply->refcnt);
+}
+
+static void
+lcs_put_reply(struct lcs_reply *reply)
+{
+ WARN_ON(atomic_read(&reply->refcnt) <= 0);
+ if (atomic_dec_and_test(&reply->refcnt))
+ kfree(reply);
+}
+
+static struct lcs_reply *
+lcs_alloc_reply(struct lcs_cmd *cmd)
+{
+ struct lcs_reply *reply;
+
+ LCS_DBF_TEXT(4, trace, "getreply");
+
+ reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC);
+ if (!reply)
+ return NULL;
+ atomic_set(&reply->refcnt,1);
+ reply->sequence_no = cmd->sequence_no;
+ reply->received = 0;
+ reply->rc = 0;
+ init_waitqueue_head(&reply->wait_q);
+
+ return reply;
+}
+
+/**
+ * Notifier function for lancmd replies. Called from read irq.
+ */
+static void
+lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+ struct list_head *l, *n;
+ struct lcs_reply *reply;
+
+ LCS_DBF_TEXT(4, trace, "notiwait");
+ spin_lock(&card->lock);
+ list_for_each_safe(l, n, &card->lancmd_waiters) {
+ reply = list_entry(l, struct lcs_reply, list);
+ if (reply->sequence_no == cmd->sequence_no) {
+ lcs_get_reply(reply);
+ list_del_init(&reply->list);
+ if (reply->callback != NULL)
+ reply->callback(card, cmd);
+ reply->received = 1;
+ reply->rc = cmd->return_code;
+ wake_up(&reply->wait_q);
+ lcs_put_reply(reply);
+ break;
+ }
+ }
+ spin_unlock(&card->lock);
+}
+
+/**
+ * Timeout handling for lancmd requests. Removes the reply from the
+ * list of waiters and wakes up the sender with rc = -ETIME.
+ */
+static void
+lcs_lancmd_timeout(unsigned long data)
+{
+ struct lcs_reply *reply, *list_reply, *r;
+ unsigned long flags;
+
+ LCS_DBF_TEXT(4, trace, "timeout");
+ reply = (struct lcs_reply *) data;
+ spin_lock_irqsave(&reply->card->lock, flags);
+ list_for_each_entry_safe(list_reply, r,
+ &reply->card->lancmd_waiters, list) {
+ if (reply == list_reply) {
+ lcs_get_reply(reply);
+ list_del_init(&reply->list);
+ spin_unlock_irqrestore(&reply->card->lock, flags);
+ reply->received = 1;
+ reply->rc = -ETIME;
+ wake_up(&reply->wait_q);
+ lcs_put_reply(reply);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&reply->card->lock, flags);
+}
+
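+/**
+ * Emit the buffer of a lan command and wait for the reply.
+ * The reply is queued on card->lancmd_waiters and matched by sequence
+ * number from the read-IRQ path (lcs_notify_lancmd_waiters); a stack
+ * timer aborts the wait with -ETIME after card->lancmd_timeout
+ * seconds.
+ */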
+static int
+lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
+ void (*reply_callback)(struct lcs_card *, struct lcs_cmd *))
+{
+ struct lcs_reply *reply;
+ struct lcs_cmd *cmd;
+ struct timer_list timer;
+ unsigned long flags;
+ int rc;
+
+ LCS_DBF_TEXT(4, trace, "sendcmd");
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->return_code = 0;
+ cmd->sequence_no = card->sequence_no++;
+ reply = lcs_alloc_reply(cmd);
+ if (!reply)
+ return -ENOMEM;
+ reply->callback = reply_callback;
+ reply->card = card;
+ spin_lock_irqsave(&card->lock, flags);
+ list_add_tail(&reply->list, &card->lancmd_waiters);
+ spin_unlock_irqrestore(&card->lock, flags);
+
+ buffer->callback = lcs_release_buffer;
+ rc = lcs_ready_buffer(&card->write, buffer);
+ if (rc)
+ return rc;
+ init_timer_on_stack(&timer);
+ timer.function = lcs_lancmd_timeout;
+ timer.data = (unsigned long) reply;
+ timer.expires = jiffies + HZ*card->lancmd_timeout;
+ add_timer(&timer);
+ wait_event(reply->wait_q, reply->received);
+ del_timer_sync(&timer);
+ destroy_timer_on_stack(&timer);
+ LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc);
+ rc = reply->rc;
+ lcs_put_reply(reply);
+ return rc ? -EIO : 0;
+}
+
+/**
+ * LCS startup command
+ */
+static int
+lcs_send_startup(struct lcs_card *card, __u8 initiator)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(2, trace, "startup");
+ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->cmd_code = LCS_CMD_STARTUP;
+ cmd->initiator = initiator;
+ cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE;
+ return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * LCS shutdown command
+ */
+static int
+lcs_send_shutdown(struct lcs_card *card)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(2, trace, "shutdown");
+ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->cmd_code = LCS_CMD_SHUTDOWN;
+ cmd->initiator = LCS_INITIATOR_TCPIP;
+ return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * LCS lanstat command
+ */
+static void
+__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+ LCS_DBF_TEXT(2, trace, "statcb");
+ memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH);
+}
+
+static int
+lcs_send_lanstat(struct lcs_card *card)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(2,trace, "cmdstat");
+ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ /* Setup lanstat command. */
+ cmd->cmd_code = LCS_CMD_LANSTAT;
+ cmd->initiator = LCS_INITIATOR_TCPIP;
+ cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+ cmd->cmd.lcs_std_cmd.portno = card->portno;
+ return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
+}
+
+/**
+ * send stoplan command
+ */
+static int
+lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(2, trace, "cmdstpln");
+ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->cmd_code = LCS_CMD_STOPLAN;
+ cmd->initiator = initiator;
+ cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+ cmd->cmd.lcs_std_cmd.portno = card->portno;
+ return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * send startlan command
+ */
+static void
+__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+ LCS_DBF_TEXT(2, trace, "srtlancb");
+ card->lan_type = cmd->cmd.lcs_std_cmd.lan_type;
+ card->portno = cmd->cmd.lcs_std_cmd.portno;
+}
+
+static int
+lcs_send_startlan(struct lcs_card *card, __u8 initiator)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(2, trace, "cmdstaln");
+ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->cmd_code = LCS_CMD_STARTLAN;
+ cmd->initiator = initiator;
+ cmd->cmd.lcs_std_cmd.lan_type = card->lan_type;
+ cmd->cmd.lcs_std_cmd.portno = card->portno;
+ return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb);
+}
+
+#ifdef CONFIG_IP_MULTICAST
+/**
+ * send setipm command (Multicast)
+ */
+static int
+lcs_send_setipm(struct lcs_card *card, struct lcs_ipm_list *ipm_list)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(2, trace, "cmdsetim");
+ buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->cmd_code = LCS_CMD_SETIPM;
+ cmd->initiator = LCS_INITIATOR_TCPIP;
+ cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+ cmd->cmd.lcs_qipassist.portno = card->portno;
+ cmd->cmd.lcs_qipassist.version = 4;
+ cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+ memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+ &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
+ LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
+ return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * send delipm command (Multicast)
+ */
+static int
+lcs_send_delipm(struct lcs_card *card, struct lcs_ipm_list *ipm_list)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+
+ LCS_DBF_TEXT(2, trace, "cmddelim");
+ buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->cmd_code = LCS_CMD_DELIPM;
+ cmd->initiator = LCS_INITIATOR_TCPIP;
+ cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+ cmd->cmd.lcs_qipassist.portno = card->portno;
+ cmd->cmd.lcs_qipassist.version = 4;
+ cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+ memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair,
+ &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair));
+ LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr);
+ return lcs_send_lancmd(card, buffer, NULL);
+}
+
+/**
+ * check if multicast is supported by LCS
+ */
+static void
+__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+ LCS_DBF_TEXT(2, trace, "chkmccb");
+ card->ip_assists_supported =
+ cmd->cmd.lcs_qipassist.ip_assists_supported;
+ card->ip_assists_enabled =
+ cmd->cmd.lcs_qipassist.ip_assists_enabled;
+}
+
+static int
+lcs_check_multicast_support(struct lcs_card *card)
+{
+ struct lcs_buffer *buffer;
+ struct lcs_cmd *cmd;
+ int rc;
+
+ LCS_DBF_TEXT(2, trace, "cmdqipa");
+ /* Send query ipassist. */
+ buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE);
+ cmd = (struct lcs_cmd *) buffer->data;
+ cmd->cmd_code = LCS_CMD_QIPASSIST;
+ cmd->initiator = LCS_INITIATOR_TCPIP;
+ cmd->cmd.lcs_qipassist.lan_type = card->lan_type;
+ cmd->cmd.lcs_qipassist.portno = card->portno;
+ cmd->cmd.lcs_qipassist.version = 4;
+ cmd->cmd.lcs_qipassist.num_ip_pairs = 1;
+ rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb);
+ if (rc != 0) {
+ pr_err("Query IPAssist failed. Assuming unsupported!\n");
+ return -EOPNOTSUPP;
+ }
+ if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT)
+ return 0;
+ return -EOPNOTSUPP;
+}
+
+/**
+ * set or del multicast address on LCS card
+ */
+static void
+lcs_fix_multicast_list(struct lcs_card *card)
+{
+ struct list_head failed_list;
+ struct lcs_ipm_list *ipm, *tmp;
+ unsigned long flags;
+ int rc;
+
+ LCS_DBF_TEXT(4,trace, "fixipm");
+ INIT_LIST_HEAD(&failed_list);
+ spin_lock_irqsave(&card->ipm_lock, flags);
+list_modified:
+ list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){
+ switch (ipm->ipm_state) {
+ case LCS_IPM_STATE_SET_REQUIRED:
+ /* del from ipm_list so no one else can tamper with
+ * this entry */
+ list_del_init(&ipm->list);
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+ rc = lcs_send_setipm(card, ipm);
+ spin_lock_irqsave(&card->ipm_lock, flags);
+ if (rc) {
+ pr_info("Adding multicast address failed."
+ " Table possibly full!\n");
+ /* store ipm in failed list -> will be added
+ * to ipm_list again, so a retry will be done
+ * during the next call of this function */
+ list_add_tail(&ipm->list, &failed_list);
+ } else {
+ ipm->ipm_state = LCS_IPM_STATE_ON_CARD;
+ /* re-insert into ipm_list */
+ list_add_tail(&ipm->list, &card->ipm_list);
+ }
+ goto list_modified;
+ case LCS_IPM_STATE_DEL_REQUIRED:
+ list_del(&ipm->list);
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+ lcs_send_delipm(card, ipm);
+ spin_lock_irqsave(&card->ipm_lock, flags);
+ kfree(ipm);
+ goto list_modified;
+ case LCS_IPM_STATE_ON_CARD:
+ break;
+ }
+ }
+ /* re-insert all entries from the failed_list into ipm_list */
+ list_for_each_entry_safe(ipm, tmp, &failed_list, list)
+ list_move_tail(&ipm->list, &card->ipm_list);
+
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+}
+
+/**
+ * get mac address for the relevant Multicast address
+ */
+static void
+lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
+{
+ LCS_DBF_TEXT(4,trace, "getmac");
+ if (dev->type == ARPHRD_IEEE802_TR)
+ ip_tr_mc_map(ipm, mac);
+ else
+ ip_eth_mc_map(ipm, mac);
+}
+
+/**
+ * Flag multicast entries for deletion if their address is no longer
+ * present in the net device's current multicast list.
+ */
+static inline void
+lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+{
+ struct ip_mc_list *im4;
+ struct list_head *l;
+ struct lcs_ipm_list *ipm;
+ unsigned long flags;
+ char buf[MAX_ADDR_LEN];
+
+ LCS_DBF_TEXT(4, trace, "remmclst");
+ spin_lock_irqsave(&card->ipm_lock, flags);
+ list_for_each(l, &card->ipm_list) {
+ ipm = list_entry(l, struct lcs_ipm_list, list);
+ for (im4 = rcu_dereference(in4_dev->mc_list);
+ im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
+ lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+ if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
+ (memcmp(buf, &ipm->ipm.mac_addr,
+ LCS_MAC_LENGTH) == 0) )
+ break;
+ }
+ if (im4 == NULL)
+ ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED;
+ }
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+}
+
+static inline struct lcs_ipm_list *
+lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf)
+{
+ struct lcs_ipm_list *tmp, *ipm = NULL;
+ struct list_head *l;
+ unsigned long flags;
+
+ LCS_DBF_TEXT(4, trace, "chkmcent");
+ spin_lock_irqsave(&card->ipm_lock, flags);
+ list_for_each(l, &card->ipm_list) {
+ tmp = list_entry(l, struct lcs_ipm_list, list);
+ if ( (tmp->ipm.ip_addr == im4->multiaddr) &&
+ (memcmp(buf, &tmp->ipm.mac_addr,
+ LCS_MAC_LENGTH) == 0) ) {
+ ipm = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+ return ipm;
+}
+
+static inline void
+lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev)
+{
+
+ struct ip_mc_list *im4;
+ struct lcs_ipm_list *ipm;
+ char buf[MAX_ADDR_LEN];
+ unsigned long flags;
+
+ LCS_DBF_TEXT(4, trace, "setmclst");
+ for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+ im4 = rcu_dereference(im4->next_rcu)) {
+ lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
+ ipm = lcs_check_addr_entry(card, im4, buf);
+ if (ipm != NULL)
+ continue; /* Address already in list. */
+ ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC);
+ if (ipm == NULL) {
+ pr_info("Not enough memory to add"
+ " new multicast entry!\n");
+ break;
+ }
+ memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH);
+ ipm->ipm.ip_addr = im4->multiaddr;
+ ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED;
+ spin_lock_irqsave(&card->ipm_lock, flags);
+ LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4);
+ list_add(&ipm->list, &card->ipm_list);
+ spin_unlock_irqrestore(&card->ipm_lock, flags);
+ }
+}
+
+static int
+lcs_register_mc_addresses(void *data)
+{
+ struct lcs_card *card;
+ struct in_device *in4_dev;
+
+ card = (struct lcs_card *) data;
+
+ if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD))
+ return 0;
+ LCS_DBF_TEXT(4, trace, "regmulti");
+
+ in4_dev = in_dev_get(card->dev);
+ if (in4_dev == NULL)
+ goto out;
+ rcu_read_lock();
+ lcs_remove_mc_addresses(card, in4_dev);
+ lcs_set_mc_addresses(card, in4_dev);
+ rcu_read_unlock();
+ in_dev_put(in4_dev);
+
+ netif_carrier_off(card->dev);
+ netif_tx_disable(card->dev);
+ wait_event(card->write.wait_q,
+ (card->write.state != LCS_CH_STATE_RUNNING));
+ lcs_fix_multicast_list(card);
+ if (card->state == DEV_STATE_UP) {
+ netif_carrier_on(card->dev);
+ netif_wake_queue(card->dev);
+ }
+out:
+ lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD);
+ return 0;
+}
+#endif /* CONFIG_IP_MULTICAST */
+
+/**
+ * Called by the network stack whenever the multicast address list
+ * of the net device changes.
+ */
+static void
+lcs_set_multicast_list(struct net_device *dev)
+{
+#ifdef CONFIG_IP_MULTICAST
+ struct lcs_card *card;
+
+ LCS_DBF_TEXT(4, trace, "setmulti");
+ card = (struct lcs_card *) dev->ml_priv;
+
+ if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD))
+ schedule_work(&card->kernel_thread_starter);
+#endif /* CONFIG_IP_MULTICAST */
+}
+
+static long
+lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb)
+{
+ if (!IS_ERR(irb))
+ return 0;
+
+ switch (PTR_ERR(irb)) {
+ case -EIO:
+ dev_warn(&cdev->dev,
+ "An I/O-error occurred on the LCS device\n");
+ LCS_DBF_TEXT(2, trace, "ckirberr");
+ LCS_DBF_TEXT_(2, trace, " rc%d", -EIO);
+ break;
+ case -ETIMEDOUT:
+ dev_warn(&cdev->dev,
+ "A command timed out on the LCS device\n");
+ LCS_DBF_TEXT(2, trace, "ckirberr");
+ LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT);
+ break;
+ default:
+ dev_warn(&cdev->dev,
+ "An error occurred on the LCS device, rc=%ld\n",
+ PTR_ERR(irb));
+ LCS_DBF_TEXT(2, trace, "ckirberr");
+ LCS_DBF_TEXT(2, trace, " rc???");
+ }
+ return PTR_ERR(irb);
+}
+
+static int
+lcs_get_problem(struct ccw_device *cdev, struct irb *irb)
+{
+ int dstat, cstat;
+ char *sense;
+
+ sense = (char *) irb->ecw;
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
+ if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
+ SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
+ LCS_DBF_TEXT(2, trace, "CGENCHK");
+ return 1;
+ }
+ if (dstat & DEV_STAT_UNIT_CHECK) {
+ if (sense[LCS_SENSE_BYTE_1] &
+ LCS_SENSE_RESETTING_EVENT) {
+ LCS_DBF_TEXT(2, trace, "REVIND");
+ return 1;
+ }
+ if (sense[LCS_SENSE_BYTE_0] &
+ LCS_SENSE_CMD_REJECT) {
+ LCS_DBF_TEXT(2, trace, "CMDREJ");
+ return 0;
+ }
+ if ((!sense[LCS_SENSE_BYTE_0]) &&
+ (!sense[LCS_SENSE_BYTE_1]) &&
+ (!sense[LCS_SENSE_BYTE_2]) &&
+ (!sense[LCS_SENSE_BYTE_3])) {
+ LCS_DBF_TEXT(2, trace, "ZEROSEN");
+ return 0;
+ }
+ LCS_DBF_TEXT(2, trace, "DGENCHK");
+ return 1;
+ }
+ return 0;
+}
+
+static void
+lcs_schedule_recovery(struct lcs_card *card)
+{
+ LCS_DBF_TEXT(2, trace, "startrec");
+ if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD))
+ schedule_work(&card->kernel_thread_starter);
+}
+
+/**
+ * IRQ Handler for LCS channels
+ */
+static void
+lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+{
+ struct lcs_card *card;
+ struct lcs_channel *channel;
+ int rc, index;
+ int cstat, dstat;
+
+ if (lcs_check_irb_error(cdev, irb))
+ return;
+
+ card = CARD_FROM_DEV(cdev);
+ if (card->read.ccwdev == cdev)
+ channel = &card->read;
+ else
+ channel = &card->write;
+
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+ LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev));
+ LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat,
+ irb->scsw.cmd.dstat);
+ LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl,
+ irb->scsw.cmd.actl);
+
+ /* Check for channel and device errors presented */
+ rc = lcs_get_problem(cdev, irb);
+ if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) {
+ dev_warn(&cdev->dev,
+ "The LCS device stopped because of an error,"
+ " dstat=0x%X, cstat=0x%X \n",
+ dstat, cstat);
+ if (rc)
+ channel->state = LCS_CH_STATE_ERROR;
+ }
+ if (channel->state == LCS_CH_STATE_ERROR) {
+ lcs_schedule_recovery(card);
+ wake_up(&card->wait_q);
+ return;
+ }
+ /* How far in the ccw chain have we processed? */
+ if ((channel->state != LCS_CH_STATE_INIT) &&
+ (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
+ (irb->scsw.cmd.cpa != 0)) {
+ index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
+ - channel->ccws;
+ if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
+ (irb->scsw.cmd.cstat & SCHN_STAT_PCI))
+ /* Bloody io subsystem tells us lies about cpa... */
+ index = (index - 1) & (LCS_NUM_BUFFS - 1);
+ while (channel->io_idx != index) {
+ __lcs_processed_buffer(channel,
+ channel->iob + channel->io_idx);
+ channel->io_idx =
+ (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1);
+ }
+ }
+
+ if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) ||
+ (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) ||
+ (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK))
+ /* Mark channel as stopped. */
+ channel->state = LCS_CH_STATE_STOPPED;
+ else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED)
+ /* CCW execution stopped on a suspend bit. */
+ channel->state = LCS_CH_STATE_SUSPENDED;
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
+ if (irb->scsw.cmd.cc != 0) {
+ ccw_device_halt(channel->ccwdev, (addr_t) channel);
+ return;
+ }
+ /* The channel has been stopped by halt_IO. */
+ channel->state = LCS_CH_STATE_HALTED;
+ }
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+ channel->state = LCS_CH_STATE_CLEARED;
+ /* Do the rest in the tasklet. */
+ tasklet_schedule(&channel->irq_tasklet);
+}
+
+/**
+ * Tasklet for IRQ handler
+ */
+static void
+lcs_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ struct lcs_channel *channel;
+ struct lcs_buffer *iob;
+ int buf_idx;
+
+ channel = (struct lcs_channel *) data;
+ LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev));
+
+ /* Check for processed buffers. */
+ iob = channel->iob;
+ buf_idx = channel->buf_idx;
+ while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) {
+ /* Do the callback thing. */
+ if (iob[buf_idx].callback != NULL)
+ iob[buf_idx].callback(channel, iob + buf_idx);
+ buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1);
+ }
+ channel->buf_idx = buf_idx;
+
+ if (channel->state == LCS_CH_STATE_STOPPED)
+ lcs_start_channel(channel);
+ spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
+ if (channel->state == LCS_CH_STATE_SUSPENDED &&
+ channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY)
+ __lcs_resume_channel(channel);
+ spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
+
+ /* Something happened on the channel. Wake up waiters. */
+ wake_up(&channel->wait_q);
+}
+
+/**
+ * Finish current tx buffer and make it ready for transmit.
+ */
+static void
+__lcs_emit_txbuffer(struct lcs_card *card)
+{
+ LCS_DBF_TEXT(5, trace, "emittx");
+ *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0;
+ card->tx_buffer->count += 2;
+ lcs_ready_buffer(&card->write, card->tx_buffer);
+ card->tx_buffer = NULL;
+ card->tx_emitted++;
+}
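+
+/*
+ * Note on tx coalescing (informal): outgoing skbs are packed into a
+ * shared tx_buffer in __lcs_start_xmit. The buffer is emitted either
+ * when the next skb would overflow it or, via lcs_txbuffer_cb, as
+ * soon as no other tx buffer is in flight, so a lone packet is not
+ * delayed.
+ */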
+
+/**
+ * Callback for finished tx buffers.
+ */
+static void
+lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+ struct lcs_card *card;
+
+ LCS_DBF_TEXT(5, trace, "txbuffcb");
+ /* Put buffer back to pool. */
+ lcs_release_buffer(channel, buffer);
+ card = container_of(channel, struct lcs_card, write);
+ if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev))
+ netif_wake_queue(card->dev);
+ spin_lock(&card->lock);
+ card->tx_emitted--;
+ if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
+ /*
+ * Last running tx buffer has finished. Submit partially
+ * filled current buffer.
+ */
+ __lcs_emit_txbuffer(card);
+ spin_unlock(&card->lock);
+}
+
+/**
+ * Packet transmit function called by network stack
+ */
+static int
+__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lcs_header *header;
+ int rc = NETDEV_TX_OK;
+
+ LCS_DBF_TEXT(5, trace, "hardxmit");
+ if (skb == NULL) {
+ card->stats.tx_dropped++;
+ card->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ }
+ if (card->state != DEV_STATE_UP) {
+ dev_kfree_skb(skb);
+ card->stats.tx_dropped++;
+ card->stats.tx_errors++;
+ card->stats.tx_carrier_errors++;
+ return NETDEV_TX_OK;
+ }
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ netif_stop_queue(card->dev);
+ spin_lock(&card->lock);
+ if (card->tx_buffer != NULL &&
+ card->tx_buffer->count + sizeof(struct lcs_header) +
+ skb->len + sizeof(u16) > LCS_IOBUFFERSIZE)
+ /* skb too big for current tx buffer. */
+ __lcs_emit_txbuffer(card);
+ if (card->tx_buffer == NULL) {
+ /* Get new tx buffer */
+ card->tx_buffer = lcs_get_buffer(&card->write);
+ if (card->tx_buffer == NULL) {
+ card->stats.tx_dropped++;
+ rc = NETDEV_TX_BUSY;
+ goto out;
+ }
+ card->tx_buffer->callback = lcs_txbuffer_cb;
+ card->tx_buffer->count = 0;
+ }
+ header = (struct lcs_header *)
+ (card->tx_buffer->data + card->tx_buffer->count);
+ card->tx_buffer->count += skb->len + sizeof(struct lcs_header);
+ header->offset = card->tx_buffer->count;
+ header->type = card->lan_type;
+ header->slot = card->portno;
+ skb_copy_from_linear_data(skb, header + 1, skb->len);
+ spin_unlock(&card->lock);
+ card->stats.tx_bytes += skb->len;
+ card->stats.tx_packets++;
+ dev_kfree_skb(skb);
+ netif_wake_queue(card->dev);
+ spin_lock(&card->lock);
+ if (card->tx_emitted <= 0 && card->tx_buffer != NULL)
+ /* If this is the first tx buffer emit it immediately. */
+ __lcs_emit_txbuffer(card);
+out:
+ spin_unlock(&card->lock);
+ return rc;
+}
+
+static int
+lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lcs_card *card;
+ int rc;
+
+ LCS_DBF_TEXT(5, trace, "pktxmit");
+ card = (struct lcs_card *) dev->ml_priv;
+ rc = __lcs_start_xmit(card, skb, dev);
+ return rc;
+}
+
+/**
+ * send startlan and lanstat command to make LCS device ready
+ */
+static int
+lcs_startlan_auto(struct lcs_card *card)
+{
+ int rc;
+
+ LCS_DBF_TEXT(2, trace, "strtauto");
+#ifdef CONFIG_ETHERNET
+ card->lan_type = LCS_FRAME_TYPE_ENET;
+ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+ if (rc == 0)
+ return 0;
+
+#endif
+#ifdef CONFIG_TR
+ card->lan_type = LCS_FRAME_TYPE_TR;
+ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+ if (rc == 0)
+ return 0;
+#endif
+#ifdef CONFIG_FDDI
+ card->lan_type = LCS_FRAME_TYPE_FDDI;
+ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+ if (rc == 0)
+ return 0;
+#endif
+ return -EIO;
+}
+
+static int
+lcs_startlan(struct lcs_card *card)
+{
+ int rc, i;
+
+ LCS_DBF_TEXT(2, trace, "startlan");
+ rc = 0;
+ if (card->portno != LCS_INVALID_PORT_NO) {
+ if (card->lan_type == LCS_FRAME_TYPE_AUTO)
+ rc = lcs_startlan_auto(card);
+ else
+ rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP);
+ } else {
+ for (i = 0; i <= 16; i++) {
+ card->portno = i;
+ if (card->lan_type != LCS_FRAME_TYPE_AUTO)
+ rc = lcs_send_startlan(card,
+ LCS_INITIATOR_TCPIP);
+ else
+ /* autodetecting lan type */
+ rc = lcs_startlan_auto(card);
+ if (rc == 0)
+ break;
+ }
+ }
+ if (rc == 0)
+ return lcs_send_lanstat(card);
+ return rc;
+}
+
+/**
+ * LCS detect function
+ * setup channels and make them I/O ready
+ */
+static int
+lcs_detect(struct lcs_card *card)
+{
+ int rc = 0;
+
+ LCS_DBF_TEXT(2, setup, "lcsdetct");
+ /* start/reset card */
+ if (card->dev)
+ netif_stop_queue(card->dev);
+ rc = lcs_stop_channels(card);
+ if (rc == 0) {
+ rc = lcs_start_channels(card);
+ if (rc == 0) {
+ rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP);
+ if (rc == 0)
+ rc = lcs_startlan(card);
+ }
+ }
+ if (rc == 0) {
+ card->state = DEV_STATE_UP;
+ } else {
+ card->state = DEV_STATE_DOWN;
+ card->write.state = LCS_CH_STATE_INIT;
+ card->read.state = LCS_CH_STATE_INIT;
+ }
+ return rc;
+}
+
+/**
+ * LCS Stop card
+ */
+static int
+lcs_stopcard(struct lcs_card *card)
+{
+ int rc;
+
+ LCS_DBF_TEXT(3, setup, "stopcard");
+
+ if (card->read.state != LCS_CH_STATE_STOPPED &&
+ card->write.state != LCS_CH_STATE_STOPPED &&
+ card->read.state != LCS_CH_STATE_ERROR &&
+ card->write.state != LCS_CH_STATE_ERROR &&
+ card->state == DEV_STATE_UP) {
+ lcs_clear_multicast_list(card);
+ rc = lcs_send_stoplan(card, LCS_INITIATOR_TCPIP);
+ rc = lcs_send_shutdown(card);
+ }
+ rc = lcs_stop_channels(card);
+ card->state = DEV_STATE_DOWN;
+
+ return rc;
+}
+
+/**
+ * Kernel Thread helper functions for LGW initiated commands
+ */
+static void
+lcs_start_kernel_thread(struct work_struct *work)
+{
+ struct lcs_card *card = container_of(work, struct lcs_card,
+ kernel_thread_starter);
+ LCS_DBF_TEXT(5, trace, "krnthrd");
+ if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD))
+ kthread_run(lcs_recovery, card, "lcs_recover");
+#ifdef CONFIG_IP_MULTICAST
+ if (lcs_do_start_thread(card, LCS_SET_MC_THREAD))
+ kthread_run(lcs_register_mc_addresses, card, "regipm");
+#endif
+}
+
+/**
+ * Process control frames.
+ */
+static void
+lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
+{
+ LCS_DBF_TEXT(5, trace, "getctrl");
+ if (cmd->initiator == LCS_INITIATOR_LGW) {
+ switch(cmd->cmd_code) {
+ case LCS_CMD_STARTUP:
+ case LCS_CMD_STARTLAN:
+ lcs_schedule_recovery(card);
+ break;
+ case LCS_CMD_STOPLAN:
+ if (card->dev) {
+ pr_warning("Stoplan for %s initiated by LGW.\n",
+ card->dev->name);
+ netif_carrier_off(card->dev);
+ }
+ break;
+ default:
+ LCS_DBF_TEXT(5, trace, "noLGWcmd");
+ break;
+ }
+ } else
+ lcs_notify_lancmd_waiters(card, cmd);
+}
+
+/**
+ * Unpack network packet.
+ */
+static void
+lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
+{
+ struct sk_buff *skb;
+
+ LCS_DBF_TEXT(5, trace, "getskb");
+ if (card->dev == NULL ||
+ card->state != DEV_STATE_UP)
+ /* The card isn't up. Ignore the packet. */
+ return;
+
+ skb = dev_alloc_skb(skb_len);
+ if (skb == NULL) {
+ dev_err(&card->dev->dev,
+ "Allocating a socket buffer to interface %s failed\n",
+ card->dev->name);
+ card->stats.rx_dropped++;
+ return;
+ }
+ memcpy(skb_put(skb, skb_len), skb_data, skb_len);
+ skb->protocol = card->lan_type_trans(skb, card->dev);
+ card->stats.rx_bytes += skb_len;
+ card->stats.rx_packets++;
+ if (skb->protocol == htons(ETH_P_802_2))
+ *((__u32 *)skb->cb) = ++card->pkt_seq;
+ netif_rx(skb);
+}
+
+/**
+ * LCS main routine to get packets and lancmd replies from the buffers
+ */
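+/*
+ * Inbound buffer layout (informal sketch, inferred from the loop
+ * below): frames are chained through the 'offset' field of their
+ * lcs_header, which holds the start of the next frame relative to
+ * the buffer; offset 0 terminates the chain and LCS_ILLEGAL_OFFSET
+ * marks a buffer that was never filled.
+ *
+ *   | hdr0 | data0 | hdr1 | data1 | ... | hdrN (offset = 0) |
+ *     hdr0->offset points at hdr1, hdr1->offset at hdr2, ...
+ */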
+static void
+lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
+{
+ struct lcs_card *card;
+ struct lcs_header *lcs_hdr;
+ __u16 offset;
+
+ LCS_DBF_TEXT(5, trace, "lcsgtpkt");
+ lcs_hdr = (struct lcs_header *) buffer->data;
+ if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) {
+ LCS_DBF_TEXT(4, trace, "-eiogpkt");
+ return;
+ }
+ card = container_of(channel, struct lcs_card, read);
+ offset = 0;
+ while (lcs_hdr->offset != 0) {
+ if (lcs_hdr->offset <= 0 ||
+ lcs_hdr->offset > LCS_IOBUFFERSIZE ||
+ lcs_hdr->offset < offset) {
+ /* Offset invalid. */
+ card->stats.rx_length_errors++;
+ card->stats.rx_errors++;
+ return;
+ }
+ /* What kind of frame is it? */
+ if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL)
+ /* Control frame. */
+ lcs_get_control(card, (struct lcs_cmd *) lcs_hdr);
+ else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET ||
+ lcs_hdr->type == LCS_FRAME_TYPE_TR ||
+ lcs_hdr->type == LCS_FRAME_TYPE_FDDI)
+ /* Normal network packet. */
+ lcs_get_skb(card, (char *)(lcs_hdr + 1),
+ lcs_hdr->offset - offset -
+ sizeof(struct lcs_header));
+ else
+ /* Unknown frame type; FIXME: emit an error message? */
+ ;
+ /* Proceed to next frame. */
+ offset = lcs_hdr->offset;
+ lcs_hdr->offset = LCS_ILLEGAL_OFFSET;
+ lcs_hdr = (struct lcs_header *) (buffer->data + offset);
+ }
+ /* The buffer is now empty. Make it ready again. */
+ lcs_ready_buffer(&card->read, buffer);
+}
+
+/**
+ * get network statistics for ifconfig and other user programs
+ */
+static struct net_device_stats *
+lcs_getstats(struct net_device *dev)
+{
+ struct lcs_card *card;
+
+ LCS_DBF_TEXT(4, trace, "netstats");
+ card = (struct lcs_card *) dev->ml_priv;
+ return &card->stats;
+}
+
+/**
+ * Stop the LCS device.
+ * Called when the interface is taken down, e.g. via 'ifconfig xxx down'.
+ */
+static int
+lcs_stop_device(struct net_device *dev)
+{
+ struct lcs_card *card;
+ int rc;
+
+ LCS_DBF_TEXT(2, trace, "stopdev");
+ card = (struct lcs_card *) dev->ml_priv;
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ dev->flags &= ~IFF_UP;
+ wait_event(card->write.wait_q,
+ (card->write.state != LCS_CH_STATE_RUNNING));
+ rc = lcs_stopcard(card);
+ if (rc)
+ dev_err(&card->dev->dev,
+ " Shutting down the LCS device failed\n ");
+ return rc;
+}
+
+/**
+ * Start the LCS device and make it runnable.
+ * Called when the interface is brought up, e.g. via 'ifconfig xxx up'.
+ */
+static int
+lcs_open_device(struct net_device *dev)
+{
+ struct lcs_card *card;
+ int rc;
+
+ LCS_DBF_TEXT(2, trace, "opendev");
+ card = (struct lcs_card *) dev->ml_priv;
+ /* Start the card and detect the LAN type. */
+ rc = lcs_detect(card);
+ if (rc) {
+ pr_err("Error in opening device!\n");
+ } else {
+ dev->flags |= IFF_UP;
+ netif_carrier_on(dev);
+ netif_wake_queue(dev);
+ card->state = DEV_STATE_UP;
+ }
+ return rc;
+}
+
+/**
+ * show function for portno called by cat or similar things
+ */
+static ssize_t
+lcs_portno_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct lcs_card *card;
+
+ card = dev_get_drvdata(dev);
+
+ if (!card)
+ return 0;
+
+ return sprintf(buf, "%d\n", card->portno);
+}
+
+/**
+ * store the value which is piped to file portno
+ */
+static ssize_t
+lcs_portno_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct lcs_card *card;
+ int value;
+
+ card = dev_get_drvdata(dev);
+
+ if (!card)
+ return 0;
+
+ sscanf(buf, "%u", &value);
+ /* TODO: sanity checks */
+ card->portno = value;
+
+ return count;
+}
+
+static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
+
+static const char *lcs_type[] = {
+ "not a channel",
+ "2216 parallel",
+ "2216 channel",
+ "OSA LCS card",
+ "unknown channel type",
+ "unsupported channel type",
+};
+
+static ssize_t
+lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ccwgroup_device *cgdev;
+
+ cgdev = to_ccwgroupdev(dev);
+ if (!cgdev)
+ return -ENODEV;
+
+ return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
+}
+
+static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
+
+static ssize_t
+lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct lcs_card *card;
+
+ card = dev_get_drvdata(dev);
+
+ return card ? sprintf(buf, "%u\n", card->lancmd_timeout) : 0;
+}
+
+static ssize_t
+lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct lcs_card *card;
+ unsigned int value;
+
+ card = dev_get_drvdata(dev);
+
+ if (!card)
+ return 0;
+
+ if (sscanf(buf, "%u", &value) != 1)
+ return -EINVAL;
+ card->lancmd_timeout = value;
+
+ return count;
+}
+
+static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store);
+
+static ssize_t
+lcs_dev_recover_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct lcs_card *card = dev_get_drvdata(dev);
+ char *tmp;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+ if (card->state != DEV_STATE_UP)
+ return -EPERM;
+ i = simple_strtoul(buf, &tmp, 16);
+ if (i == 1)
+ lcs_schedule_recovery(card);
+ return count;
+}
+
+static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store);
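+
+/*
+ * Writing "1" to the write-only recover attribute schedules the
+ * recovery thread, e.g. (hypothetical bus ID):
+ *   echo 1 > /sys/bus/ccwgroup/devices/0.0.0800/recover
+ * The device must be up (DEV_STATE_UP); otherwise -EPERM is returned.
+ */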
+
+static struct attribute * lcs_attrs[] = {
+ &dev_attr_portno.attr,
+ &dev_attr_type.attr,
+ &dev_attr_lancmd_timeout.attr,
+ &dev_attr_recover.attr,
+ NULL,
+};
+
+static struct attribute_group lcs_attr_group = {
+ .attrs = lcs_attrs,
+};
+
+/**
+ * lcs_probe_device is called on establishing a new ccwgroup_device.
+ */
+static int
+lcs_probe_device(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+ int ret;
+
+ if (!get_device(&ccwgdev->dev))
+ return -ENODEV;
+
+ LCS_DBF_TEXT(2, setup, "add_dev");
+ card = lcs_alloc_card();
+ if (!card) {
+ LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM);
+ put_device(&ccwgdev->dev);
+ return -ENOMEM;
+ }
+ ret = sysfs_create_group(&ccwgdev->dev.kobj, &lcs_attr_group);
+ if (ret) {
+ lcs_free_card(card);
+ put_device(&ccwgdev->dev);
+ return ret;
+ }
+ dev_set_drvdata(&ccwgdev->dev, card);
+ ccwgdev->cdev[0]->handler = lcs_irq;
+ ccwgdev->cdev[1]->handler = lcs_irq;
+ card->gdev = ccwgdev;
+ INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread);
+ card->thread_start_mask = 0;
+ card->thread_allowed_mask = 0;
+ card->thread_running_mask = 0;
+ return 0;
+}
+
+static int
+lcs_register_netdev(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+
+ LCS_DBF_TEXT(2, setup, "regnetdv");
+ card = dev_get_drvdata(&ccwgdev->dev);
+ if (card->dev->reg_state != NETREG_UNINITIALIZED)
+ return 0;
+ SET_NETDEV_DEV(card->dev, &ccwgdev->dev);
+ return register_netdev(card->dev);
+}
+
+static const struct net_device_ops lcs_netdev_ops = {
+ .ndo_open = lcs_open_device,
+ .ndo_stop = lcs_stop_device,
+ .ndo_get_stats = lcs_getstats,
+ .ndo_start_xmit = lcs_start_xmit,
+};
+
+static const struct net_device_ops lcs_mc_netdev_ops = {
+ .ndo_open = lcs_open_device,
+ .ndo_stop = lcs_stop_device,
+ .ndo_get_stats = lcs_getstats,
+ .ndo_start_xmit = lcs_start_xmit,
+ .ndo_set_rx_mode = lcs_set_multicast_list,
+};
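+
+/*
+ * The two ops tables above differ only in ndo_set_rx_mode; depending on
+ * the outcome of lcs_check_multicast_support(), lcs_new_device() below
+ * installs either table on the freshly allocated net_device.
+ */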
+
+/**
+ * lcs_new_device will be called by setting the group device online.
+ */
+static int
+lcs_new_device(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+ struct net_device *dev = NULL;
+ enum lcs_dev_states recover_state;
+ int rc;
+
+ card = dev_get_drvdata(&ccwgdev->dev);
+ if (!card)
+ return -ENODEV;
+
+ LCS_DBF_TEXT(2, setup, "newdev");
+ LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+ card->read.ccwdev = ccwgdev->cdev[0];
+ card->write.ccwdev = ccwgdev->cdev[1];
+
+ recover_state = card->state;
+ rc = ccw_device_set_online(card->read.ccwdev);
+ if (rc)
+ goto out_err;
+ rc = ccw_device_set_online(card->write.ccwdev);
+ if (rc)
+ goto out_werr;
+
+ LCS_DBF_TEXT(3, setup, "lcsnewdv");
+
+ lcs_setup_card(card);
+ rc = lcs_detect(card);
+ if (rc) {
+ LCS_DBF_TEXT(2, setup, "dtctfail");
+ dev_err(&card->dev->dev,
+ "Detecting a network adapter for LCS devices"
+ " failed with rc=%d (0x%x)\n", rc, rc);
+ lcs_stopcard(card);
+ goto out;
+ }
+ if (card->dev) {
+ LCS_DBF_TEXT(2, setup, "samedev");
+ LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+ goto netdev_out;
+ }
+ switch (card->lan_type) {
+#ifdef CONFIG_ETHERNET
+ case LCS_FRAME_TYPE_ENET:
+ card->lan_type_trans = eth_type_trans;
+ dev = alloc_etherdev(0);
+ break;
+#endif
+#ifdef CONFIG_TR
+ case LCS_FRAME_TYPE_TR:
+ card->lan_type_trans = tr_type_trans;
+ dev = alloc_trdev(0);
+ break;
+#endif
+#ifdef CONFIG_FDDI
+ case LCS_FRAME_TYPE_FDDI:
+ card->lan_type_trans = fddi_type_trans;
+ dev = alloc_fddidev(0);
+ break;
+#endif
+ default:
+ LCS_DBF_TEXT(3, setup, "errinit");
+ pr_err(" Initialization failed\n");
+ goto out;
+ }
+ if (!dev)
+ goto out;
+ card->dev = dev;
+ card->dev->ml_priv = card;
+ card->dev->netdev_ops = &lcs_netdev_ops;
+ memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
+#ifdef CONFIG_IP_MULTICAST
+ if (!lcs_check_multicast_support(card))
+ card->dev->netdev_ops = &lcs_mc_netdev_ops;
+#endif
+netdev_out:
+ lcs_set_allowed_threads(card, 0xffffffff);
+ if (recover_state == DEV_STATE_RECOVER) {
+ lcs_set_multicast_list(card->dev);
+ card->dev->flags |= IFF_UP;
+ netif_carrier_on(card->dev);
+ netif_wake_queue(card->dev);
+ card->state = DEV_STATE_UP;
+ } else {
+ lcs_stopcard(card);
+ }
+
+ if (lcs_register_netdev(ccwgdev) != 0)
+ goto out;
+
+ /* Print out supported assists: IPv6 */
+ pr_info("LCS device %s %s IPv6 support\n", card->dev->name,
+ (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ?
+ "with" : "without");
+ /* Print out supported assist: Multicast */
+ pr_info("LCS device %s %s Multicast support\n", card->dev->name,
+ (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ?
+ "with" : "without");
+ return 0;
+out:
+ ccw_device_set_offline(card->write.ccwdev);
+out_werr:
+ ccw_device_set_offline(card->read.ccwdev);
+out_err:
+ return -ENODEV;
+}
+
+/**
+ * lcs_shutdown_device, called when setting the group device offline.
+ */
+static int
+__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode)
+{
+ struct lcs_card *card;
+ enum lcs_dev_states recover_state;
+ int ret, ret2;
+
+ LCS_DBF_TEXT(3, setup, "shtdndev");
+ card = dev_get_drvdata(&ccwgdev->dev);
+ if (!card)
+ return -ENODEV;
+ if (recovery_mode == 0) {
+ lcs_set_allowed_threads(card, 0);
+ if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD))
+ return -ERESTARTSYS;
+ }
+ LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+ recover_state = card->state;
+
+ ret = lcs_stop_device(card->dev);
+ ret2 = ccw_device_set_offline(card->read.ccwdev);
+ if (!ret)
+ ret = ret2;
+ ret2 = ccw_device_set_offline(card->write.ccwdev);
+ if (!ret)
+ ret = ret2;
+ if (recover_state == DEV_STATE_UP) {
+ card->state = DEV_STATE_RECOVER;
+ }
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int
+lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
+{
+ return __lcs_shutdown_device(ccwgdev, 0);
+}
+
+/**
+ * Drive LCS recovery after a startup/startlan request initiated by the
+ * LAN gateway.
+ */
+static int
+lcs_recovery(void *ptr)
+{
+ struct lcs_card *card;
+ struct ccwgroup_device *gdev;
+ int rc;
+
+ card = (struct lcs_card *) ptr;
+
+ LCS_DBF_TEXT(4, trace, "recover1");
+ if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD))
+ return 0;
+ LCS_DBF_TEXT(4, trace, "recover2");
+ gdev = card->gdev;
+ dev_warn(&gdev->dev,
+ "A recovery process has been started for the LCS device\n");
+ rc = __lcs_shutdown_device(gdev, 1);
+ rc = lcs_new_device(gdev);
+ if (!rc)
+ pr_info("Device %s successfully recovered\n",
+ card->dev->name);
+ else
+ pr_info("Device %s could not be recovered\n",
+ card->dev->name);
+ lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD);
+ return 0;
+}
+
+/**
+ * lcs_remove_device, free buffers and card
+ */
+static void
+lcs_remove_device(struct ccwgroup_device *ccwgdev)
+{
+ struct lcs_card *card;
+
+ card = dev_get_drvdata(&ccwgdev->dev);
+ if (!card)
+ return;
+
+ LCS_DBF_TEXT(3, setup, "remdev");
+ LCS_DBF_HEX(3, setup, &card, sizeof(void*));
+ if (ccwgdev->state == CCWGROUP_ONLINE) {
+ lcs_shutdown_device(ccwgdev);
+ }
+ if (card->dev)
+ unregister_netdev(card->dev);
+ sysfs_remove_group(&ccwgdev->dev.kobj, &lcs_attr_group);
+ lcs_cleanup_card(card);
+ lcs_free_card(card);
+ put_device(&ccwgdev->dev);
+}
+
+static int lcs_pm_suspend(struct lcs_card *card)
+{
+ if (card->dev)
+ netif_device_detach(card->dev);
+ lcs_set_allowed_threads(card, 0);
+ lcs_wait_for_threads(card, 0xffffffff);
+ if (card->state != DEV_STATE_DOWN)
+ __lcs_shutdown_device(card->gdev, 1);
+ return 0;
+}
+
+static int lcs_pm_resume(struct lcs_card *card)
+{
+ int rc = 0;
+
+ if (card->state == DEV_STATE_RECOVER)
+ rc = lcs_new_device(card->gdev);
+ if (card->dev)
+ netif_device_attach(card->dev);
+ if (rc) {
+ dev_warn(&card->gdev->dev, "The lcs device driver "
+ "failed to recover the device\n");
+ }
+ return rc;
+}
+
+static int lcs_prepare(struct ccwgroup_device *gdev)
+{
+ return 0;
+}
+
+static void lcs_complete(struct ccwgroup_device *gdev)
+{
+ return;
+}
+
+static int lcs_freeze(struct ccwgroup_device *gdev)
+{
+ struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+ return lcs_pm_suspend(card);
+}
+
+static int lcs_thaw(struct ccwgroup_device *gdev)
+{
+ struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+ return lcs_pm_resume(card);
+}
+
+static int lcs_restore(struct ccwgroup_device *gdev)
+{
+ struct lcs_card *card = dev_get_drvdata(&gdev->dev);
+ return lcs_pm_resume(card);
+}
+
+static struct ccw_device_id lcs_ids[] = {
+ {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
+ {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
+ {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
+ {},
+};
+MODULE_DEVICE_TABLE(ccw, lcs_ids);
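+
+/*
+ * Note: driver_info doubles as an index into the lcs_type[] name table
+ * used by the sysfs "type" attribute above (parallel = "2216 parallel",
+ * 2216 = "2216 channel", osa2 = "OSA LCS card").
+ */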
+
+static struct ccw_driver lcs_ccw_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lcs",
+ },
+ .ids = lcs_ids,
+ .probe = ccwgroup_probe_ccwdev,
+ .remove = ccwgroup_remove_ccwdev,
+ .int_class = IOINT_LCS,
+};
+
+/**
+ * LCS ccwgroup driver registration
+ */
+static struct ccwgroup_driver lcs_group_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "lcs",
+ },
+ .max_slaves = 2,
+ .driver_id = 0xD3C3E2,
+ .probe = lcs_probe_device,
+ .remove = lcs_remove_device,
+ .set_online = lcs_new_device,
+ .set_offline = lcs_shutdown_device,
+ .prepare = lcs_prepare,
+ .complete = lcs_complete,
+ .freeze = lcs_freeze,
+ .thaw = lcs_thaw,
+ .restore = lcs_restore,
+};
+
+static ssize_t
+lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
+ size_t count)
+{
+ int err;
+ err = ccwgroup_create_from_string(lcs_root_dev,
+ lcs_group_driver.driver_id,
+ &lcs_ccw_driver, 2, buf);
+ return err ? err : count;
+}
+
+static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
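+
+/*
+ * Writing a read/write CCW device pair to the group attribute is what
+ * creates an LCS ccwgroup device in the first place, e.g. (hypothetical
+ * bus IDs):
+ *   echo 0.0.0800,0.0.0801 > /sys/bus/ccwgroup/drivers/lcs/group
+ */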
+
+static struct attribute *lcs_group_attrs[] = {
+ &driver_attr_group.attr,
+ NULL,
+};
+
+static struct attribute_group lcs_group_attr_group = {
+ .attrs = lcs_group_attrs,
+};
+
+static const struct attribute_group *lcs_group_attr_groups[] = {
+ &lcs_group_attr_group,
+ NULL,
+};
+
+/**
+ * LCS Module/Kernel initialization function
+ */
+static int
+__init lcs_init_module(void)
+{
+ int rc;
+
+ pr_info("Loading %s\n", version);
+ rc = lcs_register_debug_facility();
+ LCS_DBF_TEXT(0, setup, "lcsinit");
+ if (rc)
+ goto out_err;
+ lcs_root_dev = root_device_register("lcs");
+ rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
+ if (rc)
+ goto register_err;
+ rc = ccw_driver_register(&lcs_ccw_driver);
+ if (rc)
+ goto ccw_err;
+ lcs_group_driver.driver.groups = lcs_group_attr_groups;
+ rc = ccwgroup_driver_register(&lcs_group_driver);
+ if (rc)
+ goto ccwgroup_err;
+ return 0;
+
+ccwgroup_err:
+ ccw_driver_unregister(&lcs_ccw_driver);
+ccw_err:
+ root_device_unregister(lcs_root_dev);
+register_err:
+ lcs_unregister_debug_facility();
+out_err:
+ pr_err("Initializing the lcs device driver failed\n");
+ return rc;
+}
+
+
+/**
+ * LCS module cleanup function
+ */
+static void
+__exit lcs_cleanup_module(void)
+{
+ pr_info("Terminating lcs module.\n");
+ LCS_DBF_TEXT(0, trace, "cleanup");
+ driver_remove_file(&lcs_group_driver.driver,
+ &driver_attr_group);
+ ccwgroup_driver_unregister(&lcs_group_driver);
+ ccw_driver_unregister(&lcs_ccw_driver);
+ root_device_unregister(lcs_root_dev);
+ lcs_unregister_debug_facility();
+}
+
+module_init(lcs_init_module);
+module_exit(lcs_cleanup_module);
+
+MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
new file mode 100644
index 00000000000..8c03392ac83
--- /dev/null
+++ b/drivers/s390/net/lcs.h
@@ -0,0 +1,345 @@
+/* lcs.h */
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <asm/ccwdev.h>
+
+#define LCS_DBF_TEXT(level, name, text) \
+ do { \
+ debug_text_event(lcs_dbf_##name, level, text); \
+ } while (0)
+
+#define LCS_DBF_HEX(level, name, addr, len) \
+ do { \
+ debug_event(lcs_dbf_##name, level, (void *)(addr), len); \
+ } while (0)
+
+/* Sort out low debug levels early to avoid wasted sprintfs */
+static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+ return (level <= dbf_grp->level);
+}
+
+#define LCS_DBF_TEXT_(level, name, text...) \
+ do { \
+ if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+ sprintf(debug_buffer, text); \
+ debug_text_event(lcs_dbf_##name, level, debug_buffer); \
+ } \
+ } while (0)
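+
+/*
+ * Example: LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM) first checks the
+ * debug level via lcs_dbf_passes() and only formats into debug_buffer
+ * when the event would actually be logged.
+ */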
+
+/**
+ * sysfs related stuff
+ */
+#define CARD_FROM_DEV(cdev) \
+ (struct lcs_card *) dev_get_drvdata( \
+ &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev)
+
+/**
+ * Enum for classifying detected devices.
+ */
+enum lcs_channel_types {
+ /* Device is not a channel */
+ lcs_channel_type_none,
+
+ /* Device is a parallel channel */
+ lcs_channel_type_parallel,
+
+ /* Device is a 2216 channel */
+ lcs_channel_type_2216,
+
+ /* Device is an OSA2 card */
+ lcs_channel_type_osa2
+};
+
+/**
+ * CCW commands used in this driver
+ */
+#define LCS_CCW_WRITE 0x01
+#define LCS_CCW_READ 0x02
+#define LCS_CCW_TRANSFER 0x08
+
+/**
+ * LCS device status primitives
+ */
+#define LCS_CMD_STARTLAN 0x01
+#define LCS_CMD_STOPLAN 0x02
+#define LCS_CMD_LANSTAT 0x04
+#define LCS_CMD_STARTUP 0x07
+#define LCS_CMD_SHUTDOWN 0x08
+#define LCS_CMD_QIPASSIST 0xb2
+#define LCS_CMD_SETIPM 0xb4
+#define LCS_CMD_DELIPM 0xb5
+
+#define LCS_INITIATOR_TCPIP 0x00
+#define LCS_INITIATOR_LGW 0x01
+#define LCS_STD_CMD_SIZE 16
+#define LCS_MULTICAST_CMD_SIZE 404
+
+/**
+ * LCS IPASSIST MASKS, only used when multicast is switched on
+ */
+/* Not supported by LCS */
+#define LCS_IPASS_ARP_PROCESSING 0x0001
+#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002
+#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004
+#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
+#define LCS_IPASS_IP_FILTERING 0x0010
+/* Supported by lcs 3172 */
+#define LCS_IPASS_IPV6_SUPPORT 0x0020
+#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
+
+/**
+ * LCS sense byte definitions
+ */
+#define LCS_SENSE_BYTE_0 0
+#define LCS_SENSE_BYTE_1 1
+#define LCS_SENSE_BYTE_2 2
+#define LCS_SENSE_BYTE_3 3
+#define LCS_SENSE_INTERFACE_DISCONNECT 0x01
+#define LCS_SENSE_EQUIPMENT_CHECK 0x10
+#define LCS_SENSE_BUS_OUT_CHECK 0x20
+#define LCS_SENSE_INTERVENTION_REQUIRED 0x40
+#define LCS_SENSE_CMD_REJECT 0x80
+#define LCS_SENSE_RESETTING_EVENT 0x80
+#define LCS_SENSE_DEVICE_ONLINE 0x20
+
+/**
+ * LCS packet type definitions
+ */
+#define LCS_FRAME_TYPE_CONTROL 0
+#define LCS_FRAME_TYPE_ENET 1
+#define LCS_FRAME_TYPE_TR 2
+#define LCS_FRAME_TYPE_FDDI 7
+#define LCS_FRAME_TYPE_AUTO -1
+
+/**
+ * Some more definitions; to be sorted later
+ */
+#define LCS_ILLEGAL_OFFSET 0xffff
+#define LCS_IOBUFFERSIZE 0x5000
+#define LCS_NUM_BUFFS 32 /* needs to be a power of 2 */
+#define LCS_MAC_LENGTH 6
+#define LCS_INVALID_PORT_NO -1
+#define LCS_LANCMD_TIMEOUT_DEFAULT 5
+
+/**
+ * Multicast state
+ */
+#define LCS_IPM_STATE_SET_REQUIRED 0
+#define LCS_IPM_STATE_DEL_REQUIRED 1
+#define LCS_IPM_STATE_ON_CARD 2
+
+/**
+ * LCS IP Assist declarations
+ * seems to be only used for multicast
+ */
+#define LCS_IPASS_ARP_PROCESSING 0x0001
+#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002
+#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004
+#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008
+#define LCS_IPASS_IP_FILTERING 0x0010
+#define LCS_IPASS_IPV6_SUPPORT 0x0020
+#define LCS_IPASS_MULTICAST_SUPPORT 0x0040
+
+/**
+ * LCS Buffer states
+ */
+enum lcs_buffer_states {
+ LCS_BUF_STATE_EMPTY, /* buffer is empty */
+ LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */
+ LCS_BUF_STATE_READY, /* buffer is ready for read/write */
+ LCS_BUF_STATE_PROCESSED,
+};
+
+/**
+ * LCS Channel State Machine declarations
+ */
+enum lcs_channel_states {
+ LCS_CH_STATE_INIT,
+ LCS_CH_STATE_HALTED,
+ LCS_CH_STATE_STOPPED,
+ LCS_CH_STATE_RUNNING,
+ LCS_CH_STATE_SUSPENDED,
+ LCS_CH_STATE_CLEARED,
+ LCS_CH_STATE_ERROR,
+};
+
+/**
+ * LCS device state machine
+ */
+enum lcs_dev_states {
+ DEV_STATE_DOWN,
+ DEV_STATE_UP,
+ DEV_STATE_RECOVER,
+};
+
+enum lcs_threads {
+ LCS_SET_MC_THREAD = 1,
+ LCS_RECOVERY_THREAD = 2,
+};
+
+/**
+ * LCS struct declarations
+ */
+struct lcs_header {
+ __u16 offset;
+ __u8 type;
+ __u8 slot;
+} __attribute__ ((packed));
+
+struct lcs_ip_mac_pair {
+ __be32 ip_addr;
+ __u8 mac_addr[LCS_MAC_LENGTH];
+ __u8 reserved[2];
+} __attribute__ ((packed));
+
+struct lcs_ipm_list {
+ struct list_head list;
+ struct lcs_ip_mac_pair ipm;
+ __u8 ipm_state;
+};
+
+struct lcs_cmd {
+ __u16 offset;
+ __u8 type;
+ __u8 slot;
+ __u8 cmd_code;
+ __u8 initiator;
+ __u16 sequence_no;
+ __u16 return_code;
+ union {
+ struct {
+ __u8 lan_type;
+ __u8 portno;
+ __u16 parameter_count;
+ __u8 operator_flags[3];
+ __u8 reserved[3];
+ } lcs_std_cmd;
+ struct {
+ __u16 unused1;
+ __u16 buff_size;
+ __u8 unused2[6];
+ } lcs_startup;
+ struct {
+ __u8 lan_type;
+ __u8 portno;
+ __u8 unused[10];
+ __u8 mac_addr[LCS_MAC_LENGTH];
+ __u32 num_packets_deblocked;
+ __u32 num_packets_blocked;
+ __u32 num_packets_tx_on_lan;
+ __u32 num_tx_errors_detected;
+ __u32 num_tx_packets_disgarded;
+ __u32 num_packets_rx_from_lan;
+ __u32 num_rx_errors_detected;
+ __u32 num_rx_discarded_nobuffs_avail;
+ __u32 num_rx_packets_too_large;
+ } lcs_lanstat_cmd;
+#ifdef CONFIG_IP_MULTICAST
+ struct {
+ __u8 lan_type;
+ __u8 portno;
+ __u16 num_ip_pairs;
+ __u16 ip_assists_supported;
+ __u16 ip_assists_enabled;
+ __u16 version;
+ struct {
+ struct lcs_ip_mac_pair
+ ip_mac_pair[32];
+ __u32 response_data;
+ } lcs_ipass_ctlmsg __attribute__ ((packed));
+ } lcs_qipassist __attribute__ ((packed));
+#endif /*CONFIG_IP_MULTICAST */
+ } cmd __attribute__ ((packed));
+} __attribute__ ((packed));
+
+/**
+ * Forward declarations.
+ */
+struct lcs_card;
+struct lcs_channel;
+
+/**
+ * Definition of an lcs buffer.
+ */
+struct lcs_buffer {
+ enum lcs_buffer_states state;
+ void *data;
+ int count;
+ /* Callback for completion notification. */
+ void (*callback)(struct lcs_channel *, struct lcs_buffer *);
+};
+
+struct lcs_reply {
+ struct list_head list;
+ __u16 sequence_no;
+ atomic_t refcnt;
+ /* Callback for completion notification. */
+ void (*callback)(struct lcs_card *, struct lcs_cmd *);
+ wait_queue_head_t wait_q;
+ struct lcs_card *card;
+ int received;
+ int rc;
+};
+
+/**
+ * Definition of an lcs channel
+ */
+struct lcs_channel {
+ enum lcs_channel_states state;
+ struct ccw_device *ccwdev;
+ struct ccw1 ccws[LCS_NUM_BUFFS + 1];
+ wait_queue_head_t wait_q;
+ struct tasklet_struct irq_tasklet;
+ struct lcs_buffer iob[LCS_NUM_BUFFS];
+ int io_idx;
+ int buf_idx;
+};
+
+
+/**
+ * definition of the lcs card
+ */
+struct lcs_card {
+ spinlock_t lock;
+ spinlock_t ipm_lock;
+ enum lcs_dev_states state;
+ struct net_device *dev;
+ struct net_device_stats stats;
+ __be16 (*lan_type_trans)(struct sk_buff *skb,
+ struct net_device *dev);
+ struct ccwgroup_device *gdev;
+ struct lcs_channel read;
+ struct lcs_channel write;
+ struct lcs_buffer *tx_buffer;
+ int tx_emitted;
+ struct list_head lancmd_waiters;
+ int lancmd_timeout;
+
+ struct work_struct kernel_thread_starter;
+ spinlock_t mask_lock;
+ unsigned long thread_start_mask;
+ unsigned long thread_running_mask;
+ unsigned long thread_allowed_mask;
+ wait_queue_head_t wait_q;
+
+#ifdef CONFIG_IP_MULTICAST
+ struct list_head ipm_list;
+#endif
+ __u8 mac[LCS_MAC_LENGTH];
+ __u16 ip_assists_supported;
+ __u16 ip_assists_enabled;
+ __s8 lan_type;
+ __u32 pkt_seq;
+ __u16 sequence_no;
+ __s16 portno;
+ /* Some info copied from probeinfo */
+ u8 device_forced;
+ u8 max_port_no;
+ u8 hint_port_no;
+ s16 port_protocol_no;
+} __attribute__ ((aligned(8)));
+
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
new file mode 100644
index 00000000000..8160591913f
--- /dev/null
+++ b/drivers/s390/net/netiucv.c
@@ -0,0 +1,2299 @@
+/*
+ * IUCV network driver
+ *
+ * Copyright IBM Corp. 2001, 2009
+ *
+ * Author(s):
+ * Original netiucv driver:
+ * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ * Sysfs integration and all bugs therein:
+ * Cornelia Huck (cornelia.huck@de.ibm.com)
+ * PM functions:
+ * Ursula Braun (ursula.braun@de.ibm.com)
+ *
+ * Documentation used:
+ * the source of the original IUCV driver by:
+ * Stefan Hegewald <hegewald@de.ibm.com>
+ * Hartmut Penner <hpenner@de.ibm.com>
+ * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
+ * Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#define KMSG_COMPONENT "netiucv"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/dst.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
+
+#include <net/iucv/iucv.h>
+#include "fsm.h"
+
+MODULE_AUTHOR
+ ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
+
+/**
+ * Debug Facility stuff
+ */
+#define IUCV_DBF_SETUP_NAME "iucv_setup"
+#define IUCV_DBF_SETUP_LEN 64
+#define IUCV_DBF_SETUP_PAGES 2
+#define IUCV_DBF_SETUP_NR_AREAS 1
+#define IUCV_DBF_SETUP_LEVEL 3
+
+#define IUCV_DBF_DATA_NAME "iucv_data"
+#define IUCV_DBF_DATA_LEN 128
+#define IUCV_DBF_DATA_PAGES 2
+#define IUCV_DBF_DATA_NR_AREAS 1
+#define IUCV_DBF_DATA_LEVEL 2
+
+#define IUCV_DBF_TRACE_NAME "iucv_trace"
+#define IUCV_DBF_TRACE_LEN 16
+#define IUCV_DBF_TRACE_PAGES 4
+#define IUCV_DBF_TRACE_NR_AREAS 1
+#define IUCV_DBF_TRACE_LEVEL 3
+
+#define IUCV_DBF_TEXT(name,level,text) \
+ do { \
+ debug_text_event(iucv_dbf_##name,level,text); \
+ } while (0)
+
+#define IUCV_DBF_HEX(name,level,addr,len) \
+ do { \
+ debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
+ } while (0)
+
+DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+/* Allow to sort out low debug levels early to avoid wasted sprints */
+static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
+{
+ return (level <= dbf_grp->level);
+}
+
+#define IUCV_DBF_TEXT_(name, level, text...) \
+ do { \
+ if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+ char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
+ sprintf(__buf, text); \
+ debug_text_event(iucv_dbf_##name, level, __buf); \
+ put_cpu_var(iucv_dbf_txt_buf); \
+ } \
+ } while (0)
+
+#define IUCV_DBF_SPRINTF(name, level, text...) \
+ do { \
+ debug_sprintf_event(iucv_dbf_trace, level, ##text); \
+ } while (0)
+
+/**
+ * some more debug stuff
+ */
+#define IUCV_HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
+ *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
+ *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
+ *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
+ *(((char*)ptr)+12),*(((char*)ptr)+13), \
+ *(((char*)ptr)+14),*(((char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)+16),*(((char*)ptr)+17), \
+ *(((char*)ptr)+18),*(((char*)ptr)+19), \
+ *(((char*)ptr)+20),*(((char*)ptr)+21), \
+ *(((char*)ptr)+22),*(((char*)ptr)+23), \
+ *(((char*)ptr)+24),*(((char*)ptr)+25), \
+ *(((char*)ptr)+26),*(((char*)ptr)+27), \
+ *(((char*)ptr)+28),*(((char*)ptr)+29), \
+ *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+#define PRINTK_HEADER " iucv: " /* for debugging */
+
+/* dummy device to make sure netiucv_pm functions are called */
+static struct device *netiucv_dev;
+
+static int netiucv_pm_prepare(struct device *);
+static void netiucv_pm_complete(struct device *);
+static int netiucv_pm_freeze(struct device *);
+static int netiucv_pm_restore_thaw(struct device *);
+
+static const struct dev_pm_ops netiucv_pm_ops = {
+ .prepare = netiucv_pm_prepare,
+ .complete = netiucv_pm_complete,
+ .freeze = netiucv_pm_freeze,
+ .thaw = netiucv_pm_restore_thaw,
+ .restore = netiucv_pm_restore_thaw,
+};
+
+static struct device_driver netiucv_driver = {
+ .owner = THIS_MODULE,
+ .name = "netiucv",
+ .bus = &iucv_bus,
+ .pm = &netiucv_pm_ops,
+};
+
+static int netiucv_callback_connreq(struct iucv_path *,
+ u8 ipvmid[8], u8 ipuser[16]);
+static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
+static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
+
+static struct iucv_handler netiucv_handler = {
+ .path_pending = netiucv_callback_connreq,
+ .path_complete = netiucv_callback_connack,
+ .path_severed = netiucv_callback_connrej,
+ .path_quiesced = netiucv_callback_connsusp,
+ .path_resumed = netiucv_callback_connres,
+ .message_pending = netiucv_callback_rx,
+ .message_complete = netiucv_callback_txdone
+};
+
+/**
+ * Per connection profiling data
+ */
+struct connection_profile {
+ unsigned long maxmulti;
+ unsigned long maxcqueue;
+ unsigned long doios_single;
+ unsigned long doios_multi;
+ unsigned long txlen;
+ unsigned long tx_time;
+ struct timespec send_stamp;
+ unsigned long tx_pending;
+ unsigned long tx_max_pending;
+};
+
+/**
+ * Representation of one iucv connection
+ */
+struct iucv_connection {
+ struct list_head list;
+ struct iucv_path *path;
+ struct sk_buff *rx_buff;
+ struct sk_buff *tx_buff;
+ struct sk_buff_head collect_queue;
+ struct sk_buff_head commit_queue;
+ spinlock_t collect_lock;
+ int collect_len;
+ int max_buffsize;
+ fsm_timer timer;
+ fsm_instance *fsm;
+ struct net_device *netdev;
+ struct connection_profile prof;
+ char userid[9];
+ char userdata[17];
+};
+
+/**
+ * Linked list of all connection structs.
+ */
+static LIST_HEAD(iucv_connection_list);
+static DEFINE_RWLOCK(iucv_connection_rwlock);
+
+/**
+ * Representation of event-data for the
+ * connection state machine.
+ */
+struct iucv_event {
+ struct iucv_connection *conn;
+ void *data;
+};
+
+/**
+ * Private part of the network device structure
+ */
+struct netiucv_priv {
+ struct net_device_stats stats;
+ unsigned long tbusy;
+ fsm_instance *fsm;
+ struct iucv_connection *conn;
+ struct device *dev;
+ int pm_state;
+};
+
+/**
+ * Link level header for a packet.
+ */
+struct ll_header {
+ u16 next;
+};
+
+#define NETIUCV_HDRLEN (sizeof(struct ll_header))
+#define NETIUCV_BUFSIZE_MAX 65537
+#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
+#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
+#define NETIUCV_MTU_DEFAULT 9216
+#define NETIUCV_QUEUELEN_DEFAULT 50
+#define NETIUCV_TIMEOUT_5SEC 5000
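+
+/*
+ * On-wire framing, a sketch inferred from netiucv_unpack_skb() and
+ * netiucv_transmit_skb() below: an IUCV message carries one or more
+ * packets, each preceded by a 2-byte ll_header whose "next" field
+ * holds the offset of the following header, counted from the start of
+ * the message; a trailing header with next == 0 terminates the chain.
+ */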
+
+/**
+ * Compatibility macros for busy handling
+ * of network devices.
+ */
+static inline void netiucv_clear_busy(struct net_device *dev)
+{
+ struct netiucv_priv *priv = netdev_priv(dev);
+ clear_bit(0, &priv->tbusy);
+ netif_wake_queue(dev);
+}
+
+static inline int netiucv_test_and_set_busy(struct net_device *dev)
+{
+ struct netiucv_priv *priv = netdev_priv(dev);
+ netif_stop_queue(dev);
+ return test_and_set_bit(0, &priv->tbusy);
+}
+
+static u8 iucvMagic_ascii[16] = {
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
+ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
+ 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
+};
+
+/**
+ * Convert an iucv userId to its printable
+ * form (strip trailing whitespace).
+ *
+ * @param name An iucv userId
+ * @param len Length of the userId
+ *
+ * @returns The printable string (static data, not reentrant)
+ */
+static char *netiucv_printname(char *name, int len)
+{
+ static char tmp[17];
+ char *p = tmp;
+ memcpy(tmp, name, len);
+ tmp[len] = '\0';
+ while (*p && ((p - tmp) < len) && (!isspace(*p)))
+ p++;
+ *p = '\0';
+ return tmp;
+}
+
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+ static char tmp_uid[9];
+ static char tmp_udat[17];
+ static char buf[100];
+
+ if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+ tmp_uid[8] = '\0';
+ tmp_udat[16] = '\0';
+ memcpy(tmp_uid, conn->userid, 8);
+ memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+ memcpy(tmp_udat, conn->userdata, 16);
+ EBCASC(tmp_udat, 16);
+ memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+ sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+ return buf;
+ } else
+ return netiucv_printname(conn->userid, 8);
+}
+
+/**
+ * States of the interface statemachine.
+ */
+enum dev_states {
+ DEV_STATE_STOPPED,
+ DEV_STATE_STARTWAIT,
+ DEV_STATE_STOPWAIT,
+ DEV_STATE_RUNNING,
+ /**
+ * MUST always be the last element!
+ */
+ NR_DEV_STATES
+};
+
+static const char *dev_state_names[] = {
+ "Stopped",
+ "StartWait",
+ "StopWait",
+ "Running",
+};
+
+/**
+ * Events of the interface statemachine.
+ */
+enum dev_events {
+ DEV_EVENT_START,
+ DEV_EVENT_STOP,
+ DEV_EVENT_CONUP,
+ DEV_EVENT_CONDOWN,
+ /**
+ * MUST always be the last element!
+ */
+ NR_DEV_EVENTS
+};
+
+static const char *dev_event_names[] = {
+ "Start",
+ "Stop",
+ "Connection up",
+ "Connection down",
+};
+
+/**
+ * Events of the connection statemachine
+ */
+enum conn_events {
+ /**
+ * Events representing callbacks from
+ * the lowlevel iucv layer
+ */
+ CONN_EVENT_CONN_REQ,
+ CONN_EVENT_CONN_ACK,
+ CONN_EVENT_CONN_REJ,
+ CONN_EVENT_CONN_SUS,
+ CONN_EVENT_CONN_RES,
+ CONN_EVENT_RX,
+ CONN_EVENT_TXDONE,
+
+ /**
+ * Events representing error return codes from
+ * calls to the lowlevel iucv layer
+ */
+
+ /**
+ * Event representing timer expiry.
+ */
+ CONN_EVENT_TIMER,
+
+ /**
+ * Events representing commands from upper levels.
+ */
+ CONN_EVENT_START,
+ CONN_EVENT_STOP,
+
+ /**
+ * MUST always be the last element!
+ */
+ NR_CONN_EVENTS,
+};
+
+static const char *conn_event_names[] = {
+ "Remote connection request",
+ "Remote connection acknowledge",
+ "Remote connection reject",
+ "Connection suspended",
+ "Connection resumed",
+ "Data received",
+ "Data sent",
+
+ "Timer",
+
+ "Start",
+ "Stop",
+};
+
+/**
+ * States of the connection statemachine.
+ */
+enum conn_states {
+ /**
+ * Connection not assigned to any device,
+ * initial state, invalid
+ */
+ CONN_STATE_INVALID,
+
+ /**
+ * Userid assigned but not operating
+ */
+ CONN_STATE_STOPPED,
+
+ /**
+ * Connection registered,
+ * no connection request sent yet,
+ * no connection request received
+ */
+ CONN_STATE_STARTWAIT,
+
+ /**
+ * Connection registered and connection request sent,
+ * no acknowledge and no connection request received yet.
+ */
+ CONN_STATE_SETUPWAIT,
+
+ /**
+ * Connection up and running idle
+ */
+ CONN_STATE_IDLE,
+
+ /**
+ * Data sent, awaiting CONN_EVENT_TXDONE
+ */
+ CONN_STATE_TX,
+
+ /**
+ * Error during registration.
+ */
+ CONN_STATE_REGERR,
+
+ /**
+ * Error during connection setup.
+ */
+ CONN_STATE_CONNERR,
+
+ /**
+ * MUST always be the last element!
+ */
+ NR_CONN_STATES,
+};
+
+static const char *conn_state_names[] = {
+ "Invalid",
+ "Stopped",
+ "StartWait",
+ "SetupWait",
+ "Idle",
+ "TX",
+ "Terminating",
+ "Registration error",
+ "Connect error",
+};
+
+
+/**
+ * Debug Facility Stuff
+ */
+static debug_info_t *iucv_dbf_setup = NULL;
+static debug_info_t *iucv_dbf_data = NULL;
+static debug_info_t *iucv_dbf_trace = NULL;
+
+DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+static void iucv_unregister_dbf_views(void)
+{
+ if (iucv_dbf_setup)
+ debug_unregister(iucv_dbf_setup);
+ if (iucv_dbf_data)
+ debug_unregister(iucv_dbf_data);
+ if (iucv_dbf_trace)
+ debug_unregister(iucv_dbf_trace);
+}
+static int iucv_register_dbf_views(void)
+{
+ iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
+ IUCV_DBF_SETUP_PAGES,
+ IUCV_DBF_SETUP_NR_AREAS,
+ IUCV_DBF_SETUP_LEN);
+ iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
+ IUCV_DBF_DATA_PAGES,
+ IUCV_DBF_DATA_NR_AREAS,
+ IUCV_DBF_DATA_LEN);
+ iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
+ IUCV_DBF_TRACE_PAGES,
+ IUCV_DBF_TRACE_NR_AREAS,
+ IUCV_DBF_TRACE_LEN);
+
+ if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
+ (iucv_dbf_trace == NULL)) {
+ iucv_unregister_dbf_views();
+ return -ENOMEM;
+ }
+ debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
+ debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
+
+ debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
+ debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
+
+ debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
+ debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
+
+ return 0;
+}
+
+/*
+ * Callback-wrappers, called from lowlevel iucv layer.
+ */
+
+static void netiucv_callback_rx(struct iucv_path *path,
+ struct iucv_message *msg)
+{
+ struct iucv_connection *conn = path->private;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = msg;
+ fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
+}
+
+static void netiucv_callback_txdone(struct iucv_path *path,
+ struct iucv_message *msg)
+{
+ struct iucv_connection *conn = path->private;
+ struct iucv_event ev;
+
+ ev.conn = conn;
+ ev.data = msg;
+ fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
+}
+
+static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
+{
+ struct iucv_connection *conn = path->private;
+
+ fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
+}
+
+static int netiucv_callback_connreq(struct iucv_path *path,
+ u8 ipvmid[8], u8 ipuser[16])
+{
+ struct iucv_connection *conn = path->private;
+ struct iucv_event ev;
+ static char tmp_user[9];
+ static char tmp_udat[17];
+ int rc;
+
+ rc = -EINVAL;
+ memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+ memcpy(tmp_udat, ipuser, 16);
+ EBCASC(tmp_udat, 16);
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(conn, &iucv_connection_list, list) {
+ if (strncmp(ipvmid, conn->userid, 8) ||
+ strncmp(ipuser, conn->userdata, 16))
+ continue;
+ /* Found a matching connection for this path. */
+ conn->path = path;
+ ev.conn = conn;
+ ev.data = path;
+ fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
+ rc = 0;
+ }
+ IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+ tmp_user, netiucv_printname(tmp_udat, 16));
+ read_unlock_bh(&iucv_connection_rwlock);
+ return rc;
+}
+
+static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
+{
+ struct iucv_connection *conn = path->private;
+
+ fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
+}
+
+static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
+{
+ struct iucv_connection *conn = path->private;
+
+ fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
+}
+
+static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
+{
+ struct iucv_connection *conn = path->private;
+
+ fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
+}
+
+/**
+ * NOP action for statemachines
+ */
+static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
+{
+}
+
+/*
+ * Actions of the connection statemachine
+ */
+
+/**
+ * netiucv_unpack_skb
+ * @conn: The connection where this skb has been received.
+ * @pskb: The received skb.
+ *
+ * Unpack a just received skb and hand it over to upper layers.
+ * Helper function for conn_action_rx.
+ */
+static void netiucv_unpack_skb(struct iucv_connection *conn,
+ struct sk_buff *pskb)
+{
+ struct net_device *dev = conn->netdev;
+ struct netiucv_priv *privptr = netdev_priv(dev);
+ u16 offset = 0;
+
+ skb_put(pskb, NETIUCV_HDRLEN);
+ pskb->dev = dev;
+ pskb->ip_summed = CHECKSUM_NONE;
+ pskb->protocol = ntohs(ETH_P_IP);
+
+ while (1) {
+ struct sk_buff *skb;
+ struct ll_header *header = (struct ll_header *) pskb->data;
+
+ if (!header->next)
+ break;
+
+ skb_pull(pskb, NETIUCV_HDRLEN);
+ header->next -= offset;
+ offset += header->next;
+ header->next -= NETIUCV_HDRLEN;
+ if (skb_tailroom(pskb) < header->next) {
+ IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
+ header->next, skb_tailroom(pskb));
+ return;
+ }
+ skb_put(pskb, header->next);
+ skb_reset_mac_header(pskb);
+ skb = dev_alloc_skb(pskb->len);
+ if (!skb) {
+ IUCV_DBF_TEXT(data, 2,
+ "Out of memory in netiucv_unpack_skb\n");
+ privptr->stats.rx_dropped++;
+ return;
+ }
+ skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
+ pskb->len);
+ skb_reset_mac_header(skb);
+ skb->dev = pskb->dev;
+ skb->protocol = pskb->protocol;
+ pskb->ip_summed = CHECKSUM_UNNECESSARY;
+ privptr->stats.rx_packets++;
+ privptr->stats.rx_bytes += skb->len;
+ /*
+ * Since receiving is always initiated from a tasklet (in iucv.c),
+ * we must use netif_rx_ni() instead of netif_rx()
+ */
+ netif_rx_ni(skb);
+ skb_pull(pskb, header->next);
+ skb_put(pskb, NETIUCV_HDRLEN);
+ }
+}
+
+static void conn_action_rx(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = arg;
+ struct iucv_connection *conn = ev->conn;
+ struct iucv_message *msg = ev->data;
+ struct netiucv_priv *privptr = netdev_priv(conn->netdev);
+ int rc;
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+
+ if (!conn->netdev) {
+ iucv_message_reject(conn->path, msg);
+ IUCV_DBF_TEXT(data, 2,
+ "Received data for unlinked connection\n");
+ return;
+ }
+ if (msg->length > conn->max_buffsize) {
+ iucv_message_reject(conn->path, msg);
+ privptr->stats.rx_dropped++;
+ IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
+ msg->length, conn->max_buffsize);
+ return;
+ }
+ conn->rx_buff->data = conn->rx_buff->head;
+ skb_reset_tail_pointer(conn->rx_buff);
+ conn->rx_buff->len = 0;
+ rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
+ msg->length, NULL);
+ if (rc || msg->length < 5) {
+ privptr->stats.rx_errors++;
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
+ return;
+ }
+ netiucv_unpack_skb(conn, conn->rx_buff);
+}
+
+static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = arg;
+ struct iucv_connection *conn = ev->conn;
+ struct iucv_message *msg = ev->data;
+ struct iucv_message txmsg;
+ struct netiucv_priv *privptr = NULL;
+ u32 single_flag = msg->tag;
+ u32 txbytes = 0;
+ u32 txpackets = 0;
+ u32 stat_maxcq = 0;
+ struct sk_buff *skb;
+ unsigned long saveflags;
+ struct ll_header header;
+ int rc;
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+
+ if (conn && conn->netdev)
+ privptr = netdev_priv(conn->netdev);
+ conn->prof.tx_pending--;
+ if (single_flag) {
+ if ((skb = skb_dequeue(&conn->commit_queue))) {
+ atomic_dec(&skb->users);
+ if (privptr) {
+ privptr->stats.tx_packets++;
+ privptr->stats.tx_bytes +=
+ (skb->len - NETIUCV_HDRLEN
+ - NETIUCV_HDRLEN);
+ }
+ dev_kfree_skb_any(skb);
+ }
+ }
+ conn->tx_buff->data = conn->tx_buff->head;
+ skb_reset_tail_pointer(conn->tx_buff);
+ conn->tx_buff->len = 0;
+ spin_lock_irqsave(&conn->collect_lock, saveflags);
+ while ((skb = skb_dequeue(&conn->collect_queue))) {
+ header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
+ memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
+ NETIUCV_HDRLEN);
+ skb_copy_from_linear_data(skb,
+ skb_put(conn->tx_buff, skb->len),
+ skb->len);
+ txbytes += skb->len;
+ txpackets++;
+ stat_maxcq++;
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ }
+ if (conn->collect_len > conn->prof.maxmulti)
+ conn->prof.maxmulti = conn->collect_len;
+ conn->collect_len = 0;
+ spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+ if (conn->tx_buff->len == 0) {
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ return;
+ }
+
+ header.next = 0;
+ memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+ conn->prof.send_stamp = current_kernel_time();
+ txmsg.class = 0;
+ txmsg.tag = 0;
+ rc = iucv_message_send(conn->path, &txmsg, 0, 0,
+ conn->tx_buff->data, conn->tx_buff->len);
+ conn->prof.doios_multi++;
+ conn->prof.txlen += conn->tx_buff->len;
+ conn->prof.tx_pending++;
+ if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+ conn->prof.tx_max_pending = conn->prof.tx_pending;
+ if (rc) {
+ conn->prof.tx_pending--;
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ if (privptr)
+ privptr->stats.tx_errors += txpackets;
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ } else {
+ if (privptr) {
+ privptr->stats.tx_packets += txpackets;
+ privptr->stats.tx_bytes += txbytes;
+ }
+ if (stat_maxcq > conn->prof.maxcqueue)
+ conn->prof.maxcqueue = stat_maxcq;
+ }
+}
+
+static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = arg;
+ struct iucv_connection *conn = ev->conn;
+ struct iucv_path *path = ev->data;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
+ int rc;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ conn->path = path;
+ path->msglim = NETIUCV_QUEUELEN_DEFAULT;
+ path->flags = 0;
+ rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
+ if (rc) {
+ IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
+ return;
+ }
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ netdev->tx_queue_len = conn->path->msglim;
+ fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = arg;
+ struct iucv_path *path = ev->data;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ iucv_path_sever(path, NULL);
+}
+
+static void conn_action_connack(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_connection *conn = arg;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ fsm_deltimer(&conn->timer);
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ netdev->tx_queue_len = conn->path->msglim;
+ fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
+}
+
+static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_connection *conn = arg;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ fsm_deltimer(&conn->timer);
+ iucv_path_sever(conn->path, conn->userdata);
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+}
+
+static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_connection *conn = arg;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ fsm_deltimer(&conn->timer);
+ iucv_path_sever(conn->path, conn->userdata);
+ dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+ "connection\n", netiucv_printuser(conn));
+ IUCV_DBF_TEXT(data, 2,
+ "conn_action_connsever: Remote dropped connection\n");
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+ fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void conn_action_start(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_connection *conn = arg;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
+ int rc;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+
+ /*
+ * We must set the state before calling iucv_connect because the
+ * callback handler could be called at any point after the connection
+ * request is sent
+ */
+
+ fsm_newstate(fi, CONN_STATE_SETUPWAIT);
+ conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+ IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+ netdev->name, netiucv_printuser(conn));
+
+ rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
+ NULL, conn->userdata, conn);
+ switch (rc) {
+ case 0:
+ netdev->tx_queue_len = conn->path->msglim;
+ fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
+ CONN_EVENT_TIMER, conn);
+ return;
+ case 11:
+ dev_warn(privptr->dev,
+ "The IUCV device failed to connect to z/VM guest %s\n",
+ netiucv_printname(conn->userid, 8));
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+ break;
+ case 12:
+ dev_warn(privptr->dev,
+ "The IUCV device failed to connect to the peer on z/VM"
+ " guest %s\n", netiucv_printname(conn->userid, 8));
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+ break;
+ case 13:
+ dev_err(privptr->dev,
+ "Connecting the IUCV device would exceed the maximum"
+ " number of IUCV connections\n");
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
+ case 14:
+ dev_err(privptr->dev,
+ "z/VM guest %s has too many IUCV connections"
+ " to connect with the IUCV device\n",
+ netiucv_printname(conn->userid, 8));
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
+ case 15:
+ dev_err(privptr->dev,
+ "The IUCV device cannot connect to a z/VM guest with no"
+ " IUCV authorization\n");
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
+ default:
+ dev_err(privptr->dev,
+ "Connecting the IUCV device failed with error %d\n",
+ rc);
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
+ }
+ IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
+ kfree(conn->path);
+ conn->path = NULL;
+}
+
+static void netiucv_purge_skb_queue(struct sk_buff_head *q)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(q))) {
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void conn_action_stop(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_event *ev = arg;
+ struct iucv_connection *conn = ev->conn;
+ struct net_device *netdev = conn->netdev;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ fsm_deltimer(&conn->timer);
+ fsm_newstate(fi, CONN_STATE_STOPPED);
+ netiucv_purge_skb_queue(&conn->collect_queue);
+ if (conn->path) {
+ IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
+ iucv_path_sever(conn->path, conn->userdata);
+ kfree(conn->path);
+ conn->path = NULL;
+ }
+ netiucv_purge_skb_queue(&conn->commit_queue);
+ fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
+}
+
+static void conn_action_inval(fsm_instance *fi, int event, void *arg)
+{
+ struct iucv_connection *conn = arg;
+ struct net_device *netdev = conn->netdev;
+
+ IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
+ netdev->name, conn->userid);
+}
+
+static const fsm_node conn_fsm[] = {
+ { CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval },
+ { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start },
+
+ { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop },
+ { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop },
+
+ { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject },
+ { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
+ { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject },
+ { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject },
+
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack },
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev },
+
+ { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever },
+ { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever },
+ { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever },
+
+ { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx },
+ { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx },
+
+ { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone },
+ { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone },
+};
+
+static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
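+
+/*
+ * Dispatch sketch: init_fsm() (see fsm.h) turns this array into a
+ * (state, event) jumptable, so e.g. fsm_event(conn->fsm, CONN_EVENT_RX,
+ * &ev) in state CONN_STATE_IDLE ends up in conn_action_rx(); pairs
+ * without an entry are left to the fsm core's error handling.
+ */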
+
+
+/*
+ * Actions for interface - statemachine.
+ */
+
+/**
+ * dev_action_start
+ * @fi: An instance of an interface statemachine.
+ * @event: The event, just happened.
+ * @arg: Generic pointer, cast from struct net_device * upon call.
+ *
+ * Startup connection by sending CONN_EVENT_START to it.
+ */
+static void dev_action_start(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct netiucv_priv *privptr = netdev_priv(dev);
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ fsm_newstate(fi, DEV_STATE_STARTWAIT);
+ fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
+}
+
+/**
+ * Shutdown connection by sending CONN_EVENT_STOP to it.
+ *
+ * @param fi An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_stop(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct netiucv_priv *privptr = netdev_priv(dev);
+ struct iucv_event ev;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ ev.conn = privptr->conn;
+
+ fsm_newstate(fi, DEV_STATE_STOPWAIT);
+ fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection is up and running.
+ *
+ * @param fi An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_connup(fsm_instance *fi, int event, void *arg)
+{
+ struct net_device *dev = arg;
+ struct netiucv_priv *privptr = netdev_priv(dev);
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ switch (fsm_getstate(fi)) {
+ case DEV_STATE_STARTWAIT:
+ fsm_newstate(fi, DEV_STATE_RUNNING);
+ dev_info(privptr->dev,
+ "The IUCV device has been connected"
+ " successfully to %s\n",
+ netiucv_printuser(privptr->conn));
+ IUCV_DBF_TEXT(setup, 3,
+ "connection is up and running\n");
+ break;
+ case DEV_STATE_STOPWAIT:
+ IUCV_DBF_TEXT(data, 2,
+ "dev_action_connup: in DEV_STATE_STOPWAIT\n");
+ break;
+ }
+}
+
+/**
+ * Called from connection statemachine
+ * when a connection has been shutdown.
+ *
+ * @param fi An instance of an interface statemachine.
+ * @param event The event, just happened.
+ * @param arg Generic pointer, cast from struct net_device * upon call.
+ */
+static void
+dev_action_conndown(fsm_instance *fi, int event, void *arg)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ switch (fsm_getstate(fi)) {
+ case DEV_STATE_RUNNING:
+ fsm_newstate(fi, DEV_STATE_STARTWAIT);
+ break;
+ case DEV_STATE_STOPWAIT:
+ fsm_newstate(fi, DEV_STATE_STOPPED);
+ IUCV_DBF_TEXT(setup, 3, "connection is down\n");
+ break;
+ }
+}
+
+static const fsm_node dev_fsm[] = {
+ { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
+
+ { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start },
+ { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown },
+
+ { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup },
+
+ { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
+ { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown },
+ { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop },
+};
+
+static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
+
+/**
+ * Transmit a packet.
+ * This is a helper function for netiucv_tx().
+ *
+ * @param conn Connection to be used for sending.
+ * @param skb Pointer to struct sk_buff of packet to send.
+ * The linklevel header has already been set up
+ * by netiucv_tx().
+ *
+ * @return 0 on success, -ERRNO on failure.
+ */
+static int netiucv_transmit_skb(struct iucv_connection *conn,
+ struct sk_buff *skb)
+{
+ struct iucv_message msg;
+ unsigned long saveflags;
+ struct ll_header header;
+ int rc;
+
+ if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
+ int l = skb->len + NETIUCV_HDRLEN;
+
+ spin_lock_irqsave(&conn->collect_lock, saveflags);
+ if (conn->collect_len + l >
+ (conn->max_buffsize - NETIUCV_HDRLEN)) {
+ rc = -EBUSY;
+ IUCV_DBF_TEXT(data, 2,
+ "EBUSY from netiucv_transmit_skb\n");
+ } else {
+ atomic_inc(&skb->users);
+ skb_queue_tail(&conn->collect_queue, skb);
+ conn->collect_len += l;
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&conn->collect_lock, saveflags);
+ } else {
+ struct sk_buff *nskb = skb;
+ /**
+ * Copy the skb to a newly allocated skb in lowmem only if the
+ * data is located above 2G in memory or tailroom is < 2.
+ */
+ unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
+ NETIUCV_HDRLEN)) >> 31;
+ int copied = 0;
+ if (hi || (skb_tailroom(skb) < 2)) {
+ nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
+ NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
+ if (!nskb) {
+ IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
+ rc = -ENOMEM;
+ return rc;
+ } else {
+ skb_reserve(nskb, NETIUCV_HDRLEN);
+ memcpy(skb_put(nskb, skb->len),
+ skb->data, skb->len);
+ }
+ copied = 1;
+ }
+ /**
+ * skb now is below 2G and has enough room. Add headers.
+ */
+ header.next = nskb->len + NETIUCV_HDRLEN;
+ memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+ header.next = 0;
+ memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+
+ fsm_newstate(conn->fsm, CONN_STATE_TX);
+ conn->prof.send_stamp = current_kernel_time();
+
+ msg.tag = 1;
+ msg.class = 0;
+ rc = iucv_message_send(conn->path, &msg, 0, 0,
+ nskb->data, nskb->len);
+ conn->prof.doios_single++;
+ conn->prof.txlen += skb->len;
+ conn->prof.tx_pending++;
+ if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+ conn->prof.tx_max_pending = conn->prof.tx_pending;
+ if (rc) {
+ struct netiucv_priv *privptr;
+ fsm_newstate(conn->fsm, CONN_STATE_IDLE);
+ conn->prof.tx_pending--;
+ privptr = netdev_priv(conn->netdev);
+ if (privptr)
+ privptr->stats.tx_errors++;
+ if (copied)
+ dev_kfree_skb(nskb);
+ else {
+ /**
+ * Remove our headers. They get added
+ * again on retransmit.
+ */
+ skb_pull(skb, NETIUCV_HDRLEN);
+ skb_trim(skb, skb->len - NETIUCV_HDRLEN);
+ }
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ } else {
+ if (copied)
+ dev_kfree_skb(skb);
+ atomic_inc(&nskb->users);
+ skb_queue_tail(&conn->commit_queue, nskb);
+ }
+ }
+
+ return rc;
+}
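+
+/*
+ * Note on batching, as implemented above and in conn_action_txdone():
+ * while a send is pending (state != CONN_STATE_IDLE), outgoing skbs are
+ * parked on collect_queue; the next txdone event then coalesces the
+ * whole queue into a single multi-packet IUCV message (accounted as
+ * doios_multi in the connection profile).
+ */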
+
+/*
+ * Interface API for upper network layers
+ */
+
+/**
+ * Open an interface.
+ * Called from generic network layer when ifconfig up is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int netiucv_open(struct net_device *dev)
+{
+ struct netiucv_priv *priv = netdev_priv(dev);
+
+ fsm_event(priv->fsm, DEV_EVENT_START, dev);
+ return 0;
+}
+
+/**
+ * Close an interface.
+ * Called from generic network layer when ifconfig down is run.
+ *
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 on success, -ERRNO on failure. (Never fails.)
+ */
+static int netiucv_close(struct net_device *dev)
+{
+ struct netiucv_priv *priv = netdev_priv(dev);
+
+ fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
+ return 0;
+}
+
+static int netiucv_pm_prepare(struct device *dev)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ return 0;
+}
+
+static void netiucv_pm_complete(struct device *dev)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ return;
+}
+
+/**
+ * netiucv_pm_freeze() - Freeze PM callback
+ * @dev: netiucv device
+ *
+ * close open netiucv interfaces
+ */
+static int netiucv_pm_freeze(struct device *dev)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = NULL;
+ int rc = 0;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ if (priv && priv->conn)
+ ndev = priv->conn->netdev;
+ if (!ndev)
+ goto out;
+ netif_device_detach(ndev);
+ priv->pm_state = fsm_getstate(priv->fsm);
+ rc = netiucv_close(ndev);
+out:
+ return rc;
+}
+
+/**
+ * netiucv_pm_restore_thaw() - Thaw and restore PM callback
+ * @dev: netiucv device
+ *
+ * re-open netiucv interfaces closed during freeze
+ */
+static int netiucv_pm_restore_thaw(struct device *dev)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = NULL;
+ int rc = 0;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ if (priv && priv->conn)
+ ndev = priv->conn->netdev;
+ if (!ndev)
+ goto out;
+ switch (priv->pm_state) {
+ case DEV_STATE_RUNNING:
+ case DEV_STATE_STARTWAIT:
+ rc = netiucv_open(ndev);
+ break;
+ default:
+ break;
+ }
+ netif_device_attach(ndev);
+out:
+ return rc;
+}
+
+/**
+ * Start transmission of a packet.
+ * Called from generic network device layer.
+ *
+ * @param skb Pointer to buffer containing the packet.
+ * @param dev Pointer to interface struct.
+ *
+ * @return 0 if packet consumed, !0 if packet rejected.
+ *	   Note: If we return !0, then the packet is freed by
+ *	   the generic network layer.
+ */
+static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netiucv_priv *privptr = netdev_priv(dev);
+ int rc;
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ /**
+ * Some sanity checks ...
+ */
+ if (skb == NULL) {
+ IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
+ privptr->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ if (skb_headroom(skb) < NETIUCV_HDRLEN) {
+ IUCV_DBF_TEXT(data, 2,
+ "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
+ dev_kfree_skb(skb);
+ privptr->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+	/**
+	 * If the connection is not running, drop the packet and
+	 * account it as a carrier error.
+	 */
+ if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
+ dev_kfree_skb(skb);
+ privptr->stats.tx_dropped++;
+ privptr->stats.tx_errors++;
+ privptr->stats.tx_carrier_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ if (netiucv_test_and_set_busy(dev)) {
+ IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
+ return NETDEV_TX_BUSY;
+ }
+ dev->trans_start = jiffies;
+ rc = netiucv_transmit_skb(privptr->conn, skb);
+ netiucv_clear_busy(dev);
+ return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+}
+
+/**
+ * netiucv_stats
+ * @dev: Pointer to interface struct.
+ *
+ * Returns interface statistics of a device.
+ *
+ * Returns pointer to stats struct of this interface.
+ */
+static struct net_device_stats *netiucv_stats (struct net_device * dev)
+{
+ struct netiucv_priv *priv = netdev_priv(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return &priv->stats;
+}
+
+/**
+ * netiucv_change_mtu
+ * @dev: Pointer to interface struct.
+ * @new_mtu: The new MTU to use for this interface.
+ *
+ * Sets MTU of an interface.
+ *
+ * Returns 0 on success, -EINVAL if MTU is out of valid range.
+ * (valid range is 576 .. NETIUCV_MTU_MAX).
+ */
+static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
+ IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/*
+ * attributes in sysfs
+ */
+
+static ssize_t user_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
+}
+
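+/*
+ * Parse "USERID" or "USERID.userdata": up to 8 alphanumeric (or '$')
+ * characters of VM user ID, optionally followed by '.' and up to 16
+ * characters of IUCV user data. Both parts are uppercased and padded
+ * with blanks; the user data is converted to EBCDIC.
+ */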
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+ char *userdata)
+{
+ const char *p;
+ int i;
+
+ p = strchr(buf, '.');
+ if ((p && ((count > 26) ||
+ ((p - buf) > 8) ||
+ (buf + count - p > 18))) ||
+ (!p && (count > 9))) {
+ IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
+ return -EINVAL;
+ }
+
+ for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+ if (isalnum(*p) || *p == '$') {
+ username[i] = toupper(*p);
+ continue;
+ }
+ if (*p == '\n')
+ /* trailing lf, grr */
+ break;
+ IUCV_DBF_TEXT_(setup, 2,
+ "conn_write: invalid character %02x\n", *p);
+ return -EINVAL;
+ }
+ while (i < 8)
+ username[i++] = ' ';
+ username[8] = '\0';
+
+ if (*p == '.') {
+ p++;
+ for (i = 0; i < 16 && *p; i++, p++) {
+ if (*p == '\n')
+ break;
+ userdata[i] = toupper(*p);
+ }
+ while (i > 0 && i < 16)
+ userdata[i++] = ' ';
+ } else
+ memcpy(userdata, iucvMagic_ascii, 16);
+ userdata[16] = '\0';
+ ASCEBC(userdata, 16);
+
+ return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->conn->netdev;
+ char username[9];
+ char userdata[17];
+ int rc;
+ struct iucv_connection *cp;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
+
+ if (memcmp(username, priv->conn->userid, 9) &&
+ (ndev->flags & (IFF_UP | IFF_RUNNING))) {
+ /* username changed while the interface is active. */
+ IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
+ return -EPERM;
+ }
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(cp, &iucv_connection_list, list) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
+ read_unlock_bh(&iucv_connection_rwlock);
+ IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
+ return -EEXIST;
+ }
+ }
+ read_unlock_bh(&iucv_connection_rwlock);
+ memcpy(priv->conn->userid, username, 9);
+ memcpy(priv->conn->userdata, userdata, 17);
+ return count;
+}
+
+static DEVICE_ATTR(user, 0644, user_show, user_write);
+
+static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%d\n", priv->conn->max_buffsize);
+}
+
+static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->conn->netdev;
+ char *e;
+ int bs1;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ if (count >= 39)
+ return -EINVAL;
+
+ bs1 = simple_strtoul(buf, &e, 0);
+
+ if (e && (!isspace(*e))) {
+ IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+ *e);
+ return -EINVAL;
+ }
+ if (bs1 > NETIUCV_BUFSIZE_MAX) {
+ IUCV_DBF_TEXT_(setup, 2,
+ "buffer_write: buffer size %d too large\n",
+ bs1);
+ return -EINVAL;
+ }
+ if ((ndev->flags & IFF_RUNNING) &&
+ (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
+ IUCV_DBF_TEXT_(setup, 2,
+ "buffer_write: buffer size %d too small\n",
+ bs1);
+ return -EINVAL;
+ }
+ if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
+ IUCV_DBF_TEXT_(setup, 2,
+ "buffer_write: buffer size %d too small\n",
+ bs1);
+ return -EINVAL;
+ }
+
+ priv->conn->max_buffsize = bs1;
+ if (!(ndev->flags & IFF_RUNNING))
+ ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
+
+}
+}
+
+static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
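+/*
+ * Illustrative use (path assumes the standard sysfs bus layout and an
+ * interface named iucv0, whose device is registered as "netiucv0"):
+ *   echo 32768 > /sys/bus/iucv/devices/netiucv0/buffer
+ * While the interface is down, the MTU is adjusted to fit the new
+ * buffer size.
+ */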
+
+static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
+}
+
+static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
+
+static ssize_t conn_fsm_show (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
+}
+
+static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
+
+static ssize_t maxmulti_show (struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
+}
+
+static ssize_t maxmulti_write (struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ priv->conn->prof.maxmulti = 0;
+ return count;
+}
+
+static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
+
+static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
+}
+
+static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ priv->conn->prof.maxcqueue = 0;
+ return count;
+}
+
+static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
+
+static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
+}
+
+static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ priv->conn->prof.doios_single = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
+
+static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
+}
+
+static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ priv->conn->prof.doios_multi = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
+
+static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
+}
+
+static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ priv->conn->prof.txlen = 0;
+ return count;
+}
+
+static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
+
+static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
+}
+
+static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ priv->conn->prof.tx_time = 0;
+ return count;
+}
+
+static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
+
+static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
+}
+
+static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ priv->conn->prof.tx_pending = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
+
+static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 5, __func__);
+ return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
+}
+
+static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+
+ IUCV_DBF_TEXT(trace, 4, __func__);
+ priv->conn->prof.tx_max_pending = 0;
+ return count;
+}
+
+static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
+
+static struct attribute *netiucv_attrs[] = {
+ &dev_attr_buffer.attr,
+ &dev_attr_user.attr,
+ NULL,
+};
+
+static struct attribute_group netiucv_attr_group = {
+ .attrs = netiucv_attrs,
+};
+
+static struct attribute *netiucv_stat_attrs[] = {
+ &dev_attr_device_fsm_state.attr,
+ &dev_attr_connection_fsm_state.attr,
+ &dev_attr_max_tx_buffer_used.attr,
+ &dev_attr_max_chained_skbs.attr,
+ &dev_attr_tx_single_write_ops.attr,
+ &dev_attr_tx_multi_write_ops.attr,
+ &dev_attr_netto_bytes.attr,
+ &dev_attr_max_tx_io_time.attr,
+ &dev_attr_tx_pending.attr,
+ &dev_attr_tx_max_pending.attr,
+ NULL,
+};
+
+static struct attribute_group netiucv_stat_attr_group = {
+ .name = "stats",
+ .attrs = netiucv_stat_attrs,
+};
+
+static int netiucv_add_files(struct device *dev)
+{
+ int ret;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
+ if (ret)
+ return ret;
+ ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
+ if (ret)
+ sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+ return ret;
+}
+
+static void netiucv_remove_files(struct device *dev)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
+ sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
+}
+
+static int netiucv_register_device(struct net_device *ndev)
+{
+ struct netiucv_priv *priv = netdev_priv(ndev);
+ struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ int ret;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ if (dev) {
+ dev_set_name(dev, "net%s", ndev->name);
+ dev->bus = &iucv_bus;
+ dev->parent = iucv_root;
+ /*
+ * The release function could be called after the
+		 * module has been unloaded. Its _only_ task is to
+		 * free the struct. Therefore, we specify kfree()
+		 * directly here. (Probably a little obfuscated,
+		 * but legitimate.)
+ */
+ dev->release = (void (*)(struct device *))kfree;
+ dev->driver = &netiucv_driver;
+ } else
+ return -ENOMEM;
+
+ ret = device_register(dev);
+ if (ret) {
+ put_device(dev);
+ return ret;
+ }
+ ret = netiucv_add_files(dev);
+ if (ret)
+ goto out_unreg;
+ priv->dev = dev;
+ dev_set_drvdata(dev, priv);
+ return 0;
+
+out_unreg:
+ device_unregister(dev);
+ return ret;
+}
+
+static void netiucv_unregister_device(struct device *dev)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ netiucv_remove_files(dev);
+ device_unregister(dev);
+}
+
+/**
+ * Allocate and initialize a new connection structure.
+ * Add it to the list of netiucv connections.
+ */
+static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
+ char *username,
+ char *userdata)
+{
+ struct iucv_connection *conn;
+
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ goto out;
+ skb_queue_head_init(&conn->collect_queue);
+ skb_queue_head_init(&conn->commit_queue);
+ spin_lock_init(&conn->collect_lock);
+ conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
+ conn->netdev = dev;
+
+ conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+ if (!conn->rx_buff)
+ goto out_conn;
+ conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+ if (!conn->tx_buff)
+ goto out_rx;
+ conn->fsm = init_fsm("netiucvconn", conn_state_names,
+ conn_event_names, NR_CONN_STATES,
+ NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
+ GFP_KERNEL);
+ if (!conn->fsm)
+ goto out_tx;
+
+ fsm_settimer(conn->fsm, &conn->timer);
+ fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+
+ if (userdata)
+ memcpy(conn->userdata, userdata, 17);
+ if (username) {
+ memcpy(conn->userid, username, 9);
+ fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
+ }
+
+ write_lock_bh(&iucv_connection_rwlock);
+ list_add_tail(&conn->list, &iucv_connection_list);
+ write_unlock_bh(&iucv_connection_rwlock);
+ return conn;
+
+out_tx:
+ kfree_skb(conn->tx_buff);
+out_rx:
+ kfree_skb(conn->rx_buff);
+out_conn:
+ kfree(conn);
+out:
+ return NULL;
+}
+
+/**
+ * Release a connection structure and remove it from the
+ * list of netiucv connections.
+ */
+static void netiucv_remove_connection(struct iucv_connection *conn)
+{
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ write_lock_bh(&iucv_connection_rwlock);
+ list_del_init(&conn->list);
+ write_unlock_bh(&iucv_connection_rwlock);
+ fsm_deltimer(&conn->timer);
+ netiucv_purge_skb_queue(&conn->collect_queue);
+ if (conn->path) {
+ iucv_path_sever(conn->path, conn->userdata);
+ kfree(conn->path);
+ conn->path = NULL;
+ }
+ netiucv_purge_skb_queue(&conn->commit_queue);
+ kfree_fsm(conn->fsm);
+ kfree_skb(conn->rx_buff);
+ kfree_skb(conn->tx_buff);
+}
+
+/**
+ * Release everything of a net device.
+ */
+static void netiucv_free_netdevice(struct net_device *dev)
+{
+	struct netiucv_priv *privptr;
+
+	IUCV_DBF_TEXT(trace, 3, __func__);
+
+	if (!dev)
+		return;
+	privptr = netdev_priv(dev);
+
+ if (privptr) {
+ if (privptr->conn)
+ netiucv_remove_connection(privptr->conn);
+ if (privptr->fsm)
+ kfree_fsm(privptr->fsm);
+		privptr->conn = NULL;
+		privptr->fsm = NULL;
+ /* privptr gets freed by free_netdev() */
+ }
+ free_netdev(dev);
+}
+
+/**
+ * Initialize a net device. (Called from kernel in alloc_netdev())
+ */
+static const struct net_device_ops netiucv_netdev_ops = {
+ .ndo_open = netiucv_open,
+ .ndo_stop = netiucv_close,
+ .ndo_get_stats = netiucv_stats,
+ .ndo_start_xmit = netiucv_tx,
+ .ndo_change_mtu = netiucv_change_mtu,
+};
+
+static void netiucv_setup_netdevice(struct net_device *dev)
+{
+ dev->mtu = NETIUCV_MTU_DEFAULT;
+ dev->destructor = netiucv_free_netdevice;
+ dev->hard_header_len = NETIUCV_HDRLEN;
+ dev->addr_len = 0;
+ dev->type = ARPHRD_SLIP;
+ dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->netdev_ops = &netiucv_netdev_ops;
+}
+
+/**
+ * Allocate and initialize everything of a net device.
+ */
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
+{
+ struct netiucv_priv *privptr;
+ struct net_device *dev;
+
+ dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
+ netiucv_setup_netdevice);
+ if (!dev)
+ return NULL;
+ if (dev_alloc_name(dev, dev->name) < 0)
+ goto out_netdev;
+
+ privptr = netdev_priv(dev);
+ privptr->fsm = init_fsm("netiucvdev", dev_state_names,
+ dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
+ dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
+ if (!privptr->fsm)
+ goto out_netdev;
+
+ privptr->conn = netiucv_new_connection(dev, username, userdata);
+ if (!privptr->conn) {
+ IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
+ goto out_fsm;
+ }
+ fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
+ return dev;
+
+out_fsm:
+ kfree_fsm(privptr->fsm);
+out_netdev:
+ free_netdev(dev);
+ return NULL;
+}
+
+static ssize_t conn_write(struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ char username[9];
+ char userdata[17];
+ int rc;
+ struct net_device *dev;
+ struct netiucv_priv *priv;
+ struct iucv_connection *cp;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
+
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(cp, &iucv_connection_list, list) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17)) {
+ read_unlock_bh(&iucv_connection_rwlock);
+ IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
+ return -EEXIST;
+ }
+ }
+ read_unlock_bh(&iucv_connection_rwlock);
+
+ dev = netiucv_init_netdevice(username, userdata);
+ if (!dev) {
+ IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
+ return -ENODEV;
+ }
+
+ rc = netiucv_register_device(dev);
+ if (rc) {
+ IUCV_DBF_TEXT_(setup, 2,
+ "ret %d from netiucv_register_device\n", rc);
+ goto out_free_ndev;
+ }
+
+ /* sysfs magic */
+ priv = netdev_priv(dev);
+ SET_NETDEV_DEV(dev, priv->dev);
+
+ rc = register_netdev(dev);
+ if (rc)
+ goto out_unreg;
+
+ dev_info(priv->dev, "The IUCV interface to %s has been established "
+ "successfully\n",
+ netiucv_printuser(priv->conn));
+
+ return count;
+
+out_unreg:
+ netiucv_unregister_device(priv->dev);
+out_free_ndev:
+ netiucv_free_netdevice(dev);
+ return rc;
+}
+
+static DRIVER_ATTR(connection, 0200, NULL, conn_write);
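+/*
+ * Illustrative use (standard sysfs layout assumed): writing a peer VM
+ * user ID to the driver's "connection" attribute creates a new
+ * interface, e.g.
+ *   echo VMUSER1 > /sys/bus/iucv/drivers/netiucv/connection
+ */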
+
+static ssize_t remove_write (struct device_driver *drv,
+ const char *buf, size_t count)
+{
+ struct iucv_connection *cp;
+ struct net_device *ndev;
+ struct netiucv_priv *priv;
+ struct device *dev;
+ char name[IFNAMSIZ];
+ const char *p;
+ int i;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+
+ if (count >= IFNAMSIZ)
+ count = IFNAMSIZ - 1;
+
+ for (i = 0, p = buf; i < count && *p; i++, p++) {
+ if (*p == '\n' || *p == ' ')
+ /* trailing lf, grr */
+ break;
+ name[i] = *p;
+ }
+ name[i] = '\0';
+
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(cp, &iucv_connection_list, list) {
+ ndev = cp->netdev;
+ priv = netdev_priv(ndev);
+ dev = priv->dev;
+ if (strncmp(name, ndev->name, count))
+ continue;
+ read_unlock_bh(&iucv_connection_rwlock);
+ if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
+ dev_warn(dev, "The IUCV device is connected"
+ " to %s and cannot be removed\n",
+ priv->conn->userid);
+ IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
+ return -EPERM;
+ }
+ unregister_netdev(ndev);
+ netiucv_unregister_device(dev);
+ return count;
+ }
+ read_unlock_bh(&iucv_connection_rwlock);
+ IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
+ return -EINVAL;
+}
+
+static DRIVER_ATTR(remove, 0200, NULL, remove_write);
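+/*
+ * Illustrative use (standard sysfs layout assumed): writing an
+ * interface name removes the corresponding connection, e.g.
+ *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
+ * The interface must be down, otherwise -EPERM is returned.
+ */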
+
+static struct attribute * netiucv_drv_attrs[] = {
+ &driver_attr_connection.attr,
+ &driver_attr_remove.attr,
+ NULL,
+};
+
+static struct attribute_group netiucv_drv_attr_group = {
+ .attrs = netiucv_drv_attrs,
+};
+
+static const struct attribute_group *netiucv_drv_attr_groups[] = {
+ &netiucv_drv_attr_group,
+ NULL,
+};
+
+static void netiucv_banner(void)
+{
+ pr_info("driver initialized\n");
+}
+
+static void __exit netiucv_exit(void)
+{
+ struct iucv_connection *cp;
+ struct net_device *ndev;
+ struct netiucv_priv *priv;
+ struct device *dev;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ while (!list_empty(&iucv_connection_list)) {
+ cp = list_entry(iucv_connection_list.next,
+ struct iucv_connection, list);
+ ndev = cp->netdev;
+ priv = netdev_priv(ndev);
+ dev = priv->dev;
+
+ unregister_netdev(ndev);
+ netiucv_unregister_device(dev);
+ }
+
+ device_unregister(netiucv_dev);
+ driver_unregister(&netiucv_driver);
+ iucv_unregister(&netiucv_handler, 1);
+ iucv_unregister_dbf_views();
+
+ pr_info("driver unloaded\n");
+ return;
+}
+
+static int __init netiucv_init(void)
+{
+ int rc;
+
+ rc = iucv_register_dbf_views();
+ if (rc)
+ goto out;
+ rc = iucv_register(&netiucv_handler, 1);
+ if (rc)
+ goto out_dbf;
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ netiucv_driver.groups = netiucv_drv_attr_groups;
+ rc = driver_register(&netiucv_driver);
+ if (rc) {
+ IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
+ goto out_iucv;
+ }
+ /* establish dummy device */
+ netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!netiucv_dev) {
+ rc = -ENOMEM;
+ goto out_driver;
+ }
+ dev_set_name(netiucv_dev, "netiucv");
+ netiucv_dev->bus = &iucv_bus;
+ netiucv_dev->parent = iucv_root;
+ netiucv_dev->release = (void (*)(struct device *))kfree;
+ netiucv_dev->driver = &netiucv_driver;
+ rc = device_register(netiucv_dev);
+ if (rc) {
+ put_device(netiucv_dev);
+ goto out_driver;
+ }
+ netiucv_banner();
+ return rc;
+
+out_driver:
+ driver_unregister(&netiucv_driver);
+out_iucv:
+ iucv_unregister(&netiucv_handler, 1);
+out_dbf:
+ iucv_unregister_dbf_views();
+out:
+ return rc;
+}
+
+module_init(netiucv_init);
+module_exit(netiucv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
new file mode 100644
index 00000000000..4abc79d3963
--- /dev/null
+++ b/drivers/s390/net/qeth_core.h
@@ -0,0 +1,941 @@
+/*
+ * drivers/s390/net/qeth_core.h
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ * Frank Pavlic <fpavlic@de.ibm.com>,
+ * Thomas Spatzier <tspat@de.ibm.com>,
+ * Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#ifndef __QETH_CORE_H__
+#define __QETH_CORE_H__
+
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/if_tr.h>
+#include <linux/trdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ctype.h>
+#include <linux/in6.h>
+#include <linux/bitops.h>
+#include <linux/seq_file.h>
+#include <linux/ethtool.h>
+
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/addrconf.h>
+
+#include <asm/debug.h>
+#include <asm/qdio.h>
+#include <asm/ccwdev.h>
+#include <asm/ccwgroup.h>
+#include <asm/sysinfo.h>
+
+#include "qeth_core_mpc.h"
+
+/**
+ * Debug Facility stuff
+ */
+enum qeth_dbf_names {
+ QETH_DBF_SETUP,
+ QETH_DBF_MSG,
+ QETH_DBF_CTRL,
+ QETH_DBF_INFOS /* must be last element */
+};
+
+struct qeth_dbf_info {
+ char name[DEBUG_MAX_NAME_LEN];
+ int pages;
+ int areas;
+ int len;
+ int level;
+ struct debug_view *view;
+ debug_info_t *id;
+};
+
+#define QETH_DBF_CTRL_LEN 256
+
+#define QETH_DBF_TEXT(name, level, text) \
+ debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_DBF_HEX(name, level, addr, len) \
+ debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)
+
+#define QETH_DBF_MESSAGE(level, text...) \
+ debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
+
+#define QETH_DBF_TEXT_(name, level, text...) \
+ qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
+
+#define QETH_CARD_TEXT(card, level, text) \
+ debug_text_event(card->debug, level, text)
+
+#define QETH_CARD_HEX(card, level, addr, len) \
+ debug_event(card->debug, level, (void *)(addr), len)
+
+/* note: no level argument; messages are logged at debug level 2 */
+#define QETH_CARD_MESSAGE(card, text...) \
+	debug_sprintf_event(card->debug, 2, text)
+
+#define QETH_CARD_TEXT_(card, level, text...) \
+ qeth_dbf_longtext(card->debug, level, text)
+
+#define SENSE_COMMAND_REJECT_BYTE 0
+#define SENSE_COMMAND_REJECT_FLAG 0x80
+#define SENSE_RESETTING_EVENT_BYTE 1
+#define SENSE_RESETTING_EVENT_FLAG 0x80
+
+/*
+ * Common IO related definitions
+ */
+#define CARD_RDEV(card) card->read.ccwdev
+#define CARD_WDEV(card) card->write.ccwdev
+#define CARD_DDEV(card) card->data.ccwdev
+#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
+#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
+#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
+#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
+#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+
+/**
+ * card stuff
+ */
+struct qeth_perf_stats {
+ unsigned int bufs_rec;
+ unsigned int bufs_sent;
+
+ unsigned int skbs_sent_pack;
+ unsigned int bufs_sent_pack;
+
+ unsigned int sc_dp_p;
+ unsigned int sc_p_dp;
+ /* qdio_cq_handler: number of times called, time spent in */
+ __u64 cq_start_time;
+ unsigned int cq_cnt;
+ unsigned int cq_time;
+ /* qdio_input_handler: number of times called, time spent in */
+ __u64 inbound_start_time;
+ unsigned int inbound_cnt;
+ unsigned int inbound_time;
+ /* qeth_send_packet: number of times called, time spent in */
+ __u64 outbound_start_time;
+ unsigned int outbound_cnt;
+ unsigned int outbound_time;
+ /* qdio_output_handler: number of times called, time spent in */
+ __u64 outbound_handler_start_time;
+ unsigned int outbound_handler_cnt;
+ unsigned int outbound_handler_time;
+ /* number of calls to and time spent in do_QDIO for inbound queue */
+ __u64 inbound_do_qdio_start_time;
+ unsigned int inbound_do_qdio_cnt;
+ unsigned int inbound_do_qdio_time;
+ /* number of calls to and time spent in do_QDIO for outbound queues */
+ __u64 outbound_do_qdio_start_time;
+ unsigned int outbound_do_qdio_cnt;
+ unsigned int outbound_do_qdio_time;
+ unsigned int large_send_bytes;
+ unsigned int large_send_cnt;
+ unsigned int sg_skbs_sent;
+ unsigned int sg_frags_sent;
+ /* initial values when measuring starts */
+ unsigned long initial_rx_packets;
+ unsigned long initial_tx_packets;
+ /* inbound scatter gather data */
+ unsigned int sg_skbs_rx;
+ unsigned int sg_frags_rx;
+ unsigned int sg_alloc_page_rx;
+ unsigned int tx_csum;
+ unsigned int tx_lin;
+};
+
+/* Routing stuff */
+struct qeth_routing_info {
+ enum qeth_routing_types type;
+};
+
+/* IPA stuff */
+struct qeth_ipa_info {
+ __u32 supported_funcs;
+ __u32 enabled_funcs;
+};
+
+static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
+ enum qeth_ipa_funcs func)
+{
+ return (ipa->supported_funcs & func);
+}
+
+static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
+ enum qeth_ipa_funcs func)
+{
+ return (ipa->supported_funcs & ipa->enabled_funcs & func);
+}
+
+#define qeth_adp_supported(c, f) \
+ qeth_is_ipa_supported(&c->options.adp, f)
+#define qeth_adp_enabled(c, f) \
+ qeth_is_ipa_enabled(&c->options.adp, f)
+#define qeth_is_supported(c, f) \
+ qeth_is_ipa_supported(&c->options.ipa4, f)
+#define qeth_is_enabled(c, f) \
+ qeth_is_ipa_enabled(&c->options.ipa4, f)
+#define qeth_is_supported6(c, f) \
+ qeth_is_ipa_supported(&c->options.ipa6, f)
+#define qeth_is_enabled6(c, f) \
+ qeth_is_ipa_enabled(&c->options.ipa6, f)
+#define qeth_is_ipafunc_supported(c, prot, f) \
+ ((prot == QETH_PROT_IPV6) ? \
+ qeth_is_supported6(c, f) : qeth_is_supported(c, f))
+#define qeth_is_ipafunc_enabled(c, prot, f) \
+ ((prot == QETH_PROT_IPV6) ? \
+ qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
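+/*
+ * Illustrative use: qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)
+ * tests the card's IPv4 assist mask, qeth_is_supported6() the IPv6
+ * one; the qeth_is_ipafunc_* variants pick between them by protocol.
+ */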
+
+#define QETH_IDX_FUNC_LEVEL_OSD 0x0101
+#define QETH_IDX_FUNC_LEVEL_IQD 0x4108
+
+#define QETH_MODELLIST_ARRAY \
+ {{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
+ {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
+ {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
+ {0, 0, 0, 0, 0, 0} }
+#define QETH_CU_TYPE_IND 0
+#define QETH_CU_MODEL_IND 1
+#define QETH_DEV_TYPE_IND 2
+#define QETH_DEV_MODEL_IND 3
+#define QETH_QUEUE_NO_IND 4
+#define QETH_MULTICAST_IND 5
+
+#define QETH_REAL_CARD 1
+#define QETH_VLAN_CARD 2
+#define QETH_BUFSIZE 4096
+
+/**
+ * some more defs
+ */
+#define QETH_TX_TIMEOUT		(100 * HZ)
+#define QETH_RCD_TIMEOUT	(60 * HZ)
+#define QETH_RECLAIM_WORK_TIME HZ
+#define QETH_HEADER_SIZE 32
+#define QETH_MAX_PORTNO 15
+
+/*IPv6 address autoconfiguration stuff*/
+#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
+#define UNIQUE_ID_NOT_BY_CARD 0x10000
+
+/*****************************************************************************/
+/* QDIO queue and buffer handling */
+/*****************************************************************************/
+#define QETH_MAX_QUEUES 4
+#define QETH_IN_BUF_SIZE_DEFAULT 65536
+#define QETH_IN_BUF_COUNT_DEFAULT 64
+#define QETH_IN_BUF_COUNT_HSDEFAULT 128
+#define QETH_IN_BUF_COUNT_MIN 8
+#define QETH_IN_BUF_COUNT_MAX 128
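+/* one buffer element per 4K page of the inbound buffer size */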
+#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
+#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
+ ((card)->qdio.in_buf_pool.buf_count / 2)
+
+/* buffers we have to be behind before we get a PCI */
+#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
+/*enqueued free buffers left before we get a PCI*/
+#define QETH_PCI_THRESHOLD_B(card) 0
+/*not used unless the microcode gets patched*/
+#define QETH_PCI_TIMER_VALUE(card) 3
+
+/* priority queueing */
+#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
+#define QETH_DEFAULT_QUEUE 2
+#define QETH_NO_PRIO_QUEUEING 0
+#define QETH_PRIO_Q_ING_PREC 1
+#define QETH_PRIO_Q_ING_TOS 2
+#define IP_TOS_LOWDELAY 0x10
+#define IP_TOS_HIGHTHROUGHPUT 0x08
+#define IP_TOS_HIGHRELIABILITY 0x04
+#define IP_TOS_NOTIMPORTANT 0x02
+
+/* Packing */
+#define QETH_LOW_WATERMARK_PACK 2
+#define QETH_HIGH_WATERMARK_PACK 5
+#define QETH_WATERMARK_PACK_FUZZ 1
+
+#define QETH_IP_HEADER_SIZE 40
+
+/* large receive scatter gather copy break */
+#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
+#define QETH_RX_PULL_LEN 256
+
+struct qeth_hdr_layer3 {
+ __u8 id;
+ __u8 flags;
+ __u16 inbound_checksum; /*TSO:__u16 seqno */
+ __u32 token; /*TSO: __u32 reserved */
+ __u16 length;
+ __u8 vlan_prio;
+ __u8 ext_flags;
+ __u16 vlan_id;
+ __u16 frame_offset;
+ __u8 dest_addr[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_layer2 {
+ __u8 id;
+ __u8 flags[3];
+ __u8 port_no;
+ __u8 hdr_length;
+ __u16 pkt_length;
+ __u16 seq_no;
+ __u16 vlan_id;
+ __u32 reserved;
+ __u8 reserved2[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_osn {
+ __u8 id;
+ __u8 reserved;
+ __u16 seq_no;
+ __u16 reserved2;
+ __u16 control_flags;
+ __u16 pdu_length;
+ __u8 reserved3[18];
+ __u32 ccid;
+} __attribute__ ((packed));
+
+struct qeth_hdr {
+ union {
+ struct qeth_hdr_layer2 l2;
+ struct qeth_hdr_layer3 l3;
+ struct qeth_hdr_osn osn;
+ } hdr;
+} __attribute__ ((packed));
+
+/*TCP Segmentation Offload header*/
+struct qeth_hdr_ext_tso {
+ __u16 hdr_tot_len;
+ __u8 imb_hdr_no;
+ __u8 reserved;
+ __u8 hdr_type;
+ __u8 hdr_version;
+ __u16 hdr_len;
+ __u32 payload_len;
+ __u16 mss;
+ __u16 dg_hdr_len;
+ __u8 padding[16];
+} __attribute__ ((packed));
+
+struct qeth_hdr_tso {
+ struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/
+ struct qeth_hdr_ext_tso ext;
+} __attribute__ ((packed));
+
+
+/* flags for qeth_hdr.flags */
+#define QETH_HDR_PASSTHRU 0x10
+#define QETH_HDR_IPV6 0x80
+#define QETH_HDR_CAST_MASK 0x07
+enum qeth_cast_flags {
+ QETH_CAST_UNICAST = 0x06,
+ QETH_CAST_MULTICAST = 0x04,
+ QETH_CAST_BROADCAST = 0x05,
+ QETH_CAST_ANYCAST = 0x07,
+ QETH_CAST_NOCAST = 0x00,
+};
+
+enum qeth_layer2_frame_flags {
+ QETH_LAYER2_FLAG_MULTICAST = 0x01,
+ QETH_LAYER2_FLAG_BROADCAST = 0x02,
+ QETH_LAYER2_FLAG_UNICAST = 0x04,
+ QETH_LAYER2_FLAG_VLAN = 0x10,
+};
+
+enum qeth_header_ids {
+ QETH_HEADER_TYPE_LAYER3 = 0x01,
+ QETH_HEADER_TYPE_LAYER2 = 0x02,
+ QETH_HEADER_TYPE_TSO = 0x03,
+ QETH_HEADER_TYPE_OSN = 0x04,
+};
+/* flags for qeth_hdr.ext_flags */
+#define QETH_HDR_EXT_VLAN_FRAME 0x01
+#define QETH_HDR_EXT_TOKEN_ID 0x02
+#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
+#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08
+#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10
+#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
+#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
+
+static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
+{
+ return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
+}
+
+enum qeth_qdio_buffer_states {
+ /*
+ * inbound: read out by driver; owned by hardware in order to be filled
+ * outbound: owned by driver in order to be filled
+ */
+ QETH_QDIO_BUF_EMPTY,
+ /*
+ * inbound: filled by hardware; owned by driver in order to be read out
+ * outbound: filled by driver; owned by hardware in order to be sent
+ */
+ QETH_QDIO_BUF_PRIMED,
+ /*
+ * inbound: not applicable
+ * outbound: identified to be pending in TPQ
+ */
+ QETH_QDIO_BUF_PENDING,
+ /*
+ * inbound: not applicable
+ * outbound: found in completion queue
+ */
+ QETH_QDIO_BUF_IN_CQ,
+ /*
+ * inbound: not applicable
+ * outbound: handled via transfer pending / completion queue
+ */
+ QETH_QDIO_BUF_HANDLED_DELAYED,
+};
+
+enum qeth_qdio_info_states {
+ QETH_QDIO_UNINITIALIZED,
+ QETH_QDIO_ALLOCATED,
+ QETH_QDIO_ESTABLISHED,
+ QETH_QDIO_CLEANING
+};
+
+struct qeth_buffer_pool_entry {
+ struct list_head list;
+ struct list_head init_list;
+ void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+};
+
+struct qeth_qdio_buffer_pool {
+ struct list_head entry_list;
+ int buf_count;
+};
+
+struct qeth_qdio_buffer {
+ struct qdio_buffer *buffer;
+ /* the buffer pool entry currently associated to this buffer */
+ struct qeth_buffer_pool_entry *pool_entry;
+ struct sk_buff *rx_skb;
+};
+
+struct qeth_qdio_q {
+ struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
+ int next_buf_to_init;
+} __attribute__ ((aligned(256)));
+
+struct qeth_qdio_out_buffer {
+ struct qdio_buffer *buffer;
+ atomic_t state;
+ int next_element_to_fill;
+ struct sk_buff_head skb_list;
+ int is_header[16];
+
+ struct qaob *aob;
+ struct qeth_qdio_out_q *q;
+ struct qeth_qdio_out_buffer *next_pending;
+};
+
+struct qeth_card;
+
+enum qeth_out_q_states {
+ QETH_OUT_Q_UNLOCKED,
+ QETH_OUT_Q_LOCKED,
+ QETH_OUT_Q_LOCKED_FLUSH,
+};
+
+struct qeth_qdio_out_q {
+ struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qdio_outbuf_state *bufstates; /* convenience pointer */
+ int queue_no;
+ struct qeth_card *card;
+ atomic_t state;
+ int do_pack;
+ /*
+ * index of buffer to be filled by driver; state EMPTY or PACKING
+ */
+ int next_buf_to_fill;
+ /*
+ * number of buffers that are currently filled (PRIMED)
+ * -> these buffers are hardware-owned
+ */
+ atomic_t used_buffers;
+ /* indicates whether PCI flag must be set (or if one is outstanding) */
+ atomic_t set_pci_flags_count;
+} __attribute__ ((aligned(256)));
+
+struct qeth_qdio_info {
+ atomic_t state;
+ /* input */
+ int no_in_queues;
+ struct qeth_qdio_q *in_q;
+ struct qeth_qdio_q *c_q;
+ struct qeth_qdio_buffer_pool in_buf_pool;
+ struct qeth_qdio_buffer_pool init_pool;
+ int in_buf_size;
+
+ /* output */
+ int no_out_queues;
+ struct qeth_qdio_out_q **out_qs;
+ struct qdio_outbuf_state *out_bufstates;
+
+ /* priority queueing */
+ int do_prio_queueing;
+ int default_out_queue;
+};
+
+enum qeth_send_errors {
+ QETH_SEND_ERROR_NONE,
+ QETH_SEND_ERROR_LINK_FAILURE,
+ QETH_SEND_ERROR_RETRY,
+ QETH_SEND_ERROR_KICK_IT,
+};
+
+#define QETH_ETH_MAC_V4 0x0100 /* like v4 */
+#define QETH_ETH_MAC_V6 0x3333 /* like v6 */
+/* the Token Ring mc MAC prefix is longer, but this is enough to detect mc frames */
+#define QETH_TR_MAC_NC 0xc000 /* non-canonical */
+#define QETH_TR_MAC_C 0x0300 /* canonical */
+
+#define DEFAULT_ADD_HHLEN 0
+#define MAX_ADD_HHLEN 1024
+
+/**
+ * buffer stuff for read channel
+ */
+#define QETH_CMD_BUFFER_NO 8
+
+/**
+ * channel state machine
+ */
+enum qeth_channel_states {
+ CH_STATE_UP,
+ CH_STATE_DOWN,
+ CH_STATE_ACTIVATING,
+ CH_STATE_HALTED,
+ CH_STATE_STOPPED,
+ CH_STATE_RCD,
+ CH_STATE_RCD_DONE,
+};
+/**
+ * card state machine
+ */
+enum qeth_card_states {
+ CARD_STATE_DOWN,
+ CARD_STATE_HARDSETUP,
+ CARD_STATE_SOFTSETUP,
+ CARD_STATE_UP,
+ CARD_STATE_RECOVER,
+};
+
+/**
+ * Protocol versions
+ */
+enum qeth_prot_versions {
+ QETH_PROT_IPV4 = 0x0004,
+ QETH_PROT_IPV6 = 0x0006,
+};
+
+enum qeth_ip_types {
+ QETH_IP_TYPE_NORMAL,
+ QETH_IP_TYPE_VIPA,
+ QETH_IP_TYPE_RXIP,
+ QETH_IP_TYPE_DEL_ALL_MC,
+};
+
+enum qeth_cmd_buffer_state {
+ BUF_STATE_FREE,
+ BUF_STATE_LOCKED,
+ BUF_STATE_PROCESSED,
+};
+
+enum qeth_cq {
+ QETH_CQ_DISABLED = 0,
+ QETH_CQ_ENABLED = 1,
+ QETH_CQ_NOTAVAILABLE = 2,
+};
+
+struct qeth_ipato {
+ int enabled;
+ int invert4;
+ int invert6;
+ struct list_head entries;
+};
+
+struct qeth_channel;
+
+struct qeth_cmd_buffer {
+ enum qeth_cmd_buffer_state state;
+ struct qeth_channel *channel;
+ unsigned char *data;
+ int rc;
+ void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
+};
+
+/**
+ * definition of a qeth channel, used for read and write
+ */
+struct qeth_channel {
+ enum qeth_channel_states state;
+ struct ccw1 ccw;
+ spinlock_t iob_lock;
+ wait_queue_head_t wait_q;
+ struct tasklet_struct irq_tasklet;
+ struct ccw_device *ccwdev;
+/*command buffer for control data*/
+ struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
+ atomic_t irq_pending;
+ int io_buf_no;
+ int buf_no;
+};
+
+/**
+ * OSA card related definitions
+ */
+struct qeth_token {
+ __u32 issuer_rm_w;
+ __u32 issuer_rm_r;
+ __u32 cm_filter_w;
+ __u32 cm_filter_r;
+ __u32 cm_connection_w;
+ __u32 cm_connection_r;
+ __u32 ulp_filter_w;
+ __u32 ulp_filter_r;
+ __u32 ulp_connection_w;
+ __u32 ulp_connection_r;
+};
+
+struct qeth_seqno {
+ __u32 trans_hdr;
+ __u32 pdu_hdr;
+ __u32 pdu_hdr_ack;
+ __u16 ipa;
+ __u32 pkt_seqno;
+};
+
+struct qeth_reply {
+ struct list_head list;
+ wait_queue_head_t wait_q;
+ int (*callback)(struct qeth_card *, struct qeth_reply *,
+ unsigned long);
+ u32 seqno;
+ unsigned long offset;
+ atomic_t received;
+ int rc;
+ void *param;
+ struct qeth_card *card;
+ atomic_t refcnt;
+};
+
+
+struct qeth_card_blkt {
+ int time_total;
+ int inter_packet;
+ int inter_packet_jumbo;
+};
+
+#define QETH_BROADCAST_WITH_ECHO 0x01
+#define QETH_BROADCAST_WITHOUT_ECHO 0x02
+#define QETH_LAYER2_MAC_READ 0x01
+#define QETH_LAYER2_MAC_REGISTERED 0x02
+struct qeth_card_info {
+ unsigned short unit_addr2;
+ unsigned short cula;
+ unsigned short chpid;
+ __u16 func_level;
+ char mcl_level[QETH_MCL_LENGTH + 1];
+ int guestlan;
+ int mac_bits;
+ int portname_required;
+ int portno;
+ char portname[9];
+ enum qeth_card_types type;
+ enum qeth_link_types link_type;
+ int is_multicast_different;
+ int initial_mtu;
+ int max_mtu;
+ int broadcast_capable;
+ int unique_id;
+ struct qeth_card_blkt blkt;
+ __u32 csum_mask;
+ __u32 tx_csum_mask;
+ enum qeth_ipa_promisc_modes promisc_mode;
+ __u32 diagass_support;
+ __u32 hwtrap;
+};
+
+struct qeth_card_options {
+ struct qeth_routing_info route4;
+ struct qeth_ipa_info ipa4;
+ struct qeth_ipa_info adp; /*Adapter parameters*/
+ struct qeth_routing_info route6;
+ struct qeth_ipa_info ipa6;
+ int broadcast_mode;
+ int macaddr_mode;
+ int fake_broadcast;
+ int add_hhlen;
+ int layer2;
+ int performance_stats;
+ int rx_sg_cb;
+ enum qeth_ipa_isolation_modes isolation;
+ int sniffer;
+ enum qeth_cq cq;
+ char hsuid[9];
+};
+
+/*
+ * thread bits for qeth_card thread masks
+ */
+enum qeth_threads {
+ QETH_RECOVER_THREAD = 1,
+};
+
+struct qeth_osn_info {
+ int (*assist_cb)(struct net_device *dev, void *data);
+ int (*data_cb)(struct sk_buff *skb);
+};
+
+enum qeth_discipline_id {
+ QETH_DISCIPLINE_LAYER3 = 0,
+ QETH_DISCIPLINE_LAYER2 = 1,
+};
+
+struct qeth_discipline {
+ void (*start_poll)(struct ccw_device *, int, unsigned long);
+ qdio_handler_t *input_handler;
+ qdio_handler_t *output_handler;
+ int (*recover)(void *ptr);
+ struct ccwgroup_driver *ccwgdriver;
+};
+
+struct qeth_vlan_vid {
+ struct list_head list;
+ unsigned short vid;
+};
+
+struct qeth_mc_mac {
+ struct list_head list;
+ __u8 mc_addr[MAX_ADDR_LEN];
+ unsigned char mc_addrlen;
+ int is_vmac;
+};
+
+struct qeth_rx {
+ int b_count;
+ int b_index;
+ struct qdio_buffer_element *b_element;
+ int e_offset;
+ int qdio_err;
+};
+
+#define QETH_NAPI_WEIGHT 128
+
+struct qeth_card {
+ struct list_head list;
+ enum qeth_card_states state;
+ int lan_online;
+ spinlock_t lock;
+ struct ccwgroup_device *gdev;
+ struct qeth_channel read;
+ struct qeth_channel write;
+ struct qeth_channel data;
+
+ struct net_device *dev;
+ struct net_device_stats stats;
+
+ struct qeth_card_info info;
+ struct qeth_token token;
+ struct qeth_seqno seqno;
+ struct qeth_card_options options;
+
+ wait_queue_head_t wait_q;
+ spinlock_t vlanlock;
+ spinlock_t mclock;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ struct list_head vid_list;
+ struct list_head mc_list;
+ struct work_struct kernel_thread_starter;
+ spinlock_t thread_mask_lock;
+ unsigned long thread_start_mask;
+ unsigned long thread_allowed_mask;
+ unsigned long thread_running_mask;
+ spinlock_t ip_lock;
+ struct list_head ip_list;
+ struct list_head *ip_tbd_list;
+ struct qeth_ipato ipato;
+ struct list_head cmd_waiter_list;
+ /* QDIO buffer handling */
+ struct qeth_qdio_info qdio;
+ struct qeth_perf_stats perf_stats;
+ int read_or_write_problem;
+ struct qeth_osn_info osn_info;
+ struct qeth_discipline discipline;
+ atomic_t force_alloc_skb;
+ struct service_level qeth_service_level;
+ struct qdio_ssqd_desc ssqd;
+ debug_info_t *debug;
+ struct mutex conf_mutex;
+ struct mutex discipline_mutex;
+ struct napi_struct napi;
+ struct qeth_rx rx;
+ struct delayed_work buffer_reclaim_work;
+ int reclaim_index;
+};
+
+struct qeth_card_list_struct {
+ struct list_head list;
+ rwlock_t rwlock;
+};
+
+struct qeth_trap_id {
+ __u16 lparnr;
+ char vmname[8];
+ __u8 chpid;
+ __u8 ssid;
+ __u16 devno;
+} __packed;
+
+/*some helper functions*/
+#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+
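+/*
+ * Walk from a ccw device to its qeth card: the ccw device's drvdata
+ * points to the ccwgroup device, whose drvdata holds the card.
+ */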
+static inline struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *)
+ dev_get_drvdata(&cdev->dev))->dev);
+ return card;
+}
+
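+/* TOD clock bit 51 ticks once per microsecond; >> 12 yields usecs */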
+static inline int qeth_get_micros(void)
+{
+ return (int) (get_clock() >> 12);
+}
+
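+/*
+ * Peek at the Ethernet type of an outbound skb. h_proto is in network
+ * byte order, which matches host order on big-endian s390, so the
+ * ETH_P_* constants can be compared directly.
+ */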
+static inline int qeth_get_ip_version(struct sk_buff *skb)
+{
+ struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+ switch (ehdr->h_proto) {
+ case ETH_P_IPV6:
+ return 6;
+ case ETH_P_IP:
+ return 4;
+ default:
+ return 0;
+ }
+}
+
+static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
+ struct qeth_buffer_pool_entry *entry)
+{
+ list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
+}
+
+static inline int qeth_is_diagass_supported(struct qeth_card *card,
+ enum qeth_diags_cmds cmd)
+{
+ return card->info.diagass_support & (__u32)cmd;
+}
+
+extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
+extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
+const char *qeth_get_cardname_short(struct qeth_card *);
+int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
+void qeth_core_free_discipline(struct qeth_card *);
+int qeth_core_create_device_attributes(struct device *);
+void qeth_core_remove_device_attributes(struct device *);
+int qeth_core_create_osn_attributes(struct device *);
+void qeth_core_remove_osn_attributes(struct device *);
+void qeth_buffer_reclaim_work(struct work_struct *);
+
+/* exports for qeth discipline device drivers */
+extern struct qeth_card_list_struct qeth_core_card_list;
+extern struct kmem_cache *qeth_core_header_cache;
+extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
+
+void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
+int qeth_threads_running(struct qeth_card *, unsigned long);
+int qeth_wait_for_threads(struct qeth_card *, unsigned long);
+int qeth_do_run_thread(struct qeth_card *, unsigned long);
+void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
+void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
+int qeth_core_hardsetup_card(struct qeth_card *);
+void qeth_print_status_message(struct qeth_card *);
+int qeth_init_qdio_queues(struct qeth_card *);
+int qeth_send_startlan(struct qeth_card *);
+int qeth_send_stoplan(struct qeth_card *);
+int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
+ int (*reply_cb)
+ (struct qeth_card *, struct qeth_reply *, unsigned long),
+ void *);
+struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
+ enum qeth_ipa_cmds, enum qeth_prot_versions);
+int qeth_query_setadapterparms(struct qeth_card *);
+int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
+ unsigned int, const char *);
+void qeth_queue_input_buffer(struct qeth_card *, int);
+struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
+ struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
+ struct qeth_hdr **);
+void qeth_schedule_recovery(struct qeth_card *);
+void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
+void qeth_qdio_input_handler(struct ccw_device *,
+ unsigned int, unsigned int, int,
+ int, unsigned long);
+void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
+ int, int, int, unsigned long);
+void qeth_clear_ipacmd_list(struct qeth_card *);
+int qeth_qdio_clear_card(struct qeth_card *, int);
+void qeth_clear_working_pool_list(struct qeth_card *);
+void qeth_clear_cmd_buffers(struct qeth_channel *);
+void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_setadp_promisc_mode(struct qeth_card *);
+struct net_device_stats *qeth_get_stats(struct net_device *);
+int qeth_change_mtu(struct net_device *, int);
+int qeth_setadpparms_change_macaddr(struct qeth_card *);
+void qeth_tx_timeout(struct net_device *);
+void qeth_prepare_control_data(struct qeth_card *, int,
+ struct qeth_cmd_buffer *);
+void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
+void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
+struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
+int qeth_mdio_read(struct net_device *, int, int);
+int qeth_snmp_command(struct qeth_card *, char __user *);
+struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
+int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
+ unsigned long);
+int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
+ int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
+ void *reply_param);
+int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
+int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
+int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
+ struct sk_buff *, struct qeth_hdr *, int, int, int);
+int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
+ struct sk_buff *, struct qeth_hdr *, int);
+int qeth_core_get_sset_count(struct net_device *, int);
+void qeth_core_get_ethtool_stats(struct net_device *,
+ struct ethtool_stats *, u64 *);
+void qeth_core_get_strings(struct net_device *, u32, u8 *);
+void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
+int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
+int qeth_set_access_ctrl_online(struct qeth_card *card);
+int qeth_hdr_chk_and_bounce(struct sk_buff *, int);
+int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
+int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
+int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
+
+/* exports for OSN */
+int qeth_osn_assist(struct net_device *, void *, int);
+int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
+ int (*assist_cb)(struct net_device *, void *),
+ int (*data_cb)(struct sk_buff *));
+void qeth_osn_deregister(struct net_device *);
+
+#endif /* __QETH_CORE_H__ */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
new file mode 100644
index 00000000000..9c3f38da4c0
--- /dev/null
+++ b/drivers/s390/net/qeth_core_main.c
@@ -0,0 +1,5513 @@
+/*
+ * drivers/s390/net/qeth_core_main.c
+ *
+ * Copyright IBM Corp. 2007, 2009
+ * Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+ * Frank Pavlic <fpavlic@de.ibm.com>,
+ * Thomas Spatzier <tspat@de.ibm.com>,
+ * Frank Blaschka <frank.blaschka@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "qeth"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/mii.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <net/iucv/af_iucv.h>
+
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/sysinfo.h>
+
+#include "qeth_core.h"
+
+struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
+ /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
+ /* N P A M L V H */
+ [QETH_DBF_SETUP] = {"qeth_setup",
+ 8, 1, 8, 5, &debug_hex_ascii_view, NULL},
+ [QETH_DBF_MSG] = {"qeth_msg",
+ 8, 1, 128, 3, &debug_sprintf_view, NULL},
+ [QETH_DBF_CTRL] = {"qeth_control",
+ 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
+};
+EXPORT_SYMBOL_GPL(qeth_dbf);
+
+struct qeth_card_list_struct qeth_core_card_list;
+EXPORT_SYMBOL_GPL(qeth_core_card_list);
+struct kmem_cache *qeth_core_header_cache;
+EXPORT_SYMBOL_GPL(qeth_core_header_cache);
+static struct kmem_cache *qeth_qdio_outbuf_cache;
+
+static struct device *qeth_core_root_dev;
+static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
+static struct lock_class_key qdio_out_skb_queue_key;
+
+static void qeth_send_control_data_cb(struct qeth_channel *,
+ struct qeth_cmd_buffer *);
+static int qeth_issue_next_read(struct qeth_card *);
+static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
+static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
+static void qeth_free_buffer_pool(struct qeth_card *);
+static int qeth_qdio_establish(struct qeth_card *);
+static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum iucv_tx_notify notification);
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum qeth_qdio_buffer_states newbufstate);
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+
+static inline const char *qeth_get_cardname(struct qeth_card *card)
+{
+ if (card->info.guestlan) {
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSD:
+ return " Guest LAN QDIO";
+ case QETH_CARD_TYPE_IQD:
+ return " Guest LAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return " Guest LAN QDIO - OSM";
+ case QETH_CARD_TYPE_OSX:
+ return " Guest LAN QDIO - OSX";
+ default:
+ return " unknown";
+ }
+ } else {
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSD:
+ return " OSD Express";
+ case QETH_CARD_TYPE_IQD:
+ return " HiperSockets";
+ case QETH_CARD_TYPE_OSN:
+ return " OSN QDIO";
+ case QETH_CARD_TYPE_OSM:
+ return " OSM QDIO";
+ case QETH_CARD_TYPE_OSX:
+ return " OSX QDIO";
+ default:
+ return " unknown";
+ }
+ }
+ return " n/a";
+}
+
+/* max length to be returned: 14 */
+const char *qeth_get_cardname_short(struct qeth_card *card)
+{
+ if (card->info.guestlan) {
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSD:
+ return "GuestLAN QDIO";
+ case QETH_CARD_TYPE_IQD:
+ return "GuestLAN Hiper";
+ case QETH_CARD_TYPE_OSM:
+ return "GuestLAN OSM";
+ case QETH_CARD_TYPE_OSX:
+ return "GuestLAN OSX";
+ default:
+ return "unknown";
+ }
+ } else {
+ switch (card->info.type) {
+ case QETH_CARD_TYPE_OSD:
+ switch (card->info.link_type) {
+ case QETH_LINK_TYPE_FAST_ETH:
+ return "OSD_100";
+ case QETH_LINK_TYPE_HSTR:
+ return "HSTR";
+ case QETH_LINK_TYPE_GBIT_ETH:
+ return "OSD_1000";
+ case QETH_LINK_TYPE_10GBIT_ETH:
+ return "OSD_10GIG";
+ case QETH_LINK_TYPE_LANE_ETH100:
+ return "OSD_FE_LANE";
+ case QETH_LINK_TYPE_LANE_TR:
+ return "OSD_TR_LANE";
+ case QETH_LINK_TYPE_LANE_ETH1000:
+ return "OSD_GbE_LANE";
+ case QETH_LINK_TYPE_LANE:
+ return "OSD_ATM_LANE";
+ default:
+ return "OSD_Express";
+ }
+ case QETH_CARD_TYPE_IQD:
+ return "HiperSockets";
+ case QETH_CARD_TYPE_OSN:
+ return "OSN";
+ case QETH_CARD_TYPE_OSM:
+ return "OSM_1000";
+ case QETH_CARD_TYPE_OSX:
+ return "OSX_10GIG";
+ default:
+ return "unknown";
+ }
+ }
+ return "n/a";
+}
+
+void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+ int clear_start_mask)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ card->thread_allowed_mask = threads;
+ if (clear_start_mask)
+ card->thread_start_mask &= threads;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
+
+int qeth_threads_running(struct qeth_card *card, unsigned long threads)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ rc = (card->thread_running_mask & threads);
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_threads_running);
+
+int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
+{
+ return wait_event_interruptible(card->wait_q,
+ qeth_threads_running(card, threads) == 0);
+}
+EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
+
+void qeth_clear_working_pool_list(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *pool_entry, *tmp;
+
+ QETH_CARD_TEXT(card, 5, "clwrklst");
+ list_for_each_entry_safe(pool_entry, tmp,
+ &card->qdio.in_buf_pool.entry_list, list){
+ list_del(&pool_entry->list);
+ }
+}
+EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
+
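+/*
+ * Pre-allocate init_pool.buf_count pool entries, each backed by one
+ * page per buffer element, so inbound buffers can be refilled from a
+ * ready pool later on.
+ */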
+static int qeth_alloc_buffer_pool(struct qeth_card *card)
+{
+ struct qeth_buffer_pool_entry *pool_entry;
+ void *ptr;
+ int i, j;
+
+ QETH_CARD_TEXT(card, 5, "alocpool");
+ for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
+ pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
+ if (!pool_entry) {
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
+ ptr = (void *) __get_free_page(GFP_KERNEL);
+ if (!ptr) {
+ while (j > 0)
+ free_page((unsigned long)
+ pool_entry->elements[--j]);
+ kfree(pool_entry);
+ qeth_free_buffer_pool(card);
+ return -ENOMEM;
+ }
+ pool_entry->elements[j] = ptr;
+ }
+ list_add(&pool_entry->init_list,
+ &card->qdio.init_pool.entry_list);
+ }
+ return 0;
+}
+
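+/*
+ * Resize the input buffer pool.  This is only allowed while the card
+ * is down or in recovery.
+ */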
+int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+{
+ QETH_CARD_TEXT(card, 2, "realcbp");
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ /* TODO: steal/add buffers from/to a running card's buffer pool (?) */
+ qeth_clear_working_pool_list(card);
+ qeth_free_buffer_pool(card);
+ card->qdio.in_buf_pool.buf_count = bufcnt;
+ card->qdio.init_pool.buf_count = bufcnt;
+ return qeth_alloc_buffer_pool(card);
+}
+EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+
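+/* Prime the completion queue's input buffers if CQ support is enabled. */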
+static inline int qeth_cq_init(struct qeth_card *card)
+{
+ int rc;
+
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ QETH_DBF_TEXT(SETUP, 2, "cqinit");
+ memset(card->qdio.c_q->qdio_bufs, 0,
+ QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
+ card->qdio.c_q->next_buf_to_init = QDIO_MAX_BUFFERS_PER_Q - 1;
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
+ card->qdio.no_in_queues - 1, 0,
+ QDIO_MAX_BUFFERS_PER_Q - 1);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ goto out;
+ }
+ }
+ rc = 0;
+out:
+ return rc;
+}
+
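+/*
+ * Allocate the completion queue (CQ) and the per-buffer output states
+ * used for asynchronous completion handling.  With CQ disabled, only a
+ * single input queue is used.
+ */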
+static inline int qeth_alloc_cq(struct qeth_card *card)
+{
+ int rc;
+
+ if (card->options.cq == QETH_CQ_ENABLED) {
+ int i;
+ struct qdio_outbuf_state *outbuf_states;
+
+ QETH_DBF_TEXT(SETUP, 2, "cqon");
+ card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
+ GFP_KERNEL);
+ if (!card->qdio.c_q) {
+ rc = -ENOMEM;
+ goto kmsg_out;
+ }
+ QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
+ card->qdio.c_q->bufs[i].buffer =
+ &card->qdio.c_q->qdio_bufs[i];
+ }
+
+ card->qdio.no_in_queues = 2;
+
+ card->qdio.out_bufstates =
+ kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
+ sizeof(struct qdio_outbuf_state), GFP_KERNEL);
+ outbuf_states = card->qdio.out_bufstates;
+ if (outbuf_states == NULL) {
+ rc = -ENOMEM;
+ goto free_cq_out;
+ }
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
+ card->qdio.out_qs[i]->bufstates = outbuf_states;
+ outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
+ }
+ } else {
+ QETH_DBF_TEXT(SETUP, 2, "nocq");
+ card->qdio.c_q = NULL;
+ card->qdio.no_in_queues = 1;
+ }
+ QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
+ rc = 0;
+out:
+ return rc;
+free_cq_out:
+ kfree(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+kmsg_out:
+ dev_err(&card->gdev->dev, "Failed to create completion queue\n");
+ goto out;
+}
+
+static inline void qeth_free_cq(struct qeth_card *card)
+{
+ if (card->qdio.c_q) {
+ --card->qdio.no_in_queues;
+ kfree(card->qdio.c_q);
+ card->qdio.c_q = NULL;
+ }
+ kfree(card->qdio.out_bufstates);
+ card->qdio.out_bufstates = NULL;
+}
+
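+/* Map an SBALF15/AOB return code to the matching AF_IUCV TX notification. */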
+static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
+ int delayed)
+{
+ enum iucv_tx_notify n;
+
+ switch (sbalf15) {
+ case 0:
+ n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
+ break;
+ case 4:
+ case 16:
+ case 17:
+ case 18:
+ n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
+ TX_NOTIFY_UNREACHABLE;
+ break;
+ default:
+ n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
+ TX_NOTIFY_GENERALERROR;
+ break;
+ }
+
+ return n;
+}
+
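+/*
+ * Walk the chain of pending buffers attached to q->bufs[bidx] and free
+ * those that have already been handled; with @forced_cleanup (e.g.
+ * during recovery) the whole chain is cleaned up.
+ */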
+static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
+ int bidx, int forced_cleanup)
+{
+ if (q->card->options.cq != QETH_CQ_ENABLED)
+ return;
+
+ if (q->bufs[bidx]->next_pending != NULL) {
+ struct qeth_qdio_out_buffer *head = q->bufs[bidx];
+ struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
+
+ while (c) {
+ if (forced_cleanup ||
+ atomic_read(&c->state) ==
+ QETH_QDIO_BUF_HANDLED_DELAYED) {
+ struct qeth_qdio_out_buffer *f = c;
+ QETH_CARD_TEXT(f->q->card, 5, "fp");
+ QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
+ /* release here to avoid interleaving between
+ outbound tasklet and inbound tasklet
+ regarding notifications and lifecycle */
+ qeth_release_skbs(c);
+
+ c = f->next_pending;
+ BUG_ON(head->next_pending != f);
+ head->next_pending = c;
+ kmem_cache_free(qeth_qdio_outbuf_cache, f);
+ } else {
+ head = c;
+ c = c->next_pending;
+ }
+ }
+ }
+ if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+ QETH_QDIO_BUF_HANDLED_DELAYED)) {
+ /* for recovery situations */
+ q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+ qeth_init_qdio_out_buf(q, bidx);
+ QETH_CARD_TEXT(q->card, 2, "clprecov");
+ }
+}
+
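+/*
+ * Handle an asynchronous output buffer (AOB) delivered via the
+ * completion queue: notify the attached AF_IUCV sockets and recycle
+ * the output buffer.
+ */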
+static inline void qeth_qdio_handle_aob(struct qeth_card *card,
+ unsigned long phys_aob_addr)
+{
+ struct qaob *aob;
+ struct qeth_qdio_out_buffer *buffer;
+ enum iucv_tx_notify notification;
+
+ aob = (struct qaob *) phys_to_virt(phys_aob_addr);
+ QETH_CARD_TEXT(card, 5, "haob");
+ QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
+ buffer = (struct qeth_qdio_out_buffer *) aob->user1;
+ QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
+
+ BUG_ON(buffer == NULL);
+
+ if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
+ QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
+ notification = TX_NOTIFY_OK;
+ } else {
+ BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
+ atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
+ notification = TX_NOTIFY_DELAYED_OK;
+ }
+
+ if (aob->aorc != 0) {
+ QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+ notification = qeth_compute_cq_notification(aob->aorc, 1);
+ }
+ qeth_notify_skbs(buffer->q, buffer, notification);
+
+ buffer->aob = NULL;
+ qeth_clear_output_buffer(buffer->q, buffer,
+ QETH_QDIO_BUF_HANDLED_DELAYED);
+
+ /* from here on: do not touch buffer anymore */
+ qdio_release_aob(aob);
+}
+
+static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+{
+ return card->options.cq == QETH_CQ_ENABLED &&
+ card->qdio.c_q != NULL &&
+ queue != 0 &&
+ queue == card->qdio.no_in_queues - 1;
+}
+
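+/* Start the next read CCW on the read channel; on error, trigger recovery. */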
+static int qeth_issue_next_read(struct qeth_card *card)
+{
+ int rc;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 5, "issnxrd");
+ if (card->read.state != CH_STATE_UP)
+ return -EIO;
+ iob = qeth_get_buffer(&card->read);
+ if (!iob) {
+ dev_warn(&card->gdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
+ "available\n", dev_name(&card->gdev->dev));
+ return -ENOMEM;
+ }
+ qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
+ QETH_CARD_TEXT(card, 6, "noirqpnd");
+ rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
+ (addr_t) iob, 0, 0);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
+ "rc=%i\n", dev_name(&card->gdev->dev), rc);
+ atomic_set(&card->read.irq_pending, 0);
+ card->read_or_write_problem = 1;
+ qeth_schedule_recovery(card);
+ wake_up(&card->wait_q);
+ }
+ return rc;
+}
+
+static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+{
+ struct qeth_reply *reply;
+
+ reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+ if (reply) {
+ atomic_set(&reply->refcnt, 1);
+ atomic_set(&reply->received, 0);
+ reply->card = card;
+ }
+ return reply;
+}
+
+static void qeth_get_reply(struct qeth_reply *reply)
+{
+ WARN_ON(atomic_read(&reply->refcnt) <= 0);
+ atomic_inc(&reply->refcnt);
+}
+
+static void qeth_put_reply(struct qeth_reply *reply)
+{
+ WARN_ON(atomic_read(&reply->refcnt) <= 0);
+ if (atomic_dec_and_test(&reply->refcnt))
+ kfree(reply);
+}
+
+static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
+ struct qeth_card *card)
+{
+ char *ipa_name;
+ int com = cmd->hdr.command;
+
+ ipa_name = qeth_get_ipa_cmd_name(com);
+ if (rc)
+ QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
+ "x%X \"%s\"\n",
+ ipa_name, com, dev_name(&card->gdev->dev),
+ QETH_CARD_IFNAME(card), rc,
+ qeth_get_ipa_msg(rc));
+ else
+ QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
+ ipa_name, com, dev_name(&card->gdev->dev),
+ QETH_CARD_IFNAME(card));
+}
+
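+/*
+ * Check an incoming buffer for IPA data.  Replies are returned to the
+ * caller; unsolicited commands such as STOPLAN and STARTLAN are
+ * handled here directly.
+ */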
+static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ struct qeth_ipa_cmd *cmd = NULL;
+
+ QETH_CARD_TEXT(card, 5, "chkipad");
+ if (IS_IPA(iob->data)) {
+ cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
+ if (IS_IPA_REPLY(cmd)) {
+ if (cmd->hdr.command != IPA_CMD_SETCCID &&
+ cmd->hdr.command != IPA_CMD_DELCCID &&
+ cmd->hdr.command != IPA_CMD_MODCCID &&
+ cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
+ qeth_issue_ipa_msg(cmd,
+ cmd->hdr.return_code, card);
+ return cmd;
+ } else {
+ switch (cmd->hdr.command) {
+ case IPA_CMD_STOPLAN:
+ dev_warn(&card->gdev->dev,
+ "The link for interface %s on CHPID"
+ " 0x%X failed\n",
+ QETH_CARD_IFNAME(card),
+ card->info.chpid);
+ card->lan_online = 0;
+ if (card->dev && netif_carrier_ok(card->dev))
+ netif_carrier_off(card->dev);
+ return NULL;
+ case IPA_CMD_STARTLAN:
+ dev_info(&card->gdev->dev,
+ "The link for %s on CHPID 0x%X has"
+ " been restored\n",
+ QETH_CARD_IFNAME(card),
+ card->info.chpid);
+ if (card->dev)
+ netif_carrier_on(card->dev);
+ card->lan_online = 1;
+ if (card->info.hwtrap)
+ card->info.hwtrap = 2;
+ qeth_schedule_recovery(card);
+ return NULL;
+ case IPA_CMD_MODCCID:
+ return cmd;
+ case IPA_CMD_REGISTER_LOCAL_ADDR:
+ QETH_CARD_TEXT(card, 3, "irla");
+ break;
+ case IPA_CMD_UNREGISTER_LOCAL_ADDR:
+ QETH_CARD_TEXT(card, 3, "urla");
+ break;
+ default:
+ QETH_DBF_MESSAGE(2, "Received data is IPA "
+ "but not a reply!\n");
+ break;
+ }
+ }
+ }
+ return cmd;
+}
+
+void qeth_clear_ipacmd_list(struct qeth_card *card)
+{
+ struct qeth_reply *reply, *r;
+ unsigned long flags;
+
+ QETH_CARD_TEXT(card, 4, "clipalst");
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+ qeth_get_reply(reply);
+ reply->rc = -EIO;
+ atomic_inc(&reply->received);
+ list_del_init(&reply->list);
+ wake_up(&reply->wait_q);
+ qeth_put_reply(reply);
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+ atomic_set(&card->write.irq_pending, 0);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
+
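+/* Check a received buffer for an IDX TERMINATE indication. */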
+static int qeth_check_idx_response(struct qeth_card *card,
+ unsigned char *buffer)
+{
+ if (!buffer)
+ return 0;
+
+ QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
+ if ((buffer[2] & 0xc0) == 0xc0) {
+ QETH_DBF_MESSAGE(2, "received an IDX TERMINATE "
+ "with cause code 0x%02x%s\n",
+ buffer[4],
+ ((buffer[4] == 0x22) ?
+ " -- try another portname" : ""));
+ QETH_CARD_TEXT(card, 2, "ckidxres");
+ QETH_CARD_TEXT(card, 2, " idxterm");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
+ if (buffer[4] == 0xf6) {
+ dev_err(&card->gdev->dev,
+ "The qeth device is not configured "
+ "for the OSI layer required by z/VM\n");
+ return -EPERM;
+ }
+ return -EIO;
+ }
+ return 0;
+}
+
+static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
+ __u32 len)
+{
+ struct qeth_card *card;
+
+ card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "setupccw");
+ if (channel == &card->read)
+ memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
+ else
+ memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
+ channel->ccw.count = len;
+ channel->ccw.cda = (__u32) __pa(iob);
+}
+
+static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
+{
+ __u8 index;
+
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
+ index = channel->io_buf_no;
+ do {
+ if (channel->iob[index].state == BUF_STATE_FREE) {
+ channel->iob[index].state = BUF_STATE_LOCKED;
+ channel->io_buf_no = (channel->io_buf_no + 1) %
+ QETH_CMD_BUFFER_NO;
+ memset(channel->iob[index].data, 0, QETH_BUFSIZE);
+ return channel->iob + index;
+ }
+ index = (index + 1) % QETH_CMD_BUFFER_NO;
+ } while (index != channel->io_buf_no);
+
+ return NULL;
+}
+
+void qeth_release_buffer(struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ unsigned long flags;
+
+ QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
+ spin_lock_irqsave(&channel->iob_lock, flags);
+ memset(iob->data, 0, QETH_BUFSIZE);
+ iob->state = BUF_STATE_FREE;
+ iob->callback = qeth_send_control_data_cb;
+ iob->rc = 0;
+ spin_unlock_irqrestore(&channel->iob_lock, flags);
+}
+EXPORT_SYMBOL_GPL(qeth_release_buffer);
+
+static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
+{
+ struct qeth_cmd_buffer *buffer = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->iob_lock, flags);
+ buffer = __qeth_get_buffer(channel);
+ spin_unlock_irqrestore(&channel->iob_lock, flags);
+ return buffer;
+}
+
+struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
+{
+ struct qeth_cmd_buffer *buffer;
+
+ wait_event(channel->wait_q,
+ ((buffer = qeth_get_buffer(channel)) != NULL));
+ return buffer;
+}
+EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
+
+void qeth_clear_cmd_buffers(struct qeth_channel *channel)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++)
+ qeth_release_buffer(channel, &channel->iob[cnt]);
+ channel->buf_no = 0;
+ channel->io_buf_no = 0;
+}
+EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
+
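+/*
+ * Default callback for control data: match the response against the
+ * list of commands waiting for a reply and run their callbacks.
+ */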
+static void qeth_send_control_data_cb(struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ struct qeth_card *card;
+ struct qeth_reply *reply, *r;
+ struct qeth_ipa_cmd *cmd;
+ unsigned long flags;
+ int keep_reply;
+ int rc = 0;
+
+ card = CARD_FROM_CDEV(channel->ccwdev);
+ QETH_CARD_TEXT(card, 4, "sndctlcb");
+ rc = qeth_check_idx_response(card, iob->data);
+ switch (rc) {
+ case 0:
+ break;
+ case -EIO:
+ qeth_clear_ipacmd_list(card);
+ qeth_schedule_recovery(card);
+ /* fall through */
+ default:
+ goto out;
+ }
+
+ cmd = qeth_check_ipa_data(card, iob);
+ if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
+ goto out;
+ /* in case of OSN: check if cmd is set */
+ if (card->info.type == QETH_CARD_TYPE_OSN &&
+ cmd &&
+ cmd->hdr.command != IPA_CMD_STARTLAN &&
+ card->osn_info.assist_cb != NULL) {
+ card->osn_info.assist_cb(card->dev, cmd);
+ goto out;
+ }
+
+ spin_lock_irqsave(&card->lock, flags);
+ list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
+ if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
+ ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
+ qeth_get_reply(reply);
+ list_del_init(&reply->list);
+ spin_unlock_irqrestore(&card->lock, flags);
+ keep_reply = 0;
+ if (reply->callback != NULL) {
+ if (cmd) {
+ reply->offset = (__u16)((char *)cmd -
+ (char *)iob->data);
+ keep_reply = reply->callback(card,
+ reply,
+ (unsigned long)cmd);
+ } else
+ keep_reply = reply->callback(card,
+ reply,
+ (unsigned long)iob);
+ }
+ if (cmd)
+ reply->rc = (u16) cmd->hdr.return_code;
+ else if (iob->rc)
+ reply->rc = iob->rc;
+ if (keep_reply) {
+ spin_lock_irqsave(&card->lock, flags);
+ list_add_tail(&reply->list,
+ &card->cmd_waiter_list);
+ spin_unlock_irqrestore(&card->lock, flags);
+ } else {
+ atomic_inc(&reply->received);
+ wake_up(&reply->wait_q);
+ }
+ qeth_put_reply(reply);
+ goto out;
+ }
+ }
+ spin_unlock_irqrestore(&card->lock, flags);
+out:
+ memcpy(&card->seqno.pdu_hdr_ack,
+ QETH_PDU_HEADER_SEQ_NO(iob->data),
+ QETH_SEQ_NO_LENGTH);
+ qeth_release_buffer(channel, iob);
+}
+
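+/* Allocate and initialize the command buffers of a channel. */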
+static int qeth_setup_channel(struct qeth_channel *channel)
+{
+ int cnt;
+
+ QETH_DBF_TEXT(SETUP, 2, "setupch");
+ for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
+ channel->iob[cnt].data =
+ kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
+ if (channel->iob[cnt].data == NULL)
+ break;
+ channel->iob[cnt].state = BUF_STATE_FREE;
+ channel->iob[cnt].channel = channel;
+ channel->iob[cnt].callback = qeth_send_control_data_cb;
+ channel->iob[cnt].rc = 0;
+ }
+ if (cnt < QETH_CMD_BUFFER_NO) {
+ while (cnt-- > 0)
+ kfree(channel->iob[cnt].data);
+ return -ENOMEM;
+ }
+ channel->buf_no = 0;
+ channel->io_buf_no = 0;
+ atomic_set(&channel->irq_pending, 0);
+ spin_lock_init(&channel->iob_lock);
+
+ init_waitqueue_head(&channel->wait_q);
+ return 0;
+}
+
+static int qeth_set_thread_start_bit(struct qeth_card *card,
+ unsigned long thread)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ if (!(card->thread_allowed_mask & thread) ||
+ (card->thread_start_mask & thread)) {
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return -EPERM;
+ }
+ card->thread_start_mask |= thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return 0;
+}
+
+void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ card->thread_start_mask &= ~thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
+
+void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ card->thread_running_mask &= ~thread;
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ wake_up(&card->wait_q);
+}
+EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
+
+static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&card->thread_mask_lock, flags);
+ if (card->thread_start_mask & thread) {
+ if ((card->thread_allowed_mask & thread) &&
+ !(card->thread_running_mask & thread)) {
+ rc = 1;
+ card->thread_start_mask &= ~thread;
+ card->thread_running_mask |= thread;
+ } else
+ rc = -EPERM;
+ }
+ spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+ return rc;
+}
+
+int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
+{
+ int rc = 0;
+
+ wait_event(card->wait_q,
+ (rc = __qeth_do_run_thread(card, thread)) >= 0);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(qeth_do_run_thread);
+
+void qeth_schedule_recovery(struct qeth_card *card)
+{
+ QETH_CARD_TEXT(card, 2, "startrec");
+ if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
+ schedule_work(&card->kernel_thread_starter);
+}
+EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
+
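+/*
+ * Evaluate channel and device status together with the sense data;
+ * returns nonzero if the condition requires recovery.
+ */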
+static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
+{
+ int dstat, cstat;
+ char *sense;
+ struct qeth_card *card;
+
+ sense = (char *) irb->ecw;
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+ card = CARD_FROM_CDEV(cdev);
+
+ if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
+ SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
+ SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
+ QETH_CARD_TEXT(card, 2, "CGENCHK");
+ dev_warn(&cdev->dev, "The qeth device driver "
+ "failed to recover an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
+ dev_name(&cdev->dev), dstat, cstat);
+ print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
+ 16, 1, irb, 64, 1);
+ return 1;
+ }
+
+ if (dstat & DEV_STAT_UNIT_CHECK) {
+ if (sense[SENSE_RESETTING_EVENT_BYTE] &
+ SENSE_RESETTING_EVENT_FLAG) {
+ QETH_CARD_TEXT(card, 2, "REVIND");
+ return 1;
+ }
+ if (sense[SENSE_COMMAND_REJECT_BYTE] &
+ SENSE_COMMAND_REJECT_FLAG) {
+ QETH_CARD_TEXT(card, 2, "CMDREJi");
+ return 1;
+ }
+ if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
+ QETH_CARD_TEXT(card, 2, "AFFE");
+ return 1;
+ }
+ if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
+ QETH_CARD_TEXT(card, 2, "ZEROSEN");
+ return 0;
+ }
+ QETH_CARD_TEXT(card, 2, "DGENCHK");
+ return 1;
+ }
+ return 0;
+}
+
+static long __qeth_check_irb_error(struct ccw_device *cdev,
+ unsigned long intparm, struct irb *irb)
+{
+ struct qeth_card *card;
+
+ card = CARD_FROM_CDEV(cdev);
+
+ if (!IS_ERR(irb))
+ return 0;
+
+ switch (PTR_ERR(irb)) {
+ case -EIO:
+ QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
+ dev_name(&cdev->dev));
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
+ break;
+ case -ETIMEDOUT:
+ dev_warn(&cdev->dev, "A hardware operation timed out"
+ " on the device\n");
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
+ if (intparm == QETH_RCD_PARM) {
+ if (card && (card->data.ccwdev == cdev)) {
+ card->data.state = CH_STATE_DOWN;
+ wake_up(&card->wait_q);
+ }
+ }
+ break;
+ default:
+ QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
+ dev_name(&cdev->dev), PTR_ERR(irb));
+ QETH_CARD_TEXT(card, 2, "ckirberr");
+ QETH_CARD_TEXT(card, 2, " rc???");
+ }
+ return PTR_ERR(irb);
+}
+
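+/* Common interrupt handler for the read, write and data channels. */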
+static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ struct irb *irb)
+{
+ int rc;
+ int cstat, dstat;
+ struct qeth_cmd_buffer *buffer;
+ struct qeth_channel *channel;
+ struct qeth_card *card;
+ struct qeth_cmd_buffer *iob;
+ __u8 index;
+
+ if (__qeth_check_irb_error(cdev, intparm, irb))
+ return;
+ cstat = irb->scsw.cmd.cstat;
+ dstat = irb->scsw.cmd.dstat;
+
+ card = CARD_FROM_CDEV(cdev);
+ if (!card)
+ return;
+
+ QETH_CARD_TEXT(card, 5, "irq");
+
+ if (card->read.ccwdev == cdev) {
+ channel = &card->read;
+ QETH_CARD_TEXT(card, 5, "read");
+ } else if (card->write.ccwdev == cdev) {
+ channel = &card->write;
+ QETH_CARD_TEXT(card, 5, "write");
+ } else {
+ channel = &card->data;
+ QETH_CARD_TEXT(card, 5, "data");
+ }
+ atomic_set(&channel->irq_pending, 0);
+
+ if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
+ channel->state = CH_STATE_STOPPED;
+
+ if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
+ channel->state = CH_STATE_HALTED;
+
+ /* let's wake up immediately on the data channel */
+ if ((channel == &card->data) && (intparm != 0) &&
+ (intparm != QETH_RCD_PARM))
+ goto out;
+
+ if (intparm == QETH_CLEAR_CHANNEL_PARM) {
+ QETH_CARD_TEXT(card, 6, "clrchpar");
+ /* we don't have to handle this further */
+ intparm = 0;
+ }
+ if (intparm == QETH_HALT_CHANNEL_PARM) {
+ QETH_CARD_TEXT(card, 6, "hltchpar");
+ /* we don't have to handle this further */
+ intparm = 0;
+ }
+ if ((dstat & DEV_STAT_UNIT_EXCEP) ||
+ (dstat & DEV_STAT_UNIT_CHECK) ||
+ (cstat)) {
+ if (irb->esw.esw0.erw.cons) {
+ dev_warn(&channel->ccwdev->dev,
+ "The qeth device driver failed to recover "
+ "an error on the device\n");
+ QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
+ "0x%X dstat 0x%X\n",
+ dev_name(&channel->ccwdev->dev), cstat, dstat);
+ print_hex_dump(KERN_WARNING, "qeth: irb ",
+ DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
+ print_hex_dump(KERN_WARNING, "qeth: sense data ",
+ DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
+ }
+ if (intparm == QETH_RCD_PARM) {
+ channel->state = CH_STATE_DOWN;
+ goto out;
+ }
+ rc = qeth_get_problem(cdev, irb);
+ if (rc) {
+ qeth_clear_ipacmd_list(card);
+ qeth_schedule_recovery(card);
+ goto out;
+ }
+ }
+
+ if (intparm == QETH_RCD_PARM) {
+ channel->state = CH_STATE_RCD_DONE;
+ goto out;
+ }
+ if (intparm) {
+ buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
+ buffer->state = BUF_STATE_PROCESSED;
+ }
+ if (channel == &card->data)
+ return;
+ if (channel == &card->read &&
+ channel->state == CH_STATE_UP)
+ qeth_issue_next_read(card);
+
+ iob = channel->iob;
+ index = channel->buf_no;
+ while (iob[index].state == BUF_STATE_PROCESSED) {
+ if (iob[index].callback != NULL)
+ iob[index].callback(channel, iob + index);
+
+ index = (index + 1) % QETH_CMD_BUFFER_NO;
+ }
+ channel->buf_no = index;
+out:
+ wake_up(&card->wait_q);
+ return;
+}
+
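+/* Deliver a TX notification to every AF_IUCV socket with skbs in @buf. */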
+static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
+ struct qeth_qdio_out_buffer *buf,
+ enum iucv_tx_notify notification)
+{
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(&buf->skb_list))
+ goto out;
+ skb = skb_peek(&buf->skb_list);
+ while (skb) {
+ QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
+ QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
+ if (skb->protocol == ETH_P_AF_IUCV) {
+ if (skb->sk) {
+ struct iucv_sock *iucv = iucv_sk(skb->sk);
+ iucv->sk_txnotify(skb, notification);
+ }
+ }
+ if (skb_queue_is_last(&buf->skb_list, skb))
+ skb = NULL;
+ else
+ skb = skb_queue_next(&buf->skb_list, skb);
+ }
+out:
+ return;
+}
+
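+/*
+ * Free all skbs queued on @buf.  If the buffer is still pending,
+ * AF_IUCV sockets are notified of a general error first.
+ */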
+static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
+{
+ struct sk_buff *skb;
+ struct iucv_sock *iucv;
+ int notify_general_error = 0;
+
+ if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ notify_general_error = 1;
+
+ /* release may never happen from within CQ tasklet scope */
+ BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+
+ skb = skb_dequeue(&buf->skb_list);
+ while (skb) {
+ QETH_CARD_TEXT(buf->q->card, 5, "skbr");
+ QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+ if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+ if (skb->sk) {
+ iucv = iucv_sk(skb->sk);
+ iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+ }
+ }
+ atomic_dec(&skb->users);
+ dev_kfree_skb_any(skb);
+ skb = skb_dequeue(&buf->skb_list);
+ }
+}
+
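+/*
+ * Reset an output buffer to @newbufstate, freeing its header cache
+ * entries; skbs are released only when the buffer becomes EMPTY.
+ */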
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum qeth_qdio_buffer_states newbufstate)
+{
+ int i;
+
+ /* is PCI flag set on buffer? */
+ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+ atomic_dec(&queue->set_pci_flags_count);
+
+ if (newbufstate == QETH_QDIO_BUF_EMPTY) {
+ qeth_release_skbs(buf);
+ }
+ for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
+ if (buf->buffer->element[i].addr && buf->is_header[i])
+ kmem_cache_free(qeth_core_header_cache,
+ buf->buffer->element[i].addr);
+ buf->is_header[i] = 0;
+ buf->buffer->element[i].length = 0;
+ buf->buffer->element[i].addr = NULL;
+ buf->buffer->element[i].eflags = 0;
+ buf->buffer->element[i].sflags = 0;
+ }
+ buf->buffer->element[15].eflags = 0;
+ buf->buffer->element[15].sflags = 0;
+ buf->next_element_to_fill = 0;
+ atomic_set(&buf->state, newbufstate);
+}
+
+static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+{
+ int j;
+