2 * drivers/s390/net/claw.c
3 * ESCON CLAW network driver
5 * Linux for zSeries version
6 * Copyright IBM Corp. 2002, 2009
7 * Author(s) Original code written by:
8 * Kazuo Iimura <iimura@jp.ibm.com>
10 * Andy Richter <richtera@us.ibm.com>
11 * Marc Price <mwprice@us.ibm.com>
14 * group x.x.rrrr,x.x.wwww
18 * adapter_name aaaaaaaa
22 * group 0.0.0200 0.0.0201
31 * The device id is decided by the order entries
32 * are added to the group the first is claw0 the second claw1
35 * rrrr - the first of 2 consecutive device addresses used for the
37 * The specified address is always used as the input (Read)
38 * channel and the next address is used as the output channel.
40 * wwww - the second of 2 consecutive device addresses used for
42 * The specified address is always used as the output
43 * channel and the previous address is used as the input channel.
45 * read_buffer - specifies number of input buffers to allocate.
46 * write_buffer - specifies number of output buffers to allocate.
47 * host_name - host name
48 * adaptor_name - adaptor name
49 * api_type - API type TCPIP or API will be sent and expected
52 * Note the following requirements:
53 * 1) host_name must match the configured adapter_name on the remote side
54 * 2) adaptor_name must match the configured host name on the remote side
57 * 1.00 Initial release shipped
58 * 1.10 Changes for Buffer allocation
59 * 1.15 Changed for 2.6 Kernel No longer compiles on 2.4 or lower
60 * 1.25 Added Packing support
64 #define KMSG_COMPONENT "claw"
66 #include <asm/ccwdev.h>
67 #include <asm/ccwgroup.h>
68 #include <asm/debug.h>
69 #include <asm/idals.h>
71 #include <linux/bitops.h>
72 #include <linux/ctype.h>
73 #include <linux/delay.h>
74 #include <linux/errno.h>
75 #include <linux/if_arp.h>
76 #include <linux/init.h>
77 #include <linux/interrupt.h>
79 #include <linux/kernel.h>
80 #include <linux/module.h>
81 #include <linux/netdevice.h>
82 #include <linux/etherdevice.h>
83 #include <linux/proc_fs.h>
84 #include <linux/sched.h>
85 #include <linux/signal.h>
86 #include <linux/skbuff.h>
87 #include <linux/slab.h>
88 #include <linux/string.h>
89 #include <linux/tcp.h>
90 #include <linux/timer.h>
91 #include <linux/types.h>
/* Module-scope state for the s390 debug facility (s390dbf).
 * Trace data appears under the "claw_trace" and "claw_setup" dbf entries. */
96 CLAW uses the s390dbf file system see claw_trace and claw_setup
/* Driver banner; __initdata: discarded after module init. */
99 static char version[] __initdata = "CLAW driver";
/* Scratch buffer for formatting debug text (size 255 bytes). */
100 static char debug_buffer[255];
102 * Debug Facility Stuff
/* Handles for the two dbf areas registered in claw_register_debug_facility(). */
104 static debug_info_t *claw_dbf_setup;
105 static debug_info_t *claw_dbf_trace;
108 * CLAW Debug Facility functions
/* Tear down both dbf areas. debug_unregister() tolerates NULL, so this is
 * safe to call from the partial-failure path of the register function. */
111 claw_unregister_debug_facility(void)
114 debug_unregister(claw_dbf_setup);
116 debug_unregister(claw_dbf_trace);
/* Create the "claw_setup" and "claw_trace" dbf areas, attach the hex/ascii
 * view to each and set trace level 2. On allocation failure both are torn
 * down again via claw_unregister_debug_facility().
 * NOTE(review): the failure-path return statement is not visible in this
 * fragment — presumably returns -ENOMEM; confirm against full source. */
120 claw_register_debug_facility(void)
122 claw_dbf_setup = debug_register("claw_setup", 2, 1, 8);
123 claw_dbf_trace = debug_register("claw_trace", 2, 2, 8);
124 if (claw_dbf_setup == NULL || claw_dbf_trace == NULL) {
125 claw_unregister_debug_facility();
128 debug_register_view(claw_dbf_setup, &debug_hex_ascii_view);
129 debug_set_level(claw_dbf_setup, 2);
130 debug_register_view(claw_dbf_trace, &debug_hex_ascii_view);
131 debug_set_level(claw_dbf_trace, 2);
/* Mark the device busy by setting the private tbusy word to 1
 * (plain store, unlike the bit helpers below). */
136 claw_set_busy(struct net_device *dev)
138 ((struct claw_privbk *)dev->ml_priv)->tbusy = 1;
/* Clear bit 0 of tbusy and restart the transmit queue. */
142 claw_clear_busy(struct net_device *dev)
144 clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy));
145 netif_wake_queue(dev);
/* Return the current (non-atomic snapshot) value of the tbusy word. */
149 claw_check_busy(struct net_device *dev)
151 return ((struct claw_privbk *) dev->ml_priv)->tbusy;
/* Stop the transmit queue, then set busy bit 'nr' (e.g. TB_TX/TB_STOP). */
155 claw_setbit_busy(int nr,struct net_device *dev)
157 netif_stop_queue(dev);
158 set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
/* Clear busy bit 'nr' and wake the transmit queue again. */
162 claw_clearbit_busy(int nr,struct net_device *dev)
164 clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy));
165 netif_wake_queue(dev);
/* Stop the queue and atomically test-and-set busy bit 'nr'.
 * Returns the previous bit value (0 means we newly acquired it). */
169 claw_test_and_setbit_busy(int nr,struct net_device *dev)
171 netif_stop_queue(dev);
172 return test_and_set_bit(nr,
173 (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy));
177 /* Functions for the DEV methods */
179 static int claw_probe(struct ccwgroup_device *cgdev);
180 static void claw_remove_device(struct ccwgroup_device *cgdev);
181 static void claw_purge_skb_queue(struct sk_buff_head *q);
182 static int claw_new_device(struct ccwgroup_device *cgdev);
183 static int claw_shutdown_device(struct ccwgroup_device *cgdev);
184 static int claw_tx(struct sk_buff *skb, struct net_device *dev);
185 static int claw_change_mtu( struct net_device *dev, int new_mtu);
186 static int claw_open(struct net_device *dev);
187 static void claw_irq_handler(struct ccw_device *cdev,
188 unsigned long intparm, struct irb *irb);
189 static void claw_irq_tasklet ( unsigned long data );
190 static int claw_release(struct net_device *dev);
191 static void claw_write_retry ( struct chbk * p_ch );
192 static void claw_write_next ( struct chbk * p_ch );
193 static void claw_timer ( struct chbk * p_ch );
196 static int add_claw_reads(struct net_device *dev,
197 struct ccwbk* p_first, struct ccwbk* p_last);
198 static void ccw_check_return_code (struct ccw_device *cdev, int return_code);
199 static void ccw_check_unit_check (struct chbk * p_ch, unsigned char sense );
200 static int find_link(struct net_device *dev, char *host_name, char *ws_name );
201 static int claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid);
202 static int init_ccw_bk(struct net_device *dev);
203 static void probe_error( struct ccwgroup_device *cgdev);
204 static struct net_device_stats *claw_stats(struct net_device *dev);
205 static int pages_to_order_of_mag(int num_of_pages);
206 static struct sk_buff *claw_pack_skb(struct claw_privbk *privptr);
207 /* sysfs Functions */
208 static ssize_t claw_hname_show(struct device *dev,
209 struct device_attribute *attr, char *buf);
210 static ssize_t claw_hname_write(struct device *dev,
211 struct device_attribute *attr,
212 const char *buf, size_t count);
213 static ssize_t claw_adname_show(struct device *dev,
214 struct device_attribute *attr, char *buf);
215 static ssize_t claw_adname_write(struct device *dev,
216 struct device_attribute *attr,
217 const char *buf, size_t count);
218 static ssize_t claw_apname_show(struct device *dev,
219 struct device_attribute *attr, char *buf);
220 static ssize_t claw_apname_write(struct device *dev,
221 struct device_attribute *attr,
222 const char *buf, size_t count);
223 static ssize_t claw_wbuff_show(struct device *dev,
224 struct device_attribute *attr, char *buf);
225 static ssize_t claw_wbuff_write(struct device *dev,
226 struct device_attribute *attr,
227 const char *buf, size_t count);
228 static ssize_t claw_rbuff_show(struct device *dev,
229 struct device_attribute *attr, char *buf);
230 static ssize_t claw_rbuff_write(struct device *dev,
231 struct device_attribute *attr,
232 const char *buf, size_t count);
234 /* Functions for System Validate */
235 static int claw_process_control( struct net_device *dev, struct ccwbk * p_ccw);
236 static int claw_send_control(struct net_device *dev, __u8 type, __u8 link,
237 __u8 correlator, __u8 rc , char *local_name, char *remote_name);
238 static int claw_snd_conn_req(struct net_device *dev, __u8 link);
239 static int claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl);
240 static int claw_snd_sys_validate_rsp(struct net_device *dev,
241 struct clawctl * p_ctl, __u32 return_code);
242 static int claw_strt_conn_req(struct net_device *dev );
243 static void claw_strt_read(struct net_device *dev, int lock);
244 static void claw_strt_out_IO(struct net_device *dev);
245 static void claw_free_wrt_buf(struct net_device *dev);
247 /* Functions for unpack reads */
248 static void unpack_read(struct net_device *dev);
250 static int claw_pm_prepare(struct ccwgroup_device *gdev)
255 /* the root device for claw group devices */
/* Parent device under which all CLAW group devices are created. */
256 static struct device *claw_root_dev;
/* ccwgroup driver glue: wires the group-device lifecycle (remove,
 * online/offline transitions and PM prepare) to the claw_* handlers. */
260 static struct ccwgroup_driver claw_group_driver = {
262 .owner = THIS_MODULE,
266 .remove = claw_remove_device,
267 .set_online = claw_new_device,
268 .set_offline = claw_shutdown_device,
269 .prepare = claw_pm_prepare,
/* Device-ID table: CLAW channels identify as CU type 0x3088 model 0x61. */
272 static struct ccw_device_id claw_ids[] = {
273 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
276 MODULE_DEVICE_TABLE(ccw, claw_ids);
/* Per-subchannel ccw driver; probe/remove are delegated to the generic
 * ccwgroup helpers so individual channels attach to the group device. */
278 static struct ccw_driver claw_ccw_driver = {
280 .owner = THIS_MODULE,
284 .probe = ccwgroup_probe_ccwdev,
285 .remove = ccwgroup_remove_ccwdev,
286 .int_class = IOINT_CLW,
/* sysfs "group" attribute (write-only, 0200): parses the bus-id pair in
 * 'buf' and creates a 2-channel CLAW group device under claw_root_dev.
 * Returns 'count' on success or the negative error from
 * ccwgroup_create_dev(). */
289 static ssize_t claw_driver_group_store(struct device_driver *ddrv,
290 const char *buf, size_t count)
293 err = ccwgroup_create_dev(claw_root_dev, &claw_group_driver, 2, buf);
294 return err ? err : count;
296 static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
/* Attribute-group plumbing exposing the "group" file on the driver core. */
298 static struct attribute *claw_drv_attrs[] = {
299 &driver_attr_group.attr,
302 static struct attribute_group claw_drv_attr_group = {
303 .attrs = claw_drv_attrs,
305 static const struct attribute_group *claw_drv_attr_groups[] = {
306 &claw_drv_attr_group,
314 /*-------------------------------------------------------------------*
316 *-------------------------------------------------------------------*/
/* net_device transmit entry point.
 * Hands the skb to claw_hw_tx() on logical link 1 while holding the write
 * channel's ccwdev lock with interrupts disabled (the IRQ handler touches
 * the same channel state). Returns the claw_hw_tx() result. */
319 claw_tx(struct sk_buff *skb, struct net_device *dev)
322 struct claw_privbk *privptr = dev->ml_priv;
323 unsigned long saveflags;
326 CLAW_DBF_TEXT(4, trace, "claw_tx");
327 p_ch = &privptr->channel[WRITE_CHANNEL];
/* Serialize against the channel interrupt path. */
328 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
329 rc=claw_hw_tx( skb, dev, 1 );
330 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
331 CLAW_DBF_TEXT_(4, trace, "clawtx%d", rc);
337 } /* end of claw_tx */
339 /*------------------------------------------------------------------*
340 * pack the collect queue into an skb and return it *
341 * If not packing just return the top skb from the queue *
342 *------------------------------------------------------------------*/
/* Pack queued skbs from the write channel's collect_queue into one
 * outbound skb of up to write_size bytes (8 bytes reserved, presumably
 * for the CLAW packing header — confirm against full source).
 * If packing is not negotiated (p_env->packing != DO_PACKED) the top
 * skb of the queue is returned as-is. Returns NULL when nothing can be
 * sent; an skb that does not fit is pushed back onto the queue head. */
344 static struct sk_buff *
345 claw_pack_skb(struct claw_privbk *privptr)
347 struct sk_buff *new_skb,*held_skb;
348 struct chbk *p_ch = &privptr->channel[WRITE_CHANNEL];
349 struct claw_env *p_env = privptr->p_env;
350 int pkt_cnt,pk_ind,so_far;
352 new_skb = NULL; /* assume no dice */
354 CLAW_DBF_TEXT(4, trace, "PackSKBe");
355 if (!skb_queue_empty(&p_ch->collect_queue)) {
357 held_skb = skb_dequeue(&p_ch->collect_queue);
359 dev_kfree_skb_any(held_skb);
362 if (p_env->packing != DO_PACKED)
364 /* get a new SKB we will pack at least one */
365 new_skb = dev_alloc_skb(p_env->write_size);
366 if (new_skb == NULL) {
/* Allocation failed: keep a reference and requeue for a later try. */
367 atomic_inc(&held_skb->users);
368 skb_queue_head(&p_ch->collect_queue,held_skb);
371 /* we have packed packet and a place to put it */
374 new_skb->cb[1] = 'P'; /* every skb on queue has pack header */
/* Greedily append queued skbs while they fit in the remaining space. */
375 while ((pk_ind) && (held_skb != NULL)) {
376 if (held_skb->len+so_far <= p_env->write_size-8) {
377 memcpy(skb_put(new_skb,held_skb->len),
378 held_skb->data,held_skb->len);
379 privptr->stats.tx_packets++;
380 so_far += held_skb->len;
382 dev_kfree_skb_any(held_skb);
383 held_skb = skb_dequeue(&p_ch->collect_queue);
385 atomic_dec(&held_skb->users);
/* Does not fit: pin it and put it back at the queue head. */
388 atomic_inc(&held_skb->users);
389 skb_queue_head(&p_ch->collect_queue,held_skb);
393 CLAW_DBF_TEXT(4, trace, "PackSKBx");
397 /*-------------------------------------------------------------------*
400 *-------------------------------------------------------------------*/
/* net_device change_mtu callback.
 * Rejects an MTU below 60 or above the configured write buffer size
 * (error return elided in this fragment — presumably -EINVAL). */
403 claw_change_mtu(struct net_device *dev, int new_mtu)
405 struct claw_privbk *privptr = dev->ml_priv;
407 CLAW_DBF_TEXT(4, trace, "setmtu");
408 buff_size = privptr->p_env->write_size;
409 if ((new_mtu < 60) || (new_mtu > buff_size)) {
414 } /* end of claw_change_mtu */
417 /*-------------------------------------------------------------------*
420 *-------------------------------------------------------------------*/
/* net_device open callback: bring both channels (READ/WRITE) up.
 * Allocates CCW/buffer storage on first open, selects packed vs framed
 * buffer sizes from api_type, then issues HALT_IO on each channel and
 * sleeps (with a 15s timer) until the IRQ handler wakes us. If either
 * channel reports bad ending status or the timer fired, everything is
 * freed again and the open fails (the EIO return itself is elided in
 * this fragment). */
422 claw_open(struct net_device *dev)
427 unsigned long saveflags=0;
429 struct claw_privbk *privptr;
430 DECLARE_WAITQUEUE(wait, current);
431 struct timer_list timer;
434 CLAW_DBF_TEXT(4, trace, "open");
435 privptr = (struct claw_privbk *)dev->ml_priv;
436 /* allocate and initialize CCW blocks */
437 if (privptr->buffs_alloc == 0) {
440 CLAW_DBF_TEXT(2, trace, "openmem");
444 privptr->system_validate_comp=0;
445 privptr->release_pend=0;
/* Packed API uses smaller buffers and starts packing negotiation. */
446 if(strncmp(privptr->p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
447 privptr->p_env->read_size=DEF_PACK_BUFSIZE;
448 privptr->p_env->write_size=DEF_PACK_BUFSIZE;
449 privptr->p_env->packing=PACKING_ASK;
451 privptr->p_env->packing=0;
452 privptr->p_env->read_size=CLAW_FRAME_SIZE;
453 privptr->p_env->write_size=CLAW_FRAME_SIZE;
456 tasklet_init(&privptr->channel[READ_CHANNEL].tasklet, claw_irq_tasklet,
457 (unsigned long) &privptr->channel[READ_CHANNEL]);
/* Start both channels; each iteration halts the channel and waits
 * for the interrupt handler (or the 15s guard timer) to wake us. */
458 for ( i = 0; i < 2; i++) {
459 CLAW_DBF_TEXT_(2, trace, "opn_ch%d", i);
460 init_waitqueue_head(&privptr->channel[i].wait);
461 /* skb_queue_head_init(&p_ch->io_queue); */
462 if (i == WRITE_CHANNEL)
464 &privptr->channel[WRITE_CHANNEL].collect_queue);
465 privptr->channel[i].flag_a = 0;
466 privptr->channel[i].IO_active = 0;
467 privptr->channel[i].flag &= ~CLAW_TIMER;
469 timer.function = (void *)claw_timer;
470 timer.data = (unsigned long)(&privptr->channel[i]);
471 timer.expires = jiffies + 15*HZ;
473 spin_lock_irqsave(get_ccwdev_lock(
474 privptr->channel[i].cdev), saveflags);
475 parm = (unsigned long) &privptr->channel[i];
476 privptr->channel[i].claw_state = CLAW_START_HALT_IO;
478 add_wait_queue(&privptr->channel[i].wait, &wait);
479 rc = ccw_device_halt(
480 (struct ccw_device *)privptr->channel[i].cdev,parm);
481 set_current_state(TASK_INTERRUPTIBLE);
482 spin_unlock_irqrestore(
483 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
485 set_current_state(TASK_RUNNING);
486 remove_wait_queue(&privptr->channel[i].wait, &wait);
488 ccw_check_return_code(privptr->channel[i].cdev, rc);
489 if((privptr->channel[i].flag & CLAW_TIMER) == 0x00)
/* Failure check: abnormal ending status on either channel, or the
 * guard timer expired before the remote side answered. */
492 if ((((privptr->channel[READ_CHANNEL].last_dstat |
493 privptr->channel[WRITE_CHANNEL].last_dstat) &
494 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) ||
495 (((privptr->channel[READ_CHANNEL].flag |
496 privptr->channel[WRITE_CHANNEL].flag) & CLAW_TIMER) != 0x00)) {
497 dev_info(&privptr->channel[READ_CHANNEL].cdev->dev,
498 "%s: remote side is not ready\n", dev->name);
499 CLAW_DBF_TEXT(2, trace, "notrdy");
/* Error path: stop both channels again ... */
501 for ( i = 0; i < 2; i++) {
503 get_ccwdev_lock(privptr->channel[i].cdev),
505 parm = (unsigned long) &privptr->channel[i];
506 privptr->channel[i].claw_state = CLAW_STOP;
507 rc = ccw_device_halt(
508 (struct ccw_device *)&privptr->channel[i].cdev,
510 spin_unlock_irqrestore(
511 get_ccwdev_lock(privptr->channel[i].cdev),
514 ccw_check_return_code(
515 privptr->channel[i].cdev, rc);
/* ... and release every page-group allocated for CCWs and the
 * read/write data buffers (layout depends on buffer size vs PAGE_SIZE). */
518 free_pages((unsigned long)privptr->p_buff_ccw,
519 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
520 if (privptr->p_env->read_size < PAGE_SIZE) {
521 free_pages((unsigned long)privptr->p_buff_read,
522 (int)pages_to_order_of_mag(
523 privptr->p_buff_read_num));
526 p_buf=privptr->p_read_active_first;
527 while (p_buf!=NULL) {
528 free_pages((unsigned long)p_buf->p_buffer,
529 (int)pages_to_order_of_mag(
530 privptr->p_buff_pages_perread ));
534 if (privptr->p_env->write_size < PAGE_SIZE ) {
535 free_pages((unsigned long)privptr->p_buff_write,
536 (int)pages_to_order_of_mag(
537 privptr->p_buff_write_num));
540 p_buf=privptr->p_write_active_first;
541 while (p_buf!=NULL) {
542 free_pages((unsigned long)p_buf->p_buffer,
543 (int)pages_to_order_of_mag(
544 privptr->p_buff_pages_perwrite ));
548 privptr->buffs_alloc = 0;
549 privptr->channel[READ_CHANNEL].flag = 0x00;
550 privptr->channel[WRITE_CHANNEL].flag = 0x00;
551 privptr->p_buff_ccw=NULL;
552 privptr->p_buff_read=NULL;
553 privptr->p_buff_write=NULL;
554 claw_clear_busy(dev);
555 CLAW_DBF_TEXT(2, trace, "open EIO");
559 /* Send SystemValidate command */
561 claw_clear_busy(dev);
562 CLAW_DBF_TEXT(4, trace, "openok");
564 } /* end of claw_open */
566 /*-------------------------------------------------------------------*
570 *--------------------------------------------------------------------*/
572 claw_irq_handler(struct ccw_device *cdev,
573 unsigned long intparm, struct irb *irb)
575 struct chbk *p_ch = NULL;
576 struct claw_privbk *privptr = NULL;
577 struct net_device *dev = NULL;
578 struct claw_env *p_env;
579 struct chbk *p_ch_r=NULL;
581 CLAW_DBF_TEXT(4, trace, "clawirq");
582 /* Bypass all 'unsolicited interrupts' */
583 privptr = dev_get_drvdata(&cdev->dev);
585 dev_warn(&cdev->dev, "An uninitialized CLAW device received an"
586 " IRQ, c-%02x d-%02x\n",
587 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
588 CLAW_DBF_TEXT(2, trace, "badirq");
592 /* Try to extract channel from driver data. */
593 if (privptr->channel[READ_CHANNEL].cdev == cdev)
594 p_ch = &privptr->channel[READ_CHANNEL];
595 else if (privptr->channel[WRITE_CHANNEL].cdev == cdev)
596 p_ch = &privptr->channel[WRITE_CHANNEL];
598 dev_warn(&cdev->dev, "The device is not a CLAW device\n");
599 CLAW_DBF_TEXT(2, trace, "badchan");
602 CLAW_DBF_TEXT_(4, trace, "IRQCH=%d", p_ch->flag);
604 dev = (struct net_device *) (p_ch->ndev);
605 p_env=privptr->p_env;
607 /* Copy interruption response block. */
608 memcpy(p_ch->irb, irb, sizeof(struct irb));
610 /* Check for good subchannel return code, otherwise info message */
611 if (irb->scsw.cmd.cstat && !(irb->scsw.cmd.cstat & SCHN_STAT_PCI)) {
613 "%s: subchannel check for device: %04x -"
614 " Sch Stat %02x Dev Stat %02x CPA - %04x\n",
615 dev->name, p_ch->devno,
616 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
618 CLAW_DBF_TEXT(2, trace, "chanchk");
622 /* Check the reason-code of a unit check */
623 if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
624 ccw_check_unit_check(p_ch, irb->ecw[0]);
626 /* State machine to bring the connection up, down and to restart */
627 p_ch->last_dstat = irb->scsw.cmd.dstat;
629 switch (p_ch->claw_state) {
630 case CLAW_STOP:/* HALT_IO by claw_release (halt sequence) */
631 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
632 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
633 (p_ch->irb->scsw.cmd.stctl ==
634 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))))
636 wake_up(&p_ch->wait); /* wake up claw_release */
637 CLAW_DBF_TEXT(4, trace, "stop");
639 case CLAW_START_HALT_IO: /* HALT_IO issued by claw_open */
640 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
641 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
642 (p_ch->irb->scsw.cmd.stctl ==
643 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
644 CLAW_DBF_TEXT(4, trace, "haltio");
647 if (p_ch->flag == CLAW_READ) {
648 p_ch->claw_state = CLAW_START_READ;
649 wake_up(&p_ch->wait); /* wake claw_open (READ)*/
650 } else if (p_ch->flag == CLAW_WRITE) {
651 p_ch->claw_state = CLAW_START_WRITE;
652 /* send SYSTEM_VALIDATE */
653 claw_strt_read(dev, LOCK_NO);
654 claw_send_control(dev,
655 SYSTEM_VALIDATE_REQUEST,
658 p_env->adapter_name);
660 dev_warn(&cdev->dev, "The CLAW device received"
661 " an unexpected IRQ, "
664 irb->scsw.cmd.dstat);
667 CLAW_DBF_TEXT(4, trace, "haltio");
669 case CLAW_START_READ:
670 CLAW_DBF_TEXT(4, trace, "ReadIRQ");
671 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
672 clear_bit(0, (void *)&p_ch->IO_active);
673 if ((p_ch->irb->ecw[0] & 0x41) == 0x41 ||
674 (p_ch->irb->ecw[0] & 0x40) == 0x40 ||
675 (p_ch->irb->ecw[0]) == 0) {
676 privptr->stats.rx_errors++;
678 "%s: Restart is required after remote "
682 CLAW_DBF_TEXT(4, trace, "notrdy");
685 if ((p_ch->irb->scsw.cmd.cstat & SCHN_STAT_PCI) &&
686 (p_ch->irb->scsw.cmd.dstat == 0)) {
687 if (test_and_set_bit(CLAW_BH_ACTIVE,
688 (void *)&p_ch->flag_a) == 0)
689 tasklet_schedule(&p_ch->tasklet);
691 CLAW_DBF_TEXT(4, trace, "PCINoBH");
692 CLAW_DBF_TEXT(4, trace, "PCI_read");
695 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
696 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
697 (p_ch->irb->scsw.cmd.stctl ==
698 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
699 CLAW_DBF_TEXT(4, trace, "SPend_rd");
702 clear_bit(0, (void *)&p_ch->IO_active);
703 claw_clearbit_busy(TB_RETRY, dev);
704 if (test_and_set_bit(CLAW_BH_ACTIVE,
705 (void *)&p_ch->flag_a) == 0)
706 tasklet_schedule(&p_ch->tasklet);
708 CLAW_DBF_TEXT(4, trace, "RdBHAct");
709 CLAW_DBF_TEXT(4, trace, "RdIRQXit");
711 case CLAW_START_WRITE:
712 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
714 "%s: Unit Check Occurred in "
715 "write channel\n", dev->name);
716 clear_bit(0, (void *)&p_ch->IO_active);
717 if (p_ch->irb->ecw[0] & 0x80) {
719 "%s: Resetting Event "
720 "occurred:\n", dev->name);
721 init_timer(&p_ch->timer);
722 p_ch->timer.function =
723 (void *)claw_write_retry;
724 p_ch->timer.data = (unsigned long)p_ch;
725 p_ch->timer.expires = jiffies + 10*HZ;
726 add_timer(&p_ch->timer);
728 "%s: write connection "
729 "restarting\n", dev->name);
731 CLAW_DBF_TEXT(4, trace, "rstrtwrt");
734 if (p_ch->irb->scsw.cmd.dstat & DEV_STAT_UNIT_EXCEP) {
735 clear_bit(0, (void *)&p_ch->IO_active);
737 "%s: Unit Exception "
738 "occurred in write channel\n",
741 if (!((p_ch->irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) ||
742 (p_ch->irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) ||
743 (p_ch->irb->scsw.cmd.stctl ==
744 (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)))) {
745 CLAW_DBF_TEXT(4, trace, "writeUE");
748 clear_bit(0, (void *)&p_ch->IO_active);
749 if (claw_test_and_setbit_busy(TB_TX, dev) == 0) {
750 claw_write_next(p_ch);
751 claw_clearbit_busy(TB_TX, dev);
752 claw_clear_busy(dev);
754 p_ch_r = (struct chbk *)&privptr->channel[READ_CHANNEL];
755 if (test_and_set_bit(CLAW_BH_ACTIVE,
756 (void *)&p_ch_r->flag_a) == 0)
757 tasklet_schedule(&p_ch_r->tasklet);
758 CLAW_DBF_TEXT(4, trace, "StWtExit");
762 "The CLAW device for %s received an unexpected IRQ\n",
764 CLAW_DBF_TEXT(2, trace, "badIRQ");
768 } /* end of claw_irq_handler */
771 /*-------------------------------------------------------------------*
774 *--------------------------------------------------------------------*/
/* Bottom half for the read channel: 'data' is the struct chbk *.
 * Processes received data (the unpack_read() call is elided in this
 * fragment) and then clears the CLAW_BH_ACTIVE flag set by the IRQ
 * handler so the tasklet can be scheduled again. */
776 claw_irq_tasklet ( unsigned long data )
779 struct net_device *dev;
781 p_ch = (struct chbk *) data;
782 dev = (struct net_device *)p_ch->ndev;
783 CLAW_DBF_TEXT(4, trace, "IRQtask");
785 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a);
786 CLAW_DBF_TEXT(4, trace, "TskletXt");
788 } /* end of claw_irq_bh */
790 /*-------------------------------------------------------------------*
793 *--------------------------------------------------------------------*/
/* net_device stop callback: shut both channels down (write first, i=1
 * down to 0), halting each under its ccwdev lock and waiting for the
 * CLAW_STOP wakeup from the IRQ handler. Then frees any pending packed
 * skb and all CCW/read/write buffer pages, resets the private-block
 * bookkeeping, recycles pending write CCWs onto the free chain and
 * reports abnormal final channel status. */
795 claw_release(struct net_device *dev)
799 unsigned long saveflags;
801 struct claw_privbk *privptr;
802 DECLARE_WAITQUEUE(wait, current);
803 struct ccwbk* p_this_ccw;
808 privptr = (struct claw_privbk *)dev->ml_priv;
811 CLAW_DBF_TEXT(4, trace, "release");
812 privptr->release_pend=1;
813 claw_setbit_busy(TB_STOP,dev);
/* Stop channels in reverse order: WRITE (1) first, then READ (0). */
814 for ( i = 1; i >=0 ; i--) {
816 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
817 /* del_timer(&privptr->channel[READ_CHANNEL].timer); */
818 privptr->channel[i].claw_state = CLAW_STOP;
819 privptr->channel[i].IO_active = 0;
820 parm = (unsigned long) &privptr->channel[i];
821 if (i == WRITE_CHANNEL)
822 claw_purge_skb_queue(
823 &privptr->channel[WRITE_CHANNEL].collect_queue);
824 rc = ccw_device_halt (privptr->channel[i].cdev, parm);
825 if (privptr->system_validate_comp==0x00) /* never opened? */
826 init_waitqueue_head(&privptr->channel[i].wait);
827 add_wait_queue(&privptr->channel[i].wait, &wait);
828 set_current_state(TASK_INTERRUPTIBLE);
829 spin_unlock_irqrestore(
830 get_ccwdev_lock(privptr->channel[i].cdev), saveflags);
832 set_current_state(TASK_RUNNING);
833 remove_wait_queue(&privptr->channel[i].wait, &wait);
835 ccw_check_return_code(privptr->channel[i].cdev, rc);
/* Drop a half-built packed skb, if any. */
838 if (privptr->pk_skb != NULL) {
839 dev_kfree_skb_any(privptr->pk_skb);
840 privptr->pk_skb = NULL;
842 if(privptr->buffs_alloc != 1) {
843 CLAW_DBF_TEXT(4, trace, "none2fre");
846 CLAW_DBF_TEXT(4, trace, "freebufs");
/* Free CCW area and read/write data buffers; layout differs for
 * sub-page vs multi-page buffer sizes (mirrors claw_open's cleanup). */
847 if (privptr->p_buff_ccw != NULL) {
848 free_pages((unsigned long)privptr->p_buff_ccw,
849 (int)pages_to_order_of_mag(privptr->p_buff_ccw_num));
851 CLAW_DBF_TEXT(4, trace, "freeread");
852 if (privptr->p_env->read_size < PAGE_SIZE) {
853 if (privptr->p_buff_read != NULL) {
854 free_pages((unsigned long)privptr->p_buff_read,
855 (int)pages_to_order_of_mag(privptr->p_buff_read_num));
859 p_buf=privptr->p_read_active_first;
860 while (p_buf!=NULL) {
861 free_pages((unsigned long)p_buf->p_buffer,
862 (int)pages_to_order_of_mag(
863 privptr->p_buff_pages_perread ));
867 CLAW_DBF_TEXT(4, trace, "freewrit");
868 if (privptr->p_env->write_size < PAGE_SIZE ) {
869 free_pages((unsigned long)privptr->p_buff_write,
870 (int)pages_to_order_of_mag(privptr->p_buff_write_num));
873 p_buf=privptr->p_write_active_first;
874 while (p_buf!=NULL) {
875 free_pages((unsigned long)p_buf->p_buffer,
876 (int)pages_to_order_of_mag(
877 privptr->p_buff_pages_perwrite ));
881 CLAW_DBF_TEXT(4, trace, "clearptr");
882 privptr->buffs_alloc = 0;
883 privptr->p_buff_ccw=NULL;
884 privptr->p_buff_read=NULL;
885 privptr->p_buff_write=NULL;
886 privptr->system_validate_comp=0;
887 privptr->release_pend=0;
888 /* Remove any writes that were pending and reset all reads */
889 p_this_ccw=privptr->p_read_active_first;
890 while (p_this_ccw!=NULL) {
/* Poison read headers so stale buffers are never mistaken for data. */
891 p_this_ccw->header.length=0xffff;
892 p_this_ccw->header.opcode=0xff;
893 p_this_ccw->header.flag=0x00;
894 p_this_ccw=p_this_ccw->next;
/* Move all pending write CCW blocks back to the free chain. */
897 while (privptr->p_write_active_first!=NULL) {
898 p_this_ccw=privptr->p_write_active_first;
899 p_this_ccw->header.flag=CLAW_PENDING;
900 privptr->p_write_active_first=p_this_ccw->next;
901 p_this_ccw->next=privptr->p_write_free_chain;
902 privptr->p_write_free_chain=p_this_ccw;
903 ++privptr->write_free_count;
905 privptr->p_write_active_last=NULL;
906 privptr->mtc_logical_link = -1;
907 privptr->mtc_skipping = 1;
908 privptr->mtc_offset=0;
/* Warn if either channel ended with status beyond CE+DE. */
910 if (((privptr->channel[READ_CHANNEL].last_dstat |
911 privptr->channel[WRITE_CHANNEL].last_dstat) &
912 ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) != 0x00) {
913 dev_warn(&privptr->channel[READ_CHANNEL].cdev->dev,
914 "Deactivating %s completed with incorrect"
915 " subchannel status "
916 "(read %02x, write %02x)\n",
918 privptr->channel[READ_CHANNEL].last_dstat,
919 privptr->channel[WRITE_CHANNEL].last_dstat);
920 CLAW_DBF_TEXT(2, trace, "badclose");
922 CLAW_DBF_TEXT(4, trace, "rlsexit");
924 } /* end of claw_release */
926 /*-------------------------------------------------------------------*
929 *--------------------------------------------------------------------*/
/* Timer callback armed by the IRQ handler after a resetting event on the
 * write channel: restart output I/O unless the channel is stopping. */
932 claw_write_retry ( struct chbk *p_ch )
935 struct net_device *dev=p_ch->ndev;
937 CLAW_DBF_TEXT(4, trace, "w_retry");
938 if (p_ch->claw_state == CLAW_STOP) {
941 claw_strt_out_IO( dev );
942 CLAW_DBF_TEXT(4, trace, "rtry_xit");
944 } /* end of claw_write_retry */
947 /*-------------------------------------------------------------------*
950 *--------------------------------------------------------------------*/
/* Continue output after a write interrupt: reclaim completed write
 * buffers, then — while free write buffers remain — pack and resubmit
 * skbs from the collect queue, finally (re)starting channel output if
 * an active write program is queued. No-op when the channel is stopping. */
953 claw_write_next ( struct chbk * p_ch )
956 struct net_device *dev;
957 struct claw_privbk *privptr=NULL;
958 struct sk_buff *pk_skb;
960 CLAW_DBF_TEXT(4, trace, "claw_wrt");
961 if (p_ch->claw_state == CLAW_STOP)
963 dev = (struct net_device *) p_ch->ndev;
964 privptr = (struct claw_privbk *) dev->ml_priv;
965 claw_free_wrt_buf( dev );
966 if ((privptr->write_free_count > 0) &&
967 !skb_queue_empty(&p_ch->collect_queue)) {
968 pk_skb = claw_pack_skb(privptr);
969 while (pk_skb != NULL) {
970 claw_hw_tx(pk_skb, dev, 1);
971 if (privptr->write_free_count > 0) {
972 pk_skb = claw_pack_skb(privptr);
977 if (privptr->p_write_active_first!=NULL) {
978 claw_strt_out_IO(dev);
981 } /* end of claw_write_next */
983 /*-------------------------------------------------------------------*
986 *--------------------------------------------------------------------*/
/* 15s guard-timer callback used by claw_open: flag the channel as timed
 * out and wake whoever is sleeping on the channel wait queue. */
989 claw_timer ( struct chbk * p_ch )
991 CLAW_DBF_TEXT(4, trace, "timer");
992 p_ch->flag |= CLAW_TIMER;
993 wake_up(&p_ch->wait);
995 } /* end of claw_timer */
1003 /*-------------------------------------------------------------------*
1005 * pages_to_order_of_mag *
1007 * takes a number of pages from 1 to 512 and returns the *
1008 * log(num_pages)/log(2) get_free_pages() needs a base 2 order *
1009 * of magnitude get_free_pages() has an upper order of 9 *
1010 *--------------------------------------------------------------------*/
/* Convert a page count (1..512) to the base-2 order wanted by
 * __get_free_pages()/free_pages(): ceil(log2(num_of_pages)), clamped
 * to the allocator's maximum order of 9 (512 pages). */
1013 pages_to_order_of_mag(int num_of_pages)
1015 int order_of_mag=1; /* assume 2 pages */
1018 CLAW_DBF_TEXT_(5, trace, "pages%d", num_of_pages);
1019 if (num_of_pages == 1) {return 0; } /* magnitude of 0 = 1 page */
1020 /* 512 pages = 2Meg on 4k page systems */
1021 if (num_of_pages >= 512) {return 9; }
1022 /* we have two or more pages order is at least 1 */
/* Double 'nump' until it covers num_of_pages; the increment of
 * order_of_mag inside the loop body is elided in this fragment. */
1023 for (nump=2 ;nump <= 512;nump*=2) {
1024 if (num_of_pages <= nump)
1028 if (order_of_mag > 9) { order_of_mag = 9; } /* I know it's paranoid */
1029 CLAW_DBF_TEXT_(5, trace, "mag%d", order_of_mag);
1030 return order_of_mag;
1033 /*-------------------------------------------------------------------*
1037 *--------------------------------------------------------------------*/
/* Append a new chain of read CCW blocks (p_first..p_last) to the running
 * read channel program without stopping the channel. Two alternating
 * "ending" CCW sets (read1/read2) are used double-buffer style: the new
 * segment's trailing TICs are pointed at the inactive ending set, and if
 * a chain is already active, the active ending set's NOP is overwritten
 * with a TIC into the new chain so the channel flows straight into it.
 * The if/else structure around the two ending sets is partly elided in
 * this fragment — presumably selected by p_end->read1; confirm against
 * full source. */
1039 add_claw_reads(struct net_device *dev, struct ccwbk* p_first,
1040 struct ccwbk* p_last)
1042 struct claw_privbk *privptr;
1043 struct ccw1 temp_ccw;
1044 struct endccw * p_end;
1045 CLAW_DBF_TEXT(4, trace, "addreads");
1046 privptr = dev->ml_priv;
1047 p_end = privptr->p_end_ccw;
1049 /* first CCW and last CCW contains a new set of read channel programs
1050 * to apend the running channel programs
1052 if ( p_first==NULL) {
1053 CLAW_DBF_TEXT(4, trace, "addexit");
1057 /* set up ending CCW sequence for this segment */
1059 p_end->read1=0x00; /* second ending CCW is now active */
1060 /* reset ending CCWs and setup TIC CCWs */
1061 p_end->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1062 p_end->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1063 p_last->r_TIC_1.cda =(__u32)__pa(&p_end->read2_nop1);
1064 p_last->r_TIC_2.cda =(__u32)__pa(&p_end->read2_nop1);
1065 p_end->read2_nop2.cda=0;
1066 p_end->read2_nop2.count=1;
1069 p_end->read1=0x01; /* first ending CCW is now active */
1070 /* reset ending CCWs and setup TIC CCWs */
1071 p_end->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1072 p_end->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1073 p_last->r_TIC_1.cda = (__u32)__pa(&p_end->read1_nop1);
1074 p_last->r_TIC_2.cda = (__u32)__pa(&p_end->read1_nop1);
1075 p_end->read1_nop2.cda=0;
1076 p_end->read1_nop2.count=1;
/* No chain active yet: this segment simply becomes the active chain. */
1079 if ( privptr-> p_read_active_first ==NULL ) {
1080 privptr->p_read_active_first = p_first; /* set new first */
1081 privptr->p_read_active_last = p_last; /* set new last */
1085 /* set up TIC ccw */
1086 temp_ccw.cda= (__u32)__pa(&p_first->read);
1089 temp_ccw.cmd_code = CCW_CLAW_CMD_TIC;
1094 /* first set of CCW's is chained to the new read */
1095 /* chain, so the second set is chained to the active chain. */
1096 /* Therefore modify the second set to point to the new */
1097 /* read chain set up TIC CCWs */
1098 /* make sure we update the CCW so channel doesn't fetch it */
1099 /* when it's only half done */
1100 memcpy( &p_end->read2_nop2, &temp_ccw ,
1101 sizeof(struct ccw1));
1102 privptr->p_read_active_last->r_TIC_1.cda=
1103 (__u32)__pa(&p_first->read);
1104 privptr->p_read_active_last->r_TIC_2.cda=
1105 (__u32)__pa(&p_first->read);
1108 /* make sure we update the CCW so channel doesn't */
1109 /* fetch it when it is only half done */
1110 memcpy( &p_end->read1_nop2, &temp_ccw ,
1111 sizeof(struct ccw1));
1112 privptr->p_read_active_last->r_TIC_1.cda=
1113 (__u32)__pa(&p_first->read);
1114 privptr->p_read_active_last->r_TIC_2.cda=
1115 (__u32)__pa(&p_first->read);
1117 /* chain in new set of blocks */
1118 privptr->p_read_active_last->next = p_first;
1119 privptr->p_read_active_last=p_last;
1120 } /* end of if ( privptr-> p_read_active_first ==NULL) */
1121 CLAW_DBF_TEXT(4, trace, "addexit");
1123 } /* end of add_claw_reads */
1125 /*-------------------------------------------------------------------*
1126 * ccw_check_return_code *
1128 *-------------------------------------------------------------------*/
/* Translate a non-zero return code from ccw_device_start/halt into an
 * operator-visible error message. -EBUSY is transient and ignored; the
 * remaining cases (switch labels partly elided here) report adapter
 * not-available / status-pending / generic CIO errors. */
1131 ccw_check_return_code(struct ccw_device *cdev, int return_code)
1133 CLAW_DBF_TEXT(4, trace, "ccwret");
1134 if (return_code != 0) {
1135 switch (return_code) {
1136 case -EBUSY: /* BUSY is a transient state no action needed */
1139 dev_err(&cdev->dev, "The remote channel adapter is not"
1144 "The status of the remote channel adapter"
1148 dev_err(&cdev->dev, "The common device layer"
1149 " returned error code %d\n",
1153 CLAW_DBF_TEXT(4, trace, "ccwret");
1154 } /* end of ccw_check_return_code */
1156 /*-------------------------------------------------------------------*
1157 * ccw_check_unit_check *
1158 *--------------------------------------------------------------------*/
/*
 * Decode the sense byte of a unit check and warn accordingly.
 *
 * p_ch  - channel block; supplies the net_device and the ccw device used
 *         for the dev_warn() messages
 * sense - sense byte delivered with the unit check; individual bits are
 *         tested below (0x40 adapter reset, 0x20 data streaming timeout,
 *         0x10 parity-related conditions)
 *
 * NOTE(review): lines are missing from this extract (the leading
 * SNS0_...-style condition tests and some closing braces); the two
 * visible `sense & 0x10` branches sit in different (elided) outer
 * conditions in the full source — do not treat this listing as the
 * complete decision tree.
 */
1161 ccw_check_unit_check(struct chbk * p_ch, unsigned char sense )
1163 struct net_device *ndev = p_ch->ndev;
1164 struct device *dev = &p_ch->cdev->dev;
1166 CLAW_DBF_TEXT(4, trace, "unitchek");
1167 dev_warn(dev, "The communication peer of %s disconnected\n",
1172 dev_warn(dev, "The remote channel adapter for"
1173 " %s has been reset\n",
1176 } else if (sense & 0x20) {
1178 dev_warn(dev, "A data streaming timeout occurred"
1181 } else if (sense & 0x10) {
1182 dev_warn(dev, "The remote channel adapter for %s"
1186 dev_warn(dev, "A data transfer parity error occurred"
1190 } else if (sense & 0x10) {
1191 dev_warn(dev, "A read data parity error occurred"
1196 } /* end of ccw_check_unit_check */
1198 /*-------------------------------------------------------------------*
1200 *--------------------------------------------------------------------*/
/*
 * Validate the host/workstation application names of a connection
 * request against what this device expects for its packing mode.
 *
 * dev       - net device whose private claw_env supplies the expected
 *             names (p_env->api_type, packing mode)
 * host_name - 8-byte host application name from the peer
 * ws_name   - 8-byte workstation application name from the peer
 *
 * Per packing mode the expected pair is: PACKED -> WS_APPL_NAME_PACKED
 * for both; IP name mode -> WS_APPL_NAME_IP_NAME for both; default ->
 * HOST_APPL_NAME / p_env->api_type.  All comparisons are fixed 8-byte
 * memcmp()s (names are space-padded, not NUL-terminated).
 *
 * NOTE(review): the case labels, return statements and the final `rc`
 * handling are among the lines missing from this extract; the return
 * value convention (0 = match, negative = mismatch in the full driver)
 * cannot be confirmed from what is visible here.
 */
1202 find_link(struct net_device *dev, char *host_name, char *ws_name )
1204 struct claw_privbk *privptr;
1205 struct claw_env *p_env;
1208 CLAW_DBF_TEXT(2, setup, "findlink");
1209 privptr = dev->ml_priv;
1210 p_env=privptr->p_env;
1211 switch (p_env->packing)
1214 if ((memcmp(WS_APPL_NAME_PACKED, host_name, 8)!=0) ||
1215 (memcmp(WS_APPL_NAME_PACKED, ws_name, 8)!=0 ))
1220 if ((memcmp(WS_APPL_NAME_IP_NAME, host_name, 8)!=0) ||
1221 (memcmp(WS_APPL_NAME_IP_NAME, ws_name, 8)!=0 ))
1225 if ((memcmp(HOST_APPL_NAME, host_name, 8)!=0) ||
1226 (memcmp(p_env->api_type , ws_name, 8)!=0))
1232 } /* end of find_link */
1234 /*-------------------------------------------------------------------*
1238 *-------------------------------------------------------------------*/
/*
 * Low-level transmit: copy an skb into one or more free write-CCW
 * buffers, chain the new CCWs onto the active write channel program
 * (via TIC CCWs so the channel follows the chain without restarting),
 * and kick the output channel.
 *
 * skb    - packet to send; freed here with dev_kfree_skb_any() once its
 *          data has been copied into the CCW buffers
 * dev    - net device owning the CCW chains (privptr in dev->ml_priv)
 * linkid - CLAW link number; encoded into the write CCW command code
 *          as (linkid * 8) + 1, with MORE_to_COME_FLAG added on every
 *          buffer except the last
 *
 * When no buffer/CCW is available the skb is parked on the write
 * channel's collect_queue (with a ref via atomic_inc(&skb->users)) to
 * be sent later, and the device is marked busy.
 *
 * NOTE(review): many lines are missing from this extract (returns,
 * else-branches, closing braces, the declarations of ch/numBuffers/rc,
 * and the p_first_ccw/p_last_ccw initializers); the visible lines do
 * not show complete control flow.
 */
1241 claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
1244 struct claw_privbk *privptr;
1245 struct ccwbk *p_this_ccw;
1246 struct ccwbk *p_first_ccw;
1247 struct ccwbk *p_last_ccw;
1249 signed long len_of_data;
1250 unsigned long bytesInThisBuffer;
1251 unsigned char *pDataAddress;
1252 struct endccw *pEnd;
1253 struct ccw1 tempCCW;
1254 struct claw_env *p_env;
1255 struct clawph *pk_head;
1258 CLAW_DBF_TEXT(4, trace, "hw_tx");
1259 privptr = (struct claw_privbk *)(dev->ml_priv);
1260 p_env =privptr->p_env;
1261 claw_free_wrt_buf(dev); /* Clean up free chain if posible */
1262 /* scan the write queue to free any completed write packets */
/* If packing is active and this skb is not already packed (cb[1] != 'P'),
 * prepend a clawph packing header and round the payload length up to a
 * multiple of 4. */
1265 if ((p_env->packing >= PACK_SEND) &&
1266 (skb->cb[1] != 'P')) {
1267 skb_push(skb,sizeof(struct clawph));
1268 pk_head=(struct clawph *)skb->data;
1269 pk_head->len=skb->len-sizeof(struct clawph);
1270 if (pk_head->len%4) {
/* NOTE(review): pk_head->len is rounded up first, then the same
 * expression 4-(pk_head->len%4) is reused for skb_pad/skb_put —
 * after the round-up that remainder is 0, so the pad amount looks
 * inconsistent with the adjusted length.  Verify against the full
 * source before changing. */
1271 pk_head->len+= 4-(pk_head->len%4);
1272 skb_pad(skb,4-(pk_head->len%4));
1273 skb_put(skb,4-(pk_head->len%4));
1275 if (p_env->packing == DO_PACKED)
1276 pk_head->link_num = linkid;
1278 pk_head->link_num = 0;
1279 pk_head->flag = 0x00;
/* Device already marked busy: try to make progress, else queue the skb. */
1284 if (claw_check_busy(dev)) {
1285 if (privptr->write_free_count!=0) {
1286 claw_clear_busy(dev);
1289 claw_strt_out_IO(dev );
1290 claw_free_wrt_buf( dev );
1291 if (privptr->write_free_count==0) {
1292 ch = &privptr->channel[WRITE_CHANNEL];
1293 atomic_inc(&skb->users);
1294 skb_queue_tail(&ch->collect_queue, skb);
1298 claw_clear_busy(dev);
/* Another transmit is in flight: queue this skb for later. */
1303 if (claw_test_and_setbit_busy(TB_TX,dev)) { /* set to busy */
1304 ch = &privptr->channel[WRITE_CHANNEL];
1305 atomic_inc(&skb->users);
1306 skb_queue_tail(&ch->collect_queue, skb);
1307 claw_strt_out_IO(dev );
1312 /* See how many write buffers are required to hold this data */
1313 numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
1315 /* If that number of buffers isn't available, give up for now */
1316 if (privptr->write_free_count < numBuffers ||
1317 privptr->p_write_free_chain == NULL ) {
1319 claw_setbit_busy(TB_NOBUFFER,dev);
1320 ch = &privptr->channel[WRITE_CHANNEL];
1321 atomic_inc(&skb->users);
1322 skb_queue_tail(&ch->collect_queue, skb);
1323 CLAW_DBF_TEXT(2, trace, "clawbusy");
/* Copy the skb payload into write buffers, write_size bytes at a time,
 * building a CCW chain linked by TIC CCWs as we go. */
1326 pDataAddress=skb->data;
1327 len_of_data=skb->len;
1329 while (len_of_data > 0) {
1330 p_this_ccw=privptr->p_write_free_chain; /* get a block */
1331 if (p_this_ccw == NULL) { /* lost the race */
1332 ch = &privptr->channel[WRITE_CHANNEL];
1333 atomic_inc(&skb->users);
1334 skb_queue_tail(&ch->collect_queue, skb);
1337 privptr->p_write_free_chain=p_this_ccw->next;
1338 p_this_ccw->next=NULL;
1339 --privptr->write_free_count; /* -1 */
1340 if (len_of_data >= privptr->p_env->write_size)
1341 bytesInThisBuffer = privptr->p_env->write_size;
1343 bytesInThisBuffer = len_of_data;
1344 memcpy( p_this_ccw->p_buffer,pDataAddress, bytesInThisBuffer);
1345 len_of_data-=bytesInThisBuffer;
1346 pDataAddress+=(unsigned long)bytesInThisBuffer;
1347 /* setup write CCW */
1348 p_this_ccw->write.cmd_code = (linkid * 8) +1;
1349 if (len_of_data>0) {
1350 p_this_ccw->write.cmd_code+=MORE_to_COME_FLAG;
1352 p_this_ccw->write.count=bytesInThisBuffer;
1353 /* now add to end of this chain */
1354 if (p_first_ccw==NULL) {
1355 p_first_ccw=p_this_ccw;
1357 if (p_last_ccw!=NULL) {
1358 p_last_ccw->next=p_this_ccw;
1359 /* set up TIC ccws */
1360 p_last_ccw->w_TIC_1.cda=
1361 (__u32)__pa(&p_this_ccw->write);
1363 p_last_ccw=p_this_ccw; /* save new last block */
1366 /* FirstCCW and LastCCW now contain a new set of write channel
1367 * programs to append to the running channel program
/* Terminate the new chain with one of the two alternating "end CCW"
 * sequences (write1/write2), then splice it onto the active program. */
1370 if (p_first_ccw!=NULL) {
1371 /* setup ending ccw sequence for this segment */
1372 pEnd=privptr->p_end_ccw;
1374 pEnd->write1=0x00; /* second end ccw is now active */
1375 /* set up Tic CCWs */
1376 p_last_ccw->w_TIC_1.cda=
1377 (__u32)__pa(&pEnd->write2_nop1);
1378 pEnd->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1379 pEnd->write2_nop2.flags =
1380 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1381 pEnd->write2_nop2.cda=0;
1382 pEnd->write2_nop2.count=1;
1384 else { /* end of if (pEnd->write1)*/
1385 pEnd->write1=0x01; /* first end ccw is now active */
1386 /* set up Tic CCWs */
1387 p_last_ccw->w_TIC_1.cda=
1388 (__u32)__pa(&pEnd->write1_nop1);
1389 pEnd->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1390 pEnd->write1_nop2.flags =
1391 CCW_FLAG_SLI | CCW_FLAG_SKIP;
1392 pEnd->write1_nop2.cda=0;
1393 pEnd->write1_nop2.count=1;
1394 } /* end if if (pEnd->write1) */
1396 if (privptr->p_write_active_first==NULL ) {
1397 privptr->p_write_active_first=p_first_ccw;
1398 privptr->p_write_active_last=p_last_ccw;
1401 /* set up Tic CCWs */
1403 tempCCW.cda=(__u32)__pa(&p_first_ccw->write);
1406 tempCCW.cmd_code=CCW_CLAW_CMD_TIC;
1411 * first set of ending CCW's is chained to the new write
1412 * chain, so the second set is chained to the active chain
1413 * Therefore modify the second set to point the new write chain.
1414 * make sure we update the CCW atomically
1415 * so channel does not fetch it when it's only half done
1417 memcpy( &pEnd->write2_nop2, &tempCCW ,
1418 sizeof(struct ccw1));
1419 privptr->p_write_active_last->w_TIC_1.cda=
1420 (__u32)__pa(&p_first_ccw->write);
1424 /*make sure we update the CCW atomically
1425 *so channel does not fetch it when it's only half done
1427 memcpy(&pEnd->write1_nop2, &tempCCW ,
1428 sizeof(struct ccw1));
1429 privptr->p_write_active_last->w_TIC_1.cda=
1430 (__u32)__pa(&p_first_ccw->write);
1432 } /* end if if (pEnd->write1) */
1434 privptr->p_write_active_last->next=p_first_ccw;
1435 privptr->p_write_active_last=p_last_ccw;
1438 } /* endif (p_first_ccw!=NULL) */
1439 dev_kfree_skb_any(skb);
1440 claw_strt_out_IO(dev );
1441 /* if write free count is zero , set NOBUFFER */
1442 if (privptr->write_free_count==0) {
1443 claw_setbit_busy(TB_NOBUFFER,dev);
1446 claw_clearbit_busy(TB_TX,dev);
1449 } /* end of claw_hw_tx */
1451 /*-------------------------------------------------------------------*
1455 *--------------------------------------------------------------------*/
/*
 * Allocate and initialize all CCW blocks and data buffers for one CLAW
 * device: the shared ending-CCW block, a free chain of ccwbk blocks,
 * the ClawSignalBlock, the write buffer pool (free chain) and the read
 * buffer pool (circular read CCW chain handed to add_claw_reads()).
 *
 * dev - net device; its ml_priv (claw_privbk) receives all pointers.
 *
 * Buffer pages are allocated with __GFP_DMA because the channel
 * subsystem addresses them with 31-bit (__u32) real addresses — note
 * the (__u32)__pa(...) casts throughout.
 *
 * NOTE(review): this extract is missing many lines (returns on
 * allocation failure, loop-body pieces, several closing braces and the
 * declarations of i/j/p_buf/p_buff); treat the listing as incomplete.
 */
1458 init_ccw_bk(struct net_device *dev)
1461 __u32 ccw_blocks_required;
1462 __u32 ccw_blocks_perpage;
1463 __u32 ccw_pages_required;
1464 __u32 claw_reads_perpage=1;
1465 __u32 claw_read_pages;
1466 __u32 claw_writes_perpage=1;
1467 __u32 claw_write_pages;
1469 struct ccwbk*p_free_chain;
1471 struct ccwbk*p_last_CCWB;
1472 struct ccwbk*p_first_CCWB;
1473 struct endccw *p_endccw=NULL;
1474 addr_t real_address;
1475 struct claw_privbk *privptr = dev->ml_priv;
1476 struct clawh *pClawH=NULL;
1477 addr_t real_TIC_address;
1479 CLAW_DBF_TEXT(4, trace, "init_ccw");
1481 /* initialize statistics field */
1482 privptr->active_link_ID=0;
1483 /* initialize ccwbk pointers */
1484 privptr->p_write_free_chain=NULL; /* pointer to free ccw chain*/
1485 privptr->p_write_active_first=NULL; /* pointer to the first write ccw*/
1486 privptr->p_write_active_last=NULL; /* pointer to the last write ccw*/
1487 privptr->p_read_active_first=NULL; /* pointer to the first read ccw*/
1488 privptr->p_read_active_last=NULL; /* pointer to the last read ccw */
1489 privptr->p_end_ccw=NULL; /* pointer to ending ccw */
1490 privptr->p_claw_signal_blk=NULL; /* pointer to signal block */
1491 privptr->buffs_alloc = 0;
1492 memset(&privptr->end_ccw, 0x00, sizeof(struct endccw));
1493 memset(&privptr->ctl_bk, 0x00, sizeof(struct clawctl));
1494 /* initialize free write ccwbk counter */
1495 privptr->write_free_count=0; /* number of free bufs on write chain */
1499 * We need 1 CCW block for each read buffer, 1 for each
1500 * write buffer, plus 1 for ClawSignalBlock
1502 ccw_blocks_required =
1503 privptr->p_env->read_buffers+privptr->p_env->write_buffers+1;
1505 * compute number of CCW blocks that will fit in a page
1507 ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
1509 DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
1512 * read and write sizes are set by 2 constants in claw.h
1513 * 4k and 32k. Unpacked values other than 4k are not going to
1514 * provide good performance. With packing buffers support 32k
/* Page accounting: sub-page buffers share pages; >= page-size buffers
 * get whole multi-page allocations, one per buffer. */
1517 if (privptr->p_env->read_size < PAGE_SIZE) {
1518 claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
1519 claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
1520 claw_reads_perpage);
1522 else { /* > or equal */
1523 privptr->p_buff_pages_perread =
1524 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1525 claw_read_pages = privptr->p_env->read_buffers *
1526 privptr->p_buff_pages_perread;
1528 if (privptr->p_env->write_size < PAGE_SIZE) {
1529 claw_writes_perpage =
1530 PAGE_SIZE / privptr->p_env->write_size;
1531 claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
1532 claw_writes_perpage);
1535 else { /* > or equal */
/* NOTE(review): per-WRITE page count is computed from read_size —
 * this looks like a copy/paste slip (compare the read branch above);
 * harmless only when read_size == write_size.  Confirm before fixing. */
1536 privptr->p_buff_pages_perwrite =
1537 DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
1538 claw_write_pages = privptr->p_env->write_buffers *
1539 privptr->p_buff_pages_perwrite;
1542 * allocate ccw_pages_required
1544 if (privptr->p_buff_ccw==NULL) {
1545 privptr->p_buff_ccw=
1546 (void *)__get_free_pages(__GFP_DMA,
1547 (int)pages_to_order_of_mag(ccw_pages_required ));
1548 if (privptr->p_buff_ccw==NULL) {
1551 privptr->p_buff_ccw_num=ccw_pages_required;
1553 memset(privptr->p_buff_ccw, 0x00,
1554 privptr->p_buff_ccw_num * PAGE_SIZE);
1557 * obtain ending ccw block address
1560 privptr->p_end_ccw = (struct endccw *)&privptr->end_ccw;
1561 real_address = (__u32)__pa(privptr->p_end_ccw);
1562 /* Initialize ending CCW block */
1563 p_endccw=privptr->p_end_ccw;
1564 p_endccw->real=real_address;
1565 p_endccw->write1=0x00;
1566 p_endccw->read1=0x00;
/* Each of the four end sequences (write1/write2/read1/read2) is a NOP
 * with command chaining followed by a READFF with skip — the idle tail
 * the channel spins on until a new chain is spliced in via TIC. */
1569 p_endccw->write1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1570 p_endccw->write1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1571 p_endccw->write1_nop1.count = 1;
1572 p_endccw->write1_nop1.cda = 0;
1575 p_endccw->write1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1576 p_endccw->write1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1577 p_endccw->write1_nop2.count = 1;
1578 p_endccw->write1_nop2.cda = 0;
1581 p_endccw->write2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1582 p_endccw->write2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1583 p_endccw->write2_nop1.count = 1;
1584 p_endccw->write2_nop1.cda = 0;
1587 p_endccw->write2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1588 p_endccw->write2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1589 p_endccw->write2_nop2.count = 1;
1590 p_endccw->write2_nop2.cda = 0;
1593 p_endccw->read1_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1594 p_endccw->read1_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1595 p_endccw->read1_nop1.count = 1;
1596 p_endccw->read1_nop1.cda = 0;
1599 p_endccw->read1_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1600 p_endccw->read1_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1601 p_endccw->read1_nop2.count = 1;
1602 p_endccw->read1_nop2.cda = 0;
1605 p_endccw->read2_nop1.cmd_code = CCW_CLAW_CMD_NOP;
1606 p_endccw->read2_nop1.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1607 p_endccw->read2_nop1.count = 1;
1608 p_endccw->read2_nop1.cda = 0;
1611 p_endccw->read2_nop2.cmd_code = CCW_CLAW_CMD_READFF;
1612 p_endccw->read2_nop2.flags = CCW_FLAG_SLI | CCW_FLAG_SKIP;
1613 p_endccw->read2_nop2.count = 1;
1614 p_endccw->read2_nop2.cda = 0;
1617 * Build a chain of CCWs
/* Carve the ccw pages into ccwbk blocks and push each onto p_free_chain. */
1620 p_buff=privptr->p_buff_ccw;
1623 for (i=0 ; i < ccw_pages_required; i++ ) {
1624 real_address = (__u32)__pa(p_buff);
1626 for (j=0 ; j < ccw_blocks_perpage ; j++) {
1627 p_buf->next = p_free_chain;
1628 p_free_chain = p_buf;
1629 p_buf->real=(__u32)__pa(p_buf);
1635 * Initialize ClawSignalBlock
/* First free block becomes the ClawSignalBlock, pre-marked CLAW_BUSY. */
1638 if (privptr->p_claw_signal_blk==NULL) {
1639 privptr->p_claw_signal_blk=p_free_chain;
1640 p_free_chain=p_free_chain->next;
1641 pClawH=(struct clawh *)privptr->p_claw_signal_blk;
1642 pClawH->length=0xffff;
1643 pClawH->opcode=0xff;
1644 pClawH->flag=CLAW_BUSY;
1648 * allocate write_pages_required and add to free chain
1650 if (privptr->p_buff_write==NULL) {
1651 if (privptr->p_env->write_size < PAGE_SIZE) {
1652 privptr->p_buff_write=
1653 (void *)__get_free_pages(__GFP_DMA,
1654 (int)pages_to_order_of_mag(claw_write_pages ));
1655 if (privptr->p_buff_write==NULL) {
1656 privptr->p_buff_ccw=NULL;
1660 * Build CLAW write free chain
/* NOTE(review): the memset below is sized by ccw_pages_required, not
 * claw_write_pages — looks like another slip in the original; verify
 * against the allocation size before relying on it. */
1664 memset(privptr->p_buff_write, 0x00,
1665 ccw_pages_required * PAGE_SIZE);
1666 privptr->p_write_free_chain=NULL;
1668 p_buff=privptr->p_buff_write;
1670 for (i=0 ; i< privptr->p_env->write_buffers ; i++) {
1671 p_buf = p_free_chain; /* get a CCW */
1672 p_free_chain = p_buf->next;
1673 p_buf->next =privptr->p_write_free_chain;
1674 privptr->p_write_free_chain = p_buf;
1675 p_buf-> p_buffer = (struct clawbuf *)p_buff;
1676 p_buf-> write.cda = (__u32)__pa(p_buff);
1677 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1678 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1679 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1680 p_buf-> w_read_FF.count = 1;
1681 p_buf-> w_read_FF.cda =
1682 (__u32)__pa(&p_buf-> header.flag);
1683 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1684 p_buf-> w_TIC_1.flags = 0;
1685 p_buf-> w_TIC_1.count = 0;
/* Advance p_buff only while the next buffer still fits in this page. */
1687 if (((unsigned long)p_buff +
1688 privptr->p_env->write_size) >=
1689 ((unsigned long)(p_buff+2*
1690 (privptr->p_env->write_size) - 1) & PAGE_MASK)) {
1691 p_buff = p_buff+privptr->p_env->write_size;
1695 else /* Buffers are => PAGE_SIZE. 1 buff per get_free_pages */
1697 privptr->p_write_free_chain=NULL;
1698 for (i = 0; i< privptr->p_env->write_buffers ; i++) {
1699 p_buff=(void *)__get_free_pages(__GFP_DMA,
1700 (int)pages_to_order_of_mag(
1701 privptr->p_buff_pages_perwrite) );
/* On failure, unwind: free the ccw pages and every write buffer
 * allocated so far. */
1703 free_pages((unsigned long)privptr->p_buff_ccw,
1704 (int)pages_to_order_of_mag(
1705 privptr->p_buff_ccw_num));
1706 privptr->p_buff_ccw=NULL;
1707 p_buf=privptr->p_buff_write;
1708 while (p_buf!=NULL) {
1709 free_pages((unsigned long)
1711 (int)pages_to_order_of_mag(
1712 privptr->p_buff_pages_perwrite));
1716 } /* Error on get_pages */
1717 memset(p_buff, 0x00, privptr->p_env->write_size );
1718 p_buf = p_free_chain;
1719 p_free_chain = p_buf->next;
1720 p_buf->next = privptr->p_write_free_chain;
1721 privptr->p_write_free_chain = p_buf;
1722 privptr->p_buff_write = p_buf;
1723 p_buf->p_buffer=(struct clawbuf *)p_buff;
1724 p_buf-> write.cda = (__u32)__pa(p_buff);
1725 p_buf-> write.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1726 p_buf-> w_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1727 p_buf-> w_read_FF.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1728 p_buf-> w_read_FF.count = 1;
1729 p_buf-> w_read_FF.cda =
1730 (__u32)__pa(&p_buf-> header.flag);
1731 p_buf-> w_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1732 p_buf-> w_TIC_1.flags = 0;
1733 p_buf-> w_TIC_1.count = 0;
1734 } /* for all write_buffers */
1736 } /* else buffers are PAGE_SIZE or bigger */
1739 privptr->p_buff_write_num=claw_write_pages;
1740 privptr->write_free_count=privptr->p_env->write_buffers;
1744 * allocate read_pages_required and chain to free chain
1746 if (privptr->p_buff_read==NULL) {
1747 if (privptr->p_env->read_size < PAGE_SIZE) {
1748 privptr->p_buff_read=
1749 (void *)__get_free_pages(__GFP_DMA,
1750 (int)pages_to_order_of_mag(claw_read_pages) );
1751 if (privptr->p_buff_read==NULL) {
1752 free_pages((unsigned long)privptr->p_buff_ccw,
1753 (int)pages_to_order_of_mag(
1754 privptr->p_buff_ccw_num));
1755 /* free the write pages size is < page size */
1756 free_pages((unsigned long)privptr->p_buff_write,
1757 (int)pages_to_order_of_mag(
1758 privptr->p_buff_write_num));
1759 privptr->p_buff_ccw=NULL;
1760 privptr->p_buff_write=NULL;
1763 memset(privptr->p_buff_read, 0x00, claw_read_pages * PAGE_SIZE);
1764 privptr->p_buff_read_num=claw_read_pages;
1766 * Build CLAW read free chain
/* Build the circular read CCW chain: each block's read CCW is followed
 * by read-header / signal / TIC CCWs, with the last TIC looping back to
 * the first block's read CCW. */
1769 p_buff=privptr->p_buff_read;
1770 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1771 p_buf = p_free_chain;
1772 p_free_chain = p_buf->next;
1774 if (p_last_CCWB==NULL) {
1780 p_buf->next=p_first_CCWB;
1782 (__u32)__pa(&p_first_CCWB -> read );
1787 p_buf->p_buffer=(struct clawbuf *)p_buff;
1788 /* initialize read command */
1789 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1790 p_buf-> read.cda = (__u32)__pa(p_buff);
1791 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1792 p_buf-> read.count = privptr->p_env->read_size;
1794 /* initialize read_h command */
1795 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1796 p_buf-> read_h.cda =
1797 (__u32)__pa(&(p_buf->header));
1798 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1799 p_buf-> read_h.count = sizeof(struct clawh);
1801 /* initialize Signal command */
1802 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1803 p_buf-> signal.cda =
1804 (__u32)__pa(&(pClawH->flag));
1805 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1806 p_buf-> signal.count = 1;
1808 /* initialize r_TIC_1 command */
1809 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1810 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1811 p_buf-> r_TIC_1.flags = 0;
1812 p_buf-> r_TIC_1.count = 0;
1814 /* initialize r_read_FF command */
1815 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1816 p_buf-> r_read_FF.cda =
1817 (__u32)__pa(&(pClawH->flag));
1818 p_buf-> r_read_FF.flags =
1819 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1820 p_buf-> r_read_FF.count = 1;
1822 /* initialize r_TIC_2 */
1823 memcpy(&p_buf->r_TIC_2,
1824 &p_buf->r_TIC_1, sizeof(struct ccw1));
1826 /* initialize Header */
1827 p_buf->header.length=0xffff;
1828 p_buf->header.opcode=0xff;
1829 p_buf->header.flag=CLAW_PENDING;
1831 if (((unsigned long)p_buff+privptr->p_env->read_size) >=
1832 ((unsigned long)(p_buff+2*(privptr->p_env->read_size)
1835 p_buff= p_buff+privptr->p_env->read_size;
1839 (void *)((unsigned long)
1840 (p_buff+2*(privptr->p_env->read_size)-1)
1843 } /* for read_buffers */
1844 } /* read_size < PAGE_SIZE */
1845 else { /* read Size >= PAGE_SIZE */
1846 for (i=0 ; i< privptr->p_env->read_buffers ; i++) {
1847 p_buff = (void *)__get_free_pages(__GFP_DMA,
1848 (int)pages_to_order_of_mag(
1849 privptr->p_buff_pages_perread));
/* On failure, unwind ccw pages, write buffers and any read buffers
 * already allocated. */
1851 free_pages((unsigned long)privptr->p_buff_ccw,
1852 (int)pages_to_order_of_mag(privptr->
1854 /* free the write pages */
1855 p_buf=privptr->p_buff_write;
1856 while (p_buf!=NULL) {
1858 (unsigned long)p_buf->p_buffer,
1859 (int)pages_to_order_of_mag(
1860 privptr->p_buff_pages_perwrite));
1863 /* free any read pages already alloc */
1864 p_buf=privptr->p_buff_read;
1865 while (p_buf!=NULL) {
1867 (unsigned long)p_buf->p_buffer,
1868 (int)pages_to_order_of_mag(
1869 privptr->p_buff_pages_perread));
1872 privptr->p_buff_ccw=NULL;
1873 privptr->p_buff_write=NULL;
1876 memset(p_buff, 0x00, privptr->p_env->read_size);
1877 p_buf = p_free_chain;
1878 privptr->p_buff_read = p_buf;
1879 p_free_chain = p_buf->next;
1881 if (p_last_CCWB==NULL) {
1887 p_buf->next=p_first_CCWB;
1890 &p_first_CCWB -> read );
1894 /* save buff address */
1895 p_buf->p_buffer=(struct clawbuf *)p_buff;
1896 /* initialize read command */
1897 p_buf-> read.cmd_code = CCW_CLAW_CMD_READ;
1898 p_buf-> read.cda = (__u32)__pa(p_buff);
1899 p_buf-> read.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1900 p_buf-> read.count = privptr->p_env->read_size;
1902 /* initialize read_h command */
1903 p_buf-> read_h.cmd_code = CCW_CLAW_CMD_READHEADER;
1904 p_buf-> read_h.cda =
1905 (__u32)__pa(&(p_buf->header));
1906 p_buf-> read_h.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1907 p_buf-> read_h.count = sizeof(struct clawh);
1909 /* initialize Signal command */
1910 p_buf-> signal.cmd_code = CCW_CLAW_CMD_SIGNAL_SMOD;
1911 p_buf-> signal.cda =
1912 (__u32)__pa(&(pClawH->flag));
1913 p_buf-> signal.flags = CCW_FLAG_SLI | CCW_FLAG_CC;
1914 p_buf-> signal.count = 1;
1916 /* initialize r_TIC_1 command */
1917 p_buf-> r_TIC_1.cmd_code = CCW_CLAW_CMD_TIC;
1918 p_buf-> r_TIC_1.cda = (__u32)real_TIC_address;
1919 p_buf-> r_TIC_1.flags = 0;
1920 p_buf-> r_TIC_1.count = 0;
1922 /* initialize r_read_FF command */
1923 p_buf-> r_read_FF.cmd_code = CCW_CLAW_CMD_READFF;
1924 p_buf-> r_read_FF.cda =
1925 (__u32)__pa(&(pClawH->flag));
1926 p_buf-> r_read_FF.flags =
1927 CCW_FLAG_SLI | CCW_FLAG_CC | CCW_FLAG_PCI;
1928 p_buf-> r_read_FF.count = 1;
1930 /* initialize r_TIC_2 */
1931 memcpy(&p_buf->r_TIC_2, &p_buf->r_TIC_1,
1932 sizeof(struct ccw1));
1934 /* initialize Header */
1935 p_buf->header.length=0xffff;
1936 p_buf->header.opcode=0xff;
1937 p_buf->header.flag=CLAW_PENDING;
1939 } /* For read_buffers */
1940 } /* read_size >= PAGE_SIZE */
1941 } /* pBuffread = NULL */
1942 add_claw_reads( dev ,p_first_CCWB , p_last_CCWB);
1943 privptr->buffs_alloc = 1;
1946 } /* end of init_ccw_bk */
1948 /*-------------------------------------------------------------------*
1952 *--------------------------------------------------------------------*/
/*
 * Clean up the per-group private data after a failed probe: detach it
 * from the ccwgroup device and free the sub-allocations it owns.
 *
 * cgdev - the ccwgroup device whose drvdata holds the claw_privbk.
 *
 * NOTE(review): lines are missing from this extract — freeing of
 * privptr itself (and any remaining members) is presumably on the
 * elided lines; confirm against the full file.
 */
1955 probe_error( struct ccwgroup_device *cgdev)
1957 struct claw_privbk *privptr;
1959 CLAW_DBF_TEXT(4, trace, "proberr");
1960 privptr = dev_get_drvdata(&cgdev->dev);
1961 if (privptr != NULL) {
1962 dev_set_drvdata(&cgdev->dev, NULL);
1963 kfree(privptr->p_env);
1964 kfree(privptr->p_mtc_envelope);
1969 /*-------------------------------------------------------------------*
1970 * claw_process_control *
1973 *--------------------------------------------------------------------*/
/*
 * Parse a received CLAW control packet and drive the connection state
 * machine: system validate request/response, connection request/
 * response/confirm, disconnect, and error reports from the peer.
 *
 * dev   - net device the packet arrived on
 * p_ccw - ccwbk whose p_buffer holds the raw control record; when
 *         packing is active the clawctl starts 4 bytes into the buffer
 *
 * Side effects: sends responses via claw_snd_* helpers, updates
 * privptr->active_link_ID / system_validate_comp and p_env->packing,
 * and wakes the write channel's wait queue when a link comes up.
 *
 * NOTE(review): many lines are missing from this extract (returns,
 * break statements, p_ctlbk initialization to &ctlbk, several case
 * labels and dev_warn continuations); the listing is not the complete
 * switch.
 */
1976 claw_process_control( struct net_device *dev, struct ccwbk * p_ccw)
1979 struct clawbuf *p_buf;
1980 struct clawctl ctlbk;
1981 struct clawctl *p_ctlbk;
1982 char temp_host_name[8];
1983 char temp_ws_name[8];
1984 struct claw_privbk *privptr;
1985 struct claw_env *p_env;
1986 struct sysval *p_sysval;
1987 struct conncmd *p_connect=NULL;
1989 struct chbk *p_ch = NULL;
1990 struct device *tdev;
1991 CLAW_DBF_TEXT(2, setup, "clw_cntl");
1992 udelay(1000); /* Wait a ms for the control packets to
1993 *catch up to each other */
1994 privptr = dev->ml_priv;
1995 p_env=privptr->p_env;
1996 tdev = &privptr->channel[READ_CHANNEL].cdev->dev;
1997 memcpy( &temp_host_name, p_env->host_name, 8);
1998 memcpy( &temp_ws_name, p_env->adapter_name , 8);
1999 dev_info(tdev, "%s: CLAW device %.8s: "
2000 "Received Control Packet\n",
2001 dev->name, temp_ws_name);
2002 if (privptr->release_pend==1) {
/* Copy the control record out of the channel buffer; with packing the
 * record is offset by the 4-byte packing header. */
2005 p_buf=p_ccw->p_buffer;
2007 if (p_env->packing == DO_PACKED) { /* packing in progress?*/
2008 memcpy(p_ctlbk, &p_buf->buffer[4], sizeof(struct clawctl));
2010 memcpy(p_ctlbk, p_buf, sizeof(struct clawctl));
2012 switch (p_ctlbk->command)
2014 case SYSTEM_VALIDATE_REQUEST:
2015 if (p_ctlbk->version != CLAW_VERSION_ID) {
2016 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2017 CLAW_RC_WRONG_VERSION);
2018 dev_warn(tdev, "The communication peer of %s"
2019 " uses an incorrect API version %d\n",
2020 dev->name, p_ctlbk->version);
2022 p_sysval = (struct sysval *)&(p_ctlbk->data);
2023 dev_info(tdev, "%s: Recv Sys Validate Request: "
2024 "Vers=%d,link_id=%d,Corr=%d,WS name=%.8s,"
2026 dev->name, p_ctlbk->version,
2028 p_ctlbk->correlator,
2030 p_sysval->host_name);
/* Names are validated both ways: peer's host name against our
 * configured host_name, peer's WS name against our adapter_name. */
2031 if (memcmp(temp_host_name, p_sysval->host_name, 8)) {
2032 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2033 CLAW_RC_NAME_MISMATCH);
2034 CLAW_DBF_TEXT(2, setup, "HSTBAD");
2035 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->host_name);
2036 CLAW_DBF_TEXT_(2, setup, "%s", temp_host_name);
2038 "Host name %s for %s does not match the"
2039 " remote adapter name %s\n",
2040 p_sysval->host_name,
2044 if (memcmp(temp_ws_name, p_sysval->WS_name, 8)) {
2045 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2046 CLAW_RC_NAME_MISMATCH);
2047 CLAW_DBF_TEXT(2, setup, "WSNBAD");
2048 CLAW_DBF_TEXT_(2, setup, "%s", p_sysval->WS_name);
2049 CLAW_DBF_TEXT_(2, setup, "%s", temp_ws_name);
2050 dev_warn(tdev, "Adapter name %s for %s does not match"
2051 " the remote host name %s\n",
/* Frame-size checks are skipped when packing is configured. */
2056 if ((p_sysval->write_frame_size < p_env->write_size) &&
2057 (p_env->packing == 0)) {
2058 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2059 CLAW_RC_HOST_RCV_TOO_SMALL);
2061 "The local write buffer is smaller than the"
2062 " remote read buffer\n");
2063 CLAW_DBF_TEXT(2, setup, "wrtszbad");
2065 if ((p_sysval->read_frame_size < p_env->read_size) &&
2066 (p_env->packing == 0)) {
2067 claw_snd_sys_validate_rsp(dev, p_ctlbk,
2068 CLAW_RC_HOST_RCV_TOO_SMALL);
2070 "The local read buffer is smaller than the"
2071 " remote write buffer\n");
2072 CLAW_DBF_TEXT(2, setup, "rdsizbad");
2074 claw_snd_sys_validate_rsp(dev, p_ctlbk, 0);
2076 "CLAW device %.8s: System validate"
2077 " completed.\n", temp_ws_name);
2079 "%s: sys Validate Rsize:%d Wsize:%d\n",
2080 dev->name, p_sysval->read_frame_size,
2081 p_sysval->write_frame_size);
2082 privptr->system_validate_comp = 1;
2083 if (strncmp(p_env->api_type, WS_APPL_NAME_PACKED, 6) == 0)
2084 p_env->packing = PACKING_ASK;
2085 claw_strt_conn_req(dev);
2087 case SYSTEM_VALIDATE_RESPONSE:
2088 p_sysval = (struct sysval *)&(p_ctlbk->data);
2090 "Settings for %s validated (version=%d, "
2091 "remote device=%d, rc=%d, adapter name=%.8s, "
2092 "host name=%.8s)\n",
2095 p_ctlbk->correlator,
2098 p_sysval->host_name);
2099 switch (p_ctlbk->rc) {
2101 dev_info(tdev, "%s: CLAW device "
2102 "%.8s: System validate completed.\n",
2103 dev->name, temp_ws_name);
2104 if (privptr->system_validate_comp == 0)
2105 claw_strt_conn_req(dev);
2106 privptr->system_validate_comp = 1;
2108 case CLAW_RC_NAME_MISMATCH:
2109 dev_warn(tdev, "Validating %s failed because of"
2110 " a host or adapter name mismatch\n",
2113 case CLAW_RC_WRONG_VERSION:
2114 dev_warn(tdev, "Validating %s failed because of a"
2115 " version conflict\n",
2118 case CLAW_RC_HOST_RCV_TOO_SMALL:
2119 dev_warn(tdev, "Validating %s failed because of a"
2120 " frame size conflict\n",
2124 dev_warn(tdev, "The communication peer of %s rejected"
2125 " the connection\n",
2131 case CONNECTION_REQUEST:
2132 p_connect = (struct conncmd *)&(p_ctlbk->data);
2133 dev_info(tdev, "%s: Recv Conn Req: Vers=%d,link_id=%d,"
2134 "Corr=%d,HOST appl=%.8s,WS appl=%.8s\n",
2138 p_ctlbk->correlator,
2139 p_connect->host_name,
2140 p_connect->WS_name);
/* Only a single link (link id 1) is supported; anything else is
 * answered with a disconnect. */
2141 if (privptr->active_link_ID != 0) {
2142 claw_snd_disc(dev, p_ctlbk);
2143 dev_info(tdev, "%s rejected a connection request"
2144 " because it is already active\n",
2147 if (p_ctlbk->linkid != 1) {
2148 claw_snd_disc(dev, p_ctlbk);
2149 dev_info(tdev, "%s rejected a request to open multiple"
2153 rc = find_link(dev, p_connect->host_name, p_connect->WS_name);
2155 claw_snd_disc(dev, p_ctlbk);
2156 dev_info(tdev, "%s rejected a connection request"
2157 " because of a type mismatch\n",
2160 claw_send_control(dev,
2161 CONNECTION_CONFIRM, p_ctlbk->linkid,
2162 p_ctlbk->correlator,
2163 0, p_connect->host_name,
2164 p_connect->WS_name);
2165 if (p_env->packing == PACKING_ASK) {
2166 p_env->packing = PACK_SEND;
2167 claw_snd_conn_req(dev, 0);
2169 dev_info(tdev, "%s: CLAW device %.8s: Connection "
2170 "completed link_id=%d.\n",
2171 dev->name, temp_ws_name,
2173 privptr->active_link_ID = p_ctlbk->linkid;
2174 p_ch = &privptr->channel[WRITE_CHANNEL];
2175 wake_up(&p_ch->wait); /* wake up claw_open ( WRITE) */
2177 case CONNECTION_RESPONSE:
2178 p_connect = (struct conncmd *)&(p_ctlbk->data);
2179 dev_info(tdev, "%s: Recv Conn Resp: Vers=%d,link_id=%d,"
2180 "Corr=%d,RC=%d,Host appl=%.8s, WS appl=%.8s\n",
2184 p_ctlbk->correlator,
2186 p_connect->host_name,
2187 p_connect->WS_name);
2189 if (p_ctlbk->rc != 0) {
2190 dev_warn(tdev, "The communication peer of %s rejected"
2191 " a connection request\n",
2196 p_connect->host_name, p_connect->WS_name);
2198 claw_snd_disc(dev, p_ctlbk);
2199 dev_warn(tdev, "The communication peer of %s"
2200 " rejected a connection "
2201 "request because of a type mismatch\n",
2204 /* should be until CONNECTION_CONFIRM */
/* Negative link id marks "response received, awaiting confirm". */
2205 privptr->active_link_ID = -(p_ctlbk->linkid);
2207 case CONNECTION_CONFIRM:
2208 p_connect = (struct conncmd *)&(p_ctlbk->data);
2210 "%s: Recv Conn Confirm:Vers=%d,link_id=%d,"
2211 "Corr=%d,Host appl=%.8s,WS appl=%.8s\n",
2215 p_ctlbk->correlator,
2216 p_connect->host_name,
2217 p_connect->WS_name);
2218 if (p_ctlbk->linkid == -(privptr->active_link_ID)) {
2219 privptr->active_link_ID = p_ctlbk->linkid;
2220 if (p_env->packing > PACKING_ASK) {
2222 "%s: Confirmed Now packing\n", dev->name);
2223 p_env->packing = DO_PACKED;
2225 p_ch = &privptr->channel[WRITE_CHANNEL];
2226 wake_up(&p_ch->wait);
2228 dev_warn(tdev, "Activating %s failed because of"
2229 " an incorrect link ID=%d\n",
2230 dev->name, p_ctlbk->linkid);
2231 claw_snd_disc(dev, p_ctlbk);
2235 dev_info(tdev, "%s: Disconnect: "
2236 "Vers=%d,link_id=%d,Corr=%d\n",
2237 dev->name, p_ctlbk->version,
2238 p_ctlbk->linkid, p_ctlbk->correlator);
/* Disconnect of link 2 during PACK_SEND negotiation falls back to
 * packed operation on link 1. */
2239 if ((p_ctlbk->linkid == 2) &&
2240 (p_env->packing == PACK_SEND)) {
2241 privptr->active_link_ID = 1;
2242 p_env->packing = DO_PACKED;
2244 privptr->active_link_ID = 0;
2247 dev_warn(tdev, "The communication peer of %s failed\n",
2251 dev_warn(tdev, "The communication peer of %s sent"
2252 " an unknown command code\n",
2258 } /* end of claw_process_control */
2261 /*-------------------------------------------------------------------*
2262 * claw_send_control *
2264 *--------------------------------------------------------------------*/
/*
 * Build a CLAW control record in privptr->ctl_bk and transmit it.
 *
 * dev         - net device to send on
 * type        - control command (SYSTEM_VALIDATE_*, CONNECTION_*, ...)
 * link        - link id to place in the record
 * correlator  - correlator echoed back to the peer
 * rc          - return code field for responses
 * local_name  - 8-byte local application name (copied verbatim)
 * remote_name - 8-byte remote application name (copied verbatim)
 *
 * The record's payload is interpreted as a sysval for the validate
 * commands and as a conncmd for the connection commands (both overlay
 * p_ctl->data).  The finished record is copied into a freshly
 * allocated skb and handed to claw_hw_tx() — link 1 when packing is
 * PACK_SEND or beyond, link 0 otherwise.
 *
 * NOTE(review): lines are missing from this extract (assignment of
 * p_ctl->rc / p_ctl->linkid, the alloc-failure path, the return
 * statements); the listing is incomplete.
 */
2267 claw_send_control(struct net_device *dev, __u8 type, __u8 link,
2268 __u8 correlator, __u8 rc, char *local_name, char *remote_name)
2270 struct claw_privbk *privptr;
2271 struct clawctl *p_ctl;
2272 struct sysval *p_sysval;
2273 struct conncmd *p_connect;
2274 struct sk_buff *skb;
2276 CLAW_DBF_TEXT(2, setup, "sndcntl");
2277 privptr = dev->ml_priv;
2278 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2280 p_ctl->command=type;
2281 p_ctl->version=CLAW_VERSION_ID;
2283 p_ctl->correlator=correlator;
2286 p_sysval=(struct sysval *)&p_ctl->data;
2287 p_connect=(struct conncmd *)&p_ctl->data;
2289 switch (p_ctl->command) {
2290 case SYSTEM_VALIDATE_REQUEST:
2291 case SYSTEM_VALIDATE_RESPONSE:
2292 memcpy(&p_sysval->host_name, local_name, 8);
2293 memcpy(&p_sysval->WS_name, remote_name, 8);
2294 if (privptr->p_env->packing > 0) {
2295 p_sysval->read_frame_size = DEF_PACK_BUFSIZE;
2296 p_sysval->write_frame_size = DEF_PACK_BUFSIZE;
2298 /* how big is the biggest group of packets */
2299 p_sysval->read_frame_size =
2300 privptr->p_env->read_size;
2301 p_sysval->write_frame_size =
2302 privptr->p_env->write_size;
2304 memset(&p_sysval->reserved, 0x00, 4);
2306 case CONNECTION_REQUEST:
2307 case CONNECTION_RESPONSE:
2308 case CONNECTION_CONFIRM:
2310 memcpy(&p_sysval->host_name, local_name, 8);
2311 memcpy(&p_sysval->WS_name, remote_name, 8);
2312 if (privptr->p_env->packing > 0) {
2313 /* How big is the biggest packet */
2314 p_connect->reserved1[0]=CLAW_FRAME_SIZE;
2315 p_connect->reserved1[1]=CLAW_FRAME_SIZE;
2317 memset(&p_connect->reserved1, 0x00, 4);
2318 memset(&p_connect->reserved2, 0x00, 4);
2325 /* write Control Record to the device */
2328 skb = dev_alloc_skb(sizeof(struct clawctl));
2332 memcpy(skb_put(skb, sizeof(struct clawctl)),
2333 p_ctl, sizeof(struct clawctl));
2334 if (privptr->p_env->packing >= PACK_SEND)
2335 claw_hw_tx(skb, dev, 1);
2337 claw_hw_tx(skb, dev, 0);
2339 } /* end of claw_send_control */
2341 /*-------------------------------------------------------------------*
2342 * claw_snd_conn_req *
2344 *--------------------------------------------------------------------*/
/*
 * claw_snd_conn_req - send a CONNECTION_REQUEST for the given link.
 *
 * Selects the remote application name according to the current packing
 * mode: PACKING_ASK uses WS_APPL_NAME_PACKED, PACK_SEND uses
 * WS_APPL_NAME_IP_NAME, and packing==0 uses HOST_APPL_NAME with the
 * configured api_type.  The request is only sent once system
 * validation has completed (system_validate_comp checked below —
 * NOTE(review): the early-exit branch is not visible in this fragment).
 */
2346 claw_snd_conn_req(struct net_device *dev, __u8 link)
2349 struct claw_privbk *privptr = dev->ml_priv;
2350 struct clawctl *p_ctl;
2352 CLAW_DBF_TEXT(2, setup, "snd_conn");
2354 p_ctl=(struct clawctl *)&privptr->ctl_bk;
2355 p_ctl->linkid = link;
2356 if ( privptr->system_validate_comp==0x00 ) {
2359 if (privptr->p_env->packing == PACKING_ASK )
2360 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2361 WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED);
2362 if (privptr->p_env->packing == PACK_SEND) {
2363 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2364 WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME);
2366 if (privptr->p_env->packing == 0)
2367 rc=claw_send_control(dev, CONNECTION_REQUEST,0,0,0,
2368 HOST_APPL_NAME, privptr->p_env->api_type);
2371 } /* end of claw_snd_conn_req */
2374 /*-------------------------------------------------------------------*
2377 *--------------------------------------------------------------------*/
/*
 * claw_snd_disc - send a DISCONNECT control record.
 *
 * Echoes back the link id and correlator from the received control
 * block p_ctl, together with the host/WS names from its conncmd
 * payload, via claw_send_control().
 */
2380 claw_snd_disc(struct net_device *dev, struct clawctl * p_ctl)
2383 struct conncmd * p_connect;
2385 CLAW_DBF_TEXT(2, setup, "snd_dsc");
2386 p_connect=(struct conncmd *)&p_ctl->data;
2388 rc=claw_send_control(dev, DISCONNECT, p_ctl->linkid,
2389 p_ctl->correlator, 0,
2390 p_connect->host_name, p_connect->WS_name);
2392 } /* end of claw_snd_disc */
2395 /*-------------------------------------------------------------------*
2396 * claw_snd_sys_validate_rsp *
2398 *--------------------------------------------------------------------*/
/*
 * claw_snd_sys_validate_rsp - answer a SYSTEM_VALIDATE_REQUEST.
 *
 * Sends a SYSTEM_VALIDATE_RESPONSE carrying the configured host and
 * adapter names from the claw_env, with the supplied return_code.
 * NOTE(review): several argument lines of the claw_send_control()
 * call are missing from this fragment.
 */
2401 claw_snd_sys_validate_rsp(struct net_device *dev,
2402 struct clawctl *p_ctl, __u32 return_code)
2404 struct claw_env * p_env;
2405 struct claw_privbk *privptr;
2408 CLAW_DBF_TEXT(2, setup, "chkresp");
2409 privptr = dev->ml_priv;
2410 p_env=privptr->p_env;
2411 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE,
2416 p_env->adapter_name );
2418 } /* end of claw_snd_sys_validate_rsp */
2420 /*-------------------------------------------------------------------*
2421 * claw_strt_conn_req *
2423 *--------------------------------------------------------------------*/
/*
 * claw_strt_conn_req - kick off connection establishment on link 1.
 * Thin wrapper around claw_snd_conn_req(dev, 1).
 */
2426 claw_strt_conn_req(struct net_device *dev )
2430 CLAW_DBF_TEXT(2, setup, "conn_req");
2431 rc=claw_snd_conn_req(dev, 1);
2433 } /* end of claw_strt_conn_req */
2437 /*-------------------------------------------------------------------*
2439 *-------------------------------------------------------------------*/
/*
 * claw_stats - ndo_get_stats callback.
 * Returns the statistics structure kept in the device's private block.
 */
net_device_stats *claw_stats(struct net_device *dev)
2444 struct claw_privbk *privptr;
2446 CLAW_DBF_TEXT(4, trace, "stats");
2447 privptr = dev->ml_priv;
2448 return &privptr->stats;
2449 } /* end of claw_stats */
2452 /*-------------------------------------------------------------------*
2455 *--------------------------------------------------------------------*/
/*
 * unpack_read - process completed read CCWs and deliver packets upward.
 *
 * Walks the active read queue while entries are no longer CLAW_PENDING.
 * For each buffer it:
 *   - detects a packed control record (PACK_SEND, clawph len==32,
 *     link_num==0) and switches to DO_PACKED on CONNECTION_RESPONSE /
 *     CONNECTION_CONFIRM;
 *   - determines the logical link number (from the pack header in
 *     DO_PACKED mode, otherwise from the CCW header opcode);
 *   - handles the More-To-Come flag, including skipping the remainder
 *     of a multi-frame message that could not be processed
 *     (privptr->mtc_skipping);
 *   - hands control records to claw_process_control();
 *   - otherwise copies the payload into the MTC envelope and, once the
 *     final frame of a message arrives, wraps the assembled data in an
 *     skb (protocol ETH_P_IP, checksum unnecessary) and updates rx
 *     statistics; allocation failure bumps rx_dropped;
 *   - recycles the CCW block onto a local free chain.
 * Finally the freed blocks are re-queued via add_claw_reads() and a
 * new read is started with claw_strt_read(dev, LOCK_YES).
 *
 * NOTE(review): fragment — loop braces, several else branches and the
 * netif_rx()/packet-loop lines are missing from this view; verify the
 * DO_PACKED inner loop against the full source.
 */
2457 unpack_read(struct net_device *dev )
2459 struct sk_buff *skb;
2460 struct claw_privbk *privptr;
2461 struct claw_env *p_env;
2462 struct ccwbk *p_this_ccw;
2463 struct ccwbk *p_first_ccw;
2464 struct ccwbk *p_last_ccw;
2465 struct clawph *p_packh;
2467 struct clawctl *p_ctlrec=NULL;
2468 struct device *p_dev;
2473 __u8 mtc_this_frm=0;
2478 CLAW_DBF_TEXT(4, trace, "unpkread");
2483 privptr = dev->ml_priv;
2485 p_dev = &privptr->channel[READ_CHANNEL].cdev->dev;
2486 p_env = privptr->p_env;
2487 p_this_ccw=privptr->p_read_active_first;
2488 while (p_this_ccw!=NULL && p_this_ccw->header.flag!=CLAW_PENDING) {
/* mark consumed and unlink from the active read queue */
2491 p_this_ccw->header.flag=CLAW_PENDING;
2492 privptr->p_read_active_first=p_this_ccw->next;
2493 p_this_ccw->next=NULL;
2494 p_packh = (struct clawph *)p_this_ccw->p_buffer;
2495 if ((p_env->packing == PACK_SEND) &&
2496 (p_packh->len == 32) &&
2497 (p_packh->link_num == 0)) { /* is it a packed ctl rec? */
2498 p_packh++; /* peek past pack header */
2499 p_ctlrec = (struct clawctl *)p_packh;
2500 p_packh--; /* un peek */
2501 if ((p_ctlrec->command == CONNECTION_RESPONSE) ||
2502 (p_ctlrec->command == CONNECTION_CONFIRM))
2503 p_env->packing = DO_PACKED;
2505 if (p_env->packing == DO_PACKED)
2506 link_num=p_packh->link_num;
2508 link_num=p_this_ccw->header.opcode / 8;
2509 if ((p_this_ccw->header.opcode & MORE_to_COME_FLAG)!=0) {
/* an MTC frame must be a full read_size frame; warn otherwise */
2511 if (p_this_ccw->header.length!=
2512 privptr->p_env->read_size ) {
2514 "The communication peer of %s"
2516 " frame of length %02x\n",
2517 dev->name, p_this_ccw->header.length);
2521 if (privptr->mtc_skipping) {
2523 * We're in the mode of skipping past a
2524 * multi-frame message
2525 * that we can't process for some reason or other.
2526 * The first frame without the More-To-Come flag is
2527 * the last frame of the skipped message.
2529 /* in case of More-To-Come not set in this frame */
2530 if (mtc_this_frm==0) {
2531 privptr->mtc_skipping=0; /* Ok, the end */
2532 privptr->mtc_logical_link=-1;
2538 claw_process_control(dev, p_this_ccw);
2539 CLAW_DBF_TEXT(4, trace, "UnpkCntl");
2543 if (p_env->packing == DO_PACKED) {
2544 if (pack_off > p_env->read_size)
2546 p_packd = p_this_ccw->p_buffer+pack_off;
2547 p_packh = (struct clawph *) p_packd;
2548 if ((p_packh->len == 0) || /* done with this frame? */
2549 (p_packh->flag != 0))
2551 bytes_to_mov = p_packh->len;
2552 pack_off += bytes_to_mov+sizeof(struct clawph);
2555 bytes_to_mov=p_this_ccw->header.length;
2557 if (privptr->mtc_logical_link<0) {
2560 * if More-To-Come is set in this frame then we don't know
2561 * length of entire message, and hence have to allocate
2564 /* We are starting a new envelope */
2565 privptr->mtc_offset=0;
2566 privptr->mtc_logical_link=link_num;
/* overflow of the assembly envelope counts as a frame error */
2569 if (bytes_to_mov > (MAX_ENVELOPE_SIZE- privptr->mtc_offset) ) {
2571 privptr->stats.rx_frame_errors++;
2574 if (p_env->packing == DO_PACKED) {
2575 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2576 p_packd+sizeof(struct clawph), bytes_to_mov);
2579 memcpy( privptr->p_mtc_envelope+ privptr->mtc_offset,
2580 p_this_ccw->p_buffer, bytes_to_mov);
2582 if (mtc_this_frm==0) {
/* last frame of the message: build the skb and account for it */
2583 len_of_data=privptr->mtc_offset+bytes_to_mov;
2584 skb=dev_alloc_skb(len_of_data);
2586 memcpy(skb_put(skb,len_of_data),
2587 privptr->p_mtc_envelope,
2590 skb_reset_mac_header(skb);
2591 skb->protocol=htons(ETH_P_IP);
2592 skb->ip_summed=CHECKSUM_UNNECESSARY;
2593 privptr->stats.rx_packets++;
2594 privptr->stats.rx_bytes+=len_of_data;
2598 dev_info(p_dev, "Allocating a buffer for"
2599 " incoming data failed\n");
2600 privptr->stats.rx_dropped++;
2602 privptr->mtc_offset=0;
2603 privptr->mtc_logical_link=-1;
2606 privptr->mtc_offset+=bytes_to_mov;
2608 if (p_env->packing == DO_PACKED)
2612 * Remove ThisCCWblock from active read queue, and add it
2613 * to queue of free blocks to be reused.
2616 p_this_ccw->header.length=0xffff;
2617 p_this_ccw->header.opcode=0xff;
2619 * add this one to the free queue for later reuse
2621 if (p_first_ccw==NULL) {
2622 p_first_ccw = p_this_ccw;
2625 p_last_ccw->next = p_this_ccw;
2627 p_last_ccw = p_this_ccw;
2629 * chain to next block on active read queue
2631 p_this_ccw = privptr->p_read_active_first;
2632 CLAW_DBF_TEXT_(4, trace, "rxpkt %d", p);
2633 } /* end of while */
2635 /* check validity */
2637 CLAW_DBF_TEXT_(4, trace, "rxfrm %d", i);
2638 add_claw_reads(dev, p_first_ccw, p_last_ccw);
2639 claw_strt_read(dev, LOCK_YES);
2641 } /* end of unpack_read */
2643 /*-------------------------------------------------------------------*
2646 *--------------------------------------------------------------------*/
/*
 * claw_strt_read - (re)start channel I/O on the read channel.
 *
 * Sets the signal block flag to CLAW_IDLE, or CLAW_BUSY when either
 * the write or the read active queue still has a non-pending head.
 * If the read channel is not already active (IO_active test-and-set),
 * issues ccw_device_start() for the first active read CCW and checks
 * the return code via ccw_check_return_code().  The ccw device lock
 * is taken only when the caller passes lock==LOCK_YES (callers that
 * already hold it pass LOCK_NO).
 */
2648 claw_strt_read (struct net_device *dev, int lock )
2652 unsigned long saveflags = 0;
2653 struct claw_privbk *privptr = dev->ml_priv;
2654 struct ccwbk*p_ccwbk;
2656 struct clawh *p_clawh;
2657 p_ch = &privptr->channel[READ_CHANNEL];
2659 CLAW_DBF_TEXT(4, trace, "StRdNter");
2660 p_clawh=(struct clawh *)privptr->p_claw_signal_blk;
2661 p_clawh->flag=CLAW_IDLE; /* 0x00 */
2663 if ((privptr->p_write_active_first!=NULL &&
2664 privptr->p_write_active_first->header.flag!=CLAW_PENDING) ||
2665 (privptr->p_read_active_first!=NULL &&
2666 privptr->p_read_active_first->header.flag!=CLAW_PENDING )) {
2667 p_clawh->flag=CLAW_BUSY; /* 0xff */
2669 if (lock==LOCK_YES) {
2670 spin_lock_irqsave(get_ccwdev_lock(p_ch->cdev), saveflags);
2672 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2673 CLAW_DBF_TEXT(4, trace, "HotRead");
2674 p_ccwbk=privptr->p_read_active_first;
2675 parm = (unsigned long) p_ch;
2676 rc = ccw_device_start (p_ch->cdev, &p_ccwbk->read, parm,
2679 ccw_check_return_code(p_ch->cdev, rc);
2683 CLAW_DBF_TEXT(2, trace, "ReadAct");
2686 if (lock==LOCK_YES) {
2687 spin_unlock_irqrestore(get_ccwdev_lock(p_ch->cdev), saveflags);
2689 CLAW_DBF_TEXT(4, trace, "StRdExit");
2691 } /* end of claw_strt_read */
2693 /*-------------------------------------------------------------------*
2694 * claw_strt_out_IO *
2696 *--------------------------------------------------------------------*/
/*
 * claw_strt_out_IO - start channel I/O on the write channel.
 *
 * No-op when the channel is stopping (CLAW_STOP) or there is nothing
 * queued.  Otherwise, if the write channel is not already active
 * (IO_active test-and-set), issues ccw_device_start() for the first
 * queued write CCW, checking the return code, and refreshes
 * dev->trans_start for the tx watchdog.
 */
2699 claw_strt_out_IO( struct net_device *dev )
2703 struct claw_privbk *privptr;
2705 struct ccwbk *p_first_ccw;
2710 privptr = (struct claw_privbk *)dev->ml_priv;
2711 p_ch = &privptr->channel[WRITE_CHANNEL];
2713 CLAW_DBF_TEXT(4, trace, "strt_io");
2714 p_first_ccw=privptr->p_write_active_first;
2716 if (p_ch->claw_state == CLAW_STOP)
2718 if (p_first_ccw == NULL) {
2721 if (test_and_set_bit(0, (void *)&p_ch->IO_active) == 0) {
2722 parm = (unsigned long) p_ch;
2723 CLAW_DBF_TEXT(2, trace, "StWrtIO");
2724 rc = ccw_device_start(p_ch->cdev, &p_first_ccw->write, parm,
2727 ccw_check_return_code(p_ch->cdev, rc);
2730 dev->trans_start = jiffies;
2732 } /* end of claw_strt_out_IO */
2734 /*-------------------------------------------------------------------*
2735 * Free write buffers *
2737 *--------------------------------------------------------------------*/
/*
 * claw_free_wrt_buf - reclaim completed write CCW buffers.
 *
 * Scans the active write queue from the head while entries are
 * completed (flag != CLAW_PENDING).  A CCW is freed when its
 * successor is also complete, or when it is the last entry on the
 * queue: it is moved to the free chain, write_free_count and
 * tx_bytes/tx_packets statistics are updated.  When any buffer was
 * freed the TB_NOBUFFER busy bit is cleared, and the last pointer is
 * reset when the queue has been fully drained.
 */
2740 claw_free_wrt_buf( struct net_device *dev )
2743 struct claw_privbk *privptr = (struct claw_privbk *)dev->ml_priv;
2744 struct ccwbk*p_this_ccw;
2745 struct ccwbk*p_next_ccw;
2747 CLAW_DBF_TEXT(4, trace, "freewrtb");
2748 /* scan the write queue to free any completed write packets */
2749 p_this_ccw=privptr->p_write_active_first;
2750 while ( (p_this_ccw!=NULL) && (p_this_ccw->header.flag!=CLAW_PENDING))
2752 p_next_ccw = p_this_ccw->next;
2753 if (((p_next_ccw!=NULL) &&
2754 (p_next_ccw->header.flag!=CLAW_PENDING)) ||
2755 ((p_this_ccw == privptr->p_write_active_last) &&
2756 (p_this_ccw->header.flag!=CLAW_PENDING))) {
2757 /* The next CCW is OK or this is */
2758 /* the last CCW...free it @A1A */
2759 privptr->p_write_active_first=p_this_ccw->next;
2760 p_this_ccw->header.flag=CLAW_PENDING;
2761 p_this_ccw->next=privptr->p_write_free_chain;
2762 privptr->p_write_free_chain=p_this_ccw;
2763 ++privptr->write_free_count;
2764 privptr->stats.tx_bytes+= p_this_ccw->write.count;
2765 p_this_ccw=privptr->p_write_active_first;
2766 privptr->stats.tx_packets++;
2772 if (privptr->write_free_count!=0) {
2773 claw_clearbit_busy(TB_NOBUFFER,dev);
2775 /* whole chain removed? */
2776 if (privptr->p_write_active_first==NULL) {
2777 privptr->p_write_active_last=NULL;
2779 CLAW_DBF_TEXT_(4, trace, "FWC=%d", privptr->write_free_count);
2783 /*-------------------------------------------------------------------*
2784 * claw free netdevice *
2786 *--------------------------------------------------------------------*/
/*
 * claw_free_netdevice - release a claw net_device.
 *
 * Clears the read channel's ndev back-pointer and the device's
 * ml_priv; free_dev selects whether the net_device itself is freed
 * (NOTE(review): the free_netdev call and the IFF_RUNNING handling
 * are not visible in this fragment).
 */
2788 claw_free_netdevice(struct net_device * dev, int free_dev)
2790 struct claw_privbk *privptr;
2792 CLAW_DBF_TEXT(2, setup, "free_dev");
2795 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2796 privptr = dev->ml_priv;
2797 if (dev->flags & IFF_RUNNING)
2800 privptr->channel[READ_CHANNEL].ndev = NULL; /* say it's free */
2802 dev->ml_priv = NULL;
2808 CLAW_DBF_TEXT(2, setup, "free_ok");
2812 * Claw init netdevice
2813 * Initialize everything of the net device except the name and the
/* net_device operations: open/stop/stats/xmit/mtu handlers for claw. */
2816 static const struct net_device_ops claw_netdev_ops = {
2817 .ndo_open = claw_open,
2818 .ndo_stop = claw_release,
2819 .ndo_get_stats = claw_stats,
2820 .ndo_start_xmit = claw_tx,
2821 .ndo_change_mtu = claw_change_mtu,
/*
 * claw_init_netdevice - alloc_netdev setup callback.
 * Sets defaults for a claw interface: default MTU, no hard header,
 * SLIP ARP type, point-to-point/NOARP flags, a 1300-entry tx queue,
 * and installs claw_netdev_ops.
 */
2825 claw_init_netdevice(struct net_device * dev)
2827 CLAW_DBF_TEXT(2, setup, "init_dev");
2828 CLAW_DBF_TEXT_(2, setup, "%s", dev->name);
2829 dev->mtu = CLAW_DEFAULT_MTU_SIZE;
2830 dev->hard_header_len = 0;
2832 dev->type = ARPHRD_SLIP;
2833 dev->tx_queue_len = 1300;
2834 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
2835 dev->netdev_ops = &claw_netdev_ops;
2836 CLAW_DBF_TEXT(2, setup, "initok");
2841 * Init a new channel in the privptr->channel[i].
2843 * @param cdev The ccw_device to be added.
2845 * @return 0 on success, !0 on error.
/*
 * add_channel - initialise privptr->channel[i] for a ccw device.
 * Records the channel flag (1=read, 2=write), an id string derived
 * from the device name, and the device number; allocates the irb
 * buffer used by the interrupt handler (failure path truncated in
 * this fragment — TODO confirm it returns -ENOMEM).
 */
2848 add_channel(struct ccw_device *cdev,int i,struct claw_privbk *privptr)
2851 struct ccw_dev_id dev_id;
2853 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cdev->dev));
2854 privptr->channel[i].flag = i+1; /* Read is 1 Write is 2 */
2855 p_ch = &privptr->channel[i];
2857 snprintf(p_ch->id, CLAW_ID_SIZE, "cl-%s", dev_name(&cdev->dev));
2858 ccw_device_get_id(cdev, &dev_id);
2859 p_ch->devno = dev_id.devno;
2860 if ((p_ch->irb = kzalloc(sizeof (struct irb),GFP_KERNEL)) == NULL) {
2869 * Setup an interface.
2871 * @param cgdev Device to be setup.
2873 * @returns 0 on success, !0 on failure.
/*
 * claw_new_device - ccwgroup set_online callback.
 *
 * Wires up a new CLAW interface: records the read/write subchannel
 * device numbers in the claw_env, initialises both channels via
 * add_channel(), sets the two ccw devices online, allocates and
 * registers a net_device (alloc_netdev with claw_init_netdevice),
 * allocates the CCW buffer pool on first use (init_ccw_bk), and logs
 * the resulting configuration.  On failure the subchannels are set
 * offline again.  NOTE(review): the goto-cleanup labels and several
 * early-return lines are missing from this fragment.
 */
2876 claw_new_device(struct ccwgroup_device *cgdev)
2878 struct claw_privbk *privptr;
2879 struct claw_env *p_env;
2880 struct net_device *dev;
2882 struct ccw_dev_id dev_id;
2884 dev_info(&cgdev->dev, "add for %s\n",
2885 dev_name(&cgdev->cdev[READ_CHANNEL]->dev));
2886 CLAW_DBF_TEXT(2, setup, "new_dev");
2887 privptr = dev_get_drvdata(&cgdev->dev);
2888 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2889 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2892 p_env = privptr->p_env;
2893 ccw_device_get_id(cgdev->cdev[READ_CHANNEL], &dev_id);
2894 p_env->devno[READ_CHANNEL] = dev_id.devno;
2895 ccw_device_get_id(cgdev->cdev[WRITE_CHANNEL], &dev_id);
2896 p_env->devno[WRITE_CHANNEL] = dev_id.devno;
2897 ret = add_channel(cgdev->cdev[0],0,privptr);
2899 ret = add_channel(cgdev->cdev[1],1,privptr);
2901 dev_warn(&cgdev->dev, "Creating a CLAW group device"
2902 " failed with error code %d\n", ret);
2905 ret = ccw_device_set_online(cgdev->cdev[READ_CHANNEL]);
2907 dev_warn(&cgdev->dev,
2908 "Setting the read subchannel online"
2909 " failed with error code %d\n", ret);
2912 ret = ccw_device_set_online(cgdev->cdev[WRITE_CHANNEL]);
2914 dev_warn(&cgdev->dev,
2915 "Setting the write subchannel online "
2916 "failed with error code %d\n", ret);
2919 dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
2921 dev_warn(&cgdev->dev,
2922 "Activating the CLAW device failed\n");
2925 dev->ml_priv = privptr;
2926 dev_set_drvdata(&cgdev->dev, privptr);
2927 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, privptr);
2928 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, privptr);
2930 SET_NETDEV_DEV(dev, &cgdev->dev);
2931 if (register_netdev(dev) != 0) {
2932 claw_free_netdevice(dev, 1);
2933 CLAW_DBF_TEXT(2, trace, "regfail");
2936 dev->flags &=~IFF_RUNNING;
2937 if (privptr->buffs_alloc == 0) {
/* allocate the CCW/buffer pool only on first activation */
2938 ret=init_ccw_bk(dev);
2940 unregister_netdev(dev);
2941 claw_free_netdevice(dev,1);
2942 CLAW_DBF_TEXT(2, trace, "ccwmem");
2946 privptr->channel[READ_CHANNEL].ndev = dev;
2947 privptr->channel[WRITE_CHANNEL].ndev = dev;
2948 privptr->p_env->ndev = dev;
2950 dev_info(&cgdev->dev, "%s:readsize=%d writesize=%d "
2951 "readbuffer=%d writebuffer=%d read=0x%04x write=0x%04x\n",
2952 dev->name, p_env->read_size,
2953 p_env->write_size, p_env->read_buffers,
2954 p_env->write_buffers, p_env->devno[READ_CHANNEL],
2955 p_env->devno[WRITE_CHANNEL]);
2956 dev_info(&cgdev->dev, "%s:host_name:%.8s, adapter_name "
2957 ":%.8s api_type: %.8s\n",
2958 dev->name, p_env->host_name,
2959 p_env->adapter_name , p_env->api_type);
/* error path: undo the set_online of both subchannels */
2962 ccw_device_set_offline(cgdev->cdev[1]);
2963 ccw_device_set_offline(cgdev->cdev[0]);
/*
 * claw_purge_skb_queue - drop every skb on a queue.
 * Dequeues each skb, drops the extra reference taken by the driver
 * (atomic_dec of skb->users) and frees it.
 */
2968 claw_purge_skb_queue(struct sk_buff_head *q)
2970 struct sk_buff *skb;
2972 CLAW_DBF_TEXT(4, trace, "purgque");
2973 while ((skb = skb_dequeue(q))) {
2974 atomic_dec(&skb->users);
2975 dev_kfree_skb_any(skb);
2980 * Shutdown an interface.
2982 * @param cgdev Device to be shut down.
2984 * @returns 0 on success, !0 on failure.
/*
 * claw_shutdown_device - ccwgroup set_offline callback.
 *
 * Closes a running interface (claw_release), unregisters and frees
 * the net_device, clears all ndev back-pointers in the private block
 * and claw_env, and sets both subchannels offline.  ml_priv is
 * cleared first because the private data belongs to the ccwgroup
 * device, not to the net_device being freed.
 */
2987 claw_shutdown_device(struct ccwgroup_device *cgdev)
2989 struct claw_privbk *priv;
2990 struct net_device *ndev;
2993 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
2994 priv = dev_get_drvdata(&cgdev->dev);
2997 ndev = priv->channel[READ_CHANNEL].ndev;
2999 /* Close the device */
3000 dev_info(&cgdev->dev, "%s: shutting down\n",
3002 if (ndev->flags & IFF_RUNNING)
3003 ret = claw_release(ndev);
3004 ndev->flags &=~IFF_RUNNING;
3005 unregister_netdev(ndev);
3006 ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */
3007 claw_free_netdevice(ndev, 1);
3008 priv->channel[READ_CHANNEL].ndev = NULL;
3009 priv->channel[WRITE_CHANNEL].ndev = NULL;
3010 priv->p_env->ndev = NULL;
3012 ccw_device_set_offline(cgdev->cdev[1]);
3013 ccw_device_set_offline(cgdev->cdev[0]);
/*
 * claw_remove_device - ccwgroup remove callback.
 *
 * Shuts the device down if it is still online, then frees the
 * MTC envelope and per-channel irb buffers, clears all drvdata
 * pointers (group device and both subchannels) and drops the
 * reference taken in claw_probe() via put_device().
 */
3018 claw_remove_device(struct ccwgroup_device *cgdev)
3020 struct claw_privbk *priv;
3023 CLAW_DBF_TEXT_(2, setup, "%s", dev_name(&cgdev->dev));
3024 priv = dev_get_drvdata(&cgdev->dev);
3026 dev_info(&cgdev->dev, " will be removed.\n");
3027 if (cgdev->state == CCWGROUP_ONLINE)
3028 claw_shutdown_device(cgdev);
3029 kfree(priv->p_mtc_envelope);
3030 priv->p_mtc_envelope=NULL;
3033 kfree(priv->channel[0].irb);
3034 priv->channel[0].irb=NULL;
3035 kfree(priv->channel[1].irb);
3036 priv->channel[1].irb=NULL;
3038 dev_set_drvdata(&cgdev->dev, NULL);
3039 dev_set_drvdata(&cgdev->cdev[READ_CHANNEL]->dev, NULL);
3040 dev_set_drvdata(&cgdev->cdev[WRITE_CHANNEL]->dev, NULL);
3041 put_device(&cgdev->dev);
/* sysfs 'host_name' show: print the configured host name. */
3051 claw_hname_show(struct device *dev, struct device_attribute *attr, char *buf)
3053 struct claw_privbk *priv;
3054 struct claw_env * p_env;
3056 priv = dev_get_drvdata(dev);
3059 p_env = priv->p_env;
3060 return sprintf(buf, "%s\n",p_env->host_name);
/*
 * sysfs 'host_name' store: copy the user string into p_env->host_name,
 * blank-padding (0x20) to MAX_NAME_LEN, replacing the trailing newline
 * with a blank and NUL-terminating at MAX_NAME_LEN.  Input longer than
 * MAX_NAME_LEN+1 is rejected (return line truncated in this fragment).
 */
3064 claw_hname_write(struct device *dev, struct device_attribute *attr,
3065 const char *buf, size_t count)
3067 struct claw_privbk *priv;
3068 struct claw_env * p_env;
3070 priv = dev_get_drvdata(dev);
3073 p_env = priv->p_env;
3074 if (count > MAX_NAME_LEN+1)
3076 memset(p_env->host_name, 0x20, MAX_NAME_LEN);
3077 strncpy(p_env->host_name,buf, count);
3078 p_env->host_name[count-1] = 0x20; /* clear extra 0x0a */
3079 p_env->host_name[MAX_NAME_LEN] = 0x00;
3080 CLAW_DBF_TEXT(2, setup, "HstnSet");
3081 CLAW_DBF_TEXT_(2, setup, "%s", p_env->host_name);
3086 static DEVICE_ATTR(host_name, 0644, claw_hname_show, claw_hname_write);
/* sysfs 'adapter_name' show: print the configured adapter name. */
3089 claw_adname_show(struct device *dev, struct device_attribute *attr, char *buf)
3091 struct claw_privbk *priv;
3092 struct claw_env * p_env;
3094 priv = dev_get_drvdata(dev);
3097 p_env = priv->p_env;
3098 return sprintf(buf, "%s\n", p_env->adapter_name);
/*
 * sysfs 'adapter_name' store: same convention as host_name — blank-pad
 * to MAX_NAME_LEN, overwrite the trailing newline with a blank, and
 * NUL-terminate at MAX_NAME_LEN.
 */
3102 claw_adname_write(struct device *dev, struct device_attribute *attr,
3103 const char *buf, size_t count)
3105 struct claw_privbk *priv;
3106 struct claw_env * p_env;
3108 priv = dev_get_drvdata(dev);
3111 p_env = priv->p_env;
3112 if (count > MAX_NAME_LEN+1)
3114 memset(p_env->adapter_name, 0x20, MAX_NAME_LEN);
3115 strncpy(p_env->adapter_name,buf, count);
3116 p_env->adapter_name[count-1] = 0x20; /* clear extra 0x0a */
3117 p_env->adapter_name[MAX_NAME_LEN] = 0x00;
3118 CLAW_DBF_TEXT(2, setup, "AdnSet");
3119 CLAW_DBF_TEXT_(2, setup, "%s", p_env->adapter_name);
3124 static DEVICE_ATTR(adapter_name, 0644, claw_adname_show, claw_adname_write);
/* sysfs 'api_type' show: print the configured API type string. */
3127 claw_apname_show(struct device *dev, struct device_attribute *attr, char *buf)
3129 struct claw_privbk *priv;
3130 struct claw_env * p_env;
3132 priv = dev_get_drvdata(dev);
3135 p_env = priv->p_env;
3136 return sprintf(buf, "%s\n",
/*
 * sysfs 'api_type' store: set the API type string (same padding and
 * termination convention as host_name).  Selecting the packed
 * application name (WS_APPL_NAME_PACKED) switches the environment to
 * packing mode with DEF_PACK_BUFSIZE frames; otherwise the standard
 * CLAW_FRAME_SIZE read/write sizes are used.
 */
3141 claw_apname_write(struct device *dev, struct device_attribute *attr,
3142 const char *buf, size_t count)
3144 struct claw_privbk *priv;
3145 struct claw_env * p_env;
3147 priv = dev_get_drvdata(dev);
3150 p_env = priv->p_env;
3151 if (count > MAX_NAME_LEN+1)
3153 memset(p_env->api_type, 0x20, MAX_NAME_LEN);
3154 strncpy(p_env->api_type,buf, count);
3155 p_env->api_type[count-1] = 0x20; /* we get a loose 0x0a */
3156 p_env->api_type[MAX_NAME_LEN] = 0x00;
3157 if(strncmp(p_env->api_type,WS_APPL_NAME_PACKED,6) == 0) {
3158 p_env->read_size=DEF_PACK_BUFSIZE;
3159 p_env->write_size=DEF_PACK_BUFSIZE;
3160 p_env->packing=PACKING_ASK;
3161 CLAW_DBF_TEXT(2, setup, "PACKING");
3165 p_env->read_size=CLAW_FRAME_SIZE;
3166 p_env->write_size=CLAW_FRAME_SIZE;
3167 CLAW_DBF_TEXT(2, setup, "ApiSet");
3169 CLAW_DBF_TEXT_(2, setup, "%s", p_env->api_type);
3173 static DEVICE_ATTR(api_type, 0644, claw_apname_show, claw_apname_write);
/* sysfs 'write_buffer' show: print the configured write buffer count. */
3176 claw_wbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3178 struct claw_privbk *priv;
3179 struct claw_env * p_env;
3181 priv = dev_get_drvdata(dev);
3184 p_env = priv->p_env;
3185 return sprintf(buf, "%d\n", p_env->write_buffers);
/*
 * sysfs 'write_buffer' store: parse an integer buffer count and store
 * it, bounded to [2, max] where max depends on the packing mode
 * (the max-selection branches are not visible in this fragment —
 * TODO confirm limits against the full source).
 */
3189 claw_wbuff_write(struct device *dev, struct device_attribute *attr,
3190 const char *buf, size_t count)
3192 struct claw_privbk *priv;
3193 struct claw_env * p_env;
3196 priv = dev_get_drvdata(dev);
3199 p_env = priv->p_env;
3200 sscanf(buf, "%i", &nnn);
3201 if (p_env->packing) {
3207 if ((nnn > max ) || (nnn < 2))
3209 p_env->write_buffers = nnn;
3210 CLAW_DBF_TEXT(2, setup, "Wbufset");
3211 CLAW_DBF_TEXT_(2, setup, "WB=%d", p_env->write_buffers);
3215 static DEVICE_ATTR(write_buffer, 0644, claw_wbuff_show, claw_wbuff_write);
/* sysfs 'read_buffer' show: print the configured read buffer count. */
3218 claw_rbuff_show(struct device *dev, struct device_attribute *attr, char *buf)
3220 struct claw_privbk *priv;
3221 struct claw_env * p_env;
3223 priv = dev_get_drvdata(dev);
3226 p_env = priv->p_env;
3227 return sprintf(buf, "%d\n", p_env->read_buffers);
/*
 * sysfs 'read_buffer' store: parse an integer buffer count and store
 * it, bounded to [2, max] where max depends on the packing mode
 * (the max-selection branches are not visible in this fragment —
 * TODO confirm limits against the full source).
 */
3231 claw_rbuff_write(struct device *dev, struct device_attribute *attr,
3232 const char *buf, size_t count)
3234 struct claw_privbk *priv;
3235 struct claw_env *p_env;
3238 priv = dev_get_drvdata(dev);
3241 p_env = priv->p_env;
3242 sscanf(buf, "%i", &nnn);
3243 if (p_env->packing) {
3249 if ((nnn > max ) || (nnn < 2))
3251 p_env->read_buffers = nnn;
3252 CLAW_DBF_TEXT(2, setup, "Rbufset");
3253 CLAW_DBF_TEXT_(2, setup, "RB=%d", p_env->read_buffers);
3256 static DEVICE_ATTR(read_buffer, 0644, claw_rbuff_show, claw_rbuff_write);
/*
 * sysfs attribute plumbing: the per-device attribute list, its group,
 * the group array, and the device type that attaches the groups to
 * each claw ccwgroup device.
 */
3258 static struct attribute *claw_attr[] = {
3259 &dev_attr_read_buffer.attr,
3260 &dev_attr_write_buffer.attr,
3261 &dev_attr_adapter_name.attr,
3262 &dev_attr_api_type.attr,
3263 &dev_attr_host_name.attr,
3266 static struct attribute_group claw_attr_group = {
3269 static const struct attribute_group *claw_attr_groups[] = {
3273 static const struct device_type claw_devtype = {
3275 .groups = claw_attr_groups,
3278 /*----------------------------------------------------------------*
3280 * this function is called for each CLAW device. *
3281 *----------------------------------------------------------------*/
/*
 * claw_probe - ccwgroup probe callback, called once per CLAW device.
 *
 * Takes a reference on the group device, allocates the private block,
 * the MTC reassembly envelope and the claw_env, initialises the env
 * with "not defined" names and default buffer counts/frame sizes,
 * installs claw_irq_handler on both subchannels and attaches the
 * sysfs device type.  On allocation failure the device reference is
 * dropped and -ENOMEM logged (return lines truncated in this
 * fragment).  Note: the partially allocated privptr members are not
 * freed here — presumably reclaimed in claw_remove_device; verify.
 */
3282 static int claw_probe(struct ccwgroup_device *cgdev)
3284 struct claw_privbk *privptr = NULL;
3286 CLAW_DBF_TEXT(2, setup, "probe");
3287 if (!get_device(&cgdev->dev))
3289 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL);
3290 dev_set_drvdata(&cgdev->dev, privptr);
3291 if (privptr == NULL) {
3293 put_device(&cgdev->dev);
3294 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3297 privptr->p_mtc_envelope = kzalloc(MAX_ENVELOPE_SIZE, GFP_KERNEL);
3298 privptr->p_env = kzalloc(sizeof(struct claw_env), GFP_KERNEL);
3299 if ((privptr->p_mtc_envelope == NULL) || (privptr->p_env == NULL)) {
3301 put_device(&cgdev->dev);
3302 CLAW_DBF_TEXT_(2, setup, "probex%d", -ENOMEM);
3305 memcpy(privptr->p_env->adapter_name, WS_NAME_NOT_DEF, 8);
3306 memcpy(privptr->p_env->host_name, WS_NAME_NOT_DEF, 8);
3307 memcpy(privptr->p_env->api_type, WS_NAME_NOT_DEF, 8);
3308 privptr->p_env->packing = 0;
3309 privptr->p_env->write_buffers = 5;
3310 privptr->p_env->read_buffers = 5;
3311 privptr->p_env->read_size = CLAW_FRAME_SIZE;
3312 privptr->p_env->write_size = CLAW_FRAME_SIZE;
3313 privptr->p_env->p_priv = privptr;
3314 cgdev->cdev[0]->handler = claw_irq_handler;
3315 cgdev->cdev[1]->handler = claw_irq_handler;
3316 cgdev->dev.type = &claw_devtype;
3317 CLAW_DBF_TEXT(2, setup, "prbext 0");
3320 } /* end of claw_probe */
3322 /*--------------------------------------------------------------------*
3323 * claw_init and cleanup *
3324 *---------------------------------------------------------------------*/
/*
 * claw_cleanup - module exit: unregister the group and ccw drivers,
 * the root device and the debug facility, in reverse of claw_init().
 */
3326 static void __exit claw_cleanup(void)
3328 ccwgroup_driver_unregister(&claw_group_driver);
3329 ccw_driver_unregister(&claw_ccw_driver);
3330 root_device_unregister(claw_root_dev);
3331 claw_unregister_debug_facility();
3332 pr_info("Driver unloaded\n");
3336 * Initialize module.
3337 * This is called just after the module is loaded.
3339 * @return 0 on success, !0 on error.
/*
 * claw_init - module init.
 *
 * Registers, in order: the s390 debug facility, the "claw" root
 * device, the ccw driver, and the ccwgroup driver (after attaching
 * the driver attribute groups).  On any failure the already
 * registered pieces are torn down via the goto-cleanup chain
 * (labels truncated in this fragment) and an error is logged.
 */
3341 static int __init claw_init(void)
3345 pr_info("Loading %s\n", version);
3346 ret = claw_register_debug_facility();
3348 pr_err("Registering with the S/390 debug feature"
3349 " failed with error code %d\n", ret);
3352 CLAW_DBF_TEXT(2, setup, "init_mod");
3353 claw_root_dev = root_device_register("claw");
3354 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3357 ret = ccw_driver_register(&claw_ccw_driver);
3360 claw_group_driver.driver.groups = claw_drv_attr_groups;
3361 ret = ccwgroup_driver_register(&claw_group_driver);
3367 ccw_driver_unregister(&claw_ccw_driver);
3369 root_device_unregister(claw_root_dev);
3371 CLAW_DBF_TEXT(2, setup, "init_bad");
3372 claw_unregister_debug_facility();
3374 pr_err("Initializing the claw device driver failed\n");
/* Module entry/exit hooks and metadata. */
3378 module_init(claw_init);
3379 module_exit(claw_cleanup);
3381 MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
3382 MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
3383 "Copyright 2000,2008 IBM Corporation\n");
3384 MODULE_LICENSE("GPL");