[S390] cio: remove ccw_device init_name
drivers/s390/cio/css.c
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

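/*
 * Call fn() for every possible subchannel id, iterating over all
 * subchannel numbers within each subchannel set from 0 up to max_ssid.
 * A non-zero return value from fn() cuts the scan of the current
 * subchannel set short; the last value returned by fn() is passed on
 * to the caller.
 */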
int for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

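/*
 * Scan subchannels in two stages: first call fn_known for every
 * subchannel already registered on the css bus, then call fn_unknown
 * for every subchannel id left in the idset, i.e. for which no device
 * exists yet.  If the idset cannot be allocated, fall back to a plain
 * scan over all possible subchannel ids.
 */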
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id, void *),
                               void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}

static struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (!sch)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel(sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        return sch;
}

static void css_free_subchannel(struct subchannel *sch)
{
        if (sch) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

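/*
 * Release function for subchannel devices.  The console subchannel is
 * set up statically before the memory allocators are usable, so it
 * must never be freed here.
 */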
static void css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
                     sch->schid.sch_no);
        ret = device_register(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

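/*
 * Build minimal subchannel description data from the path information
 * in the subchannel's pmcw.  Used when the full CHSC
 * store-subchannel-description data is not available.
 */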
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /*
                 * Console is initialized too early for functions requiring
                 * memory allocation.
                 */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them, since they will be
         * unregistered before they can be used anyway.  Therefore we delay
         * the add uevent until device recognition has completed
         * successfully.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver decides itself when to inform userspace
         * of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        ret = css_register_subchannel(sch);
        if (ret)
                css_free_subchannel(sch);
        return ret;
}

static int check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

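/*
 * Look up the subchannel device matching @schid on the css bus.  On
 * success a reference to the embedded struct device is held; the
 * caller must drop it with put_device() when done.
 */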
struct subchannel *get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
                         "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

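/*
 * The "slow path": subchannel ids that need evaluation are collected
 * in an idset and processed from a workqueue, keeping the machine
 * check handler itself short.
 */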
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);

static int __init slow_subchannel_init(void)
{
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        break;
                default:
                        rc = 0;
                }
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
        int ret;

        CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
                      schid.ssid, schid.sch_no);
        if (need_reprobe)
                return -EAGAIN;

        ret = css_probe_device(schid);
        switch (ret) {
        case 0:
                break;
        case -ENXIO:
        case -ENOMEM:
        case -EIO:
                /* These should abort looping */
                break;
        default:
                ret = 0;
        }

        return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
        /* Make sure initial subchannel scan is done. */
        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        if (need_reprobe)
                css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
        int ret;

        CIO_MSG_EVENT(4, "reprobe start\n");

        /* Make sure initial subchannel scan is done. */
        if (atomic_read(&ccw_device_init_count) != 0) {
                queue_work(ccw_device_work, &reprobe_idle_work);
                return;
        }
        need_reprobe = 0;
        ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

        CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
                      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        need_reprobe = 1;
        queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
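        /*
         * The rsid of the first CRW holds the subchannel number; when a
         * second CRW is chained, its rsid carries the subchannel set id
         * in the two bits above the low byte.
         */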
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        ret = PTR_ERR(sch);
                else
                        ret = 0;
                switch (ret) {
                case 0:
                        break;
                case -ENOMEM:
                        panic("Out of memory in init_channel_subsystem\n");
                /* -ENXIO: no more subchannels. */
                case -ENXIO:
                        return ret;
                /* -EIO: this subchannel set not supported. */
                case -EIO:
                        return ret;
                default:
                        return 0;
                }
        }
        /*
         * We register ALL valid subchannels, even those that were already
         * present before init_channel_subsystem was called.  They could
         * not have been registered earlier (kmalloc was not yet usable),
         * so we do it now.  This is the case e.g. for the console
         * subchannel.
         */
        css_register_subchannel(sch);
        return 0;
}

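/*
 * Build the global path group id that identifies this Linux image when
 * channel paths are grouped: either the extended cssid (if multiple
 * channel subsystem sets are available) or the cpu address, plus cpu
 * id, cpu model and the high word of the TOD clock.
 */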
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = stap();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
        css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
        css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        if (css->pseudo_subchannel) {
                /* Implies that it has been generated but never registered. */
                css_subchannel_release(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
        }
        kfree(css);
}

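/*
 * The cm_enable sysfs attribute: reading it returns whether channel
 * measurement is active, writing 1 or 0 switches channel measurement
 * on or off via chsc_secm().
 */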
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        dev_set_name(&css->device, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        void *secm_area;
        int ret, i;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 0, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 1, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                /* Search for subchannels that appeared during hibernation. */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can set up our channel
 * subsystem.  The struct subchannel objects are created during probing
 * (except for the static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
        int ret, i;

        ret = chsc_determine_css_characteristics();
        if (ret == -ENOMEM)
                goto out; /* No need to continue. */

        ret = chsc_alloc_sei_area();
        if (ret)
                goto out;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        ret = bus_register(&css_bus_type);
        if (ret)
                goto out;

        /* Try to enable multiple subchannel sets (MSS). */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        switch (ret) {
        case 0: /* Success. */
                max_ssid = __MAX_SSID;
                break;
        case -ENOMEM:
                goto out_bus;
        default:
                max_ssid = 0;
        }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret) {
                        kfree(channel_subsystems[i]);
                        goto out_unregister;
                }
                ret = device_register(&css->device);
                if (ret) {
                        put_device(&css->device);
                        goto out_unregister;
                }
                if (css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret)
                        goto out_file;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        for_each_subchannel(__init_channel_subsystem, NULL);
        return 0;
out_file:
        if (css_chsc_characteristics.secm)
                device_remove_file(&channel_subsystems[i]->device,
                                   &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
out_bus:
        bus_unregister(&css_bus_type);
out:
        /* Unregister the handler we registered above; chsc_free_sei_area
         * takes care of the CRW_RSC_CSS handler owned by chsc. */
        crw_unregister_handler(CRW_RSC_SCH);
        chsc_free_sei_area();
        idset_free(slow_subchannel_set);
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}

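/*
 * Each channel subsystem owns a "defunct" pseudo subchannel that acts
 * as a parking place for ccw devices whose subchannel has gone away.
 */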
int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}

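/*
 * Power management callbacks for subchannels: each simply forwards to
 * the corresponding callback of the bound subchannel driver, if any.
 */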
static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.name = cdrv->name;
        cdrv->drv.bus = &css_bus_type;
        cdrv->drv.owner = cdrv->owner;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);