/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
        struct subchannel_id schid;
        int ret;

        init_subchannel_id(&schid);
        ret = -ENODEV;
        do {
                do {
                        ret = fn(schid, data);
                        if (ret)
                                break;
                } while (schid.sch_no++ < __MAX_SUBCHANNEL);
                schid.sch_no = 0;
        } while (schid.ssid++ < max_ssid);
        return ret;
}
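/*
 * Usage sketch (not part of the original file; count_subchannel() is a
 * made-up callback): for_each_subchannel() calls fn for every possible
 * subchannel id, subchannel set by subchannel set. A non-zero return
 * value from fn ends the scan of the current subchannel set, and the
 * value returned by the final call is handed back to the caller.
 *
 *      static int count_subchannel(struct subchannel_id schid, void *data)
 *      {
 *              int *count = data;
 *
 *              (*count)++;
 *              return 0;       (0 means: keep iterating)
 *      }
 *
 *      int count = 0;
 *      for_each_subchannel(count_subchannel, &count);
 */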

struct cb_data {
        void *data;
        struct idset *set;
        int (*fn_known_sch)(struct subchannel *, void *);
        int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
        struct subchannel *sch = to_subchannel(dev);
        struct cb_data *cb = data;
        int rc = 0;

        idset_sch_del(cb->set, sch->schid);
        if (cb->fn_known_sch)
                rc = cb->fn_known_sch(sch, cb->data);
        return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        int rc = 0;

        if (idset_sch_contains(cb->set, schid))
                rc = cb->fn_unknown_sch(schid, cb->data);
        return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
        struct cb_data *cb = data;
        struct subchannel *sch;
        int rc = 0;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                if (cb->fn_known_sch)
                        rc = cb->fn_known_sch(sch, cb->data);
                put_device(&sch->dev);
        } else {
                if (cb->fn_unknown_sch)
                        rc = cb->fn_unknown_sch(schid, cb->data);
        }

        return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                               int (*fn_unknown)(struct subchannel_id,
                               void *), void *data)
{
        struct cb_data cb;
        int rc;

        cb.data = data;
        cb.fn_known_sch = fn_known;
        cb.fn_unknown_sch = fn_unknown;

        cb.set = idset_sch_new();
        if (!cb.set)
                /* fall back to brute force scanning in case of oom */
                return for_each_subchannel(call_fn_all_sch, &cb);

        idset_fill(cb.set);

        /* Process registered subchannels. */
        rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
        if (rc)
                goto out;
        /* Process unregistered subchannels. */
        if (fn_unknown)
                rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
        idset_free(cb.set);

        return rc;
}
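/*
 * Illustrative sketch (the callback names are hypothetical): the staged
 * walk first visits every registered subchannel through the driver core,
 * ticking each one off in the idset, and then invokes the "unknown"
 * callback only for ids that are still set, i.e. that have no registered
 * device. slow_eval_known_fn() and slow_eval_unknown_fn() below are
 * real users of this split.
 *
 *      for_each_subchannel_staged(handle_known_sch,    (registered)
 *                                 handle_unknown_sch,  (unregistered)
 *                                 NULL);
 */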

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
        struct subchannel *sch;
        int ret;

        sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
        if (sch == NULL)
                return ERR_PTR(-ENOMEM);
        ret = cio_validate_subchannel(sch, schid);
        if (ret < 0) {
                kfree(sch);
                return ERR_PTR(ret);
        }
        return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
        if (sch) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static void
css_subchannel_release(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (!cio_is_console(sch->schid)) {
                /* Reset intparm to zeroes. */
                sch->config.intparm = 0;
                cio_commit_config(sch);
                kfree(sch->lock);
                kfree(sch);
        }
}

static int css_sch_device_register(struct subchannel *sch)
{
        int ret;

        mutex_lock(&sch->reg_mutex);
        ret = device_register(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
        return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
        mutex_lock(&sch->reg_mutex);
        if (device_is_registered(&sch->dev))
                device_unregister(&sch->dev);
        mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
        int i;
        int mask;

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        ssd->path_mask = pmcw->pim;
        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (pmcw->pim & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = pmcw->chpid[i];
                }
        }
}
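/*
 * Worked example (added for illustration): with pmcw->pim == 0xc0 only
 * the two most significant bits are set, so the mask 0x80 >> i matches
 * for i == 0 and i == 1 and the loop fills in ssd->chpid[0] and
 * ssd->chpid[1] from pmcw->chpid[0] and pmcw->chpid[1]; all other
 * entries stay zeroed from the memset above.
 */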

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
        int i;
        int mask;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (ssd->path_mask & mask)
                        if (!chp_is_registered(ssd->chpid[i]))
                                chp_new(ssd->chpid[i]);
        }
}

void css_update_ssd_info(struct subchannel *sch)
{
        int ret;

        if (cio_is_console(sch->schid)) {
                /* Console is initialized too early for functions requiring
                 * memory allocation. */
                ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
        } else {
                ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
                if (ret)
                        ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
                ssd_register_chpids(&sch->ssd_info);
        }
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        struct subchannel *sch = to_subchannel(dev);

        return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_modalias.attr,
        NULL,
};

static struct attribute_group subch_attr_group = {
        .attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
        &subch_attr_group,
        NULL,
};
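/*
 * These attributes expose the subchannel type in sysfs. For an I/O
 * subchannel (st == 0) one would expect something like the following
 * (device name chosen for illustration):
 *
 *      $ cat /sys/bus/css/devices/0.0.0042/type
 *      0
 *      $ cat /sys/bus/css/devices/0.0.0042/modalias
 *      css:t0
 *
 * The modalias string uses the same "css:t%01X" format as css_uevent()
 * below, so udev can load a matching driver module.
 */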

static int css_register_subchannel(struct subchannel *sch)
{
        int ret;

        /* Initialize the subchannel structure */
        sch->dev.parent = &channel_subsystems[0]->device;
        sch->dev.bus = &css_bus_type;
        sch->dev.release = &css_subchannel_release;
        sch->dev.groups = default_subch_attr_groups;
        /*
         * We don't want to generate uevents for I/O subchannels that don't
         * have a working ccw device behind them since they will be
         * unregistered before they can be used anyway, so we delay the add
         * uevent until after device recognition was successful.
         * Note that we suppress the uevent for all subchannel types;
         * the subchannel driver can decide itself when it wants to inform
         * userspace of its existence.
         */
        dev_set_uevent_suppress(&sch->dev, 1);
        css_update_ssd_info(sch);
        /* make it known to the system */
        ret = css_sch_device_register(sch);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
                              sch->schid.ssid, sch->schid.sch_no, ret);
                return ret;
        }
        if (!sch->driver) {
                /*
                 * No driver matched. Generate the uevent now so that
                 * a fitting driver module may be loaded based on the
                 * modalias.
                 */
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        }
        return ret;
}

int css_probe_device(struct subchannel_id schid)
{
        int ret;
        struct subchannel *sch;

        sch = css_alloc_subchannel(schid);
        if (IS_ERR(sch))
                return PTR_ERR(sch);
        ret = css_register_subchannel(sch);
        if (ret)
                css_free_subchannel(sch);
        return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
        struct subchannel *sch;
        struct subchannel_id *schid = data;

        sch = to_subchannel(dev);
        return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
        struct device *dev;

        dev = bus_find_device(&css_bus_type, NULL,
                              &schid, check_subchannel);

        return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
                return 0;
        if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
                return 0;
        return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
        struct schib schib;

        if (!slow) {
                /* Will be done on the slow path. */
                return -EAGAIN;
        }
        if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
                /* Unusable - ignore. */
                return 0;
        }
        CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
                         "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

        return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
        int ret = 0;

        if (sch->driver) {
                if (sch->driver->sch_event)
                        ret = sch->driver->sch_event(sch, slow);
                else
                        dev_dbg(&sch->dev,
                                "Got subchannel machine check but "
                                "no sch_event handler provided.\n");
        }
        return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
        struct subchannel *sch;
        int ret;

        sch = get_subchannel_by_schid(schid);
        if (sch) {
                ret = css_evaluate_known_subchannel(sch, slow);
                put_device(&sch->dev);
        } else
                ret = css_evaluate_new_subchannel(schid, slow);
        if (ret == -EAGAIN)
                css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
        spin_lock_init(&slow_subchannel_lock);
        slow_subchannel_set = idset_sch_new();
        if (!slow_subchannel_set) {
                CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
                return -ENOMEM;
        }
        return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
        int eval;
        int rc;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, sch->schid);
        idset_sch_del(slow_subchannel_set, sch->schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_known_subchannel(sch, 1);
                if (rc == -EAGAIN)
                        css_schedule_eval(sch->schid);
        }
        return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
        int eval;
        int rc = 0;

        spin_lock_irq(&slow_subchannel_lock);
        eval = idset_sch_contains(slow_subchannel_set, schid);
        idset_sch_del(slow_subchannel_set, schid);
        spin_unlock_irq(&slow_subchannel_lock);
        if (eval) {
                rc = css_evaluate_new_subchannel(schid, 1);
                switch (rc) {
                case -EAGAIN:
                        css_schedule_eval(schid);
                        rc = 0;
                        break;
                case -ENXIO:
                case -ENOMEM:
                case -EIO:
                        /* These should abort looping */
                        break;
                default:
                        rc = 0;
                }
        }
        return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
        CIO_TRACE_EVENT(4, "slowpath");
        for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_sch_add(slow_subchannel_set, schid);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
        unsigned long flags;

        spin_lock_irqsave(&slow_subchannel_lock, flags);
        idset_fill(slow_subchannel_set);
        queue_work(slow_path_wq, &slow_path_work);
        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_wait_for_slow_path(void)
{
        flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
        int ret;

        CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
                      schid.ssid, schid.sch_no);
        if (need_reprobe)
                return -EAGAIN;

        ret = css_probe_device(schid);
        switch (ret) {
        case 0:
                break;
        case -ENXIO:
        case -ENOMEM:
        case -EIO:
                /* These should abort looping */
                break;
        default:
                ret = 0;
        }

        return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
        /* Make sure initial subchannel scan is done. */
        wait_event(ccw_device_init_wq,
                   atomic_read(&ccw_device_init_count) == 0);
        if (need_reprobe)
                css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
        int ret;

        CIO_MSG_EVENT(4, "reprobe start\n");

        /* Make sure initial subchannel scan is done. */
        if (atomic_read(&ccw_device_init_count) != 0) {
                queue_work(ccw_device_work, &reprobe_idle_work);
                return;
        }
        need_reprobe = 0;
        ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

        CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
                      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
        need_reprobe = 1;
        queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
        struct subchannel_id mchk_schid;

        if (overflow) {
                css_schedule_eval_all();
                return;
        }
        CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);
        if (crw1)
                CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
                              "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                              crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
                              crw1->anc, crw1->erc, crw1->rsid);
        init_subchannel_id(&mchk_schid);
        mchk_schid.sch_no = crw0->rsid;
        if (crw1)
                mchk_schid.ssid = (crw1->rsid >> 8) & 3;

        /*
         * Since we are always presented with IPI in the CRW, we have to
         * use stsch() to find out if the subchannel in question has come
         * or gone.
         */
        css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
        struct subchannel *sch;
        int ret;

        if (cio_is_console(schid))
                sch = cio_get_console_subchannel();
        else {
                sch = css_alloc_subchannel(schid);
                if (IS_ERR(sch))
                        ret = PTR_ERR(sch);
                else
                        ret = 0;
                switch (ret) {
                case 0:
                        break;
                case -ENOMEM:
                        panic("Out of memory in init_channel_subsystem\n");
                /* -ENXIO: no more subchannels. */
                case -ENXIO:
                        return ret;
                /* -EIO: this subchannel set not supported. */
                case -EIO:
                        return ret;
                default:
                        return 0;
                }
        }
        /*
         * We register ALL valid subchannels in ioinfo, even those
         * that have been present before init_channel_subsystem.
         * These subchannels can't have been registered yet (kmalloc
         * not working) so we do it now. This is true e.g. for the
         * console subchannel.
         */
        css_register_subchannel(sch);
        return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
        if (css_general_characteristics.mcss) {
                css->global_pgid.pgid_high.ext_cssid.version = 0x80;
                css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
        } else {
#ifdef CONFIG_SMP
                css->global_pgid.pgid_high.cpu_addr = stap();
#else
                css->global_pgid.pgid_high.cpu_addr = 0;
#endif
        }
        css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
        css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
        css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
        struct channel_subsystem *css;

        css = to_css(dev);
        mutex_destroy(&css->mutex);
        if (css->pseudo_subchannel) {
                /* Implies that it has been generated but never registered. */
                css_subchannel_release(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
        }
        kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
                   char *buf)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;

        if (!css)
                return 0;
        mutex_lock(&css->mutex);
        ret = sprintf(buf, "%x\n", css->cm_enabled);
        mutex_unlock(&css->mutex);
        return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
                    const char *buf, size_t count)
{
        struct channel_subsystem *css = to_css(dev);
        int ret;
        unsigned long val;

        ret = strict_strtoul(buf, 16, &val);
        if (ret)
                return ret;
        mutex_lock(&css->mutex);
        switch (val) {
        case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
        case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&css->mutex);
        return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
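/*
 * Sketch of the intended userspace interaction (path shown for
 * illustration; the css device is named "css%x" by setup_css() below):
 *
 *      echo 1 > /sys/devices/css0/cm_enable    (start measurements)
 *      echo 0 > /sys/devices/css0/cm_enable    (stop measurements)
 *
 * The value is parsed as a hexadecimal number; anything other than
 * 0 or 1 is rejected with -EINVAL.
 */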

static int __init setup_css(int nr)
{
        u32 tod_high;
        int ret;
        struct channel_subsystem *css;

        css = channel_subsystems[nr];
        memset(css, 0, sizeof(struct channel_subsystem));
        css->pseudo_subchannel =
                kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
        if (!css->pseudo_subchannel)
                return -ENOMEM;
        css->pseudo_subchannel->dev.parent = &css->device;
        css->pseudo_subchannel->dev.release = css_subchannel_release;
        dev_set_name(&css->pseudo_subchannel->dev, "defunct");
        ret = cio_create_sch_lock(css->pseudo_subchannel);
        if (ret) {
                kfree(css->pseudo_subchannel);
                return ret;
        }
        mutex_init(&css->mutex);
        css->valid = 1;
        css->cssid = nr;
        dev_set_name(&css->device, "css%x", nr);
        css->device.release = channel_subsystem_release;
        tod_high = (u32) (get_clock() >> 32);
        css_generate_pgid(css, tod_high);
        return 0;
}

static int css_reboot_event(struct notifier_block *this,
                            unsigned long event,
                            void *ptr)
{
        int ret, i;

        ret = NOTIFY_DONE;
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = channel_subsystems[i];
                mutex_lock(&css->mutex);
                if (css->cm_enabled)
                        if (chsc_secm(css, 0))
                                ret = NOTIFY_BAD;
                mutex_unlock(&css->mutex);
        }

        return ret;
}

static struct notifier_block css_reboot_notifier = {
        .notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        void *secm_area;
        int ret, i;

        switch (event) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 0, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
                ret = NOTIFY_DONE;
                for (i = 0; i <= __MAX_CSSID; i++) {
                        struct channel_subsystem *css;

                        css = channel_subsystems[i];
                        mutex_lock(&css->mutex);
                        if (!css->cm_enabled) {
                                mutex_unlock(&css->mutex);
                                continue;
                        }
                        secm_area = (void *)get_zeroed_page(GFP_KERNEL |
                                                            GFP_DMA);
                        if (secm_area) {
                                if (__chsc_do_secm(css, 1, secm_area))
                                        ret = NOTIFY_BAD;
                                free_page((unsigned long)secm_area);
                        } else
                                ret = NOTIFY_BAD;

                        mutex_unlock(&css->mutex);
                }
                /* search for subchannels, which appeared during hibernation */
                css_schedule_reprobe();
                break;
        default:
                ret = NOTIFY_DONE;
        }
        return ret;
}

static struct notifier_block css_power_notifier = {
        .notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
        int ret, i;

        ret = chsc_determine_css_characteristics();
        if (ret == -ENOMEM)
                goto out; /* No need to continue. */

        ret = chsc_alloc_sei_area();
        if (ret)
                goto out;

        ret = slow_subchannel_init();
        if (ret)
                goto out;

        ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
        if (ret)
                goto out;

        ret = bus_register(&css_bus_type);
        if (ret)
                goto out;

        /* Try to enable MSS. */
        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
        switch (ret) {
        case 0: /* Success. */
                max_ssid = __MAX_SSID;
                break;
        case -ENOMEM:
                goto out_bus;
        default:
                max_ssid = 0;
        }
        /* Setup css structure. */
        for (i = 0; i <= __MAX_CSSID; i++) {
                struct channel_subsystem *css;

                css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
                if (!css) {
                        ret = -ENOMEM;
                        goto out_unregister;
                }
                channel_subsystems[i] = css;
                ret = setup_css(i);
                if (ret) {
                        kfree(channel_subsystems[i]);
                        goto out_unregister;
                }
                ret = device_register(&css->device);
                if (ret) {
                        put_device(&css->device);
                        goto out_unregister;
                }
                if (css_chsc_characteristics.secm) {
                        ret = device_create_file(&css->device,
                                                 &dev_attr_cm_enable);
                        if (ret)
                                goto out_device;
                }
                ret = device_register(&css->pseudo_subchannel->dev);
                if (ret)
                        goto out_file;
        }
        ret = register_reboot_notifier(&css_reboot_notifier);
        if (ret)
                goto out_unregister;
        ret = register_pm_notifier(&css_power_notifier);
        if (ret) {
                unregister_reboot_notifier(&css_reboot_notifier);
                goto out_unregister;
        }
        css_init_done = 1;

        /* Enable default isc for I/O subchannels. */
        isc_register(IO_SCH_ISC);

        for_each_subchannel(__init_channel_subsystem, NULL);
        return 0;
out_file:
        if (css_chsc_characteristics.secm)
                device_remove_file(&channel_subsystems[i]->device,
                                   &dev_attr_cm_enable);
out_device:
        device_unregister(&channel_subsystems[i]->device);
out_unregister:
        while (i > 0) {
                struct channel_subsystem *css;

                i--;
                css = channel_subsystems[i];
                device_unregister(&css->pseudo_subchannel->dev);
                css->pseudo_subchannel = NULL;
                if (css_chsc_characteristics.secm)
                        device_remove_file(&css->device,
                                           &dev_attr_cm_enable);
                device_unregister(&css->device);
        }
out_bus:
        bus_unregister(&css_bus_type);
out:
        crw_unregister_handler(CRW_RSC_SCH);
        chsc_free_sei_area();
        idset_free(slow_subchannel_set);
        pr_alert("The CSS device driver initialization failed with "
                 "errno=%d\n", ret);
        return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
        return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *driver = to_cssdriver(drv);
        struct css_device_id *id;

        for (id = driver->subchannel_type; id->match_flags; id++) {
                if (sch->st == id->type)
                        return 1;
        }

        return 0;
}
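/*
 * Illustrative only: a css driver declares the subchannel types it
 * handles in a css_device_id table, which css_bus_match() walks,
 * comparing sch->st against each entry until a zero match_flags entry
 * terminates the list. A hypothetical driver for I/O subchannels
 * could use:
 *
 *      static struct css_device_id example_ids[] = {
 *              { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *              { },    (end of list)
 *      };
 */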

static int css_probe(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        sch->driver = to_cssdriver(dev->driver);
        ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
        if (ret)
                sch->driver = NULL;
        return ret;
}

static int css_remove(struct device *dev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(dev);
        ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
        sch->driver = NULL;
        return ret;
}

static void css_shutdown(struct device *dev)
{
        struct subchannel *sch;

        sch = to_subchannel(dev);
        if (sch->driver && sch->driver->shutdown)
                sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
        struct subchannel *sch = to_subchannel(dev);
        int ret;

        ret = add_uevent_var(env, "ST=%01X", sch->st);
        if (ret)
                return ret;
        ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
        return ret;
}
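/*
 * For a type-0 subchannel the uevent environment assembled above would
 * contain, for example:
 *
 *      ST=0
 *      MODALIAS=css:t0
 */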

static int css_pm_prepare(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (mutex_is_locked(&sch->reg_mutex))
                return -EAGAIN;
        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        /* Notify drivers that they may not register children. */
        return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return;
        drv = to_cssdriver(sch->dev.driver);
        if (drv->complete)
                drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
        struct subchannel *sch = to_subchannel(dev);
        struct css_driver *drv;

        if (!sch->dev.driver)
                return 0;
        drv = to_cssdriver(sch->dev.driver);
        return drv->restore ? drv->restore(sch) : 0;
}

static struct dev_pm_ops css_pm_ops = {
        .prepare = css_pm_prepare,
        .complete = css_pm_complete,
        .freeze = css_pm_freeze,
        .thaw = css_pm_thaw,
        .restore = css_pm_restore,
};

struct bus_type css_bus_type = {
        .name     = "css",
        .match    = css_bus_match,
        .probe    = css_probe,
        .remove   = css_remove,
        .shutdown = css_shutdown,
        .uevent   = css_uevent,
        .pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
        cdrv->drv.name = cdrv->name;
        cdrv->drv.bus = &css_bus_type;
        cdrv->drv.owner = cdrv->owner;
        return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
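/*
 * A minimal registration sketch (all "example_*" names are made up;
 * io_subchannel_driver in device.c is a real in-tree user):
 *
 *      static struct css_driver example_driver = {
 *              .owner = THIS_MODULE,
 *              .name = "example",
 *              .subchannel_type = example_ids,
 *              .probe = example_probe,
 *              .remove = example_remove,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              return css_driver_register(&example_driver);
 *      }
 */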

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
        driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);