/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/s390_ext.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER             "sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

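/* Suspend request callback: signal that the request queue has been flushed. */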
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
        complete(&sclp_request_queue_flushed);
}

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
        sclp_init_state_uninitialized,
        sclp_init_state_initializing,
        sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running,
        sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
        sclp_suspend_state_running,
        sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY         3
#define SCLP_MASK_RETRY         3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL      10
#define SCLP_RETRY_INTERVAL     30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
        int cc;

        asm volatile(
                "       .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
                : "cc", "memory");
        if (cc == 3)
                return -EIO;
        if (cc == 2)
                return -EBUSY;
        return 0;
}

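/* Queue a read event data request if none is pending. Called while sclp_lock
 * is locked. */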
static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                         unsigned long data)
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = function;
        sclp_request_timer.data = data;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (data) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout, 1);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                if (!req->sccb)
                        goto do_post;
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort already submitted request - could still
                         * be active at the SCLP */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout, 0);
                        break;
                }
do_post:
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

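/* Check whether a new request may be added to the queue in the current
 * driver state. Called while sclp_lock is locked. */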
static int __sclp_can_add_request(struct sclp_req *req)
{
        if (req == &sclp_suspend_req || req == &sclp_init_req)
                return 1;
        if (sclp_suspend_state != sclp_suspend_state_running)
                return 0;
        if (sclp_init_state != sclp_init_state_initialized)
                return 0;
        if (sclp_activation_state != sclp_activation_state_active)
                return 0;
        return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                if (!req->sccb) {
                        list_del(&req->list);
                        rc = -ENODATA;
                        goto out;
                }
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                /* Check for malformed hardware response */
                if (evbuf->length == 0)
                        break;
                /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -ENOSYS;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(unsigned int ext_int_code,
                                   unsigned int param32, unsigned long param64)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
        return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
        int irq_context;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_clock() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
        cr0_sync &= 0xffff00a0;
        cr0_sync |= 0x00000200;
        __ctl_load(cr0_sync, 0, 0);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    get_clock() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->send_mask & sclp_receive_mask;
                        send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg);
}

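/* Layout of a state change event buffer as reported by the SCLP. */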
struct sclp_statechangebuf {
        struct evbuf_header     header;
        u8              validity_sclp_active_facility_mask : 1;
        u8              validity_sclp_receive_mask : 1;
        u8              validity_sclp_send_mask : 1;
        u8              validity_read_data_function_mask : 1;
        u16             _zeros : 12;
        u16             mask_length;
        u64             sclp_active_facility_mask;
        sccb_mask_t     sclp_receive_mask;
        sccb_mask_t     sclp_send_mask;
        u32             read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        scbuf = (struct sclp_statechangebuf *) evbuf;
        if (scbuf->mask_length != sizeof(sccb_mask_t))
                return;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = scbuf->sclp_receive_mask;
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (scbuf->validity_sclp_active_facility_mask)
                sclp_facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        reg->pm_event_posted = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

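/* SCCB layout for the write event mask (init mask) request. */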
struct init_sccb {
        struct sccb_header header;
        u16 _reserved;
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        sccb_mask_t sclp_receive_mask;
        sccb_mask_t sclp_send_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb->sclp_receive_mask;
                                sclp_send_mask = sccb->sclp_send_mask;
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(unsigned int ext_int_code,
                               unsigned int param32, unsigned long param64)
{
        u32 finished_sccb;

        kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
                      finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Prepare init mask command */
        rc = register_early_external_interrupt(0x2401, sclp_check_handler,
                                               &ext_int_info_hwc);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout, 0);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_set_bit(0, 9);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_clear_bit(0, 9);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        rc = 0;
                        break;
                } else
                        rc = -EBUSY;
        }
        unregister_early_external_interrupt(0x2401, sclp_check_handler,
                                            &ext_int_info_hwc);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

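/* Post a power management event to all registered listeners. On rollback,
 * only listeners that already received the event are notified. */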
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
        struct sclp_register *reg;
        unsigned long flags;

        if (!rollback) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list)
                        reg->pm_event_posted = 0;
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        do {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list) {
                        if (rollback && reg->pm_event_posted)
                                goto found;
                        if (!rollback && !reg->pm_event_posted)
                                goto found;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
found:
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg->pm_event_fn)
                        reg->pm_event_fn(reg, sclp_pm_event);
                reg->pm_event_posted = rollback ? 0 : 1;
        } while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

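/* Freeze callback: notify listeners, flush the request queue and deactivate
 * the SCLP interface. */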
static int sclp_freeze(struct device *dev)
{
        unsigned long flags;
        int rc;

        sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_suspended;
        spin_unlock_irqrestore(&sclp_lock, flags);

        /* Init suspend data */
        memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
        sclp_suspend_req.callback = sclp_suspend_req_cb;
        sclp_suspend_req.status = SCLP_REQ_FILLED;
        init_completion(&sclp_request_queue_flushed);

        rc = sclp_add_request(&sclp_suspend_req);
        if (rc == 0)
                wait_for_completion(&sclp_request_queue_flushed);
        else if (rc != -ENODATA)
                goto fail_thaw;

        rc = sclp_deactivate();
        if (rc)
                goto fail_thaw;
        return 0;

fail_thaw:
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
        return rc;
}

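/* Reactivate the SCLP interface and forward the resume event to all
 * registered listeners. */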
static int sclp_undo_suspend(enum sclp_pm_event event)
{
        unsigned long flags;
        int rc;

        rc = sclp_reactivate();
        if (rc)
                return rc;

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);

        sclp_pm_event(event, 0);
        return 0;
}

static int sclp_thaw(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
        .freeze         = sclp_freeze,
        .thaw           = sclp_thaw,
        .restore        = sclp_restore,
};

static struct platform_driver sclp_pdrv = {
        .driver = {
                .name   = "sclp",
                .owner  = THIS_MODULE,
                .pm     = &sclp_pm_ops,
        },
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized)
                goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        init_timer(&sclp_request_timer);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                               &ext_int_info_hwc);
        if (rc)
                goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        ctl_set_bit(0, 9);
        sclp_init_mask(1);
        return 0;

fail_unregister_reboot_notifier:
        unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
        sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
                             unsigned long event, void *data)
{
        if (sclp_suspend_state == sclp_suspend_state_suspended)
                sclp_undo_suspend(SCLP_PM_EVENT_THAW);
        return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
        .notifier_call = sclp_panic_notify,
        .priority = SCLP_PANIC_PRIO,
};

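/* Register platform driver, platform device and panic notifier, then
 * initialize the SCLP driver. */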
static __init int sclp_initcall(void)
{
        int rc;

        rc = platform_driver_register(&sclp_pdrv);
        if (rc)
                return rc;
        sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
        rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
        if (rc)
                goto fail_platform_driver_unregister;
        rc = atomic_notifier_chain_register(&panic_notifier_list,
                                            &sclp_on_panic_nb);
        if (rc)
                goto fail_platform_device_unregister;

        return sclp_init();

fail_platform_device_unregister:
        platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
        platform_driver_unregister(&sclp_pdrv);
        return rc;
}

arch_initcall(sclp_initcall);