/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/s390_ext.h>
#include <asm/types.h>

#include "sclp.h"
#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
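/* Editor's note (assumption about the SCLP architecture): the SCCB is passed
 * to the service call by real address and must not cross a page boundary,
 * which would explain why both buffers above are exactly one page in size and
 * page-aligned. */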
/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
        complete(&sclp_request_queue_flushed);
}

static struct sclp_req sclp_suspend_req;
/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
        sclp_init_state_uninitialized,
        sclp_init_state_initializing,
        sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running,
        sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;
/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
        sclp_suspend_state_running,
        sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;
/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
        int cc;

        asm volatile(
                "	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "	ipm	%0\n"
                "	srl	%0,28"
                : "=&d" (cc) : "d" (command), "a" (__pa(sccb))
                : "cc", "memory");
        if (cc == 3)
                return -EIO;
        if (cc == 2)
                return -EBUSY;
        return 0;
}
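/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a caller passes an SCLP command word plus a page-aligned SCCB and may
 * retry while the interface reports busy, e.g.
 *
 *	do {
 *		rc = sclp_service_call(SCLP_CMDW_READ_EVENT_DATA, sccb);
 *	} while (rc == -EBUSY);
 *
 * This driver does not spin like this; it retries asynchronously via
 * sclp_request_timer (see __sclp_start_request() below).
 */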
/* Queue a read event data request if none is pending. Called while sclp_lock
 * is locked. */
static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}
/* Set up request retry timer. Called while sclp_lock is locked. */
static void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                         unsigned long data)
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = function;
        sclp_request_timer.data = data;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}
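/*
 * Editor's note: all callers in this file arm the timer with a timeout
 * handler, e.g.
 *
 *	__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
 *				 sclp_request_timeout, 0);
 *
 * where data == 0 merely restarts the queue and data == 1 additionally
 * breaks off a request that appears stuck at the SCLP (see
 * sclp_request_timeout() below).
 */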
/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (data) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout, 1);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                if (!req->sccb)
                        goto do_post;
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort already submitted request - could still
                         * be active at the SCLP */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout, 0);
                        break;
                }
do_post:
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}
static int __sclp_can_add_request(struct sclp_req *req)
{
        if (req == &sclp_suspend_req || req == &sclp_init_req)
                return 1;
        if (sclp_suspend_state != sclp_suspend_state_running)
                return 0;
        if (sclp_init_state != sclp_init_state_initialized)
                return 0;
        if (sclp_activation_state != sclp_activation_state_active)
                return 0;
        return 1;
}
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                if (!req->sccb) {
                        list_del(&req->list);
                        rc = -ENODATA;
                        goto out;
                }
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);
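/*
 * Usage sketch (editor's illustration; my_cb and my_sccb are hypothetical
 * names, not part of this file): a caller embeds a struct sclp_req, points it
 * at an SCCB and a completion callback, then queues it:
 *
 *	static void my_cb(struct sclp_req *req, void *data) { ... }
 *
 *	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
 *	req->sccb = my_sccb;
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_cb;
 *	rc = sclp_add_request(req);
 *
 * The callback is invoked without sclp_lock held and may run from interrupt
 * context (see sclp_interrupt_handler() below).
 */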
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                /* Check for malformed hardware response */
                if (evbuf->length == 0)
                        break;
                /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -ENOSYS;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
                                             sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}
/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(unsigned int ext_int_code,
                                   unsigned int param32, unsigned long param64)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
        return (u64) (jiffies / HZ) << 32;
}
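/* Editor's note: TOD bit 31 increments every 2^20 microseconds
 * (~1.048576 s), so "(jiffies / HZ) << 32" overstates the interval by roughly
 * 5%. For example, with HZ == 100, 3000 jiffies map to 30 << 32 TOD ticks,
 * about 31.5 seconds of wall time. That is acceptable here because the result
 * only bounds the busy-wait timeout in sclp_sync_wait(). */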
/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
        int irq_context;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_clock() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
        cr0_sync &= 0xffff00a0;
        cr0_sync |= 0x00000200;
        __ctl_load(cr0_sync, 0, 0);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    get_clock() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);
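/* Editor's note (assumption about the CR0 external-subclass layout): the mask
 * 0xffff00a0 clears the external-interruption subclass bits for, among
 * others, the clock comparator and CPU timer, while 0x00000200 sets bit 22,
 * the service-signal subclass. Together with the stosm above, only SCLP
 * service-signal interrupts can be delivered while the loop spins. */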
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->send_mask & sclp_receive_mask;
                        send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg != NULL);
}
struct sclp_statechangebuf {
        struct evbuf_header	header;
        u8		validity_sclp_active_facility_mask : 1;
        u8		validity_sclp_receive_mask : 1;
        u8		validity_sclp_send_mask : 1;
        u8		validity_read_data_function_mask : 1;
        u16		_zeros : 12;
        u16		mask_length;
        u64		sclp_active_facility_mask;
        sccb_mask_t	sclp_receive_mask;
        sccb_mask_t	sclp_send_mask;
        u32		read_data_function_mask;
} __attribute__((packed));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        scbuf = (struct sclp_statechangebuf *) evbuf;
        if (scbuf->mask_length != sizeof(sccb_mask_t))
                return;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = scbuf->sclp_receive_mask;
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (scbuf->validity_sclp_active_facility_mask)
                sclp_facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};
/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        reg->pm_event_posted = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);
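/*
 * Usage sketch (editor's illustration; my_receiver and my_listener are
 * hypothetical names, not part of this file): an event driver declares a
 * struct sclp_register with the event mask it wants and a receiver callback:
 *
 *	static void my_receiver(struct evbuf_header *evbuf) { ... }
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn  = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 *
 * Registration fails with -EBUSY if another listener already claims an
 * overlapping event mask.
 */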
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);
/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        /* Event buffer is processed - remove it */
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);
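/*
 * Editor's note: processed event buffers (flag 0x80 set) are compacted out of
 * the SCCB in place by the memcpy() above. For example, with buffers A|B|C
 * and B processed, the SCCB becomes A|C and sccb->length shrinks by B's
 * length; the return value counts the buffers still awaiting processing.
 */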
struct init_sccb {
        struct sccb_header header;
        u16 _reserved;
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        sccb_mask_t sclp_receive_mask;
        sccb_mask_t sclp_send_mask;
} __attribute__((packed));
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb->sclp_receive_mask;
                                sclp_send_mask = sccb->sclp_send_mask;
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
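/* Editor's note: sclp_init_mask() busy-waits via sclp_sync_wait() rather than
 * sleeping, an assumption consistent with its callers in this file: it is
 * reached from early boot and registration paths where scheduling may not yet
 * be possible. */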
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
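/* Editor's note: sclp_deactivate() and sclp_reactivate() are exported as a
 * pair. Callers in this file, such as the reboot notifier and the suspend
 * path (sclp_freeze() / sclp_undo_suspend() below), bracket periods during
 * which no event traffic may reach the registered listeners. */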
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(unsigned int ext_int_code,
                               unsigned int param32, unsigned long param64)
{
        u32 finished_sccb;

        kstat_cpu(smp_processor_id()).irqs[EXTINT_SCP]++;
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
                      finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}
/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Prepare init mask command */
        rc = register_early_external_interrupt(0x2401, sclp_check_handler,
                                               &ext_int_info_hwc);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout, 0);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_set_bit(0, 9);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                ctl_clear_bit(0, 9);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        rc = 0;
                        break;
                } else
                        rc = -EBUSY;
        }
        unregister_early_external_interrupt(0x2401, sclp_check_handler,
                                            &ext_int_info_hwc);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};
/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
        struct sclp_register *reg;
        unsigned long flags;

        if (!rollback) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list)
                        reg->pm_event_posted = 0;
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        do {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list) {
                        if (rollback && reg->pm_event_posted)
                                goto found;
                        if (!rollback && !reg->pm_event_posted)
                                goto found;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
found:
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg->pm_event_fn)
                        reg->pm_event_fn(reg, sclp_pm_event);
                reg->pm_event_posted = rollback ? 0 : 1;
        } while (1);
}
/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
        unsigned long flags;
        int rc;

        sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_suspended;
        spin_unlock_irqrestore(&sclp_lock, flags);

        /* Init suspend data */
        memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
        sclp_suspend_req.callback = sclp_suspend_req_cb;
        sclp_suspend_req.status = SCLP_REQ_FILLED;
        init_completion(&sclp_request_queue_flushed);

        rc = sclp_add_request(&sclp_suspend_req);
        if (rc == 0)
                wait_for_completion(&sclp_request_queue_flushed);
        else if (rc != -ENODATA)
                goto fail_thaw;

        rc = sclp_deactivate();
        if (rc)
                goto fail_thaw;
        return 0;

fail_thaw:
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
        return rc;
}
static int sclp_undo_suspend(enum sclp_pm_event event)
{
        unsigned long flags;
        int rc;

        rc = sclp_reactivate();
        if (rc)
                return rc;

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);

        sclp_pm_event(event, 0);
        return 0;
}

static int sclp_thaw(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}
static const struct dev_pm_ops sclp_pm_ops = {
        .freeze		= sclp_freeze,
        .thaw		= sclp_thaw,
        .restore	= sclp_restore,
};

static struct platform_driver sclp_pdrv = {
        .driver = {
                .name	= "sclp",
                .owner	= THIS_MODULE,
                .pm	= &sclp_pm_ops,
        },
};

static struct platform_device *sclp_pdev;
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized)
                goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        init_timer(&sclp_request_timer);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
                                               &ext_int_info_hwc);
        if (rc)
                goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        ctl_set_bit(0, 9);
        return 0;

fail_unregister_reboot_notifier:
        unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
        sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
                             unsigned long event, void *data)
{
        if (sclp_suspend_state == sclp_suspend_state_suspended)
                sclp_undo_suspend(SCLP_PM_EVENT_THAW);
        return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
        .notifier_call = sclp_panic_notify,
        .priority = SCLP_PANIC_PRIO,
};
static __init int sclp_initcall(void)
{
        int rc;

        rc = platform_driver_register(&sclp_pdrv);
        if (rc)
                return rc;
        sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
        rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
        if (rc)
                goto fail_platform_driver_unregister;
        rc = atomic_notifier_chain_register(&panic_notifier_list,
                                            &sclp_on_panic_nb);
        if (rc)
                goto fail_platform_device_unregister;

        return sclp_init();

fail_platform_device_unregister:
        platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
        platform_driver_unregister(&sclp_pdrv);
        return rc;
}

arch_initcall(sclp_initcall);
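/* Editor's note: sclp_register() calls sclp_init() itself, so event drivers
 * that register before this initcall runs still work. sclp_initcall() mainly
 * adds the platform device used by the power-management callbacks and hooks
 * up the panic notifier. */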