/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	      "Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask),
		  "+d" (__aob)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* no buffer processed */
	if (ccq == 97)
		return 1;
	/* not all buffers processed */
	if (ccq == 96)
		return 2;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		BUG_ON(tmp_count == count);
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Retries until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		WARN_ON(tmp_count);
		return count - tmp_count;
	}

	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}
/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
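/*
 * Example (sketch, not from the original source): set_buf_states() wraps
 * at the end of the 128-entry SLSB. Starting at buffer 120 with a count
 * of 16 touches buffers 120..127 and then 0..7:
 *
 *	set_buf_states(q, 120, SLSB_P_INPUT_NOT_INIT, 16);
 *
 * because next_buf() masks the index with QDIO_MAX_BUFFERS_MASK (127).
 */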
/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		(aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}
static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}
static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}
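/*
 * Illustration (sketch, not from the original source) of the resulting
 * SLSB layout on a non-QEBSM queue after inbound_primed() has seen
 * three primed buffers at first_to_check == 5:
 *
 *	buffer 5: SLSB_P_INPUT_NOT_INIT  (returned to the adapter)
 *	buffer 6: SLSB_P_INPUT_NOT_INIT
 *	buffer 7: SLSB_P_INPUT_ACK       (newest buffer, ack_start == 7)
 *
 * Only the newest buffer keeps the ACK; it is removed again either by
 * qdio_stop_polling() or by the next inbound run.
 */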
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock_fast();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or thin interrupt
	 * already synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}
static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}
static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}
static inline int contains_aobs(struct qdio_q *q)
{
	return !q->is_input_q && q->u.out.use_cq;
}
static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				  int i, struct qaob *aob)
{
	int tmp;

	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
			(unsigned long) virt_to_phys(aob));
	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
			(unsigned long) aob->res0[0]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
			(unsigned long) aob->res0[1]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
			(unsigned long) aob->res0[2]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
			(unsigned long) aob->res0[3]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
			(unsigned long) aob->res0[4]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
			(unsigned long) aob->res0[5]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
				(unsigned long) aob->sba[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
				(unsigned long) q->sbal[i]->element[tmp].addr);
		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
				q->sbal[i]->element[tmp].length);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
	for (tmp = 0; tmp < 2; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
			(unsigned long) aob->res4[tmp]);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	if (!contains_aobs(q))
		return;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		} else if (state == SLSB_P_OUTPUT_EMPTY) {
			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].aob = NULL;
		}
		b = next_buf(b);
	}
}
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		BUG_ON(q->sbal_state == NULL);
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		BUG_ON(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}
static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		BUG();
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}

out:
	return q->first_to_check;
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}
static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
	 * happen. Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
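/*
 * Example (sketch, error handling abbreviated): drivers typically fetch
 * the SSQD once after setting the device online and key off the queue
 * format, e.g.:
 *
 *	struct qdio_ssqd_desc ssqd;
 *	int rc;
 *
 *	rc = qdio_get_ssqd_desc(cdev, &ssqd);
 *	if (rc)
 *		return rc;
 *	is_hsi = (ssqd.qfmt == QDIO_IQDIO_QFMT);
 *
 * ssqd.qfmt and QDIO_IQDIO_QFMT come from asm/qdio.h; is_hsi is a
 * hypothetical local variable.
 */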
static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
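/*
 * Example (sketch): teardown mirrors bring-up in reverse. A driver
 * first shuts the subchannel down, then releases the qdio data
 * structures; both calls tolerate an already-inactive device:
 *
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */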
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
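/*
 * Example (sketch; card, my_input_handler and my_output_handler are
 * hypothetical driver names): the caller fills a struct qdio_initialize
 * before allocating, e.g.:
 *
 *	struct qdio_initialize init_data = {};
 *
 *	init_data.cdev = card->cdev;
 *	init_data.no_input_qs = 1;
 *	init_data.no_output_qs = 1;
 *	init_data.input_handler = my_input_handler;
 *	init_data.output_handler = my_output_handler;
 *	init_data.int_parm = (unsigned long) card;
 *	init_data.input_sbal_addr_array = card->in_sbals;
 *	init_data.output_sbal_addr_array = card->out_sbals;
 *
 *	rc = qdio_allocate(&init_data);
 *
 * The sbal address arrays must point to pre-allocated SBALs, which is
 * exactly what the checks at the top of this function enforce.
 */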
static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				break;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
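/*
 * Example (sketch, error handling abbreviated): allocate, establish and
 * activate are called in this fixed order during device bring-up:
 *
 *	rc = qdio_allocate(&init_data);
 *	if (!rc)
 *		rc = qdio_establish(&init_data);
 *	if (!rc)
 *		rc = qdio_activate(cdev);
 *	if (rc)
 *		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *
 * Only after qdio_activate() returns 0 may buffers be exchanged with
 * do_QDIO().
 */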
static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
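/*
 * Worked example (not from the original source): with start = 120 and
 * count = 16, add_buf() yields end = 8, i.e. end < start, so the
 * wrap-around branch applies: buffers 120..127 and 0..7 are "in
 * between", and e.g. bufnr = 2 returns 1 while bufnr = 50 returns 0.
 */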
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);

	return rc;
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
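/*
 * Example (sketch; card and the buffer bookkeeping are hypothetical):
 * an upper-layer driver hands a filled outbound buffer to the hardware
 * and returns emptied inbound buffers the same way.
 *
 * Send buffer bufnr on output queue 0 and request a PCI interrupt:
 *
 *	rc = do_QDIO(card->cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, 1);
 *
 * After processing, give 16 input buffers back to the adapter:
 *
 *	rc = do_QDIO(card->cdev, QDIO_FLAG_SYNC_INPUT, 0, first, 16);
 */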
/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	clear_nonshared_ind(irq_ptr);
	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
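/*
 * Example (sketch): together with qdio_start_irq() and
 * qdio_get_next_buffers() this forms the NAPI-style polling protocol;
 * the loop below is hypothetical driver code. Interrupts for queue 0
 * are assumed disabled, e.g. by the queue_start_poll callback or an
 * earlier qdio_stop_irq():
 *
 *	do {
 *		n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *		if (n > 0)
 *			... process buffers bufnr .. bufnr + n - 1 ...
 *	} while (n > 0);
 *
 *	if (qdio_start_irq(cdev, 0) == 1)
 *		... re-enter the poll loop, new data raced in ...
 */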
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);