// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}
/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}
/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
		__state = SLSB_P_OUTPUT_EMPTY;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* merge PENDING into EMPTY: */
		if (merge_pending &&
		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
		    __state == SLSB_P_OUTPUT_EMPTY)
			continue;

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}
/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}
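/*
 * Example (sketch, not part of the driver): the SLSB handshake from the
 * program side. Buffer ownership alternates between program (SLSB_P_*)
 * and adapter (SLSB_CU_*) states via the helpers above:
 *
 *	unsigned char state;
 *
 *	if (get_buf_state(q, bufnr, &state, 0) > 0 &&
 *	    state == SLSB_P_INPUT_PRIMED) {
 *		// buffer was filled by the adapter; process it, then
 *		// hand it back:
 *		set_buf_state(q, bufnr, SLSB_CU_INPUT_EMPTY);
 *	}
 */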
/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}
static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}
#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.ack_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.ack_count);
	q->u.in.ack_count = 0;
}
static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	int pos;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	pos = ilog2(count);
	q->q_stats.nr_sbals[pos]++;
}
static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}
static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	int new;

	if (auto_ack) {
		if (!q->u.in.ack_count) {
			q->u.in.ack_count = count;
			q->u.in.ack_start = start;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = start;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(start, count - 1);
	set_buf_state(q, new, SLSB_P_INPUT_ACK);

	/* delete the previous ACKs */
	if (q->u.in.ack_count)
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);

	q->u.in.ack_count = 1;
	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
}
static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	if (!count)
		return 0;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start)
{
	int count;

	count = get_inbound_buffer_frontier(q, start);

	if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
		q->u.in.timestamp = get_tod_clock();

	return count;
}
static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start);
		return 1;
	} else
		return 0;
}
static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		}
		b = next_buf(b);
	}
}
static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
					int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		WARN_ON_ONCE(phys_aob & 0xFF);
	}

	q->sbal_state[bufnr].flags = 0;
	return phys_aob;
}
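/*
 * Note: the WARN_ON_ONCE(phys_aob & 0xFF) above reflects the architectural
 * requirement that an asynchronous operation block is 256-byte aligned;
 * qdio_allocate_aob() is expected to return suitably aligned memory.
 */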
static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
			      unsigned int count)
{
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->qdio_error = 0;
}
static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->tasklet);
		return 0;
	}
	return -EPERM;
}
static void __qdio_inbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_inbound);

	count = qdio_inbound_q_moved(q, start);
	if (count == 0)
		return;

	qdio_kick_handler(q, start, count);
	start = add_buf(start, count);
	q->first_to_check = start;

	if (!qdio_inbound_q_done(q, start)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (!qdio_tasklet_schedule(q))
			return;
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q, start)) {
		qperf_inc(q, tasklet_inbound_resched2);
		qdio_tasklet_schedule(q);
	}
}
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
	case SLSB_P_OUTPUT_PENDING:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			"out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}
static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start)
{
	int count;

	count = get_outbound_buffer_frontier(q, start);

	if (count) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		if (q->u.out.use_cq)
			qdio_handle_aobs(q, start, count);
	}

	return count;
}
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
				unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, count, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}
static void __qdio_outbound_processing(struct qdio_q *q)
{
	unsigned int start = q->first_to_check;
	int count;

	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	count = qdio_outbound_q_moved(q, start);
	if (count) {
		q->first_to_check = add_buf(start, count);
		qdio_kick_handler(q, start, count);
	}

	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
	    !qdio_outbound_q_done(q))
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}
/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}
void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}
static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq) || !irq->scan_threshold)
		return;

	for_each_output_queue(irq, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}
void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/* The interrupt could be caused by a PCI request: */
	qdio_check_outbound_pci_queues(q->irq_ptr);

	__qdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}
static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}
/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	if (irq_ptr->irq_poll) {
		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
			irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
		else
			QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
	} else {
		for_each_input_queue(irq_ptr, q, i)
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}
static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}
static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}
/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}
/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
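/*
 * Example (sketch): a driver querying the subchannel characteristics
 * before setting up its queues; "ssqd" is local to the caller:
 *
 *	struct qdio_ssqd_desc ssqd;
 *
 *	rc = qdio_get_ssqd_desc(cdev, &ssqd);
 *	if (!rc)
 *		... inspect ssqd.qfmt ...
 */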
static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}
/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_device(irq_ptr);
	qdio_shutdown_queues(irq_ptr);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);
	qdio_shutdown_irq(irq_ptr);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_free_async_data(irq_ptr);
	qdio_free_queues(irq_ptr);
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);
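/*
 * Example (sketch): the expected teardown order for a qdio driver.
 * qdio_shutdown() quiesces the subchannel, qdio_free() then releases
 * everything set up by qdio_allocate():
 *
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */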
/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input Queues
 * @no_output_qs: allocate this number of Output Queues
 */
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	INIT_LIST_HEAD(&irq_ptr->entry);
	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (multicast_outbound(q))
				continue;
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				break;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}
static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_HEX(irq, data->adapter_name, 8, DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}
/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 */
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		qdio_shutdown_thinint(irq_ptr);
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_device(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
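/*
 * Example (sketch): the canonical bring-up sequence, as used by qdio
 * drivers like qeth and zfcp; "init_data" is a driver-filled
 * struct qdio_initialize and error handling is elided:
 *
 *	rc = qdio_allocate(cdev, init_data.no_input_qs,
 *			   init_data.no_output_qs);
 *	if (!rc)
 *		rc = qdio_establish(cdev, &init_data);
 *	if (!rc)
 *		rc = qdio_activate(cdev);
 */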
/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any ACKed SBALs are returned to HW, adjust ACK tracking: */
	overlap = min(count - sub_buf(q->u.in.ack_start, bufnr),
		      q->u.in.ack_count);
	if (overlap > 0) {
		q->u.in.ack_start = add_buf(q->u.in.ack_start, overlap);
		q->u.in.ack_count -= overlap;
	}

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}
/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   unsigned int bufnr, unsigned int count)
{
	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		if (q->u.out.use_cq && count == 1)
			phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	/* Let drivers implement their own completion scanning: */
	if (!scan_threshold)
		return rc;

	/* in case of SIGA errors we must process the error immediately */
	if (used >= scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
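/*
 * Example (sketch): typical do_QDIO() usage from a driver. Buffer numbers
 * and counts are hypothetical:
 *
 *	// return 16 emptied input buffers to the adapter:
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 16);
 *
 *	// submit one filled output buffer and request a PCI interrupt:
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, 1);
 */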
/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
				unsigned int *error)
{
	unsigned int start = q->first_to_check;
	int count;

	count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
				qdio_outbound_q_moved(q, start);
	if (count == 0)
		return 0;

	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_check = add_buf(start, count);
	q->qdio_error = 0;

	return count;
}
int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
		       unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	if (!irq_ptr)
		return -ENODEV;
	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);
/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);
/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);
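/*
 * Example (sketch): the polling discipline built around qdio_start_irq(),
 * qdio_get_next_buffers() and qdio_stop_irq(). With interrupts disabled
 * (either by the irq_poll callback or by qdio_stop_irq()), a driver polls
 * until the queue is empty, then re-arms interrupts; process_buffers() is
 * a hypothetical driver function:
 *
 *	do {
 *		while ((n = qdio_get_next_buffers(cdev, 0, &bufnr,
 *						  &error)) > 0)
 *			process_buffers(bufnr, n, error);
 *	} while (qdio_start_irq(cdev) == 1);	// 1: new data raced in
 */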
static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;
	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);