// SPDX-License-Identifier: GPL-2.0
/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 * @aob: asynchronous operation block
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int tmp_count = count, tmp_start = start, nr = q->nr;
	unsigned int ccq = 0;

	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or next buffer state different */
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
		return count - tmp_count;
	case 97:
		/* no buffer processed */
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}
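
/*
 * Usage sketch (assumed caller, not part of the original file): EQBS
 * reports how many consecutive buffers share one state, so a scan loop
 * can consume its result like this:
 *
 *	unsigned char state;
 *	int n = qdio_do_eqbs(q, &state, start, count, 0);
 *
 *	if (n > 0)
 *		handle the n buffers starting at 'start', all in 'state'
 */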

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;

	if (!count)
		return 0;
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);

	switch (ccq) {
	case 0:
	case 32:
		/* all done, or active buffer adapter-owned */
		WARN_ON_ONCE(tmp_count);
		return count - tmp_count;
	case 96:
		/* not all buffers processed */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	default:
		DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr,
			   q->first_to_check, count, q->irq_ptr->int_parm);
		return 0;
	}
}

/*
 * Returns number of examined buffers and their common state in *state.
 * Requested number of buffers-to-examine must be > 0.
 */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i = 1;

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	/* get initial state: */
	__state = q->slsb.val[bufnr];

	/* Bail out early if there is no work on the queue: */
	if (__state & SLSB_OWNER_CU)
		goto out;

	for (; i < count; i++) {
		bufnr = next_buf(bufnr);

		/* stop if next state differs from initial state: */
		if (q->slsb.val[bufnr] != __state)
			break;
	}

out:
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	/* Ensure that all preceding changes to the SBALs are visible: */
	mb();

	for (i = 0; i < count; i++) {
		WRITE_ONCE(q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}

	/* Make our SLSB changes visible: */
	mb();

	return count;
}
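
/*
 * Worked example (illustrative, not from the original file): with
 * QDIO_MAX_BUFFERS_PER_Q == 128, set_buf_states(q, 126, state, 4) marks
 * buffers 126, 127, 0 and 1 - next_buf() wraps the index modulo the
 * queue size, which is what makes the helper wrap-around safe.
 */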

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int count,
			    unsigned int *busy_bit, unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
		if (count > 1)
			fc = QDIO_SIGA_WRITEM;
		else if (aob)
			fc = QDIO_SIGA_WRITEQ;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		retries++;

		if (!start_time) {
			start_time = get_tod_clock_fast();
			goto again;
		}
		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q->irq_ptr))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_state(q, bufnr, state, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.batch_count)
		return;

	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
		       q->u.in.batch_count);
	q->u.in.batch_count = 0;
}

static inline void account_sbals(struct qdio_q *q, unsigned int count)
{
	q->q_stats.nr_sbal_total += count;
	q->q_stats.nr_sbals[ilog2(count)]++;
}

static void process_buffer_error(struct qdio_q *q, unsigned int start,
				 int count)
{
	/* special handling for no target buffer empty */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q &&
	    q->sbal[start]->element[15].sflags == 0x10) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", start, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[start]->element[14].sflags,
		  q->sbal[start]->element[15].sflags);
}

static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
				       int count, bool auto_ack)
{
	/* ACK the newest SBAL: */
	if (!auto_ack)
		set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);

	if (!q->u.in.batch_count)
		q->u.in.batch_start = start;
	q->u.in.batch_count += count;
}
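
/*
 * Illustrative example (not from the original file): if SBALs 10..13
 * complete and then 14..15 complete before the driver returns them,
 * inbound_handle_work() leaves batch_start = 10 and batch_count = 6;
 * qdio_stop_polling() later resets exactly those six SLSB entries to
 * SLSB_P_INPUT_NOT_INIT.
 */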

static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start,
				       unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	/*
	 * No siga-sync here, as a PCI interrupt or the thin-interrupt
	 * handler has already synced the queues.
	 */
	count = get_buf_states(q, start, &state, count, 1);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr,
			      count);

		inbound_handle_work(q, start, count, is_qebsm(q));
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_INPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr,
			      count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		inbound_handle_work(q, start, count, false);
		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_INPUT_EMPTY:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
			      q->nr, start);
		return 0;
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, start, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	return 1;
}

static inline int qdio_tasklet_schedule(struct qdio_q *q)
{
	if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
		tasklet_schedule(&q->u.out.tasklet);
		return 0;
	}
	return -EPERM;
}

static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start,
					unsigned int *error)
{
	unsigned char state = 0;
	int count;

	q->timestamp = get_tod_clock_fast();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q->irq_ptr)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	count = atomic_read(&q->nr_buf_used);
	if (!count)
		return 0;

	count = get_buf_states(q, start, &state, count, 0);
	if (!count)
		return 0;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		*error = QDIO_ERROR_SLSB_PENDING;
		fallthrough;
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			      "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		return count;
	case SLSB_P_OUTPUT_ERROR:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x",
			      q->nr, count);

		*error = QDIO_ERROR_SLSB_STATE;
		process_buffer_error(q, start, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		return count;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		return 0;
	case SLSB_P_OUTPUT_HALTED:
		return 0;
	case SLSB_P_OUTPUT_NOT_INIT:
		/* We should never see this state, throw a WARN: */
	default:
		dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
			      "found state %#x at index %u on queue %u\n",
			      state, start, q->nr);
		return 0;
	}
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
				unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, count, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

void qdio_outbound_tasklet(struct tasklet_struct *t)
{
	struct qdio_output_q *out_q = from_tasklet(out_q, t, tasklet);
	struct qdio_q *q = container_of(out_q, struct qdio_q, u.out);
	unsigned int start = q->first_to_check;
	unsigned int error = 0;
	int count;

	qperf_inc(q, tasklet_outbound);
	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);

	count = get_outbound_buffer_frontier(q, start, &error);
	if (count) {
		q->first_to_check = add_buf(start, count);

		if (q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE) {
			qperf_inc(q, outbound_handler);
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
				      start, count);

			q->handler(q->irq_ptr->cdev, error, q->nr, start,
				   count, q->irq_ptr->int_parm);
		}
	}

	if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) &&
	    !qdio_outbound_q_done(q))
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer_sync(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	qdio_tasklet_schedule(q);
}

void qdio_outbound_timer(struct timer_list *t)
{
	struct qdio_q *q = from_timer(q, t, u.out.timer);

	qdio_tasklet_schedule(q);
}

static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(irq) || !irq->scan_threshold)
		return;

	for_each_output_queue(irq, out, i)
		if (!qdio_outbound_q_done(out))
			qdio_tasklet_schedule(out);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	qdio_deliver_irq(irq_ptr);
	irq_ptr->last_data_irq_time = S390_lowcore.int_clock;

	if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		qdio_tasklet_schedule(q);
	}
}

static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_check, 0, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen.
	 * Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
				      int dstat)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		ccw_device_get_schid(cdev, &schid);
		DBF_ERROR("qint:%4x", schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		wake_up(&cdev->private->wait_q);
		return;
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(irq_ptr, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(irq_ptr, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	struct subchannel_id schid;

	if (!cdev || !cdev->private)
		return -EINVAL;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("get ssqd:%4x", schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
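
/*
 * Usage sketch (hypothetical caller, not part of the original file): a
 * driver can query the subchannel's QDIO capabilities before it sets up
 * its queues. The function name example_check_ssqd() is an assumption.
 */
#if 0	/* example only */
static int example_check_ssqd(struct ccw_device *cdev)
{
	struct qdio_ssqd_desc ssqd;
	int rc;

	rc = qdio_get_ssqd_desc(cdev, &ssqd);
	if (rc)
		return rc;
	/* inspect the descriptor (e.g. queue format) before qdio_establish() */
	return 0;
}
#endif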

static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_output_queue(irq_ptr, q, i) {
		del_timer_sync(&q->u.out.timer);
		tasklet_kill(&q->u.out.tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	WARN_ON_ONCE(irqs_disabled());
	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qshutdown:%4x", schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	qdio_shutdown_queues(irq_ptr);
	qdio_shutdown_debug_entries(irq_ptr);

	/* cleanup subchannel */
	spin_lock_irq(get_ccwdev_lock(cdev));
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
					 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
					 irq_ptr->state == QDIO_IRQ_STATE_ERR,
					 10 * HZ);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);
	qdio_shutdown_irq(irq_ptr);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;

	if (!irq_ptr)
		return -ENODEV;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qfree:%4x", schid.sch_no);
	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
	mutex_lock(&irq_ptr->setup_mutex);

	irq_ptr->debug_area = NULL;
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_free_queues(irq_ptr);
	free_page((unsigned long) irq_ptr->qdr);
	free_page(irq_ptr->chsc_page);
	free_page((unsigned long) irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @cdev: associated ccw device
 * @no_input_qs: allocate this number of Input Queues
 * @no_output_qs: allocate this number of Output Queues
 */
int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
		  unsigned int no_output_qs)
{
	struct subchannel_id schid;
	struct qdio_irq *irq_ptr;
	int rc = -ENOMEM;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qallocate:%4x", schid.sch_no);

	if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
	    no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		return -ENOMEM;

	irq_ptr->cdev = cdev;
	mutex_init(&irq_ptr->setup_mutex);
	if (qdio_allocate_dbf(irq_ptr))
		goto err_dbf;

	DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
		      no_output_qs);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto err_chsc;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto err_qdr;

	rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs);
	if (rc)
		goto err_queues;

	cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;

err_queues:
	free_page((unsigned long) irq_ptr->qdr);
err_qdr:
	free_page(irq_ptr->chsc_page);
err_chsc:
err_dbf:
	free_page((unsigned long) irq_ptr);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
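
/*
 * Lifecycle sketch (illustrative, not part of the original file): the
 * typical call sequence for bringing a qdio subchannel up and down is
 *
 *	qdio_allocate(cdev, 1, 1);
 *	qdio_establish(cdev, &init_data);
 *	qdio_activate(cdev);
 *	...
 *	do_QDIO(...);			// buffer processing
 *	...
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */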

static void qdio_trace_init_data(struct qdio_irq *irq,
				 struct qdio_initialize *data)
{
	DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
	DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
	DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
	DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
		      data->no_output_qs);
	DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
	DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
	DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
		    DBF_ERR);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @cdev: associated ccw device
 * @init_data: initialization data
 */
int qdio_establish(struct ccw_device *cdev,
		   struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qestablish:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	if (init_data->no_input_qs > irq_ptr->max_input_qs ||
	    init_data->no_output_qs > irq_ptr->max_output_qs)
		return -EINVAL;

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if (!init_data->input_sbal_addr_array ||
	    !init_data->output_sbal_addr_array)
		return -EINVAL;

	if (!init_data->irq_poll)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_trace_init_data(irq_ptr, init_data);
	qdio_setup_irq(irq_ptr, init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		qdio_shutdown_thinint(irq_ptr);
		qdio_shutdown_irq(irq_ptr);
		mutex_unlock(&irq_ptr->setup_mutex);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr);
	qdio_setup_debug_entries(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);
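
/*
 * Sketch of the initialization data (illustrative; the my_* names are
 * assumptions). As checked above, qdio_establish() rejects the request
 * unless queue counts, both handlers, both SBAL address arrays and the
 * irq_poll callback are provided:
 */
#if 0	/* example only */
	struct qdio_initialize init_data = {
		.q_format		= QDIO_QETH_QFMT,
		.no_input_qs		= 1,
		.no_output_qs		= 1,
		.input_handler		= my_input_handler,
		.output_handler		= my_output_handler,
		.irq_poll		= my_irq_poll,
		.int_parm		= (unsigned long) my_card,
		.input_sbal_addr_array	= my_in_sbals,
		.output_sbal_addr_array	= my_out_sbals,
	};

	rc = qdio_establish(cdev, &init_data);
#endif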

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct subchannel_id schid;
	int rc;

	ccw_device_get_schid(cdev, &schid);
	DBF_EVENT("qactivate:%4x", schid.sch_no);

	if (!irq_ptr)
		return -ENODEV;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irq(get_ccwdev_lock(cdev));
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	spin_unlock_irq(get_ccwdev_lock(cdev));
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
		goto out;
	}

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int overlap;

	qperf_inc(q, inbound_call);

	/* If any processed SBALs are returned to HW, adjust our tracking: */
	overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
			q->u.in.batch_count);
	if (overlap > 0) {
		q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
		q->u.in.batch_count -= overlap;
	}

	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
	atomic_add(count, &q->nr_buf_used);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 * @aob: asynchronous operation block
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   unsigned int bufnr, unsigned int count,
			   struct qaob *aob)
{
	const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;

		WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
		rc = qdio_kick_outbound_q(q, count, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else if (count < QDIO_MAX_BUFFERS_PER_Q &&
		   get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
		   state == SLSB_CU_OUTPUT_PRIMED) {
		/* The previous buffer is not processed yet, tack on. */
		qperf_inc(q, fast_requeue);
	} else {
		rc = qdio_kick_outbound_q(q, count, 0);
	}

	/* Let drivers implement their own completion scanning: */
	if (!scan_threshold)
		return rc;

	/* in case of SIGA errors we must process the error immediately */
	if (used >= scan_threshold || rc)
		qdio_tasklet_schedule(q);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer) &&
		    likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE))
			mod_timer(&q->u.out.timer, jiffies + HZ);

	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 * @aob: asynchronous operation block (outbound only)
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count, aob);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
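
/*
 * Usage sketch (hypothetical driver, not part of the original file):
 * returning emptied input buffers and submitting filled output buffers.
 */
#if 0	/* example only */
	/* give 16 processed input buffers back to the adapter: */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, first_bufnr, 16, NULL);

	/* submit one filled output buffer, without async completion: */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, 1, NULL);
#endif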

/**
 * qdio_start_irq - enable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	unsigned int i;

	if (!irq_ptr)
		return -ENODEV;

	for_each_input_queue(irq_ptr, q, i)
		qdio_stop_polling(q);

	clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;

	for_each_input_queue(irq_ptr, q, i) {
		if (!qdio_inbound_q_done(q, q->first_to_check))
			goto rescan;
	}

	return 0;

rescan:
	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_start_irq);
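
/*
 * Polling sketch (illustrative, not part of the original file): a driver
 * typically drains its queues and only leaves polling mode once
 * qdio_start_irq() stops reporting new data:
 */
#if 0	/* example only */
	do {
		while (qdio_get_next_buffers(cdev, 0, &bufnr, &error) > 0)
			;	/* process the reported buffers here */
	} while (qdio_start_irq(cdev) == 1);	/* 1: new data, keep polling */
#endif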

static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
				unsigned int *error)
{
	unsigned int start = q->first_to_check;
	int count;

	*error = 0;
	count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) :
				get_outbound_buffer_frontier(q, start, error);
	if (count == 0)
		return 0;

	*bufnr = start;

	/* for the next time */
	q->first_to_check = add_buf(start, count);

	return count;
}

int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
		       unsigned int *bufnr, unsigned int *error)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	if (!irq_ptr)
		return -ENODEV;
	q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL_GPL(qdio_inspect_queue);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	qdio_check_outbound_pci_queues(irq_ptr);

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	return __qdio_inspect_queue(q, bufnr, error);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
		/* already disabled */
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = qdio_thinint_init();
	if (rc)
		goto out_cache;

	return 0;

out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	qdio_thinint_exit();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);