2 * Copyright(c) 2015 - 2018 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/spinlock.h>
49 #include <linux/seqlock.h>
50 #include <linux/netdevice.h>
51 #include <linux/moduleparam.h>
52 #include <linux/bitops.h>
53 #include <linux/timer.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
64 /* must be a power of 2, >= 64 and <= 32768 */
65 #define SDMA_DESCQ_CNT 2048
66 #define SDMA_DESC_INTR 64
67 #define INVALID_TAIL 0xffff
69 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
70 module_param(sdma_descq_cnt, uint, S_IRUGO);
71 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
73 static uint sdma_idle_cnt = 250;
74 module_param(sdma_idle_cnt, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
78 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
79 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
81 static uint sdma_desct_intr = SDMA_DESC_INTR;
82 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
83 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before an interrupt");
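/*
 * Because desct_intr is created with S_IWUSR it can be retuned at
 * runtime without reloading the driver; a sketch, assuming the
 * standard module_param sysfs path for the hfi1 module:
 *
 *	echo 128 > /sys/module/hfi1/parameters/desct_intr
 */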
85 #define SDMA_WAIT_BATCH_SIZE 20
86 /* max wait time for an SDMA engine to indicate it has halted */
87 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
88 /* all SDMA engine errors that cause a halt */
90 #define SD(name) SEND_DMA_##name
91 #define ALL_SDMA_ENG_HALT_ERRS \
92 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
93 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
94 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
95 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
107 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
108 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
109 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
111 /* sdma_sendctrl operations */
112 #define SDMA_SENDCTRL_OP_ENABLE BIT(0)
113 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
114 #define SDMA_SENDCTRL_OP_HALT BIT(2)
115 #define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
117 /* handle long defines */
118 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
119 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
120 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
121 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
123 static const char * const sdma_state_names[] = {
124 [sdma_state_s00_hw_down] = "s00_HwDown",
125 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
126 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
127 [sdma_state_s20_idle] = "s20_Idle",
128 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
129 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
130 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
131 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
132 [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
133 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
134 [sdma_state_s99_running] = "s99_Running",
137 #ifdef CONFIG_SDMA_VERBOSITY
138 static const char * const sdma_event_names[] = {
139 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
140 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
141 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
142 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
143 [sdma_event_e30_go_running] = "e30_GoRunning",
144 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
145 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
146 [sdma_event_e60_hw_halted] = "e60_HwHalted",
147 [sdma_event_e70_go_idle] = "e70_GoIdle",
148 [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
149 [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
150 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
151 [sdma_event_e85_link_down] = "e85_LinkDown",
152 [sdma_event_e90_sw_halted] = "e90_SwHalted",
156 static const struct sdma_set_state_action sdma_action_table[] = {
157 [sdma_state_s00_hw_down] = {
158 .go_s99_running_tofalse = 1,
164 [sdma_state_s10_hw_start_up_halt_wait] = {
170 [sdma_state_s15_hw_start_up_clean_wait] = {
176 [sdma_state_s20_idle] = {
182 [sdma_state_s30_sw_clean_up_wait] = {
188 [sdma_state_s40_hw_clean_up_wait] = {
194 [sdma_state_s50_hw_halt_wait] = {
200 [sdma_state_s60_idle_halt_wait] = {
201 .go_s99_running_tofalse = 1,
207 [sdma_state_s80_hw_freeze] = {
213 [sdma_state_s82_freeze_sw_clean] = {
219 [sdma_state_s99_running] = {
224 .go_s99_running_totrue = 1,
228 #define SDMA_TAIL_UPDATE_THRESH 0x1F
230 /* declare all statics here rather than keep sorting */
231 static void sdma_complete(struct kref *);
232 static void sdma_finalput(struct sdma_state *);
233 static void sdma_get(struct sdma_state *);
234 static void sdma_hw_clean_up_task(unsigned long);
235 static void sdma_put(struct sdma_state *);
236 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
237 static void sdma_start_hw_clean_up(struct sdma_engine *);
238 static void sdma_sw_clean_up_task(unsigned long);
239 static void sdma_sendctrl(struct sdma_engine *, unsigned);
240 static void init_sdma_regs(struct sdma_engine *, u32, uint);
241 static void sdma_process_event(
242 struct sdma_engine *sde,
243 enum sdma_events event);
244 static void __sdma_process_event(
245 struct sdma_engine *sde,
246 enum sdma_events event);
247 static void dump_sdma_state(struct sdma_engine *sde);
248 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
249 static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
250 static void sdma_flush_descq(struct sdma_engine *sde);
253 * sdma_state_name() - return state string from enum
256 static const char *sdma_state_name(enum sdma_states state)
258 return sdma_state_names[state];
261 static void sdma_get(struct sdma_state *ss)
266 static void sdma_complete(struct kref *kref)
268 struct sdma_state *ss =
269 container_of(kref, struct sdma_state, kref);
274 static void sdma_put(struct sdma_state *ss)
276 kref_put(&ss->kref, sdma_complete);
279 static void sdma_finalput(struct sdma_state *ss)
282 wait_for_completion(&ss->comp);
285 static inline void write_sde_csr(
286 struct sdma_engine *sde,
290 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
293 static inline u64 read_sde_csr(
294 struct sdma_engine *sde,
297 return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
301 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
302 * sdma engine 'sde' to drop to 0.
304 static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
307 u64 off = 8 * sde->this_idx;
308 struct hfi1_devdata *dd = sde->dd;
315 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
317 reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
318 reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
321 /* counter is reset if occupancy count changes */
325 /* timed out - bounce the link */
326 dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
327 __func__, sde->this_idx, (u32)reg);
328 queue_work(dd->pport->link_wq,
329 &dd->pport->link_bounce_work);
337 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
338 * and pause for credit return.
340 void sdma_wait(struct hfi1_devdata *dd)
344 for (i = 0; i < dd->num_sdma; i++) {
345 struct sdma_engine *sde = &dd->per_sdma[i];
347 sdma_wait_for_packet_egress(sde, 0);
351 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
355 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
356 return;
357 reg = cnt;
358 reg &= SD(DESC_CNT_CNT_MASK);
359 reg <<= SD(DESC_CNT_CNT_SHIFT);
360 write_sde_csr(sde, SD(DESC_CNT), reg);
363 static inline void complete_tx(struct sdma_engine *sde,
364 struct sdma_txreq *tx,
367 /* protect against complete modifying */
368 struct iowait *wait = tx->wait;
369 callback_t complete = tx->complete;
371 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
372 trace_hfi1_sdma_out_sn(sde, tx->sn);
373 if (WARN_ON_ONCE(sde->head_sn != tx->sn))
374 dd_dev_err(sde->dd, "expected %llu got %llu\n",
375 sde->head_sn, tx->sn);
378 __sdma_txclean(sde->dd, tx);
380 (*complete)(tx, res);
381 if (iowait_sdma_dec(wait))
382 iowait_drain_wakeup(wait);
386 * Complete all the sdma requests with an SDMA_TXREQ_S_ABORTED status
388 * Depending on timing there can be txreqs in two places:
389 * - in the descq ring
390 * - in the flush list
392 * To avoid ordering issues the descq ring needs to be flushed
393 * first followed by the flush list.
395 * This routine is called from two places
396 * - From a work queue item
397 * - Directly from the state machine just before setting the
400 * Must be called with head_lock held
403 static void sdma_flush(struct sdma_engine *sde)
405 struct sdma_txreq *txp, *txp_next;
406 LIST_HEAD(flushlist);
410 /* flush from head to tail */
411 sdma_flush_descq(sde);
412 spin_lock_irqsave(&sde->flushlist_lock, flags);
413 /* copy flush list */
414 list_splice_init(&sde->flushlist, &flushlist);
415 spin_unlock_irqrestore(&sde->flushlist_lock, flags);
416 /* flush from flush list */
417 list_for_each_entry_safe(txp, txp_next, &flushlist, list)
418 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
419 /* wakeup QPs orphaned on the dmawait list */
421 struct iowait *w, *nw;
423 seq = read_seqbegin(&sde->waitlock);
424 if (!list_empty(&sde->dmawait)) {
425 write_seqlock(&sde->waitlock);
426 list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
428 w->wakeup(w, SDMA_AVAIL_REASON);
429 list_del_init(&w->list);
432 write_sequnlock(&sde->waitlock);
434 } while (read_seqretry(&sde->waitlock, seq));
438 * Fields a work request for flushing the descq ring
441 * If the engine has been brought to running during
442 * the scheduling delay, the flush is ignored, assuming
443 * that the process of bringing the engine to running
444 * would have done this flush prior to going to running.
447 static void sdma_field_flush(struct work_struct *work)
450 struct sdma_engine *sde =
451 container_of(work, struct sdma_engine, flush_worker);
453 write_seqlock_irqsave(&sde->head_lock, flags);
454 if (!__sdma_running(sde))
455 sdma_flush(sde);
456 write_sequnlock_irqrestore(&sde->head_lock, flags);
459 static void sdma_err_halt_wait(struct work_struct *work)
461 struct sdma_engine *sde = container_of(work, struct sdma_engine,
464 unsigned long timeout;
466 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
468 statuscsr = read_sde_csr(sde, SD(STATUS));
469 statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
472 if (time_after(jiffies, timeout)) {
474 "SDMA engine %d - timeout waiting for engine to halt\n",
477 * Continue anyway. This could happen if there was
478 * an uncorrectable error in the wrong spot.
482 usleep_range(80, 120);
485 sdma_process_event(sde, sdma_event_e15_hw_halt_done);
488 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
490 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
492 struct hfi1_devdata *dd = sde->dd;
494 for (index = 0; index < dd->num_sdma; index++) {
495 struct sdma_engine *curr_sdma = &dd->per_sdma[index];
497 if (curr_sdma != sde)
498 curr_sdma->progress_check_head =
499 curr_sdma->descq_head;
502 "SDMA engine %d - check scheduled\n",
504 mod_timer(&sde->err_progress_check_timer, jiffies + 10);
508 static void sdma_err_progress_check(struct timer_list *t)
511 struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
513 dd_dev_err(sde->dd, "SDE progress check event\n");
514 for (index = 0; index < sde->dd->num_sdma; index++) {
515 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
518 /* check progress on each engine except the current one */
522 * We must lock interrupts when acquiring sde->lock, to avoid a
523 * deadlock if an interrupt triggers and spins on the same lock
524 * on the same CPU.
526 spin_lock_irqsave(&curr_sde->tail_lock, flags);
527 write_seqlock(&curr_sde->head_lock);
529 /* skip non-running queues */
530 if (curr_sde->state.current_state != sdma_state_s99_running) {
531 write_sequnlock(&curr_sde->head_lock);
532 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
536 if ((curr_sde->descq_head != curr_sde->descq_tail) &&
537 (curr_sde->descq_head ==
538 curr_sde->progress_check_head))
539 __sdma_process_event(curr_sde,
540 sdma_event_e90_sw_halted);
541 write_sequnlock(&curr_sde->head_lock);
542 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
544 schedule_work(&sde->err_halt_worker);
547 static void sdma_hw_clean_up_task(unsigned long opaque)
549 struct sdma_engine *sde = (struct sdma_engine *)opaque;
553 #ifdef CONFIG_SDMA_VERBOSITY
554 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
555 sde->this_idx, slashstrip(__FILE__), __LINE__,
558 statuscsr = read_sde_csr(sde, SD(STATUS));
559 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
565 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
568 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
570 return sde->tx_ring[sde->tx_head & sde->sdma_mask];
574 * flush ring for recovery
576 static void sdma_flush_descq(struct sdma_engine *sde)
580 struct sdma_txreq *txp = get_txhead(sde);
582 /* The reason for some of the complexity of this code is that
583 * not all descriptors have corresponding txps. So, we have to
584 * be able to skip over descs until we wander into the range of
585 * the next txp on the list.
587 head = sde->descq_head & sde->sdma_mask;
588 tail = sde->descq_tail & sde->sdma_mask;
589 while (head != tail) {
590 /* advance head, wrap if needed */
591 head = ++sde->descq_head & sde->sdma_mask;
592 /* if now past this txp's descs, do the callback */
593 if (txp && txp->next_descq_idx == head) {
594 /* remove from list */
595 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
596 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
597 trace_hfi1_sdma_progress(sde, head, tail, txp);
598 txp = get_txhead(sde);
603 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
606 static void sdma_sw_clean_up_task(unsigned long opaque)
608 struct sdma_engine *sde = (struct sdma_engine *)opaque;
611 spin_lock_irqsave(&sde->tail_lock, flags);
612 write_seqlock(&sde->head_lock);
615 * At this point, the following should always be true:
616 * - We are halted, so no more descriptors are getting retired.
617 * - We are not running, so no one is submitting new work.
618 * - Only we can send the e40_sw_cleaned, so we can't start
619 * running again until we say so. So, the active list and
620 * descq are ours to play with.
624 * In the error clean up sequence, software clean must be called
625 * before the hardware clean so we can use the hardware head in
626 * the progress routine. A hardware clean or SPC unfreeze will
627 * reset the hardware head.
629 * Process all retired requests. The progress routine will use the
630 * latest physical hardware head - we are not running so speed does
633 sdma_make_progress(sde, 0);
638 * Reset our notion of head and tail.
639 * Note that the HW registers have been reset via an earlier
644 sde->desc_avail = sdma_descq_freecnt(sde);
647 __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
649 write_sequnlock(&sde->head_lock);
650 spin_unlock_irqrestore(&sde->tail_lock, flags);
653 static void sdma_sw_tear_down(struct sdma_engine *sde)
655 struct sdma_state *ss = &sde->state;
657 /* Releasing this reference means the state machine has stopped. */
660 /* stop waiting for all unfreeze events to complete */
661 atomic_set(&sde->dd->sdma_unfreeze_count, -1);
662 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
665 static void sdma_start_hw_clean_up(struct sdma_engine *sde)
667 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
670 static void sdma_set_state(struct sdma_engine *sde,
671 enum sdma_states next_state)
673 struct sdma_state *ss = &sde->state;
674 const struct sdma_set_state_action *action = sdma_action_table;
677 trace_hfi1_sdma_state(
679 sdma_state_names[ss->current_state],
680 sdma_state_names[next_state]);
682 /* debugging bookkeeping */
683 ss->previous_state = ss->current_state;
684 ss->previous_op = ss->current_op;
685 ss->current_state = next_state;
687 if (ss->previous_state != sdma_state_s99_running &&
688 next_state == sdma_state_s99_running)
691 if (action[next_state].op_enable)
692 op |= SDMA_SENDCTRL_OP_ENABLE;
694 if (action[next_state].op_intenable)
695 op |= SDMA_SENDCTRL_OP_INTENABLE;
697 if (action[next_state].op_halt)
698 op |= SDMA_SENDCTRL_OP_HALT;
700 if (action[next_state].op_cleanup)
701 op |= SDMA_SENDCTRL_OP_CLEANUP;
703 if (action[next_state].go_s99_running_tofalse)
704 ss->go_s99_running = 0;
706 if (action[next_state].go_s99_running_totrue)
707 ss->go_s99_running = 1;
710 sdma_sendctrl(sde, ss->current_op);
714 * sdma_get_descq_cnt() - called when device probed
716 * Return a validated descq count.
718 * This is currently only used in the verbs initialization to build the tx ring.
721 * This will probably be deleted in favor of a more scalable approach to modifying engine instances.
725 u16 sdma_get_descq_cnt(void)
727 u16 count = sdma_descq_cnt;
730 return SDMA_DESCQ_CNT;
731 /* count must be a power of 2 at least 64 and at most
732 * 32768. Otherwise return the default.
734 if (!is_power_of_2(count))
735 return SDMA_DESCQ_CNT;
736 if (count < 64 || count > 32768)
737 return SDMA_DESCQ_CNT;
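/*
 * Worked examples of the checks above (illustrative only):
 * 4096 is a power of 2 within [64, 32768] and is accepted;
 * 3000 is not a power of 2 and falls back to SDMA_DESCQ_CNT (2048);
 * 32 is below 64 and also falls back to the default.
 */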
742 * sdma_engine_get_vl() - return vl for a given sdma engine
745 * This function returns the vl mapped to a given engine, or an error if
746 * the mapping can't be found. The mapping fields are protected by RCU.
748 int sdma_engine_get_vl(struct sdma_engine *sde)
750 struct hfi1_devdata *dd = sde->dd;
751 struct sdma_vl_map *m;
754 if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
755 return -EINVAL;
757 rcu_read_lock();
758 m = rcu_dereference(dd->sdma_map);
763 vl = m->engine_to_vl[sde->this_idx];
770 * sdma_select_engine_vl() - select sdma engine
772 * @selector: a spreading factor
776 * This function returns an engine based on the selector and a vl. The
777 * mapping fields are protected by RCU.
779 struct sdma_engine *sdma_select_engine_vl(
780 struct hfi1_devdata *dd,
784 struct sdma_vl_map *m;
785 struct sdma_map_elem *e;
786 struct sdma_engine *rval;
788 /* NOTE: This should only happen if SC->VL changed after the initial
789 * checks on the QP/AH.
790 * The default will return engine 0 below.
798 m = rcu_dereference(dd->sdma_map);
801 return &dd->per_sdma[0];
803 e = m->map[vl & m->mask];
804 rval = e->sde[selector & e->mask];
808 rval = !rval ? &dd->per_sdma[0] : rval;
809 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
814 * sdma_select_engine_sc() - select sdma engine
816 * @selector: a spreading factor
820 * This function returns an engine based on the selector and an sc.
822 struct sdma_engine *sdma_select_engine_sc(
823 struct hfi1_devdata *dd,
827 u8 vl = sc_to_vlt(dd, sc5);
829 return sdma_select_engine_vl(dd, selector, vl);
832 struct sdma_rht_map_elem {
835 struct sdma_engine *sde[0];
838 struct sdma_rht_node {
839 unsigned long cpu_id;
840 struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
841 struct rhash_head node;
844 #define NR_CPUS_HINT 192
846 static const struct rhashtable_params sdma_rht_params = {
847 .nelem_hint = NR_CPUS_HINT,
848 .head_offset = offsetof(struct sdma_rht_node, node),
849 .key_offset = offsetof(struct sdma_rht_node, cpu_id),
850 .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
853 .automatic_shrinking = true,
857 * sdma_select_user_engine() - select sdma engine based on user setup
859 * @selector: a spreading factor
862 * This function returns an sdma engine for a user sdma request.
863 * A user-defined sdma engine affinity setting is honored when applicable;
864 * otherwise the system default sdma engine mapping is used. To ensure correct
865 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
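 *
 * A minimal usage sketch (hypothetical, not driver code): a process
 * that wants a stable engine pins itself to one CPU first, e.g.
 *
 *	taskset -c 5 ./sender
 *
 * so that current->nr_cpus_allowed == 1 and the rhashtable lookup
 * below keys off cpu_id 5.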
867 struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
870 struct sdma_rht_node *rht_node;
871 struct sdma_engine *sde = NULL;
872 unsigned long cpu_id;
875 * To ensure that the same sdma engine(s) are always selected,
876 * make sure the process is pinned to this CPU only.
878 if (current->nr_cpus_allowed != 1)
881 cpu_id = smp_processor_id();
883 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
886 if (rht_node && rht_node->map[vl]) {
887 struct sdma_rht_map_elem *map = rht_node->map[vl];
889 sde = map->sde[selector & map->mask];
897 return sdma_select_engine_vl(dd, selector, vl);
900 static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
904 for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
905 map->sde[map->ctr + i] = map->sde[i];
908 static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
909 struct sdma_engine *sde)
913 /* only need to check the first ctr entries for a match */
914 for (i = 0; i < map->ctr; i++) {
915 if (map->sde[i] == sde) {
916 memmove(&map->sde[i], &map->sde[i + 1],
917 (map->ctr - i - 1) * sizeof(map->sde[0]));
919 pow = roundup_pow_of_two(map->ctr ? : 1);
921 sdma_populate_sde_map(map);
928 * Prevents concurrent reads and writes of the sdma engine cpu_mask
930 static DEFINE_MUTEX(process_to_sde_mutex);
932 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
935 struct hfi1_devdata *dd = sde->dd;
936 cpumask_var_t mask, new_mask;
939 struct sdma_rht_node *rht_node;
941 vl = sdma_engine_get_vl(sde);
942 if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
945 ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
949 ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
951 free_cpumask_var(mask);
954 ret = cpulist_parse(buf, mask);
958 if (!cpumask_subset(mask, cpu_online_mask)) {
959 dd_dev_warn(sde->dd, "Invalid CPU mask\n");
964 sz = sizeof(struct sdma_rht_map_elem) +
965 (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
967 mutex_lock(&process_to_sde_mutex);
969 for_each_cpu(cpu, mask) {
970 /* Check if we have this already mapped */
971 if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
972 cpumask_set_cpu(cpu, new_mask);
976 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
979 rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
985 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
986 if (!rht_node->map[vl]) {
991 rht_node->cpu_id = cpu;
992 rht_node->map[vl]->mask = 0;
993 rht_node->map[vl]->ctr = 1;
994 rht_node->map[vl]->sde[0] = sde;
996 ret = rhashtable_insert_fast(dd->sdma_rht,
1000 kfree(rht_node->map[vl]);
1002 dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
1010 /* Add new user mappings */
1011 if (!rht_node->map[vl])
1012 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
1014 if (!rht_node->map[vl]) {
1019 rht_node->map[vl]->ctr++;
1020 ctr = rht_node->map[vl]->ctr;
1021 rht_node->map[vl]->sde[ctr - 1] = sde;
1022 pow = roundup_pow_of_two(ctr);
1023 rht_node->map[vl]->mask = pow - 1;
1025 /* Populate the sde map table */
1026 sdma_populate_sde_map(rht_node->map[vl]);
1028 cpumask_set_cpu(cpu, new_mask);
1031 /* Clean up old mappings */
1032 for_each_cpu(cpu, cpu_online_mask) {
1033 struct sdma_rht_node *rht_node;
1035 /* Don't cleanup sdes that are set in the new mask */
1036 if (cpumask_test_cpu(cpu, mask))
1039 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
1045 /* Remove mappings for old sde */
1046 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1047 if (rht_node->map[i])
1048 sdma_cleanup_sde_map(rht_node->map[i],
1051 /* Free empty hash table entries */
1052 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1053 if (!rht_node->map[i])
1056 if (rht_node->map[i]->ctr) {
1063 ret = rhashtable_remove_fast(dd->sdma_rht,
1068 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1069 kfree(rht_node->map[i]);
1076 cpumask_copy(&sde->cpu_mask, new_mask);
1078 mutex_unlock(&process_to_sde_mutex);
1080 free_cpumask_var(mask);
1081 free_cpumask_var(new_mask);
1082 return ret ? : strnlen(buf, PAGE_SIZE);
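/*
 * Usage sketch (the exact sysfs path is an assumption and depends on
 * how this attribute is wired up for each engine):
 *
 *	echo "0,2-3" > /sys/.../sdma3/cpu_list
 *
 * would map CPUs 0, 2 and 3 to engine 3 for that engine's VL.
 */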
1085 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
1087 mutex_lock(&process_to_sde_mutex);
1088 if (cpumask_empty(&sde->cpu_mask))
1089 snprintf(buf, PAGE_SIZE, "%s\n", "empty");
1091 cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
1092 mutex_unlock(&process_to_sde_mutex);
1093 return strnlen(buf, PAGE_SIZE);
1096 static void sdma_rht_free(void *ptr, void *arg)
1098 struct sdma_rht_node *rht_node = ptr;
1101 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1102 kfree(rht_node->map[i]);
1108 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
1113 * This routine dumps the process-to-sde mappings per cpu.
1115 void sdma_seqfile_dump_cpu_list(struct seq_file *s,
1116 struct hfi1_devdata *dd,
1117 unsigned long cpuid)
1119 struct sdma_rht_node *rht_node;
1122 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
1127 seq_printf(s, "cpu%3lu: ", cpuid);
1128 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1129 if (!rht_node->map[i] || !rht_node->map[i]->ctr)
1132 seq_printf(s, " vl%d: [", i);
1134 for (j = 0; j < rht_node->map[i]->ctr; j++) {
1135 if (!rht_node->map[i]->sde[j])
1141 seq_printf(s, " sdma%2d",
1142 rht_node->map[i]->sde[j]->this_idx);
1151 * Free the indicated map struct
1153 static void sdma_map_free(struct sdma_vl_map *m)
1157 for (i = 0; m && i < m->actual_vls; i++)
1163 * Handle RCU callback
1165 static void sdma_map_rcu_callback(struct rcu_head *list)
1167 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
1173 * sdma_map_init - called when # vls change
1175 * @port: port number
1176 * @num_vls: number of vls
1177 * @vl_engines: per vl engine mapping (optional)
1179 * This routine changes the mapping based on the number of vls.
1181 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
1182 * implies auto-computing the loading and giving each VL a uniform
1183 * distribution of engines per VL.
1185 * The auto algorithm computes the sde_per_vl and the number of extra
1186 * engines. Any extra engines are added from the last VL on down.
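 *
 * For example (a worked instance of the auto algorithm above): with
 * 16 engines and num_vls == 3, sde_per_vl == 5 and extra == 1, so
 * vl_engines ends up {5, 5, 6}; num_vls is then rounded up to 4 map
 * entries, with map[3] simply reusing map[0].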
1188 * rcu locking is used here to control access to the mapping fields.
1190 * If either the num_vls or num_sdma are non-power of 2, the array sizes
1191 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
1192 * up to the next highest power of 2 and the first entry is reused
1193 * in a round robin fashion.
1195 * If an error occurs, the map change is not done and the mapping is not changed.
1199 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
1202 int extra, sde_per_vl;
1204 u8 lvl_engines[OPA_MAX_VLS];
1205 struct sdma_vl_map *oldmap, *newmap;
1207 if (!(dd->flags & HFI1_HAS_SEND_DMA))
1211 /* truncate divide */
1212 sde_per_vl = dd->num_sdma / num_vls;
1214 extra = dd->num_sdma % num_vls;
1215 vl_engines = lvl_engines;
1216 /* add extras from last vl down */
1217 for (i = num_vls - 1; i >= 0; i--, extra--)
1218 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
1222 sizeof(struct sdma_vl_map) +
1223 roundup_pow_of_two(num_vls) *
1224 sizeof(struct sdma_map_elem *),
1228 newmap->actual_vls = num_vls;
1229 newmap->vls = roundup_pow_of_two(num_vls);
1230 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1231 /* initialize back-map */
1232 for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
1233 newmap->engine_to_vl[i] = -1;
1234 for (i = 0; i < newmap->vls; i++) {
1235 /* save for wrap around */
1236 int first_engine = engine;
1238 if (i < newmap->actual_vls) {
1239 int sz = roundup_pow_of_two(vl_engines[i]);
1241 /* only allocate once */
1242 newmap->map[i] = kzalloc(
1243 sizeof(struct sdma_map_elem) +
1244 sz * sizeof(struct sdma_engine *),
1246 if (!newmap->map[i])
1248 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1249 /* assign engines */
1250 for (j = 0; j < sz; j++) {
1251 newmap->map[i]->sde[j] =
1252 &dd->per_sdma[engine];
1253 if (++engine >= first_engine + vl_engines[i])
1254 /* wrap back to first engine */
1255 engine = first_engine;
1257 /* assign back-map */
1258 for (j = 0; j < vl_engines[i]; j++)
1259 newmap->engine_to_vl[first_engine + j] = i;
1261 /* just re-use entry without allocating */
1262 newmap->map[i] = newmap->map[i % num_vls];
1264 engine = first_engine + vl_engines[i];
1266 /* newmap in hand, save old map */
1267 spin_lock_irq(&dd->sde_map_lock);
1268 oldmap = rcu_dereference_protected(dd->sdma_map,
1269 lockdep_is_held(&dd->sde_map_lock));
1271 /* publish newmap */
1272 rcu_assign_pointer(dd->sdma_map, newmap);
1274 spin_unlock_irq(&dd->sde_map_lock);
1275 /* success, free any old map after grace period */
1277 call_rcu(&oldmap->list, sdma_map_rcu_callback);
1280 /* free any partial allocation */
1281 sdma_map_free(newmap);
1286 * sdma_clean() - clean up allocated memory
1287 * @dd: struct hfi1_devdata
1288 * @num_engines: num sdma engines
1290 * This routine can be called regardless of the success of sdma_init().
1293 void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
1296 struct sdma_engine *sde;
1298 if (dd->sdma_pad_dma) {
1299 dma_free_coherent(&dd->pcidev->dev, 4,
1300 (void *)dd->sdma_pad_dma,
1302 dd->sdma_pad_dma = NULL;
1303 dd->sdma_pad_phys = 0;
1305 if (dd->sdma_heads_dma) {
1306 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
1307 (void *)dd->sdma_heads_dma,
1308 dd->sdma_heads_phys);
1309 dd->sdma_heads_dma = NULL;
1310 dd->sdma_heads_phys = 0;
1312 for (i = 0; dd->per_sdma && i < num_engines; ++i) {
1313 sde = &dd->per_sdma[i];
1315 sde->head_dma = NULL;
1321 sde->descq_cnt * sizeof(u64[2]),
1326 sde->descq_phys = 0;
1328 kvfree(sde->tx_ring);
1329 sde->tx_ring = NULL;
1331 spin_lock_irq(&dd->sde_map_lock);
1332 sdma_map_free(rcu_access_pointer(dd->sdma_map));
1333 RCU_INIT_POINTER(dd->sdma_map, NULL);
1334 spin_unlock_irq(&dd->sde_map_lock);
1336 kfree(dd->per_sdma);
1337 dd->per_sdma = NULL;
1340 rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
1341 kfree(dd->sdma_rht);
1342 dd->sdma_rht = NULL;
1347 * sdma_init() - called when device probed
1349 * @port: port number (currently only zero)
1351 * Initializes each sde and its csrs.
1352 * Interrupts are not required to be enabled.
1355 * 0 - success, -errno on failure
1357 int sdma_init(struct hfi1_devdata *dd, u8 port)
1360 struct sdma_engine *sde;
1361 struct rhashtable *tmp_sdma_rht;
1364 struct hfi1_pportdata *ppd = dd->pport + port;
1365 u32 per_sdma_credits;
1366 uint idle_cnt = sdma_idle_cnt;
1367 size_t num_engines = chip_sdma_engines(dd);
1370 if (!HFI1_CAP_IS_KSET(SDMA)) {
1371 HFI1_CAP_CLEAR(SDMA_AHG);
1375 /* can't exceed chip support */
1376 mod_num_sdma <= chip_sdma_engines(dd) &&
1377 /* count must be >= vls */
1378 mod_num_sdma >= num_vls)
1379 num_engines = mod_num_sdma;
1381 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1382 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
1383 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1384 chip_sdma_mem_size(dd));
1387 chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
1389 /* set up freeze waitqueue */
1390 init_waitqueue_head(&dd->sdma_unfreeze_wq);
1391 atomic_set(&dd->sdma_unfreeze_count, 0);
1393 descq_cnt = sdma_get_descq_cnt();
1394 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1395 num_engines, descq_cnt);
1397 /* alloc memory for array of send engines */
1398 dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
1399 GFP_KERNEL, dd->node);
1403 idle_cnt = ns_to_cclock(dd, idle_cnt);
1406 SDMA_DESC1_HEAD_TO_HOST_FLAG;
1409 SDMA_DESC1_INT_REQ_FLAG;
1411 if (!sdma_desct_intr)
1412 sdma_desct_intr = SDMA_DESC_INTR;
1414 /* Allocate memory for SendDMA descriptor FIFOs */
1415 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1416 sde = &dd->per_sdma[this_idx];
1419 sde->this_idx = this_idx;
1420 sde->descq_cnt = descq_cnt;
1421 sde->desc_avail = sdma_descq_freecnt(sde);
1422 sde->sdma_shift = ilog2(descq_cnt);
1423 sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1425 /* Create a mask specifically for each interrupt source */
1426 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
1428 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
1430 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
1432 /* Create a combined mask to cover all 3 interrupt sources */
1433 sde->imask = sde->int_mask | sde->progress_mask |
1436 spin_lock_init(&sde->tail_lock);
1437 seqlock_init(&sde->head_lock);
1438 spin_lock_init(&sde->senddmactrl_lock);
1439 spin_lock_init(&sde->flushlist_lock);
1440 seqlock_init(&sde->waitlock);
1441 /* ensure there is always a zero bit */
1442 sde->ahg_bits = 0xfffffffe00000000ULL;
1444 sdma_set_state(sde, sdma_state_s00_hw_down);
1446 /* set up reference counting */
1447 kref_init(&sde->state.kref);
1448 init_completion(&sde->state.comp);
1450 INIT_LIST_HEAD(&sde->flushlist);
1451 INIT_LIST_HEAD(&sde->dmawait);
1454 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1456 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1457 (unsigned long)sde);
1459 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1460 (unsigned long)sde);
1461 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1462 INIT_WORK(&sde->flush_worker, sdma_field_flush);
1464 sde->progress_check_head = 0;
1466 timer_setup(&sde->err_progress_check_timer,
1467 sdma_err_progress_check, 0);
1469 sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
1470 descq_cnt * sizeof(u64[2]),
1471 &sde->descq_phys, GFP_KERNEL);
1475 kvzalloc_node(array_size(descq_cnt,
1476 sizeof(struct sdma_txreq *)),
1477 GFP_KERNEL, dd->node);
1482 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1483 /* Allocate memory for DMA of head registers to memory */
1484 dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
1485 dd->sdma_heads_size,
1486 &dd->sdma_heads_phys,
1488 if (!dd->sdma_heads_dma) {
1489 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1493 /* Allocate memory for pad */
1494 dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
1495 &dd->sdma_pad_phys, GFP_KERNEL);
1496 if (!dd->sdma_pad_dma) {
1497 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1501 /* assign each engine to different cacheline and init registers */
1502 curr_head = (void *)dd->sdma_heads_dma;
1503 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1504 unsigned long phys_offset;
1506 sde = &dd->per_sdma[this_idx];
1508 sde->head_dma = curr_head;
1509 curr_head += L1_CACHE_BYTES;
1510 phys_offset = (unsigned long)sde->head_dma -
1511 (unsigned long)dd->sdma_heads_dma;
1512 sde->head_phys = dd->sdma_heads_phys + phys_offset;
1513 init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1515 dd->flags |= HFI1_HAS_SEND_DMA;
1516 dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1517 dd->num_sdma = num_engines;
1518 ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
1522 tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
1523 if (!tmp_sdma_rht) {
1528 ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
1531 dd->sdma_rht = tmp_sdma_rht;
1533 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1537 sdma_clean(dd, num_engines);
1542 * sdma_all_running() - called when the link goes up
1545 * This routine moves all engines to the running state.
1547 void sdma_all_running(struct hfi1_devdata *dd)
1549 struct sdma_engine *sde;
1552 /* move all engines to running */
1553 for (i = 0; i < dd->num_sdma; ++i) {
1554 sde = &dd->per_sdma[i];
1555 sdma_process_event(sde, sdma_event_e30_go_running);
1560 * sdma_all_idle() - called when the link goes down
1563 * This routine moves all engines to the idle state.
1565 void sdma_all_idle(struct hfi1_devdata *dd)
1567 struct sdma_engine *sde;
1570 /* idle all engines */
1571 for (i = 0; i < dd->num_sdma; ++i) {
1572 sde = &dd->per_sdma[i];
1573 sdma_process_event(sde, sdma_event_e70_go_idle);
1578 * sdma_start() - called to kick off state processing for all engines
1581 * This routine is for kicking off the state processing for all required
1582 * sdma engines. Interrupts need to be working at this point.
1585 void sdma_start(struct hfi1_devdata *dd)
1588 struct sdma_engine *sde;
1590 /* kick off the engines state processing */
1591 for (i = 0; i < dd->num_sdma; ++i) {
1592 sde = &dd->per_sdma[i];
1593 sdma_process_event(sde, sdma_event_e10_go_hw_start);
1598 * sdma_exit() - used when module is removed
1601 void sdma_exit(struct hfi1_devdata *dd)
1604 struct sdma_engine *sde;
1606 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1608 sde = &dd->per_sdma[this_idx];
1609 if (!list_empty(&sde->dmawait))
1610 dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1612 sdma_process_event(sde, sdma_event_e00_go_hw_down);
1614 del_timer_sync(&sde->err_progress_check_timer);
1617 * This waits for the state machine to exit so it is not
1618 * necessary to kill the sdma_sw_clean_up_task to make sure
1619 * it is not running.
1621 sdma_finalput(&sde->state);
1626 * unmap the indicated descriptor
1628 static inline void sdma_unmap_desc(
1629 struct hfi1_devdata *dd,
1630 struct sdma_desc *descp)
1632 switch (sdma_mapping_type(descp)) {
1633 case SDMA_MAP_SINGLE:
1636 sdma_mapping_addr(descp),
1637 sdma_mapping_len(descp),
1643 sdma_mapping_addr(descp),
1644 sdma_mapping_len(descp),
1651 * return the mode as indicated by the first
1652 * descriptor in the tx.
1654 static inline u8 ahg_mode(struct sdma_txreq *tx)
1656 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1657 >> SDMA_DESC1_HEADER_MODE_SHIFT;
1661 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
1662 * @dd: hfi1_devdata for unmapping
1663 * @tx: tx request to clean
1665 * This is used in the progress routine to clean the tx or
1666 * by the ULP to toss an in-process tx build.
1668 * The code can be called multiple times without issue.
1671 void __sdma_txclean(
1672 struct hfi1_devdata *dd,
1673 struct sdma_txreq *tx)
1678 u8 skip = 0, mode = ahg_mode(tx);
1681 sdma_unmap_desc(dd, &tx->descp[0]);
1682 /* determine number of AHG descriptors to skip */
1683 if (mode > SDMA_AHG_APPLY_UPDATE1)
1685 for (i = 1 + skip; i < tx->num_desc; i++)
1686 sdma_unmap_desc(dd, &tx->descp[i]);
1689 kfree(tx->coalesce_buf);
1690 tx->coalesce_buf = NULL;
1691 /* kmalloc'ed descp */
1692 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1693 tx->desc_limit = ARRAY_SIZE(tx->descs);
1698 static inline u16 sdma_gethead(struct sdma_engine *sde)
1700 struct hfi1_devdata *dd = sde->dd;
1704 #ifdef CONFIG_SDMA_VERBOSITY
1705 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1706 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1710 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1711 (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1712 hwhead = use_dmahead ?
1713 (u16)le64_to_cpu(*sde->head_dma) :
1714 (u16)read_sde_csr(sde, SD(HEAD));
1716 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1722 swhead = sde->descq_head & sde->sdma_mask;
1723 /* this code is really bad for cache line trading */
1724 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1725 cnt = sde->descq_cnt;
1727 if (swhead < swtail)
1729 sane = (hwhead >= swhead) & (hwhead <= swtail);
1730 else if (swhead > swtail)
1731 /* wrapped around */
1732 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1736 sane = (hwhead == swhead);
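/*
 * Example of the window check above: with swhead == 100 and
 * swtail == 200 (no wrap) hwhead is sane iff it lies in [100, 200];
 * with swhead == 200 and swtail == 100 (wrapped) it must lie in
 * [200, cnt) or [0, 100].
 */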
1738 if (unlikely(!sane)) {
1739 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1741 use_dmahead ? "dma" : "kreg",
1742 hwhead, swhead, swtail, cnt);
1744 /* try one more time, using csr */
1748 /* proceed as if no progress */
1756 * This is called when there are send DMA descriptors that might be available.
1759 * This is called with head_lock held.
1761 static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
1763 struct iowait *wait, *nw, *twait;
1764 struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1765 uint i, n = 0, seq, tidx = 0;
1767 #ifdef CONFIG_SDMA_VERBOSITY
1768 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1769 slashstrip(__FILE__), __LINE__, __func__);
1770 dd_dev_err(sde->dd, "avail: %u\n", avail);
1774 seq = read_seqbegin(&sde->waitlock);
1775 if (!list_empty(&sde->dmawait)) {
1776 /* at least one item */
1777 write_seqlock(&sde->waitlock);
1778 /* Harvest waiters wanting DMA descriptors */
1779 list_for_each_entry_safe(
1788 if (n == ARRAY_SIZE(waits))
1790 iowait_init_priority(wait);
1791 num_desc = iowait_get_all_desc(wait);
1792 if (num_desc > avail)
1795 /* Find the top-priority wait member */
1797 twait = waits[tidx];
1799 iowait_priority_update_top(wait,
1804 list_del_init(&wait->list);
1807 write_sequnlock(&sde->waitlock);
1810 } while (read_seqretry(&sde->waitlock, seq));
1812 /* Schedule the top-priority entry first */
1814 waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
1816 for (i = 0; i < n; i++)
1818 waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1821 /* head_lock must be held */
1822 static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1824 struct sdma_txreq *txp = NULL;
1827 int idle_check_done = 0;
1829 hwhead = sdma_gethead(sde);
1831 /* The reason for some of the complexity of this code is that
1832 * not all descriptors have corresponding txps. So, we have to
1833 * be able to skip over descs until we wander into the range of
1834 * the next txp on the list.
1838 txp = get_txhead(sde);
1839 swhead = sde->descq_head & sde->sdma_mask;
1840 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1841 while (swhead != hwhead) {
1842 /* advance head, wrap if needed */
1843 swhead = ++sde->descq_head & sde->sdma_mask;
1845 /* if now past this txp's descs, do the callback */
1846 if (txp && txp->next_descq_idx == swhead) {
1847 /* remove from list */
1848 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1849 complete_tx(sde, txp, SDMA_TXREQ_S_OK);
1850 /* see if there is another txp */
1851 txp = get_txhead(sde);
1853 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1858 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1859 * to updates to the dma_head location in host memory. The head
1860 * value read might not be fully up to date. If there are pending
1861 * descriptors and the SDMA idle interrupt fired then read from the
1862 * CSR SDMA head instead to get the latest value from the hardware.
1863 * The hardware SDMA head should be read at most once in this invocation
1864 * of sdma_make_progress(..) which is ensured by idle_check_done flag
1866 if ((status & sde->idle_mask) && !idle_check_done) {
1869 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1870 if (swtail != hwhead) {
1871 hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1872 idle_check_done = 1;
1877 sde->last_status = status;
1879 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1883 * sdma_engine_interrupt() - interrupt handler for engine
1885 * @status: sdma interrupt reason
1887 * Status is a mask of the 3 possible interrupts for this engine. It will
1888 * contain bits _only_ for this SDMA engine. It will contain at least one
1889 * bit; it may contain more.
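 *
 * For example, assuming TXE_NUM_SDMA_ENGINES == 16 (the value used
 * when the masks are built in sdma_init()), engine 3 owns interrupt
 * bit 3 (int_mask), progress bit 19 (progress_mask) and idle bit 35
 * (idle_mask).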
1891 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1893 trace_hfi1_sdma_engine_interrupt(sde, status);
1894 write_seqlock(&sde->head_lock);
1895 sdma_set_desc_cnt(sde, sdma_desct_intr);
1896 if (status & sde->idle_mask)
1897 sde->idle_int_cnt++;
1898 else if (status & sde->progress_mask)
1899 sde->progress_int_cnt++;
1900 else if (status & sde->int_mask)
1901 sde->sdma_int_cnt++;
1902 sdma_make_progress(sde, status);
1903 write_sequnlock(&sde->head_lock);
1907 * sdma_engine_error() - error handler for engine
1909 * @status: sdma interrupt reason
1911 void sdma_engine_error(struct sdma_engine *sde, u64 status)
1913 unsigned long flags;
1915 #ifdef CONFIG_SDMA_VERBOSITY
1916 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1918 (unsigned long long)status,
1919 sdma_state_names[sde->state.current_state]);
1921 spin_lock_irqsave(&sde->tail_lock, flags);
1922 write_seqlock(&sde->head_lock);
1923 if (status & ALL_SDMA_ENG_HALT_ERRS)
1924 __sdma_process_event(sde, sdma_event_e60_hw_halted);
1925 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1927 "SDMA (%u) engine error: 0x%llx state %s\n",
1929 (unsigned long long)status,
1930 sdma_state_names[sde->state.current_state]);
1931 dump_sdma_state(sde);
1933 write_sequnlock(&sde->head_lock);
1934 spin_unlock_irqrestore(&sde->tail_lock, flags);
1937 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1939 u64 set_senddmactrl = 0;
1940 u64 clr_senddmactrl = 0;
1941 unsigned long flags;
1943 #ifdef CONFIG_SDMA_VERBOSITY
1944 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1946 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1947 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1948 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1949 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1952 if (op & SDMA_SENDCTRL_OP_ENABLE)
1953 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1955 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1957 if (op & SDMA_SENDCTRL_OP_INTENABLE)
1958 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1960 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1962 if (op & SDMA_SENDCTRL_OP_HALT)
1963 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1965 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1967 spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1969 sde->p_senddmactrl |= set_senddmactrl;
1970 sde->p_senddmactrl &= ~clr_senddmactrl;
1972 if (op & SDMA_SENDCTRL_OP_CLEANUP)
1973 write_sde_csr(sde, SD(CTRL),
1974 sde->p_senddmactrl |
1975 SD(CTRL_SDMA_CLEANUP_SMASK));
1977 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1979 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1981 #ifdef CONFIG_SDMA_VERBOSITY
1982 sdma_dumpstate(sde);
1986 static void sdma_setlengen(struct sdma_engine *sde)
1988 #ifdef CONFIG_SDMA_VERBOSITY
1989 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1990 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1994 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1995 * count to enable generation checking and load the internal
1996 * generation counter.
1998 write_sde_csr(sde, SD(LEN_GEN),
1999 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
2000 write_sde_csr(sde, SD(LEN_GEN),
2001 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
2002 (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
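/*
 * e.g. with descq_cnt == 2048 the length field written above is
 * 2048 / 64 == 32, the divide by 64 reflecting what appears to be a
 * 64-descriptor unit of ring length in the hardware.
 */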
2005 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
2007 /* Commit writes to memory and advance the tail on the chip */
2008 smp_wmb(); /* see get_txhead() */
2009 writeq(tail, sde->tail_csr);
2013 * This is called when changing to state s10_hw_start_up_halt_wait as
2014 * a result of send buffer errors or send DMA descriptor errors.
2016 static void sdma_hw_start_up(struct sdma_engine *sde)
2020 #ifdef CONFIG_SDMA_VERBOSITY
2021 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2022 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2025 sdma_setlengen(sde);
2026 sdma_update_tail(sde, 0); /* Set SendDmaTail */
2029 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
2030 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
2031 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
2035 * set_sdma_integrity
2037 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
2039 static void set_sdma_integrity(struct sdma_engine *sde)
2041 struct hfi1_devdata *dd = sde->dd;
2043 write_sde_csr(sde, SD(CHECK_ENABLE),
2044 hfi1_pkt_base_sdma_integrity(dd));
2047 static void init_sdma_regs(
2048 struct sdma_engine *sde,
2053 #ifdef CONFIG_SDMA_VERBOSITY
2054 struct hfi1_devdata *dd = sde->dd;
2056 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2057 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2060 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
2061 sdma_setlengen(sde);
2062 sdma_update_tail(sde, 0); /* Set SendDmaTail */
2063 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
2064 write_sde_csr(sde, SD(DESC_CNT), 0);
2065 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
2066 write_sde_csr(sde, SD(MEMORY),
2067 ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
2068 ((u64)(credits * sde->this_idx) <<
2069 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
2070 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
2071 set_sdma_integrity(sde);
2072 opmask = OPCODE_CHECK_MASK_DISABLED;
2073 opval = OPCODE_CHECK_VAL_DISABLED;
2074 write_sde_csr(sde, SD(CHECK_OPCODE),
2075 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
2076 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
2079 #ifdef CONFIG_SDMA_VERBOSITY
2081 #define sdma_dumpstate_helper0(reg) do { \
2082 csr = read_csr(sde->dd, reg); \
2083 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
2086 #define sdma_dumpstate_helper(reg) do { \
2087 csr = read_sde_csr(sde, reg); \
2088 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2089 #reg, sde->this_idx, csr); \
2092 #define sdma_dumpstate_helper2(reg) do { \
2093 csr = read_csr(sde->dd, reg + (8 * i)); \
2094 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
2098 void sdma_dumpstate(struct sdma_engine *sde)
2103 sdma_dumpstate_helper(SD(CTRL));
2104 sdma_dumpstate_helper(SD(STATUS));
2105 sdma_dumpstate_helper0(SD(ERR_STATUS));
2106 sdma_dumpstate_helper0(SD(ERR_MASK));
2107 sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
2108 sdma_dumpstate_helper(SD(ENG_ERR_MASK));
2110 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
2111 sdma_dumpstate_helper2(CCE_INT_STATUS);
2112 sdma_dumpstate_helper2(CCE_INT_MASK);
2113 sdma_dumpstate_helper2(CCE_INT_BLOCKED);
2116 sdma_dumpstate_helper(SD(TAIL));
2117 sdma_dumpstate_helper(SD(HEAD));
2118 sdma_dumpstate_helper(SD(PRIORITY_THLD));
2119 sdma_dumpstate_helper(SD(IDLE_CNT));
2120 sdma_dumpstate_helper(SD(RELOAD_CNT));
2121 sdma_dumpstate_helper(SD(DESC_CNT));
2122 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
2123 sdma_dumpstate_helper(SD(MEMORY));
2124 sdma_dumpstate_helper0(SD(ENGINES));
2125 sdma_dumpstate_helper0(SD(MEM_SIZE));
2126 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
2127 sdma_dumpstate_helper(SD(BASE_ADDR));
2128 sdma_dumpstate_helper(SD(LEN_GEN));
2129 sdma_dumpstate_helper(SD(HEAD_ADDR));
2130 sdma_dumpstate_helper(SD(CHECK_ENABLE));
2131 sdma_dumpstate_helper(SD(CHECK_VL));
2132 sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
2133 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
2134 sdma_dumpstate_helper(SD(CHECK_SLID));
2135 sdma_dumpstate_helper(SD(CHECK_OPCODE));
2139 static void dump_sdma_state(struct sdma_engine *sde)
2141 struct hw_sdma_desc *descqp;
2146 u16 head, tail, cnt;
2148 head = sde->descq_head & sde->sdma_mask;
2149 tail = sde->descq_tail & sde->sdma_mask;
2150 cnt = sdma_descq_freecnt(sde);
2153 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
2154 sde->this_idx, head, tail, cnt,
2155 !list_empty(&sde->flushlist));
2157 /* print info for each entry in the descriptor queue */
2158 while (head != tail) {
2159 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2161 descqp = &sde->descq[head];
2162 desc[0] = le64_to_cpu(descqp->qw[0]);
2163 desc[1] = le64_to_cpu(descqp->qw[1]);
2164 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2165 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2167 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2168 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2169 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2170 & SDMA_DESC0_PHY_ADDR_MASK;
2171 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2172 & SDMA_DESC1_GENERATION_MASK;
2173 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2174 & SDMA_DESC0_BYTE_COUNT_MASK;
2176 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2177 head, flags, addr, gen, len);
2179 "\tdesc0:0x%016llx desc1 0x%016llx\n",
2181 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2183 "\taidx: %u amode: %u alen: %u\n",
2185 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2186 SDMA_DESC1_HEADER_INDEX_SHIFT),
2188 SDMA_DESC1_HEADER_MODE_SMASK) >>
2189 SDMA_DESC1_HEADER_MODE_SHIFT),
2191 SDMA_DESC1_HEADER_DWS_SMASK) >>
2192 SDMA_DESC1_HEADER_DWS_SHIFT));
2194 head &= sde->sdma_mask;
2199 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
2201 * sdma_seqfile_dump_sde() - debugfs dump of sde
2203 * @sde: send dma engine to dump
2205 * This routine dumps the sde to the indicated seq file.
2207 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
2210 struct hw_sdma_desc *descqp;
2216 head = sde->descq_head & sde->sdma_mask;
2217 tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
2218 seq_printf(s, SDE_FMT, sde->this_idx,
2220 sdma_state_name(sde->state.current_state),
2221 (unsigned long long)read_sde_csr(sde, SD(CTRL)),
2222 (unsigned long long)read_sde_csr(sde, SD(STATUS)),
2223 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
2224 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
2225 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
2226 (unsigned long long)le64_to_cpu(*sde->head_dma),
2227 (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
2228 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
2229 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
2230 (unsigned long long)sde->last_status,
2231 (unsigned long long)sde->ahg_bits,
2236 !list_empty(&sde->flushlist),
2237 sde->descq_full_count,
2238 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
2240 /* print info for each entry in the descriptor queue */
2241 while (head != tail) {
2242 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2244 descqp = &sde->descq[head];
2245 desc[0] = le64_to_cpu(descqp->qw[0]);
2246 desc[1] = le64_to_cpu(descqp->qw[1]);
2247 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2248 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2250 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2251 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2252 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2253 & SDMA_DESC0_PHY_ADDR_MASK;
2254 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2255 & SDMA_DESC1_GENERATION_MASK;
2256 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2257 & SDMA_DESC0_BYTE_COUNT_MASK;
2259 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2260 head, flags, addr, gen, len);
2261 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2262 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
2264 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2265 SDMA_DESC1_HEADER_INDEX_SHIFT),
2267 SDMA_DESC1_HEADER_MODE_SMASK) >>
2268 SDMA_DESC1_HEADER_MODE_SHIFT));
2269 head = (head + 1) & sde->sdma_mask;
2274 * add the generation number into
2275 * qw1 and return
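 *
 * e.g. with descq_cnt == 2048 (sdma_shift == 11), tails 0..2047 use
 * generation 0, 2048..4095 use generation 1, and the 2-bit value
 * wraps after generation 3.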
2277 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
2279 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
2281 qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
2282 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
2283 << SDMA_DESC1_GENERATION_SHIFT;
2288 * This routine submits the indicated tx.
2290 * Space has already been guaranteed and
2291 * the tail side of the ring is locked.
2293 * The hardware tail update is done
2294 * in the caller and that is facilitated
2295 * by returning the new tail.
2297 * There is special case logic for ahg
2298 * to not add the generation number for
2299 * up to 2 descriptors that follow the first descriptor.
2303 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
2307 struct sdma_desc *descp = tx->descp;
2308 u8 skip = 0, mode = ahg_mode(tx);
2310 tail = sde->descq_tail & sde->sdma_mask;
2311 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2312 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
2313 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
2314 tail, &sde->descq[tail]);
2315 tail = ++sde->descq_tail & sde->sdma_mask;
2317 if (mode > SDMA_AHG_APPLY_UPDATE1)
2319 for (i = 1; i < tx->num_desc; i++, descp++) {
2322 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2324 /* edits don't have generation */
2328 /* replace generation with real one for non-edits */
2329 qw1 = add_gen(sde, descp->qw[1]);
2331 sde->descq[tail].qw[1] = cpu_to_le64(qw1);
2332 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
2333 tail, &sde->descq[tail]);
2334 tail = ++sde->descq_tail & sde->sdma_mask;
2336 tx->next_descq_idx = tail;
2337 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2338 tx->sn = sde->tail_sn++;
2339 trace_hfi1_sdma_in_sn(sde, tx->sn);
2340 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
2342 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
2343 sde->desc_avail -= tx->num_desc;
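/*
 * Caller-side sketch (illustrative only): submit_tx() assumes the
 * tail_lock is held and that space was verified beforehand; the returned
 * tail is then pushed to hardware once, amortizing the CSR write across
 * all descriptors of the tx:
 *
 *	spin_lock_irqsave(&sde->tail_lock, flags);
 *	if (tx->num_desc <= sde->desc_avail)
 *		sdma_update_tail(sde, submit_tx(sde, tx));
 *	spin_unlock_irqrestore(&sde->tail_lock, flags);
 */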
2348 * Check for progress
2350 static int sdma_check_progress(
2351 struct sdma_engine *sde,
2352 struct iowait_work *wait,
2353 struct sdma_txreq *tx,
2358 sde->desc_avail = sdma_descq_freecnt(sde);
2359 if (tx->num_desc <= sde->desc_avail)
2361 /* pulse the head_lock */
2362 if (wait && iowait_ioww_to_iow(wait)->sleep) {
2365 seq = raw_seqcount_begin(
2366 (const seqcount_t *)&sde->head_lock.seqcount);
2367 ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
2369 sde->desc_avail = sdma_descq_freecnt(sde);
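/*
 * Resulting contract for a sleep callback (a sketch; the exact semantics
 * live with the iowait implementation): it receives the seqcount snapshot
 * taken above and either parks the tx, returning -EIOCBQUEUED, or returns
 * -EAGAIN to ask the submitter to retry because the head may have
 * advanced. A hypothetical callback would have this shape:
 *
 *	static int my_sleep(struct sdma_engine *sde, struct iowait_work *wait,
 *			    struct sdma_txreq *tx, uint seq, bool pkts_sent)
 *	{
 *		(park tx on the wait list, or return -EAGAIN to retry)
 *	}
 */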
2377 * sdma_send_txreq() - submit a tx req to ring
2378 * @sde: sdma engine to use
2379 * @wait: SE wait structure to use when full (may be NULL)
2380 * @tx: sdma_txreq to submit
2381 * @pkts_sent: has any packet been sent yet?
2383 * The call submits the tx into the ring. If an iowait structure is non-NULL,
2384 * the packet will be queued to the list in wait.
2387 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2388 * ring (wait == NULL)
2389 * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2391 int sdma_send_txreq(struct sdma_engine *sde,
2392 struct iowait_work *wait,
2393 struct sdma_txreq *tx,
2398 unsigned long flags;
2400 /* user should have supplied entire packet */
2401 if (unlikely(tx->tlen))
2403 tx->wait = iowait_ioww_to_iow(wait);
2404 spin_lock_irqsave(&sde->tail_lock, flags);
2406 if (unlikely(!__sdma_running(sde)))
2408 if (unlikely(tx->num_desc > sde->desc_avail))
2410 tail = submit_tx(sde, tx);
2412 iowait_sdma_inc(iowait_ioww_to_iow(wait));
2413 sdma_update_tail(sde, tail);
2415 spin_unlock_irqrestore(&sde->tail_lock, flags);
2419 iowait_sdma_inc(iowait_ioww_to_iow(wait));
2420 tx->next_descq_idx = 0;
2421 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2422 tx->sn = sde->tail_sn++;
2423 trace_hfi1_sdma_in_sn(sde, tx->sn);
2425 spin_lock(&sde->flushlist_lock);
2426 list_add_tail(&tx->list, &sde->flushlist);
2427 spin_unlock(&sde->flushlist_lock);
2428 iowait_inc_wait_count(wait, tx->num_desc);
2429 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
2433 ret = sdma_check_progress(sde, wait, tx, pkts_sent);
2434 if (ret == -EAGAIN) {
2438 sde->descq_full_count++;
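/*
 * Hypothetical caller sketch (not part of this driver) wiring the
 * documented return codes to their usual handling; sdma_txclean() is the
 * descriptor-cleanup helper callers normally use:
 *
 *	ret = sdma_send_txreq(sde, wait, tx, pkts_sent);
 *	if (ret == -EIOCBQUEUED)
 *		return 0;	(queued to wait; resubmitted on wakeup)
 *	if (ret == -ECOMM)
 *		return ret;	(engine not running; tx is on the flush list)
 *	if (ret)
 *		sdma_txclean(dd, tx);	(-EINVAL or -EBUSY)
 */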
2443 * sdma_send_txlist() - submit a list of tx req to ring
2444 * @sde: sdma engine to use
2445 * @wait: SE wait structure to use when full (may be NULL)
2446 * @tx_list: list of sdma_txreqs to submit
2447 * @count_out: pointer to a u16 which, after return, will contain the total
2448 * number of sdma_txreqs removed from the tx_list. This includes the
2449 * sdma_txreqs whose SDMA descriptors are submitted to the ring and the
2450 * sdma_txreqs added to the SDMA engine flush list when the SDMA engine
2451 * state is not running.
2453 * The call submits the list into the ring.
2455 * If the iowait structure is non-NULL and not equal to the iowait list,
2456 * the unprocessed part of the list will be appended to the list in wait.
2458 * In all cases, the tx_list will be updated so that the head of the tx_list
2459 * is the first of the sdma_txreqs that have yet to be transmitted.
2461 * The intent of this call is to provide a more efficient
2462 * way of submitting multiple packets to SDMA while holding the tail lock.
2467 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
2468 * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state
2470 int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
2471 struct list_head *tx_list, u16 *count_out)
2473 struct sdma_txreq *tx, *tx_next;
2475 unsigned long flags;
2476 u16 tail = INVALID_TAIL;
2477 u32 submit_count = 0, flush_count = 0, total_count;
2479 spin_lock_irqsave(&sde->tail_lock, flags);
2481 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2482 tx->wait = iowait_ioww_to_iow(wait);
2483 if (unlikely(!__sdma_running(sde)))
2485 if (unlikely(tx->num_desc > sde->desc_avail))
2487 if (unlikely(tx->tlen)) {
2491 list_del_init(&tx->list);
2492 tail = submit_tx(sde, tx);
2494 if (tail != INVALID_TAIL &&
2495 (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2496 sdma_update_tail(sde, tail);
2497 tail = INVALID_TAIL;
2501 total_count = submit_count + flush_count;
2503 iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
2504 iowait_starve_clear(submit_count > 0,
2505 iowait_ioww_to_iow(wait));
2507 if (tail != INVALID_TAIL)
2508 sdma_update_tail(sde, tail);
2509 spin_unlock_irqrestore(&sde->tail_lock, flags);
2510 *count_out = total_count;
2513 spin_lock(&sde->flushlist_lock);
2514 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2515 tx->wait = iowait_ioww_to_iow(wait);
2516 list_del_init(&tx->list);
2517 tx->next_descq_idx = 0;
2518 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2519 tx->sn = sde->tail_sn++;
2520 trace_hfi1_sdma_in_sn(sde, tx->sn);
2522 list_add_tail(&tx->list, &sde->flushlist);
2524 iowait_inc_wait_count(wait, tx->num_desc);
2526 spin_unlock(&sde->flushlist_lock);
2527 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
2531 ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
2532 if (ret == -EAGAIN) {
2536 sde->descq_full_count++;
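/*
 * Hypothetical batching sketch (not part of this driver): build several
 * txreqs on a local list and submit them under a single tail_lock hold;
 * count_out reports how many were consumed (ring plus flush list):
 *
 *	LIST_HEAD(txlist);
 *	u16 done = 0;
 *
 *	for (i = 0; i < npkts; i++)
 *		list_add_tail(&txs[i].list, &txlist);
 *	ret = sdma_send_txlist(sde, wait, &txlist, &done);
 *	(anything left on txlist was neither submitted nor flushed)
 */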
2540 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
2542 unsigned long flags;
2544 spin_lock_irqsave(&sde->tail_lock, flags);
2545 write_seqlock(&sde->head_lock);
2547 __sdma_process_event(sde, event);
2549 if (sde->state.current_state == sdma_state_s99_running)
2550 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2552 write_sequnlock(&sde->head_lock);
2553 spin_unlock_irqrestore(&sde->tail_lock, flags);
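/*
 * Note the lock nesting above: the tail_lock (submission side) is taken
 * first, then the head_lock seqlock (completion side), so a state
 * transition can interleave with neither a submit nor a completion.
 * A minimal sketch of the same discipline, using only the fields seen
 * above:
 *
 *	spin_lock_irqsave(&sde->tail_lock, flags);
 *	write_seqlock(&sde->head_lock);
 *	(mutate sde->state here)
 *	write_sequnlock(&sde->head_lock);
 *	spin_unlock_irqrestore(&sde->tail_lock, flags);
 */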
2556 static void __sdma_process_event(struct sdma_engine *sde,
2557 enum sdma_events event)
2559 struct sdma_state *ss = &sde->state;
2560 int need_progress = 0;
2562 /* CONFIG SDMA temporary */
2563 #ifdef CONFIG_SDMA_VERBOSITY
2564 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2565 sdma_state_names[ss->current_state],
2566 sdma_event_names[event]);
2569 switch (ss->current_state) {
2570 case sdma_state_s00_hw_down:
2572 case sdma_event_e00_go_hw_down:
2574 case sdma_event_e30_go_running:
2576 * If down, but running requested (usually the result
2577 * of a link up), then we need to start up.
2578 * This can happen when hw down is requested while
2579 * bringing the link up with traffic active.
2582 ss->go_s99_running = 1;
2583 /* fall through -- and start dma engine */
2584 case sdma_event_e10_go_hw_start:
2585 /* This reference means the state machine is started */
2586 sdma_get(&sde->state);
2588 sdma_state_s10_hw_start_up_halt_wait);
2590 case sdma_event_e15_hw_halt_done:
2592 case sdma_event_e25_hw_clean_up_done:
2594 case sdma_event_e40_sw_cleaned:
2595 sdma_sw_tear_down(sde);
2597 case sdma_event_e50_hw_cleaned:
2599 case sdma_event_e60_hw_halted:
2601 case sdma_event_e70_go_idle:
2603 case sdma_event_e80_hw_freeze:
2605 case sdma_event_e81_hw_frozen:
2607 case sdma_event_e82_hw_unfreeze:
2609 case sdma_event_e85_link_down:
2611 case sdma_event_e90_sw_halted:
2616 case sdma_state_s10_hw_start_up_halt_wait:
2618 case sdma_event_e00_go_hw_down:
2619 sdma_set_state(sde, sdma_state_s00_hw_down);
2620 sdma_sw_tear_down(sde);
2622 case sdma_event_e10_go_hw_start:
2624 case sdma_event_e15_hw_halt_done:
2626 sdma_state_s15_hw_start_up_clean_wait);
2627 sdma_start_hw_clean_up(sde);
2629 case sdma_event_e25_hw_clean_up_done:
2631 case sdma_event_e30_go_running:
2632 ss->go_s99_running = 1;
2634 case sdma_event_e40_sw_cleaned:
2636 case sdma_event_e50_hw_cleaned:
2638 case sdma_event_e60_hw_halted:
2639 schedule_work(&sde->err_halt_worker);
2641 case sdma_event_e70_go_idle:
2642 ss->go_s99_running = 0;
2644 case sdma_event_e80_hw_freeze:
2646 case sdma_event_e81_hw_frozen:
2648 case sdma_event_e82_hw_unfreeze:
2650 case sdma_event_e85_link_down:
2652 case sdma_event_e90_sw_halted:
2657 case sdma_state_s15_hw_start_up_clean_wait:
2659 case sdma_event_e00_go_hw_down:
2660 sdma_set_state(sde, sdma_state_s00_hw_down);
2661 sdma_sw_tear_down(sde);
2663 case sdma_event_e10_go_hw_start:
2665 case sdma_event_e15_hw_halt_done:
2667 case sdma_event_e25_hw_clean_up_done:
2668 sdma_hw_start_up(sde);
2669 sdma_set_state(sde, ss->go_s99_running ?
2670 sdma_state_s99_running :
2671 sdma_state_s20_idle);
2673 case sdma_event_e30_go_running:
2674 ss->go_s99_running = 1;
2676 case sdma_event_e40_sw_cleaned:
2678 case sdma_event_e50_hw_cleaned:
2680 case sdma_event_e60_hw_halted:
2682 case sdma_event_e70_go_idle:
2683 ss->go_s99_running = 0;
2685 case sdma_event_e80_hw_freeze:
2687 case sdma_event_e81_hw_frozen:
2689 case sdma_event_e82_hw_unfreeze:
2691 case sdma_event_e85_link_down:
2693 case sdma_event_e90_sw_halted:
2698 case sdma_state_s20_idle:
2700 case sdma_event_e00_go_hw_down:
2701 sdma_set_state(sde, sdma_state_s00_hw_down);
2702 sdma_sw_tear_down(sde);
2704 case sdma_event_e10_go_hw_start:
2706 case sdma_event_e15_hw_halt_done:
2708 case sdma_event_e25_hw_clean_up_done:
2710 case sdma_event_e30_go_running:
2711 sdma_set_state(sde, sdma_state_s99_running);
2712 ss->go_s99_running = 1;
2714 case sdma_event_e40_sw_cleaned:
2716 case sdma_event_e50_hw_cleaned:
2718 case sdma_event_e60_hw_halted:
2719 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2720 schedule_work(&sde->err_halt_worker);
2722 case sdma_event_e70_go_idle:
2724 case sdma_event_e85_link_down:
2726 case sdma_event_e80_hw_freeze:
2727 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2728 atomic_dec(&sde->dd->sdma_unfreeze_count);
2729 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2731 case sdma_event_e81_hw_frozen:
2733 case sdma_event_e82_hw_unfreeze:
2735 case sdma_event_e90_sw_halted:
2740 case sdma_state_s30_sw_clean_up_wait:
2742 case sdma_event_e00_go_hw_down:
2743 sdma_set_state(sde, sdma_state_s00_hw_down);
2745 case sdma_event_e10_go_hw_start:
2747 case sdma_event_e15_hw_halt_done:
2749 case sdma_event_e25_hw_clean_up_done:
2751 case sdma_event_e30_go_running:
2752 ss->go_s99_running = 1;
2754 case sdma_event_e40_sw_cleaned:
2755 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2756 sdma_start_hw_clean_up(sde);
2758 case sdma_event_e50_hw_cleaned:
2760 case sdma_event_e60_hw_halted:
2762 case sdma_event_e70_go_idle:
2763 ss->go_s99_running = 0;
2765 case sdma_event_e80_hw_freeze:
2767 case sdma_event_e81_hw_frozen:
2769 case sdma_event_e82_hw_unfreeze:
2771 case sdma_event_e85_link_down:
2772 ss->go_s99_running = 0;
2774 case sdma_event_e90_sw_halted:
2779 case sdma_state_s40_hw_clean_up_wait:
2781 case sdma_event_e00_go_hw_down:
2782 sdma_set_state(sde, sdma_state_s00_hw_down);
2783 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2785 case sdma_event_e10_go_hw_start:
2787 case sdma_event_e15_hw_halt_done:
2789 case sdma_event_e25_hw_clean_up_done:
2790 sdma_hw_start_up(sde);
2791 sdma_set_state(sde, ss->go_s99_running ?
2792 sdma_state_s99_running :
2793 sdma_state_s20_idle);
2795 case sdma_event_e30_go_running:
2796 ss->go_s99_running = 1;
2798 case sdma_event_e40_sw_cleaned:
2800 case sdma_event_e50_hw_cleaned:
2802 case sdma_event_e60_hw_halted:
2804 case sdma_event_e70_go_idle:
2805 ss->go_s99_running = 0;
2807 case sdma_event_e80_hw_freeze:
2809 case sdma_event_e81_hw_frozen:
2811 case sdma_event_e82_hw_unfreeze:
2813 case sdma_event_e85_link_down:
2814 ss->go_s99_running = 0;
2816 case sdma_event_e90_sw_halted:
2821 case sdma_state_s50_hw_halt_wait:
2823 case sdma_event_e00_go_hw_down:
2824 sdma_set_state(sde, sdma_state_s00_hw_down);
2825 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2827 case sdma_event_e10_go_hw_start:
2829 case sdma_event_e15_hw_halt_done:
2830 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2831 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2833 case sdma_event_e25_hw_clean_up_done:
2835 case sdma_event_e30_go_running:
2836 ss->go_s99_running = 1;
2838 case sdma_event_e40_sw_cleaned:
2840 case sdma_event_e50_hw_cleaned:
2842 case sdma_event_e60_hw_halted:
2843 schedule_work(&sde->err_halt_worker);
2845 case sdma_event_e70_go_idle:
2846 ss->go_s99_running = 0;
2848 case sdma_event_e80_hw_freeze:
2850 case sdma_event_e81_hw_frozen:
2852 case sdma_event_e82_hw_unfreeze:
2854 case sdma_event_e85_link_down:
2855 ss->go_s99_running = 0;
2857 case sdma_event_e90_sw_halted:
2862 case sdma_state_s60_idle_halt_wait:
2864 case sdma_event_e00_go_hw_down:
2865 sdma_set_state(sde, sdma_state_s00_hw_down);
2866 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2868 case sdma_event_e10_go_hw_start:
2870 case sdma_event_e15_hw_halt_done:
2871 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2872 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2874 case sdma_event_e25_hw_clean_up_done:
2876 case sdma_event_e30_go_running:
2877 ss->go_s99_running = 1;
2879 case sdma_event_e40_sw_cleaned:
2881 case sdma_event_e50_hw_cleaned:
2883 case sdma_event_e60_hw_halted:
2884 schedule_work(&sde->err_halt_worker);
2886 case sdma_event_e70_go_idle:
2887 ss->go_s99_running = 0;
2889 case sdma_event_e80_hw_freeze:
2891 case sdma_event_e81_hw_frozen:
2893 case sdma_event_e82_hw_unfreeze:
2895 case sdma_event_e85_link_down:
2897 case sdma_event_e90_sw_halted:
2902 case sdma_state_s80_hw_freeze:
2904 case sdma_event_e00_go_hw_down:
2905 sdma_set_state(sde, sdma_state_s00_hw_down);
2906 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2908 case sdma_event_e10_go_hw_start:
2910 case sdma_event_e15_hw_halt_done:
2912 case sdma_event_e25_hw_clean_up_done:
2914 case sdma_event_e30_go_running:
2915 ss->go_s99_running = 1;
2917 case sdma_event_e40_sw_cleaned:
2919 case sdma_event_e50_hw_cleaned:
2921 case sdma_event_e60_hw_halted:
2923 case sdma_event_e70_go_idle:
2924 ss->go_s99_running = 0;
2926 case sdma_event_e80_hw_freeze:
2928 case sdma_event_e81_hw_frozen:
2929 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2930 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2932 case sdma_event_e82_hw_unfreeze:
2934 case sdma_event_e85_link_down:
2936 case sdma_event_e90_sw_halted:
2941 case sdma_state_s82_freeze_sw_clean:
2943 case sdma_event_e00_go_hw_down:
2944 sdma_set_state(sde, sdma_state_s00_hw_down);
2945 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2947 case sdma_event_e10_go_hw_start:
2949 case sdma_event_e15_hw_halt_done:
2951 case sdma_event_e25_hw_clean_up_done:
2953 case sdma_event_e30_go_running:
2954 ss->go_s99_running = 1;
2956 case sdma_event_e40_sw_cleaned:
2957 /* notify caller this engine is done cleaning */
2958 atomic_dec(&sde->dd->sdma_unfreeze_count);
2959 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2961 case sdma_event_e50_hw_cleaned:
2963 case sdma_event_e60_hw_halted:
2965 case sdma_event_e70_go_idle:
2966 ss->go_s99_running = 0;
2968 case sdma_event_e80_hw_freeze:
2970 case sdma_event_e81_hw_frozen:
2972 case sdma_event_e82_hw_unfreeze:
2973 sdma_hw_start_up(sde);
2974 sdma_set_state(sde, ss->go_s99_running ?
2975 sdma_state_s99_running :
2976 sdma_state_s20_idle);
2978 case sdma_event_e85_link_down:
2980 case sdma_event_e90_sw_halted:
2985 case sdma_state_s99_running:
2987 case sdma_event_e00_go_hw_down:
2988 sdma_set_state(sde, sdma_state_s00_hw_down);
2989 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2991 case sdma_event_e10_go_hw_start:
2993 case sdma_event_e15_hw_halt_done:
2995 case sdma_event_e25_hw_clean_up_done:
2997 case sdma_event_e30_go_running:
2999 case sdma_event_e40_sw_cleaned:
3001 case sdma_event_e50_hw_cleaned:
3003 case sdma_event_e60_hw_halted:
3005 sdma_err_progress_check_schedule(sde);
3007 case sdma_event_e90_sw_halted:
3009 * A SW-initiated halt does not perform an engine progress check
3012 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
3013 schedule_work(&sde->err_halt_worker);
3015 case sdma_event_e70_go_idle:
3016 sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
3018 case sdma_event_e85_link_down:
3019 ss->go_s99_running = 0;
3021 case sdma_event_e80_hw_freeze:
3022 sdma_set_state(sde, sdma_state_s80_hw_freeze);
3023 atomic_dec(&sde->dd->sdma_unfreeze_count);
3024 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
3026 case sdma_event_e81_hw_frozen:
3028 case sdma_event_e82_hw_unfreeze:
3034 ss->last_event = event;
3036 sdma_make_progress(sde, 0);
3040 * _extend_sdma_tx_descs() - helper to extend txreq
3042 * This is called once the initial nominal allocation
3043 * of descriptors in the sdma_txreq is exhausted.
3045 * The code will bump the allocation up to the max
3046 * of MAX_DESC (64) descriptors. There doesn't seem to be
3047 * much point in an interim step. The last descriptor
3048 * is reserved for the coalesce buffer in order to support
3049 * cases where the input packet has >MAX_DESC iovecs.
3052 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
3056 /* Handle last descriptor */
3057 if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
3058 /* if tlen is 0, it is for padding; release the last descriptor */
3060 tx->desc_limit = MAX_DESC;
3061 } else if (!tx->coalesce_buf) {
3062 /* allocate coalesce buffer with space for padding */
3063 tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
3065 if (!tx->coalesce_buf)
3067 tx->coalesce_idx = 0;
3072 if (unlikely(tx->num_desc == MAX_DESC))
3075 tx->descp = kmalloc_array(
3077 sizeof(struct sdma_desc),
3082 /* reserve last descriptor for coalescing */
3083 tx->desc_limit = MAX_DESC - 1;
3084 /* copy ones already built */
3085 for (i = 0; i < tx->num_desc; i++)
3086 tx->descp[i] = tx->descs[i];
3089 __sdma_txclean(dd, tx);
3094 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
3096 * This is called once the initial nominal allocation of descriptors
3097 * in the sdma_txreq is exhausted.
3099 * This function calls _extend_sdma_tx_descs to extend the descriptors or to
3100 * allocate a coalesce buffer. If there is an allocated coalesce buffer, it
3101 * copies the input packet data into the coalesce buffer. It also adds the
3102 * coalesce buffer descriptor once the whole packet has been received.
3106 * 0 - coalescing, don't populate descriptor
3107 * 1 - continue with populating descriptor
3109 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
3110 int type, void *kvaddr, struct page *page,
3111 unsigned long offset, u16 len)
3116 rval = _extend_sdma_tx_descs(dd, tx);
3118 __sdma_txclean(dd, tx);
3122 /* If coalesce buffer is allocated, copy data into it */
3123 if (tx->coalesce_buf) {
3124 if (type == SDMA_MAP_NONE) {
3125 __sdma_txclean(dd, tx);
3129 if (type == SDMA_MAP_PAGE) {
3130 kvaddr = kmap(page);
3132 } else if (WARN_ON(!kvaddr)) {
3133 __sdma_txclean(dd, tx);
3137 memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
3138 tx->coalesce_idx += len;
3139 if (type == SDMA_MAP_PAGE)
3142 /* If there is more data, return */
3143 if (tx->tlen - tx->coalesce_idx)
3146 /* Whole packet is received; add any padding */
3147 pad_len = tx->packet_len & (sizeof(u32) - 1);
3149 pad_len = sizeof(u32) - pad_len;
3150 memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
3151 /* padding is taken care of for the coalescing case */
3152 tx->packet_len += pad_len;
3153 tx->tlen += pad_len;
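/*
 * Worked example of the padding arithmetic above (illustrative): SDMA
 * moves dword-sized data, so the packet is padded up to the next multiple
 * of sizeof(u32):
 *
 *	packet_len 7:  pad_len = 7 & 3 = 3 -> pad_len = 4 - 3 = 1 -> 8 bytes
 *	packet_len 12: pad_len = 12 & 3 = 0 -> no padding needed
 */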
3156 /* dma map the coalesce buffer */
3157 addr = dma_map_single(&dd->pcidev->dev,
3162 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
3163 __sdma_txclean(dd, tx);
3167 /* Add descriptor for coalesce buffer */
3168 tx->desc_limit = MAX_DESC;
3169 return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
3176 /* Update the SDEs when the LMC changes */
3177 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
3179 struct sdma_engine *sde;
3183 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
3184 SD(CHECK_SLID_MASK_SHIFT)) |
3185 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
3186 SD(CHECK_SLID_VALUE_SHIFT));
3188 for (i = 0; i < dd->num_sdma; i++) {
3189 hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
3191 sde = &dd->per_sdma[i];
3192 write_sde_csr(sde, SD(CHECK_SLID), sreg);
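/*
 * Worked example (illustrative, assuming the caller derives the mask from
 * the LMC as mask = ~((1 << lmc) - 1)): for lid = 0x1234 and lmc = 2,
 * mask = 0xfffc, so
 *
 *	CHECK_SLID_MASK  field = 0xfffc
 *	CHECK_SLID_VALUE field = 0x1234 & 0xfffc = 0x1234
 *
 * and the engine then accepts any source LID in 0x1234..0x1237.
 */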
3196 /* tx not dword sized - pad */
3197 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
3202 if ((unlikely(tx->num_desc == tx->desc_limit))) {
3203 rval = _extend_sdma_tx_descs(dd, tx);
3205 __sdma_txclean(dd, tx);
3209 /* finish the one just added */
3214 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
3215 _sdma_close_tx(dd, tx);
3220 * Add ahg to the sdma_txreq
3222 * The logic will consume up to 3
3223 * descriptors at the beginning of the tx.
3226 void _sdma_txreq_ahgadd(
3227 struct sdma_txreq *tx,
3233 u32 i, shift = 0, desc = 0;
3236 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
3239 mode = SDMA_AHG_APPLY_UPDATE1;
3240 else if (num_ahg <= 5)
3241 mode = SDMA_AHG_APPLY_UPDATE2;
3243 mode = SDMA_AHG_APPLY_UPDATE3;
3245 /* initialize the consumed descriptors to zero */
3247 case SDMA_AHG_APPLY_UPDATE3:
3249 tx->descs[2].qw[0] = 0;
3250 tx->descs[2].qw[1] = 0;
3252 case SDMA_AHG_APPLY_UPDATE2:
3254 tx->descs[1].qw[0] = 0;
3255 tx->descs[1].qw[1] = 0;
3259 tx->descs[0].qw[1] |=
3260 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
3261 << SDMA_DESC1_HEADER_INDEX_SHIFT) |
3262 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
3263 << SDMA_DESC1_HEADER_DWS_SHIFT) |
3264 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
3265 << SDMA_DESC1_HEADER_MODE_SHIFT) |
3266 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
3267 << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
3268 for (i = 0; i < (num_ahg - 1); i++) {
3269 if (!shift && !(i & 2))
3271 tx->descs[desc].qw[!!(i & 2)] |=
3274 shift = (shift + 32) & 63;
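/*
 * Layout sketch (illustrative) of the packing done above for
 * num_ahg == 5 (mode SDMA_AHG_APPLY_UPDATE2): the remaining update words
 * land two per quadword, low half first:
 *
 *	descs[0].qw[1] : index | dws | mode | ahg[0]
 *	descs[1].qw[0] : ahg[1] | (u64)ahg[2] << 32
 *	descs[1].qw[1] : ahg[3] | (u64)ahg[4] << 32
 */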
3279 * sdma_ahg_alloc - allocate an AHG entry
3280 * @sde: engine to allocate from
3283 * 0-31 when successful, -EINVAL if the engine is invalid,
3284 * -ENOSPC if an entry is not available
3286 int sdma_ahg_alloc(struct sdma_engine *sde)
3292 trace_hfi1_ahg_allocate(sde, -EINVAL);
3296 nr = ffz(READ_ONCE(sde->ahg_bits));
3298 trace_hfi1_ahg_allocate(sde, -ENOSPC);
3301 oldbit = test_and_set_bit(nr, &sde->ahg_bits);
3306 trace_hfi1_ahg_allocate(sde, nr);
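/*
 * Typical pairing (an illustrative sketch, not from this file): a context
 * claims an AHG entry, stamps its index into headers via
 * _sdma_txreq_ahgadd(), and returns the entry when the context is torn
 * down:
 *
 *	int ahg = sdma_ahg_alloc(sde);
 *
 *	if (ahg >= 0) {
 *		(build headers referencing entry ahg)
 *		sdma_ahg_free(sde, ahg);
 *	}
 */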
3311 * sdma_ahg_free - free an AHG entry
3312 * @sde: engine to return AHG entry
3313 * @ahg_index: index to free
3315 * This routine frees the indicated AHG entry.
3317 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
3321 trace_hfi1_ahg_deallocate(sde, ahg_index);
3322 if (ahg_index < 0 || ahg_index > 31)
3324 clear_bit(ahg_index, &sde->ahg_bits);
3328 * SPC freeze handling for SDMA engines. Called when the driver knows
3329 * the SPC is going into a freeze but before the freeze is fully
3330 * settled, generally on an error interrupt.
3332 * This event will pull the engine out of running so no more entries can be
3333 * added to the engine's queue.
3335 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
3338 enum sdma_events event = link_down ? sdma_event_e85_link_down :
3339 sdma_event_e80_hw_freeze;
3341 /* set up the wait but do not wait here */
3342 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3344 /* tell all engines to stop running and wait */
3345 for (i = 0; i < dd->num_sdma; i++)
3346 sdma_process_event(&dd->per_sdma[i], event);
3348 /* sdma_freeze() will wait for all engines to have stopped */
3352 * SPC freeze handling for SDMA engines. Called when the driver knows
3353 * the SPC is fully frozen.
3355 void sdma_freeze(struct hfi1_devdata *dd)
3361 * Make sure all engines have moved out of the running state before continuing.
3364 ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
3365 atomic_read(&dd->sdma_unfreeze_count) <=
3367 /* interrupted or count is negative, then unloading - just exit */
3368 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
3371 /* set up the count for the next wait */
3372 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3374 /* tell all engines that the SPC is frozen; they can start cleaning */
3375 for (i = 0; i < dd->num_sdma; i++)
3376 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
3379 * Wait for everyone to finish software clean before exiting. The
3380 * software clean will read engine CSRs, so it must be completed before
3381 * the next step, which will clear the engine CSRs.
3383 (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
3384 atomic_read(&dd->sdma_unfreeze_count) <= 0);
3385 /* no need to check results - done no matter what */
3389 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
3391 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
3392 * that is left is a software clean. We could do it after the SPC is fully
3393 * frozen, but then we'd have to add another state to wait for the unfreeze.
3394 * Instead, just defer the software clean until the unfreeze step.
3396 void sdma_unfreeze(struct hfi1_devdata *dd)
3400 /* tell all engines to start freeze clean up */
3401 for (i = 0; i < dd->num_sdma; i++)
3402 sdma_process_event(&dd->per_sdma[i],
3403 sdma_event_e82_hw_unfreeze);
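/*
 * Putting the three entry points together (an illustrative ordering, as
 * described in the comments above):
 *
 *	sdma_freeze_notify(dd, link_down);	(pre-freeze: engines stop)
 *	(wait for the SPC to be fully frozen)
 *	sdma_freeze(dd);			(engines halt and hw-clean)
 *	(SPC unfreeze happens elsewhere)
 *	sdma_unfreeze(dd);			(software clean + restart)
 */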
3407 * _sdma_engine_progress_schedule() - schedule progress on engine
3408 * @sde: sdma_engine to schedule progress
3411 void _sdma_engine_progress_schedule(
3412 struct sdma_engine *sde)
3414 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
3415 /* assume we have selected a good cpu */
3417 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
3418 sde->progress_mask);