2 * Copyright(c) 2015 - 2018 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/spinlock.h>
49 #include <linux/seqlock.h>
50 #include <linux/netdevice.h>
51 #include <linux/moduleparam.h>
52 #include <linux/bitops.h>
53 #include <linux/timer.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
64 /* must be a power of 2, >= 64 and <= 32768 */
65 #define SDMA_DESCQ_CNT 2048
66 #define SDMA_DESC_INTR 64
67 #define INVALID_TAIL 0xffff
69 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
70 module_param(sdma_descq_cnt, uint, S_IRUGO);
71 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
73 static uint sdma_idle_cnt = 250;
74 module_param(sdma_idle_cnt, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
78 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
79 MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");
81 static uint sdma_desct_intr = SDMA_DESC_INTR;
82 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
83 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
85 #define SDMA_WAIT_BATCH_SIZE 20
86 /* max wait time for a SDMA engine to indicate it has halted */
87 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
88 /* all SDMA engine errors that cause a halt */
90 #define SD(name) SEND_DMA_##name
91 #define ALL_SDMA_ENG_HALT_ERRS \
92 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
93 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
94 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
95 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
107 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
108 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
109 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
111 /* sdma_sendctrl operations */
112 #define SDMA_SENDCTRL_OP_ENABLE BIT(0)
113 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
114 #define SDMA_SENDCTRL_OP_HALT BIT(2)
115 #define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
117 /* handle long defines */
118 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
119 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
120 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
121 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
123 static const char * const sdma_state_names[] = {
124 [sdma_state_s00_hw_down] = "s00_HwDown",
125 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
126 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
127 [sdma_state_s20_idle] = "s20_Idle",
128 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
129 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
130 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
131 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
132 [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
133 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
134 [sdma_state_s99_running] = "s99_Running",
137 #ifdef CONFIG_SDMA_VERBOSITY
138 static const char * const sdma_event_names[] = {
139 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
140 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
141 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
142 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
143 [sdma_event_e30_go_running] = "e30_GoRunning",
144 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
145 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
146 [sdma_event_e60_hw_halted] = "e60_HwHalted",
147 [sdma_event_e70_go_idle] = "e70_GoIdle",
148 [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
149 [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
150 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
151 [sdma_event_e85_link_down] = "e85_LinkDown",
152 [sdma_event_e90_sw_halted] = "e90_SwHalted",
156 static const struct sdma_set_state_action sdma_action_table[] = {
157 [sdma_state_s00_hw_down] = {
158 .go_s99_running_tofalse = 1,
164 [sdma_state_s10_hw_start_up_halt_wait] = {
170 [sdma_state_s15_hw_start_up_clean_wait] = {
176 [sdma_state_s20_idle] = {
182 [sdma_state_s30_sw_clean_up_wait] = {
188 [sdma_state_s40_hw_clean_up_wait] = {
194 [sdma_state_s50_hw_halt_wait] = {
200 [sdma_state_s60_idle_halt_wait] = {
201 .go_s99_running_tofalse = 1,
207 [sdma_state_s80_hw_freeze] = {
213 [sdma_state_s82_freeze_sw_clean] = {
219 [sdma_state_s99_running] = {
224 .go_s99_running_totrue = 1,
228 #define SDMA_TAIL_UPDATE_THRESH 0x1F
230 /* declare all statics here rather than keep sorting */
231 static void sdma_complete(struct kref *);
232 static void sdma_finalput(struct sdma_state *);
233 static void sdma_get(struct sdma_state *);
234 static void sdma_hw_clean_up_task(unsigned long);
235 static void sdma_put(struct sdma_state *);
236 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
237 static void sdma_start_hw_clean_up(struct sdma_engine *);
238 static void sdma_sw_clean_up_task(unsigned long);
239 static void sdma_sendctrl(struct sdma_engine *, unsigned);
240 static void init_sdma_regs(struct sdma_engine *, u32, uint);
241 static void sdma_process_event(
242 struct sdma_engine *sde,
243 enum sdma_events event);
244 static void __sdma_process_event(
245 struct sdma_engine *sde,
246 enum sdma_events event);
247 static void dump_sdma_state(struct sdma_engine *sde);
248 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
249 static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
250 static void sdma_flush_descq(struct sdma_engine *sde);
253 * sdma_state_name() - return state string from enum
256 static const char *sdma_state_name(enum sdma_states state)
258 return sdma_state_names[state];
261 static void sdma_get(struct sdma_state *ss)
266 static void sdma_complete(struct kref *kref)
268 struct sdma_state *ss =
269 container_of(kref, struct sdma_state, kref);
274 static void sdma_put(struct sdma_state *ss)
276 kref_put(&ss->kref, sdma_complete);
279 static void sdma_finalput(struct sdma_state *ss)
282 wait_for_completion(&ss->comp);
285 static inline void write_sde_csr(
286 struct sdma_engine *sde,
290 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
293 static inline u64 read_sde_csr(
294 struct sdma_engine *sde,
297 return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
301 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
302 * sdma engine 'sde' to drop to 0.
304 static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
307 u64 off = 8 * sde->this_idx;
308 struct hfi1_devdata *dd = sde->dd;
315 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
317 reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
318 reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
321 /* counter is reset if occupancy count changes */
325 /* timed out - bounce the link */
326 dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
327 __func__, sde->this_idx, (u32)reg);
328 queue_work(dd->pport->link_wq,
329 &dd->pport->link_bounce_work);
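/*
 * Illustrative example for the egress wait above (engine index assumed):
 * for engine 3, off = 8 * 3 = 24, so the read targets
 * SEND_EGRESS_SEND_DMA_STATUS + 24.  The packet occupancy is isolated with
 * SDMA_EGRESS_PACKET_OCCUPANCY_SMASK and shifted down by
 * SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT; the wait completes when this count
 * drops to 0, or times out and bounces the link as shown above.
 */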
337 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
338 * and pause for credit return.
340 void sdma_wait(struct hfi1_devdata *dd)
344 for (i = 0; i < dd->num_sdma; i++) {
345 struct sdma_engine *sde = &dd->per_sdma[i];
347 sdma_wait_for_packet_egress(sde, 0);
351 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
355 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
358 reg &= SD(DESC_CNT_CNT_MASK);
359 reg <<= SD(DESC_CNT_CNT_SHIFT);
360 write_sde_csr(sde, SD(DESC_CNT), reg);
363 static inline void complete_tx(struct sdma_engine *sde,
364 struct sdma_txreq *tx,
367 /* protect against complete modifying */
368 struct iowait *wait = tx->wait;
369 callback_t complete = tx->complete;
371 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
372 trace_hfi1_sdma_out_sn(sde, tx->sn);
373 if (WARN_ON_ONCE(sde->head_sn != tx->sn))
374 dd_dev_err(sde->dd, "expected %llu got %llu\n",
375 sde->head_sn, tx->sn);
378 __sdma_txclean(sde->dd, tx);
380 (*complete)(tx, res);
381 if (iowait_sdma_dec(wait))
382 iowait_drain_wakeup(wait);
386 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
388 * Depending on timing there can be txreqs in two places:
389 * - in the descq ring
390 * - in the flush list
392 * To avoid ordering issues the descq ring needs to be flushed
393 * first followed by the flush list.
395 * This routine is called from two places
396 * - From a work queue item
397 * - Directly from the state machine just before setting the state to running
400 * Must be called with head_lock held
403 static void sdma_flush(struct sdma_engine *sde)
405 struct sdma_txreq *txp, *txp_next;
406 LIST_HEAD(flushlist);
410 /* flush from head to tail */
411 sdma_flush_descq(sde);
412 spin_lock_irqsave(&sde->flushlist_lock, flags);
413 /* copy flush list */
414 list_splice_init(&sde->flushlist, &flushlist);
415 spin_unlock_irqrestore(&sde->flushlist_lock, flags);
416 /* flush from flush list */
417 list_for_each_entry_safe(txp, txp_next, &flushlist, list)
418 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
419 /* wakeup QPs orphaned on the dmawait list */
421 struct iowait *w, *nw;
423 seq = read_seqbegin(&sde->waitlock);
424 if (!list_empty(&sde->dmawait)) {
425 write_seqlock(&sde->waitlock);
426 list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
428 w->wakeup(w, SDMA_AVAIL_REASON);
429 list_del_init(&w->list);
432 write_sequnlock(&sde->waitlock);
434 } while (read_seqretry(&sde->waitlock, seq));
438 * Fields a work request for flushing the descq ring
441 * If the engine has been brought to running during
442 * the scheduling delay, the flush is ignored, assuming
443 * that the process of bringing the engine to running
444 * would have done this flush prior to going to running.
447 static void sdma_field_flush(struct work_struct *work)
450 struct sdma_engine *sde =
451 container_of(work, struct sdma_engine, flush_worker);
453 write_seqlock_irqsave(&sde->head_lock, flags);
454 if (!__sdma_running(sde))
456 write_sequnlock_irqrestore(&sde->head_lock, flags);
459 static void sdma_err_halt_wait(struct work_struct *work)
461 struct sdma_engine *sde = container_of(work, struct sdma_engine,
464 unsigned long timeout;
466 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
468 statuscsr = read_sde_csr(sde, SD(STATUS));
469 statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
472 if (time_after(jiffies, timeout)) {
474 "SDMA engine %d - timeout waiting for engine to halt\n",
477 * Continue anyway. This could happen if there was
478 * an uncorrectable error in the wrong spot.
482 usleep_range(80, 120);
485 sdma_process_event(sde, sdma_event_e15_hw_halt_done);
488 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
490 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
492 struct hfi1_devdata *dd = sde->dd;
494 for (index = 0; index < dd->num_sdma; index++) {
495 struct sdma_engine *curr_sdma = &dd->per_sdma[index];
497 if (curr_sdma != sde)
498 curr_sdma->progress_check_head =
499 curr_sdma->descq_head;
502 "SDMA engine %d - check scheduled\n",
504 mod_timer(&sde->err_progress_check_timer, jiffies + 10);
508 static void sdma_err_progress_check(struct timer_list *t)
511 struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
513 dd_dev_err(sde->dd, "SDE progress check event\n");
514 for (index = 0; index < sde->dd->num_sdma; index++) {
515 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
518 /* check progress on each engine except the current one */
522 * We must lock interrupts when acquiring sde->lock,
523 * to avoid a deadlock if interrupt triggers and spins on
524 * the same lock on same CPU
526 spin_lock_irqsave(&curr_sde->tail_lock, flags);
527 write_seqlock(&curr_sde->head_lock);
529 /* skip non-running queues */
530 if (curr_sde->state.current_state != sdma_state_s99_running) {
531 write_sequnlock(&curr_sde->head_lock);
532 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
536 if ((curr_sde->descq_head != curr_sde->descq_tail) &&
537 (curr_sde->descq_head ==
538 curr_sde->progress_check_head))
539 __sdma_process_event(curr_sde,
540 sdma_event_e90_sw_halted);
541 write_sequnlock(&curr_sde->head_lock);
542 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
544 schedule_work(&sde->err_halt_worker);
547 static void sdma_hw_clean_up_task(unsigned long opaque)
549 struct sdma_engine *sde = (struct sdma_engine *)opaque;
553 #ifdef CONFIG_SDMA_VERBOSITY
554 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
555 sde->this_idx, slashstrip(__FILE__), __LINE__,
558 statuscsr = read_sde_csr(sde, SD(STATUS));
559 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
565 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
568 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
570 return sde->tx_ring[sde->tx_head & sde->sdma_mask];
574 * flush ring for recovery
576 static void sdma_flush_descq(struct sdma_engine *sde)
580 struct sdma_txreq *txp = get_txhead(sde);
582 /* The reason for some of the complexity of this code is that
583 * not all descriptors have corresponding txps. So, we have to
584 * be able to skip over descs until we wander into the range of
585 * the next txp on the list.
587 head = sde->descq_head & sde->sdma_mask;
588 tail = sde->descq_tail & sde->sdma_mask;
589 while (head != tail) {
590 /* advance head, wrap if needed */
591 head = ++sde->descq_head & sde->sdma_mask;
592 /* if now past this txp's descs, do the callback */
593 if (txp && txp->next_descq_idx == head) {
594 /* remove from list */
595 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
596 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
597 trace_hfi1_sdma_progress(sde, head, tail, txp);
598 txp = get_txhead(sde);
603 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
606 static void sdma_sw_clean_up_task(unsigned long opaque)
608 struct sdma_engine *sde = (struct sdma_engine *)opaque;
611 spin_lock_irqsave(&sde->tail_lock, flags);
612 write_seqlock(&sde->head_lock);
615 * At this point, the following should always be true:
616 * - We are halted, so no more descriptors are getting retired.
617 * - We are not running, so no one is submitting new work.
618 * - Only we can send the e40_sw_cleaned, so we can't start
619 * running again until we say so. So, the active list and
620 * descq are ours to play with.
624 * In the error clean up sequence, software clean must be called
625 * before the hardware clean so we can use the hardware head in
626 * the progress routine. A hardware clean or SPC unfreeze will
627 * reset the hardware head.
629 * Process all retired requests. The progress routine will use the
630 * latest physical hardware head - we are not running so speed does not matter.
633 sdma_make_progress(sde, 0);
638 * Reset our notion of head and tail.
639 * Note that the HW registers have been reset via an earlier clean up.
644 sde->desc_avail = sdma_descq_freecnt(sde);
647 __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
649 write_sequnlock(&sde->head_lock);
650 spin_unlock_irqrestore(&sde->tail_lock, flags);
653 static void sdma_sw_tear_down(struct sdma_engine *sde)
655 struct sdma_state *ss = &sde->state;
657 /* Releasing this reference means the state machine has stopped. */
660 /* stop waiting for all unfreeze events to complete */
661 atomic_set(&sde->dd->sdma_unfreeze_count, -1);
662 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
665 static void sdma_start_hw_clean_up(struct sdma_engine *sde)
667 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
670 static void sdma_set_state(struct sdma_engine *sde,
671 enum sdma_states next_state)
673 struct sdma_state *ss = &sde->state;
674 const struct sdma_set_state_action *action = sdma_action_table;
677 trace_hfi1_sdma_state(
679 sdma_state_names[ss->current_state],
680 sdma_state_names[next_state]);
682 /* debugging bookkeeping */
683 ss->previous_state = ss->current_state;
684 ss->previous_op = ss->current_op;
685 ss->current_state = next_state;
687 if (ss->previous_state != sdma_state_s99_running &&
688 next_state == sdma_state_s99_running)
691 if (action[next_state].op_enable)
692 op |= SDMA_SENDCTRL_OP_ENABLE;
694 if (action[next_state].op_intenable)
695 op |= SDMA_SENDCTRL_OP_INTENABLE;
697 if (action[next_state].op_halt)
698 op |= SDMA_SENDCTRL_OP_HALT;
700 if (action[next_state].op_cleanup)
701 op |= SDMA_SENDCTRL_OP_CLEANUP;
703 if (action[next_state].go_s99_running_tofalse)
704 ss->go_s99_running = 0;
706 if (action[next_state].go_s99_running_totrue)
707 ss->go_s99_running = 1;
710 sdma_sendctrl(sde, ss->current_op);
714 * sdma_get_descq_cnt() - called when device probed
716 * Return a validated descq count.
718 * This is currently only used in the verbs initialization to build the tx
721 * This will probably be deleted in favor of a more scalable approach to
725 u16 sdma_get_descq_cnt(void)
727 u16 count = sdma_descq_cnt;
730 return SDMA_DESCQ_CNT;
731 /* count must be a power of 2, at least 64 and at most 32768.
732 * Otherwise return default.
734 if (!is_power_of_2(count))
735 return SDMA_DESCQ_CNT;
736 if (count < 64 || count > 32768)
737 return SDMA_DESCQ_CNT;
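/*
 * Worked example of the validation above (parameter values assumed): an
 * sdma_descq_cnt of 300 is rejected (not a power of 2) and the 2048
 * SDMA_DESCQ_CNT default is returned; 4096 passes both checks since it is
 * a power of 2 within [64, 32768].
 */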
742 * sdma_engine_get_vl() - return vl for a given sdma engine
745 * This function returns the vl mapped to a given engine, or an error if
746 * the mapping can't be found. The mapping fields are protected by RCU.
748 int sdma_engine_get_vl(struct sdma_engine *sde)
750 struct hfi1_devdata *dd = sde->dd;
751 struct sdma_vl_map *m;
754 if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
758 m = rcu_dereference(dd->sdma_map);
763 vl = m->engine_to_vl[sde->this_idx];
770 * sdma_select_engine_vl() - select sdma engine
772 * @selector: a spreading factor
776 * This function returns an engine based on the selector and a vl. The
777 * mapping fields are protected by RCU.
779 struct sdma_engine *sdma_select_engine_vl(
780 struct hfi1_devdata *dd,
784 struct sdma_vl_map *m;
785 struct sdma_map_elem *e;
786 struct sdma_engine *rval;
788 /* NOTE This should only happen if SC->VL changed after the initial
789 * checks on the QP/AH
790 * Default will return engine 0 below
798 m = rcu_dereference(dd->sdma_map);
801 return &dd->per_sdma[0];
803 e = m->map[vl & m->mask];
804 rval = e->sde[selector & e->mask];
808 rval = !rval ? &dd->per_sdma[0] : rval;
809 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
814 * sdma_select_engine_sc() - select sdma engine
816 * @selector: a spreading factor
820 * This function returns an engine based on the selector and an sc.
822 struct sdma_engine *sdma_select_engine_sc(
823 struct hfi1_devdata *dd,
827 u8 vl = sc_to_vlt(dd, sc5);
829 return sdma_select_engine_vl(dd, selector, vl);
832 struct sdma_rht_map_elem {
835 struct sdma_engine *sde[0];
838 struct sdma_rht_node {
839 unsigned long cpu_id;
840 struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
841 struct rhash_head node;
844 #define NR_CPUS_HINT 192
846 static const struct rhashtable_params sdma_rht_params = {
847 .nelem_hint = NR_CPUS_HINT,
848 .head_offset = offsetof(struct sdma_rht_node, node),
849 .key_offset = offsetof(struct sdma_rht_node, cpu_id),
850 .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
853 .automatic_shrinking = true,
857 * sdma_select_user_engine() - select sdma engine based on user setup
859 * @selector: a spreading factor
862 * This function returns an sdma engine for a user sdma request.
863 * User defined sdma engine affinity setting is honored when applicable,
864 * otherwise system default sdma engine mapping is used. To ensure correct
865 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
867 struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
870 struct sdma_rht_node *rht_node;
871 struct sdma_engine *sde = NULL;
872 const struct cpumask *current_mask = &current->cpus_allowed;
873 unsigned long cpu_id;
876 * To ensure that the same sdma engine(s) are always selected,
877 * make sure the process is pinned to this CPU only.
879 if (cpumask_weight(current_mask) != 1)
882 cpu_id = smp_processor_id();
884 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
887 if (rht_node && rht_node->map[vl]) {
888 struct sdma_rht_map_elem *map = rht_node->map[vl];
890 sde = map->sde[selector & map->mask];
898 return sdma_select_engine_vl(dd, selector, vl);
901 static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
905 for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
906 map->sde[map->ctr + i] = map->sde[i];
909 static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
910 struct sdma_engine *sde)
914 /* only need to check the first ctr entries for a match */
915 for (i = 0; i < map->ctr; i++) {
916 if (map->sde[i] == sde) {
917 memmove(&map->sde[i], &map->sde[i + 1],
918 (map->ctr - i - 1) * sizeof(map->sde[0]));
920 pow = roundup_pow_of_two(map->ctr ? : 1);
922 sdma_populate_sde_map(map);
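/*
 * Illustrative example for the two helpers above (engine names assumed):
 * with ctr = 3 and sde[] = {A, B, C}, roundup_pow_of_two(3) = 4, so
 * sdma_populate_sde_map() copies the first entry into slot 3, giving
 * {A, B, C, A} behind a selector mask of 3.  Removing B with
 * sdma_cleanup_sde_map() compacts the list to {A, C}, recomputes the
 * power-of-two size as 2, and repopulates, leaving {A, C} with mask 1.
 */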
929 * Prevents concurrent reads and writes of the sdma engine cpu_mask
931 static DEFINE_MUTEX(process_to_sde_mutex);
933 ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
936 struct hfi1_devdata *dd = sde->dd;
937 cpumask_var_t mask, new_mask;
940 struct sdma_rht_node *rht_node;
942 vl = sdma_engine_get_vl(sde);
943 if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
946 ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
950 ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
952 free_cpumask_var(mask);
955 ret = cpulist_parse(buf, mask);
959 if (!cpumask_subset(mask, cpu_online_mask)) {
960 dd_dev_warn(sde->dd, "Invalid CPU mask\n");
965 sz = sizeof(struct sdma_rht_map_elem) +
966 (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
968 mutex_lock(&process_to_sde_mutex);
970 for_each_cpu(cpu, mask) {
971 /* Check if we have this already mapped */
972 if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
973 cpumask_set_cpu(cpu, new_mask);
977 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
980 rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
986 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
987 if (!rht_node->map[vl]) {
992 rht_node->cpu_id = cpu;
993 rht_node->map[vl]->mask = 0;
994 rht_node->map[vl]->ctr = 1;
995 rht_node->map[vl]->sde[0] = sde;
997 ret = rhashtable_insert_fast(dd->sdma_rht,
1001 kfree(rht_node->map[vl]);
1003 dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
1011 /* Add new user mappings */
1012 if (!rht_node->map[vl])
1013 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
1015 if (!rht_node->map[vl]) {
1020 rht_node->map[vl]->ctr++;
1021 ctr = rht_node->map[vl]->ctr;
1022 rht_node->map[vl]->sde[ctr - 1] = sde;
1023 pow = roundup_pow_of_two(ctr);
1024 rht_node->map[vl]->mask = pow - 1;
1026 /* Populate the sde map table */
1027 sdma_populate_sde_map(rht_node->map[vl]);
1029 cpumask_set_cpu(cpu, new_mask);
1032 /* Clean up old mappings */
1033 for_each_cpu(cpu, cpu_online_mask) {
1034 struct sdma_rht_node *rht_node;
1036 /* Don't clean up sdes that are set in the new mask */
1037 if (cpumask_test_cpu(cpu, mask))
1040 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
1046 /* Remove mappings for old sde */
1047 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1048 if (rht_node->map[i])
1049 sdma_cleanup_sde_map(rht_node->map[i],
1052 /* Free empty hash table entries */
1053 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1054 if (!rht_node->map[i])
1057 if (rht_node->map[i]->ctr) {
1064 ret = rhashtable_remove_fast(dd->sdma_rht,
1069 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1070 kfree(rht_node->map[i]);
1077 cpumask_copy(&sde->cpu_mask, new_mask);
1079 mutex_unlock(&process_to_sde_mutex);
1081 free_cpumask_var(mask);
1082 free_cpumask_var(new_mask);
1083 return ret ? : strnlen(buf, PAGE_SIZE);
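/*
 * Usage sketch (input string assumed): writing "0-3" through this
 * attribute parses to a cpumask of CPUs 0-3, validates it against
 * cpu_online_mask, and adds this engine to each CPU's rhashtable entry
 * for the engine's VL.  CPUs present in the old sde->cpu_mask but absent
 * from the new mask are cleaned up afterwards, and empty hash entries are
 * removed and freed.
 */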
1086 ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
1088 mutex_lock(&process_to_sde_mutex);
1089 if (cpumask_empty(&sde->cpu_mask))
1090 snprintf(buf, PAGE_SIZE, "%s\n", "empty");
1092 cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
1093 mutex_unlock(&process_to_sde_mutex);
1094 return strnlen(buf, PAGE_SIZE);
1097 static void sdma_rht_free(void *ptr, void *arg)
1099 struct sdma_rht_node *rht_node = ptr;
1102 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1103 kfree(rht_node->map[i]);
1109 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
1114 * This routine dumps the process to sde mappings per cpu
1116 void sdma_seqfile_dump_cpu_list(struct seq_file *s,
1117 struct hfi1_devdata *dd,
1118 unsigned long cpuid)
1120 struct sdma_rht_node *rht_node;
1123 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
1128 seq_printf(s, "cpu%3lu: ", cpuid);
1129 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1130 if (!rht_node->map[i] || !rht_node->map[i]->ctr)
1133 seq_printf(s, " vl%d: [", i);
1135 for (j = 0; j < rht_node->map[i]->ctr; j++) {
1136 if (!rht_node->map[i]->sde[j])
1142 seq_printf(s, " sdma%2d",
1143 rht_node->map[i]->sde[j]->this_idx);
1152 * Free the indicated map struct
1154 static void sdma_map_free(struct sdma_vl_map *m)
1158 for (i = 0; m && i < m->actual_vls; i++)
1164 * Handle RCU callback
1166 static void sdma_map_rcu_callback(struct rcu_head *list)
1168 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
1174 * sdma_map_init - called when # vls change
1176 * @port: port number
1177 * @num_vls: number of vls
1178 * @vl_engines: per vl engine mapping (optional)
1180 * This routine changes the mapping based on the number of vls.
1182 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
1183 * implies auto computing the loading and giving each VL a uniform
1184 * distribution of engines per VL.
1186 * The auto algorithm computes the sde_per_vl and the number of extra
1187 * engines. Any extra engines are added from the last VL on down.
1189 * rcu locking is used here to control access to the mapping fields.
1191 * If either the num_vls or num_sdma are non-power of 2, the array sizes
1192 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
1193 * up to the next highest power of 2 and the first entry is reused
1194 * in a round robin fashion.
1196 * If an error occurs the map change is not done and the mapping is not changed.
1200 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
1203 int extra, sde_per_vl;
1205 u8 lvl_engines[OPA_MAX_VLS];
1206 struct sdma_vl_map *oldmap, *newmap;
1208 if (!(dd->flags & HFI1_HAS_SEND_DMA))
1212 /* truncate divide */
1213 sde_per_vl = dd->num_sdma / num_vls;
1215 extra = dd->num_sdma % num_vls;
1216 vl_engines = lvl_engines;
1217 /* add extras from last vl down */
1218 for (i = num_vls - 1; i >= 0; i--, extra--)
1219 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
1223 sizeof(struct sdma_vl_map) +
1224 roundup_pow_of_two(num_vls) *
1225 sizeof(struct sdma_map_elem *),
1229 newmap->actual_vls = num_vls;
1230 newmap->vls = roundup_pow_of_two(num_vls);
1231 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1232 /* initialize back-map */
1233 for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
1234 newmap->engine_to_vl[i] = -1;
1235 for (i = 0; i < newmap->vls; i++) {
1236 /* save for wrap around */
1237 int first_engine = engine;
1239 if (i < newmap->actual_vls) {
1240 int sz = roundup_pow_of_two(vl_engines[i]);
1242 /* only allocate once */
1243 newmap->map[i] = kzalloc(
1244 sizeof(struct sdma_map_elem) +
1245 sz * sizeof(struct sdma_engine *),
1247 if (!newmap->map[i])
1249 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1250 /* assign engines */
1251 for (j = 0; j < sz; j++) {
1252 newmap->map[i]->sde[j] =
1253 &dd->per_sdma[engine];
1254 if (++engine >= first_engine + vl_engines[i])
1255 /* wrap back to first engine */
1256 engine = first_engine;
1258 /* assign back-map */
1259 for (j = 0; j < vl_engines[i]; j++)
1260 newmap->engine_to_vl[first_engine + j] = i;
1262 /* just re-use entry without allocating */
1263 newmap->map[i] = newmap->map[i % num_vls];
1265 engine = first_engine + vl_engines[i];
1267 /* newmap in hand, save old map */
1268 spin_lock_irq(&dd->sde_map_lock);
1269 oldmap = rcu_dereference_protected(dd->sdma_map,
1270 lockdep_is_held(&dd->sde_map_lock));
1272 /* publish newmap */
1273 rcu_assign_pointer(dd->sdma_map, newmap);
1275 spin_unlock_irq(&dd->sde_map_lock);
1276 /* success, free any old map after grace period */
1278 call_rcu(&oldmap->list, sdma_map_rcu_callback);
1281 /* free any partial allocation */
1282 sdma_map_free(newmap);
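/*
 * Worked example of the auto distribution above (counts assumed): with
 * dd->num_sdma = 16 and num_vls = 5, sde_per_vl = 3 and extra = 1, so the
 * extra engine is added from the last VL down and vl_engines[] becomes
 * { 3, 3, 3, 3, 4 }.  newmap->vls is rounded up to 8, and map entries
 * 5..7 simply re-use entries 0..2 without allocating, as coded above.
 */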
1287 * sdma_clean() Clean up allocated memory
1288 * @dd: struct hfi1_devdata
1289 * @num_engines: num sdma engines
1291 * This routine can be called regardless of the success of
1294 void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
1297 struct sdma_engine *sde;
1299 if (dd->sdma_pad_dma) {
1300 dma_free_coherent(&dd->pcidev->dev, 4,
1301 (void *)dd->sdma_pad_dma,
1303 dd->sdma_pad_dma = NULL;
1304 dd->sdma_pad_phys = 0;
1306 if (dd->sdma_heads_dma) {
1307 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
1308 (void *)dd->sdma_heads_dma,
1309 dd->sdma_heads_phys);
1310 dd->sdma_heads_dma = NULL;
1311 dd->sdma_heads_phys = 0;
1313 for (i = 0; dd->per_sdma && i < num_engines; ++i) {
1314 sde = &dd->per_sdma[i];
1316 sde->head_dma = NULL;
1322 sde->descq_cnt * sizeof(u64[2]),
1327 sde->descq_phys = 0;
1329 kvfree(sde->tx_ring);
1330 sde->tx_ring = NULL;
1332 spin_lock_irq(&dd->sde_map_lock);
1333 sdma_map_free(rcu_access_pointer(dd->sdma_map));
1334 RCU_INIT_POINTER(dd->sdma_map, NULL);
1335 spin_unlock_irq(&dd->sde_map_lock);
1337 kfree(dd->per_sdma);
1338 dd->per_sdma = NULL;
1341 rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
1342 kfree(dd->sdma_rht);
1343 dd->sdma_rht = NULL;
1348 * sdma_init() - called when device probed
1350 * @port: port number (currently only zero)
1352 * Initializes each sde and its csrs.
1353 * Interrupts are not required to be enabled.
1356 * 0 - success, -errno on failure
1358 int sdma_init(struct hfi1_devdata *dd, u8 port)
1361 struct sdma_engine *sde;
1362 struct rhashtable *tmp_sdma_rht;
1365 struct hfi1_pportdata *ppd = dd->pport + port;
1366 u32 per_sdma_credits;
1367 uint idle_cnt = sdma_idle_cnt;
1368 size_t num_engines = chip_sdma_engines(dd);
1371 if (!HFI1_CAP_IS_KSET(SDMA)) {
1372 HFI1_CAP_CLEAR(SDMA_AHG);
1376 /* can't exceed chip support */
1377 mod_num_sdma <= chip_sdma_engines(dd) &&
1378 /* count must be >= vls */
1379 mod_num_sdma >= num_vls)
1380 num_engines = mod_num_sdma;
1382 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1383 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
1384 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1385 chip_sdma_mem_size(dd));
1388 chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);
1390 /* set up freeze waitqueue */
1391 init_waitqueue_head(&dd->sdma_unfreeze_wq);
1392 atomic_set(&dd->sdma_unfreeze_count, 0);
1394 descq_cnt = sdma_get_descq_cnt();
1395 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1396 num_engines, descq_cnt);
1398 /* alloc memory for array of send engines */
1399 dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
1400 GFP_KERNEL, dd->node);
1404 idle_cnt = ns_to_cclock(dd, idle_cnt);
1407 SDMA_DESC1_HEAD_TO_HOST_FLAG;
1410 SDMA_DESC1_INT_REQ_FLAG;
1412 if (!sdma_desct_intr)
1413 sdma_desct_intr = SDMA_DESC_INTR;
1415 /* Allocate memory for SendDMA descriptor FIFOs */
1416 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1417 sde = &dd->per_sdma[this_idx];
1420 sde->this_idx = this_idx;
1421 sde->descq_cnt = descq_cnt;
1422 sde->desc_avail = sdma_descq_freecnt(sde);
1423 sde->sdma_shift = ilog2(descq_cnt);
1424 sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1426 /* Create a mask specifically for each interrupt source */
1427 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
1429 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
1431 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
1433 /* Create a combined mask to cover all 3 interrupt sources */
1434 sde->imask = sde->int_mask | sde->progress_mask | sde->idle_mask;
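/*
 * Illustrative mask layout (TXE_NUM_SDMA_ENGINES assumed to be 16): for
 * engine index 3, int_mask is bit 3, progress_mask is bit 19 and
 * idle_mask is bit 35, so imask selects all three interrupt sources for
 * this engine and no others.
 */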
1437 spin_lock_init(&sde->tail_lock);
1438 seqlock_init(&sde->head_lock);
1439 spin_lock_init(&sde->senddmactrl_lock);
1440 spin_lock_init(&sde->flushlist_lock);
1441 seqlock_init(&sde->waitlock);
1442 /* ensure there is always a zero bit */
1443 sde->ahg_bits = 0xfffffffe00000000ULL;
1445 sdma_set_state(sde, sdma_state_s00_hw_down);
1447 /* set up reference counting */
1448 kref_init(&sde->state.kref);
1449 init_completion(&sde->state.comp);
1451 INIT_LIST_HEAD(&sde->flushlist);
1452 INIT_LIST_HEAD(&sde->dmawait);
1455 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1457 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1458 (unsigned long)sde);
1460 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1461 (unsigned long)sde);
1462 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1463 INIT_WORK(&sde->flush_worker, sdma_field_flush);
1465 sde->progress_check_head = 0;
1467 timer_setup(&sde->err_progress_check_timer,
1468 sdma_err_progress_check, 0);
1470 sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
1471 descq_cnt * sizeof(u64[2]),
1472 &sde->descq_phys, GFP_KERNEL);
1476 kvzalloc_node(array_size(descq_cnt,
1477 sizeof(struct sdma_txreq *)),
1478 GFP_KERNEL, dd->node);
1483 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1484 /* Allocate memory for DMA of head registers to memory */
1485 dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
1486 dd->sdma_heads_size,
1487 &dd->sdma_heads_phys,
1489 if (!dd->sdma_heads_dma) {
1490 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1494 /* Allocate memory for pad */
1495 dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
1496 &dd->sdma_pad_phys, GFP_KERNEL);
1497 if (!dd->sdma_pad_dma) {
1498 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1502 /* assign each engine to different cacheline and init registers */
1503 curr_head = (void *)dd->sdma_heads_dma;
1504 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1505 unsigned long phys_offset;
1507 sde = &dd->per_sdma[this_idx];
1509 sde->head_dma = curr_head;
1510 curr_head += L1_CACHE_BYTES;
1511 phys_offset = (unsigned long)sde->head_dma -
1512 (unsigned long)dd->sdma_heads_dma;
1513 sde->head_phys = dd->sdma_heads_phys + phys_offset;
1514 init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1516 dd->flags |= HFI1_HAS_SEND_DMA;
1517 dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1518 dd->num_sdma = num_engines;
1519 ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
1523 tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
1524 if (!tmp_sdma_rht) {
1529 ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
1532 dd->sdma_rht = tmp_sdma_rht;
1534 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1538 sdma_clean(dd, num_engines);
1543 * sdma_all_running() - called when the link goes up
1546 * This routine moves all engines to the running state.
1548 void sdma_all_running(struct hfi1_devdata *dd)
1550 struct sdma_engine *sde;
1553 /* move all engines to running */
1554 for (i = 0; i < dd->num_sdma; ++i) {
1555 sde = &dd->per_sdma[i];
1556 sdma_process_event(sde, sdma_event_e30_go_running);
1561 * sdma_all_idle() - called when the link goes down
1564 * This routine moves all engines to the idle state.
1566 void sdma_all_idle(struct hfi1_devdata *dd)
1568 struct sdma_engine *sde;
1571 /* idle all engines */
1572 for (i = 0; i < dd->num_sdma; ++i) {
1573 sde = &dd->per_sdma[i];
1574 sdma_process_event(sde, sdma_event_e70_go_idle);
1579 * sdma_start() - called to kick off state processing for all engines
1582 * This routine is for kicking off the state processing for all required
1583 * sdma engines. Interrupts need to be working at this point.
1586 void sdma_start(struct hfi1_devdata *dd)
1589 struct sdma_engine *sde;
1591 /* kick off the engines state processing */
1592 for (i = 0; i < dd->num_sdma; ++i) {
1593 sde = &dd->per_sdma[i];
1594 sdma_process_event(sde, sdma_event_e10_go_hw_start);
1599 * sdma_exit() - used when module is removed
1602 void sdma_exit(struct hfi1_devdata *dd)
1605 struct sdma_engine *sde;
1607 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1609 sde = &dd->per_sdma[this_idx];
1610 if (!list_empty(&sde->dmawait))
1611 dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1613 sdma_process_event(sde, sdma_event_e00_go_hw_down);
1615 del_timer_sync(&sde->err_progress_check_timer);
1618 * This waits for the state machine to exit so it is not
1619 * necessary to kill the sdma_sw_clean_up_task to make sure
1620 * it is not running.
1622 sdma_finalput(&sde->state);
1627 * unmap the indicated descriptor
1629 static inline void sdma_unmap_desc(
1630 struct hfi1_devdata *dd,
1631 struct sdma_desc *descp)
1633 switch (sdma_mapping_type(descp)) {
1634 case SDMA_MAP_SINGLE:
1637 sdma_mapping_addr(descp),
1638 sdma_mapping_len(descp),
1644 sdma_mapping_addr(descp),
1645 sdma_mapping_len(descp),
1652 * return the mode as indicated by the first
1653 * descriptor in the tx.
1655 static inline u8 ahg_mode(struct sdma_txreq *tx)
1657 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1658 >> SDMA_DESC1_HEADER_MODE_SHIFT;
1662 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
1663 * @dd: hfi1_devdata for unmapping
1664 * @tx: tx request to clean
1666 * This is used in the progress routine to clean the tx or
1667 * by the ULP to toss an in-process tx build.
1669 * The code can be called multiple times without issue.
1672 void __sdma_txclean(
1673 struct hfi1_devdata *dd,
1674 struct sdma_txreq *tx)
1679 u8 skip = 0, mode = ahg_mode(tx);
1682 sdma_unmap_desc(dd, &tx->descp[0]);
1683 /* determine number of AHG descriptors to skip */
1684 if (mode > SDMA_AHG_APPLY_UPDATE1)
1686 for (i = 1 + skip; i < tx->num_desc; i++)
1687 sdma_unmap_desc(dd, &tx->descp[i]);
1690 kfree(tx->coalesce_buf);
1691 tx->coalesce_buf = NULL;
1692 /* kmalloc'ed descp */
1693 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1694 tx->desc_limit = ARRAY_SIZE(tx->descs);
1699 static inline u16 sdma_gethead(struct sdma_engine *sde)
1701 struct hfi1_devdata *dd = sde->dd;
1705 #ifdef CONFIG_SDMA_VERBOSITY
1706 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1707 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1711 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1712 (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1713 hwhead = use_dmahead ?
1714 (u16)le64_to_cpu(*sde->head_dma) :
1715 (u16)read_sde_csr(sde, SD(HEAD));
1717 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1723 swhead = sde->descq_head & sde->sdma_mask;
1724 /* this code is really bad for cache line trading */
1725 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1726 cnt = sde->descq_cnt;
1728 if (swhead < swtail)
1730 sane = (hwhead >= swhead) & (hwhead <= swtail);
1731 else if (swhead > swtail)
1732 /* wrapped around */
1733 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1737 sane = (hwhead == swhead);
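/*
 * Sanity-check examples for the comparison above (values assumed): with
 * swhead = 100 and swtail = 200 (no wrap), a hardware head of 150 is
 * sane; with swhead = 200 and swtail = 100 (tail wrapped), hardware heads
 * of 250 or 50 are both sane, while 150 is not and falls through to the
 * CSR retry below.
 */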
1739 if (unlikely(!sane)) {
1740 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1742 use_dmahead ? "dma" : "kreg",
1743 hwhead, swhead, swtail, cnt);
1745 /* try one more time, using csr */
1749 /* proceed as if no progress */
1757 * This is called when there are send DMA descriptors that might be available.
1760 * This is called with head_lock held.
1762 static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
1764 struct iowait *wait, *nw, *twait;
1765 struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1766 uint i, n = 0, seq, tidx = 0;
1768 #ifdef CONFIG_SDMA_VERBOSITY
1769 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1770 slashstrip(__FILE__), __LINE__, __func__);
1771 dd_dev_err(sde->dd, "avail: %u\n", avail);
1775 seq = read_seqbegin(&sde->waitlock);
1776 if (!list_empty(&sde->dmawait)) {
1777 /* at least one item */
1778 write_seqlock(&sde->waitlock);
1779 /* Harvest waiters wanting DMA descriptors */
1780 list_for_each_entry_safe(
1789 if (n == ARRAY_SIZE(waits))
1791 iowait_init_priority(wait);
1792 num_desc = iowait_get_all_desc(wait);
1793 if (num_desc > avail)
1796 /* Find the top-priority wait member */
1798 twait = waits[tidx];
1800 iowait_priority_update_top(wait,
1805 list_del_init(&wait->list);
1808 write_sequnlock(&sde->waitlock);
1811 } while (read_seqretry(&sde->waitlock, seq));
1813 /* Schedule the top-priority entry first */
1815 waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
1817 for (i = 0; i < n; i++)
1819 waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
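/*
 * Wakeup-batching sketch for sdma_desc_avail() (counts assumed): at most
 * SDMA_WAIT_BATCH_SIZE (20) waiters are harvested per pass, and
 * harvesting stops once a waiter needs more descriptors than remain
 * available.  The highest-priority harvested entry (tidx) is woken first,
 * followed by the rest in list order.
 */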
1822 /* head_lock must be held */
1823 static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1825 struct sdma_txreq *txp = NULL;
1828 int idle_check_done = 0;
1830 hwhead = sdma_gethead(sde);
1832 /* The reason for some of the complexity of this code is that
1833 * not all descriptors have corresponding txps. So, we have to
1834 * be able to skip over descs until we wander into the range of
1835 * the next txp on the list.
1839 txp = get_txhead(sde);
1840 swhead = sde->descq_head & sde->sdma_mask;
1841 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1842 while (swhead != hwhead) {
1843 /* advance head, wrap if needed */
1844 swhead = ++sde->descq_head & sde->sdma_mask;
1846 /* if now past this txp's descs, do the callback */
1847 if (txp && txp->next_descq_idx == swhead) {
1848 /* remove from list */
1849 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1850 complete_tx(sde, txp, SDMA_TXREQ_S_OK);
1851 /* see if there is another txp */
1852 txp = get_txhead(sde);
1854 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1859 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1860 * to updates to the dma_head location in host memory. The head
1861 * value read might not be fully up to date. If there are pending
1862 * descriptors and the SDMA idle interrupt fired then read from the
1863 * CSR SDMA head instead to get the latest value from the hardware.
1864 * The hardware SDMA head should be read at most once in this invocation
1865 * of sdma_make_progress(..) which is ensured by idle_check_done flag
1867 if ((status & sde->idle_mask) && !idle_check_done) {
1870 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1871 if (swtail != hwhead) {
1872 hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1873 idle_check_done = 1;
1878 sde->last_status = status;
1880 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1884 * sdma_engine_interrupt() - interrupt handler for engine
1886 * @status: sdma interrupt reason
1888 * Status is a mask of the 3 possible interrupts for this engine. It will
1889 * contain bits _only_ for this SDMA engine. It will contain at least one
1890 * bit, it may contain more.
1892 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1894 trace_hfi1_sdma_engine_interrupt(sde, status);
1895 write_seqlock(&sde->head_lock);
1896 sdma_set_desc_cnt(sde, sdma_desct_intr);
1897 if (status & sde->idle_mask)
1898 sde->idle_int_cnt++;
1899 else if (status & sde->progress_mask)
1900 sde->progress_int_cnt++;
1901 else if (status & sde->int_mask)
1902 sde->sdma_int_cnt++;
1903 sdma_make_progress(sde, status);
1904 write_sequnlock(&sde->head_lock);
1908 * sdma_engine_error() - error handler for engine
1910 * @status: sdma interrupt reason
1912 void sdma_engine_error(struct sdma_engine *sde, u64 status)
1914 unsigned long flags;
1916 #ifdef CONFIG_SDMA_VERBOSITY
1917 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1919 (unsigned long long)status,
1920 sdma_state_names[sde->state.current_state]);
1922 spin_lock_irqsave(&sde->tail_lock, flags);
1923 write_seqlock(&sde->head_lock);
1924 if (status & ALL_SDMA_ENG_HALT_ERRS)
1925 __sdma_process_event(sde, sdma_event_e60_hw_halted);
1926 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1928 "SDMA (%u) engine error: 0x%llx state %s\n",
1930 (unsigned long long)status,
1931 sdma_state_names[sde->state.current_state]);
1932 dump_sdma_state(sde);
1934 write_sequnlock(&sde->head_lock);
1935 spin_unlock_irqrestore(&sde->tail_lock, flags);
1938 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1940 u64 set_senddmactrl = 0;
1941 u64 clr_senddmactrl = 0;
1942 unsigned long flags;
1944 #ifdef CONFIG_SDMA_VERBOSITY
1945 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1947 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1948 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1949 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1950 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1953 if (op & SDMA_SENDCTRL_OP_ENABLE)
1954 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1956 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1958 if (op & SDMA_SENDCTRL_OP_INTENABLE)
1959 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1961 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1963 if (op & SDMA_SENDCTRL_OP_HALT)
1964 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1966 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1968 spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1970 sde->p_senddmactrl |= set_senddmactrl;
1971 sde->p_senddmactrl &= ~clr_senddmactrl;
1973 if (op & SDMA_SENDCTRL_OP_CLEANUP)
1974 write_sde_csr(sde, SD(CTRL),
1975 sde->p_senddmactrl |
1976 SD(CTRL_SDMA_CLEANUP_SMASK));
1978 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1980 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1982 #ifdef CONFIG_SDMA_VERBOSITY
1983 sdma_dumpstate(sde);
1987 static void sdma_setlengen(struct sdma_engine *sde)
1989 #ifdef CONFIG_SDMA_VERBOSITY
1990 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1991 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1995 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1996 * count to enable generation checking and load the internal
1997 * generation counter.
1999 write_sde_csr(sde, SD(LEN_GEN),
2000 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
2001 write_sde_csr(sde, SD(LEN_GEN),
2002 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
2003 (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
2006 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
2008 /* Commit writes to memory and advance the tail on the chip */
2009 smp_wmb(); /* see get_txhead() */
2010 writeq(tail, sde->tail_csr);
2014 * This is called when changing to state s10_hw_start_up_halt_wait as
2015 * a result of send buffer errors or send DMA descriptor errors.
2017 static void sdma_hw_start_up(struct sdma_engine *sde)
2021 #ifdef CONFIG_SDMA_VERBOSITY
2022 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2023 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2026 sdma_setlengen(sde);
2027 sdma_update_tail(sde, 0); /* Set SendDmaTail */
2030 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
2031 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
2032 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
2036 * set_sdma_integrity
2038 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
2040 static void set_sdma_integrity(struct sdma_engine *sde)
2042 struct hfi1_devdata *dd = sde->dd;
2044 write_sde_csr(sde, SD(CHECK_ENABLE),
2045 hfi1_pkt_base_sdma_integrity(dd));
2048 static void init_sdma_regs(
2049 struct sdma_engine *sde,
2054 #ifdef CONFIG_SDMA_VERBOSITY
2055 struct hfi1_devdata *dd = sde->dd;
2057 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2058 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2061 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
2062 sdma_setlengen(sde);
2063 sdma_update_tail(sde, 0); /* Set SendDmaTail */
2064 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
2065 write_sde_csr(sde, SD(DESC_CNT), 0);
2066 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
2067 write_sde_csr(sde, SD(MEMORY),
2068 ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
2069 ((u64)(credits * sde->this_idx) <<
2070 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
2071 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
2072 set_sdma_integrity(sde);
2073 opmask = OPCODE_CHECK_MASK_DISABLED;
2074 opval = OPCODE_CHECK_VAL_DISABLED;
2075 write_sde_csr(sde, SD(CHECK_OPCODE),
2076 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
2077 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
2080 #ifdef CONFIG_SDMA_VERBOSITY
2082 #define sdma_dumpstate_helper0(reg) do { \
2083 csr = read_csr(sde->dd, reg); \
2084 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
2087 #define sdma_dumpstate_helper(reg) do { \
2088 csr = read_sde_csr(sde, reg); \
2089 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2090 #reg, sde->this_idx, csr); \
2093 #define sdma_dumpstate_helper2(reg) do { \
2094 csr = read_csr(sde->dd, reg + (8 * i)); \
2095 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
2099 void sdma_dumpstate(struct sdma_engine *sde)
2104 sdma_dumpstate_helper(SD(CTRL));
2105 sdma_dumpstate_helper(SD(STATUS));
2106 sdma_dumpstate_helper0(SD(ERR_STATUS));
2107 sdma_dumpstate_helper0(SD(ERR_MASK));
2108 sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
2109 sdma_dumpstate_helper(SD(ENG_ERR_MASK));
2111 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
2112 sdma_dumpstate_helper2(CCE_INT_STATUS);
2113 sdma_dumpstate_helper2(CCE_INT_MASK);
2114 sdma_dumpstate_helper2(CCE_INT_BLOCKED);
2117 sdma_dumpstate_helper(SD(TAIL));
2118 sdma_dumpstate_helper(SD(HEAD));
2119 sdma_dumpstate_helper(SD(PRIORITY_THLD));
2120 sdma_dumpstate_helper(SD(IDLE_CNT));
2121 sdma_dumpstate_helper(SD(RELOAD_CNT));
2122 sdma_dumpstate_helper(SD(DESC_CNT));
2123 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
2124 sdma_dumpstate_helper(SD(MEMORY));
2125 sdma_dumpstate_helper0(SD(ENGINES));
2126 sdma_dumpstate_helper0(SD(MEM_SIZE));
2127 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
2128 sdma_dumpstate_helper(SD(BASE_ADDR));
2129 sdma_dumpstate_helper(SD(LEN_GEN));
2130 sdma_dumpstate_helper(SD(HEAD_ADDR));
2131 sdma_dumpstate_helper(SD(CHECK_ENABLE));
2132 sdma_dumpstate_helper(SD(CHECK_VL));
2133 sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
2134 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
2135 sdma_dumpstate_helper(SD(CHECK_SLID));
2136 sdma_dumpstate_helper(SD(CHECK_OPCODE));
2140 static void dump_sdma_state(struct sdma_engine *sde)
2142 struct hw_sdma_desc *descqp;
2147 u16 head, tail, cnt;
2149 head = sde->descq_head & sde->sdma_mask;
2150 tail = sde->descq_tail & sde->sdma_mask;
2151 cnt = sdma_descq_freecnt(sde);
2154 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
2155 sde->this_idx, head, tail, cnt,
2156 !list_empty(&sde->flushlist));
2158 /* print info for each entry in the descriptor queue */
2159 while (head != tail) {
2160 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2162 descqp = &sde->descq[head];
2163 desc[0] = le64_to_cpu(descqp->qw[0]);
2164 desc[1] = le64_to_cpu(descqp->qw[1]);
2165 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2166 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2168 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2169 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2170 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2171 & SDMA_DESC0_PHY_ADDR_MASK;
2172 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2173 & SDMA_DESC1_GENERATION_MASK;
2174 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2175 & SDMA_DESC0_BYTE_COUNT_MASK;
2177 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2178 head, flags, addr, gen, len);
2180 "\tdesc0:0x%016llx desc1 0x%016llx\n",
2182 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2184 "\taidx: %u amode: %u alen: %u\n",
2186 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2187 SDMA_DESC1_HEADER_INDEX_SHIFT),
2189 SDMA_DESC1_HEADER_MODE_SMASK) >>
2190 SDMA_DESC1_HEADER_MODE_SHIFT),
2192 SDMA_DESC1_HEADER_DWS_SMASK) >>
2193 SDMA_DESC1_HEADER_DWS_SHIFT));
2195 head &= sde->sdma_mask;
2200 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
2202 * sdma_seqfile_dump_sde() - debugfs dump of sde
2204 * @sde: send dma engine to dump
2206 * This routine dumps the sde to the indicated seq file.
2208 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
2211 struct hw_sdma_desc *descqp;
2217 head = sde->descq_head & sde->sdma_mask;
2218 tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
2219 seq_printf(s, SDE_FMT, sde->this_idx,
2221 sdma_state_name(sde->state.current_state),
2222 (unsigned long long)read_sde_csr(sde, SD(CTRL)),
2223 (unsigned long long)read_sde_csr(sde, SD(STATUS)),
2224 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
2225 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
2226 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
2227 (unsigned long long)le64_to_cpu(*sde->head_dma),
2228 (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
2229 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
2230 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
2231 (unsigned long long)sde->last_status,
2232 (unsigned long long)sde->ahg_bits,
2237 !list_empty(&sde->flushlist),
2238 sde->descq_full_count,
2239 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
2241 /* print info for each entry in the descriptor queue */
2242 while (head != tail) {
2243 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2245 descqp = &sde->descq[head];
2246 desc[0] = le64_to_cpu(descqp->qw[0]);
2247 desc[1] = le64_to_cpu(descqp->qw[1]);
2248 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2249 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2251 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2252 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2253 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2254 & SDMA_DESC0_PHY_ADDR_MASK;
2255 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2256 & SDMA_DESC1_GENERATION_MASK;
2257 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2258 & SDMA_DESC0_BYTE_COUNT_MASK;
2260 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2261 head, flags, addr, gen, len);
2262 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2263 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
2265 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2266 SDMA_DESC1_HEADER_INDEX_SHIFT),
2268 SDMA_DESC1_HEADER_MODE_SMASK) >>
2269 SDMA_DESC1_HEADER_MODE_SHIFT));
2270 head = (head + 1) & sde->sdma_mask;
2275 * add the generation number into
2276 * the qw1 and return
static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
{
	u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;

	qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
	qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
			<< SDMA_DESC1_GENERATION_SHIFT;
	return qw1;
}
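/*
 * Editor's illustrative note on add_gen() (not driver code; values assume
 * the default sdma_descq_cnt of 2048, i.e. sdma_shift == 11 and
 * sdma_mask == 2047, with sdma_shift set to ilog2() of the queue size
 * elsewhere in this file):
 *
 *	descq_tail = 0x0000 -> generation 0
 *	descq_tail = 0x0800 -> generation 1	(one full wrap of the ring)
 *	descq_tail = 0x1000 -> generation 2
 *	descq_tail = 0x2000 -> generation 0	(generation cycles modulo 4)
 *
 * The hardware uses this 2-bit generation to recognise stale descriptor
 * entries after the software tail wraps the ring.
 */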
/*
 * This routine submits the indicated tx
 *
 * Space has already been guaranteed and
 * the tail side of the ring is locked.
 *
 * The hardware tail update is done
 * in the caller and that is facilitated
 * by returning the new tail.
 *
 * There is special case logic for ahg
 * to not add the generation number for
 * up to 2 descriptors that follow the
 * first descriptor.
 */
static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
{
	int i;
	u16 tail;
	struct sdma_desc *descp = tx->descp;
	u8 skip = 0, mode = ahg_mode(tx);

2311 tail = sde->descq_tail & sde->sdma_mask;
2312 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2313 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
2314 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
2315 tail, &sde->descq[tail]);
2316 tail = ++sde->descq_tail & sde->sdma_mask;
	descp++;
	if (mode > SDMA_AHG_APPLY_UPDATE1)
		skip = mode >> 1;
	for (i = 1; i < tx->num_desc; i++, descp++) {
		u64 qw1;

		sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
		if (skip) {
			/* edits don't have generation */
			qw1 = descp->qw[1];
			skip--;
		} else {
			/* replace generation with real one for non-edits */
			qw1 = add_gen(sde, descp->qw[1]);
		}
2332 sde->descq[tail].qw[1] = cpu_to_le64(qw1);
2333 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
2334 tail, &sde->descq[tail]);
2335 tail = ++sde->descq_tail & sde->sdma_mask;
	}
	tx->next_descq_idx = tail;
2338 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2339 tx->sn = sde->tail_sn++;
2340 trace_hfi1_sdma_in_sn(sde, tx->sn);
2341 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
#endif
	sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
	sde->desc_avail -= tx->num_desc;
	return tail;
}
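/*
 * Editor's illustrative sketch of the ring-index arithmetic used in
 * submit_tx() above (assumes a power-of-2 descriptor count so that
 * sdma_mask == descq_cnt - 1):
 *
 *	next_idx = (idx + 1) & sdma_mask;	// wraps to 0 at descq_cnt
 *
 * descq_tail itself is allowed to increment monotonically; only the masked
 * value indexes sde->descq[] and sde->tx_ring[], while the unmasked counter
 * feeds add_gen() so the generation changes on every wrap.
 */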
/*
 * Check for progress
 */
2351 static int sdma_check_progress(
2352 struct sdma_engine *sde,
2353 struct iowait_work *wait,
	struct sdma_txreq *tx,
	bool pkts_sent)
{
	int ret;

	sde->desc_avail = sdma_descq_freecnt(sde);
	if (tx->num_desc <= sde->desc_avail)
		return -EAGAIN;
	/* pulse the head_lock */
	if (wait && iowait_ioww_to_iow(wait)->sleep) {
		unsigned seq;

2366 seq = raw_seqcount_begin(
2367 (const seqcount_t *)&sde->head_lock.seqcount);
2368 ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
		if (ret == -EAGAIN)
			sde->desc_avail = sdma_descq_freecnt(sde);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
/**
 * sdma_send_txreq() - submit a tx req to ring
 * @sde: sdma engine to use
 * @wait: SE wait structure to use when full (may be NULL)
 * @tx: sdma_txreq to submit
 * @pkts_sent: has any packet been sent yet?
 *
 * The call submits the tx into the ring.  If an iowait structure is non-NULL
 * the packet will be queued to the list in wait.
 *
 * Return:
 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
 * ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
2392 int sdma_send_txreq(struct sdma_engine *sde,
2393 struct iowait_work *wait,
		    struct sdma_txreq *tx,
		    bool pkts_sent)
{
	int ret = 0;
	u16 tail;
	unsigned long flags;

2401 /* user should have supplied entire packet */
	if (unlikely(tx->tlen))
		return -EINVAL;
	tx->wait = iowait_ioww_to_iow(wait);
2405 spin_lock_irqsave(&sde->tail_lock, flags);
2407 if (unlikely(!__sdma_running(sde)))
2409 if (unlikely(tx->num_desc > sde->desc_avail))
2411 tail = submit_tx(sde, tx);
2413 iowait_sdma_inc(iowait_ioww_to_iow(wait));
2414 sdma_update_tail(sde, tail);
2416 spin_unlock_irqrestore(&sde->tail_lock, flags);
2420 iowait_sdma_inc(iowait_ioww_to_iow(wait));
2421 tx->next_descq_idx = 0;
2422 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2423 tx->sn = sde->tail_sn++;
	trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
2426 spin_lock(&sde->flushlist_lock);
2427 list_add_tail(&tx->list, &sde->flushlist);
2428 spin_unlock(&sde->flushlist_lock);
2429 iowait_inc_wait_count(wait, tx->num_desc);
2430 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
2434 ret = sdma_check_progress(sde, wait, tx, pkts_sent);
2435 if (ret == -EAGAIN) {
2439 sde->descq_full_count++;
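/*
 * Editor's illustrative usage sketch for sdma_send_txreq() (hypothetical
 * caller; the names "my_cb", "hdr" and "payload" are made up and error
 * handling is elided):
 *
 *	struct sdma_txreq tx;
 *
 *	sdma_txinit(&tx, 0, hdr_len + payload_len, my_cb);
 *	sdma_txadd_kvaddr(dd, &tx, hdr, hdr_len);
 *	sdma_txadd_kvaddr(dd, &tx, payload, payload_len);
 *	ret = sdma_send_txreq(sde, wait, &tx, pkts_sent);
 *
 * tx->tlen must have reached zero (the whole declared packet described)
 * before submission or -EINVAL is returned above.  On -ECOMM the request has
 * been placed on the engine's flush list and its completion callback will
 * still be invoked from the flush worker.
 */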
/**
 * sdma_send_txlist() - submit a list of tx req to ring
 * @sde: sdma engine to use
 * @wait: SE wait structure to use when full (may be NULL)
 * @tx_list: list of sdma_txreqs to submit
 * @count_out: pointer to a u16 which, after return, will contain the total
 *             number of sdma_txreqs removed from the tx_list. This includes
 *             sdma_txreqs whose SDMA descriptors are submitted to the ring
 *             and sdma_txreqs which are added to the SDMA engine flush list
 *             because the SDMA engine state is not running.
 *
 * The call submits the list into the ring.
 *
 * If the iowait structure is non-NULL and not equal to the iowait list,
 * the unprocessed part of the list will be appended to the list in wait.
 *
 * In all cases, the tx_list will be updated so the head of the tx_list is
 * the list of descriptors that have yet to be transmitted.
 *
 * The intent of this call is to provide a more efficient
 * way of submitting multiple packets to SDMA while holding the tail
 * side of the ring lock.
 *
 * Return:
 * 0 - Success,
 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
 */
2471 int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
		     struct list_head *tx_list, u16 *count_out)
{
	struct sdma_txreq *tx, *tx_next;
	int ret = 0;
2476 unsigned long flags;
2477 u16 tail = INVALID_TAIL;
2478 u32 submit_count = 0, flush_count = 0, total_count;
2480 spin_lock_irqsave(&sde->tail_lock, flags);
2482 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2483 tx->wait = iowait_ioww_to_iow(wait);
2484 if (unlikely(!__sdma_running(sde)))
2486 if (unlikely(tx->num_desc > sde->desc_avail))
		if (unlikely(tx->tlen)) {
			ret = -EINVAL;
			goto update_tail;
		}
2492 list_del_init(&tx->list);
2493 tail = submit_tx(sde, tx);
2495 if (tail != INVALID_TAIL &&
2496 (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2497 sdma_update_tail(sde, tail);
2498 tail = INVALID_TAIL;
update_tail:
	total_count = submit_count + flush_count;
2504 iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
2505 iowait_starve_clear(submit_count > 0,
2506 iowait_ioww_to_iow(wait));
2508 if (tail != INVALID_TAIL)
2509 sdma_update_tail(sde, tail);
2510 spin_unlock_irqrestore(&sde->tail_lock, flags);
2511 *count_out = total_count;
2514 spin_lock(&sde->flushlist_lock);
2515 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2516 tx->wait = iowait_ioww_to_iow(wait);
2517 list_del_init(&tx->list);
2518 tx->next_descq_idx = 0;
2519 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2520 tx->sn = sde->tail_sn++;
		trace_hfi1_sdma_in_sn(sde, tx->sn);
#endif
2523 list_add_tail(&tx->list, &sde->flushlist);
2525 iowait_inc_wait_count(wait, tx->num_desc);
2527 spin_unlock(&sde->flushlist_lock);
2528 queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
2532 ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
2533 if (ret == -EAGAIN) {
2537 sde->descq_full_count++;
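/*
 * Editor's illustrative usage sketch for sdma_send_txlist() (hypothetical
 * caller; assumes each request on the local list was fully built with
 * sdma_txinit() and the sdma_txadd_*() helpers):
 *
 *	LIST_HEAD(txs);
 *	u16 done;
 *
 *	list_add_tail(&tx1->list, &txs);
 *	list_add_tail(&tx2->list, &txs);
 *	ret = sdma_send_txlist(sde, wait, &txs, &done);
 *
 * On return "done" counts both requests placed on the ring and requests
 * moved to the flush list; anything still on "txs" was not consumed and may
 * be resubmitted later.
 */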
static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
{
	unsigned long flags;

2545 spin_lock_irqsave(&sde->tail_lock, flags);
2546 write_seqlock(&sde->head_lock);
2548 __sdma_process_event(sde, event);
2550 if (sde->state.current_state == sdma_state_s99_running)
2551 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2553 write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}
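/*
 * Editor's note on the locking used in sdma_process_event() above: in this
 * path the engine's tail_lock is taken before the head_lock seqlock, and
 * both are held across the state transition so that neither the submission
 * side nor the completion side can observe a half-applied state change.
 */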
2557 static void __sdma_process_event(struct sdma_engine *sde,
				 enum sdma_events event)
{
	struct sdma_state *ss = &sde->state;
2561 int need_progress = 0;
2563 /* CONFIG SDMA temporary */
2564 #ifdef CONFIG_SDMA_VERBOSITY
2565 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2566 sdma_state_names[ss->current_state],
		   sdma_event_names[event]);
#endif
2570 switch (ss->current_state) {
2571 case sdma_state_s00_hw_down:
2573 case sdma_event_e00_go_hw_down:
2575 case sdma_event_e30_go_running:
			/*
			 * If down, but running is requested (usually the
			 * result of a link up), then we need to start up.
			 * This can happen when hw down is requested while
			 * bringing the link up with traffic active.
			 */
2583 ss->go_s99_running = 1;
2584 /* fall through -- and start dma engine */
2585 case sdma_event_e10_go_hw_start:
2586 /* This reference means the state machine is started */
2587 sdma_get(&sde->state);
			sdma_set_state(sde,
				       sdma_state_s10_hw_start_up_halt_wait);
2591 case sdma_event_e15_hw_halt_done:
2593 case sdma_event_e25_hw_clean_up_done:
2595 case sdma_event_e40_sw_cleaned:
2596 sdma_sw_tear_down(sde);
2598 case sdma_event_e50_hw_cleaned:
2600 case sdma_event_e60_hw_halted:
2602 case sdma_event_e70_go_idle:
2604 case sdma_event_e80_hw_freeze:
2606 case sdma_event_e81_hw_frozen:
2608 case sdma_event_e82_hw_unfreeze:
2610 case sdma_event_e85_link_down:
2612 case sdma_event_e90_sw_halted:
2617 case sdma_state_s10_hw_start_up_halt_wait:
2619 case sdma_event_e00_go_hw_down:
2620 sdma_set_state(sde, sdma_state_s00_hw_down);
2621 sdma_sw_tear_down(sde);
2623 case sdma_event_e10_go_hw_start:
2625 case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
2628 sdma_start_hw_clean_up(sde);
2630 case sdma_event_e25_hw_clean_up_done:
2632 case sdma_event_e30_go_running:
2633 ss->go_s99_running = 1;
2635 case sdma_event_e40_sw_cleaned:
2637 case sdma_event_e50_hw_cleaned:
2639 case sdma_event_e60_hw_halted:
2640 schedule_work(&sde->err_halt_worker);
2642 case sdma_event_e70_go_idle:
2643 ss->go_s99_running = 0;
2645 case sdma_event_e80_hw_freeze:
2647 case sdma_event_e81_hw_frozen:
2649 case sdma_event_e82_hw_unfreeze:
2651 case sdma_event_e85_link_down:
2653 case sdma_event_e90_sw_halted:
2658 case sdma_state_s15_hw_start_up_clean_wait:
2660 case sdma_event_e00_go_hw_down:
2661 sdma_set_state(sde, sdma_state_s00_hw_down);
2662 sdma_sw_tear_down(sde);
2664 case sdma_event_e10_go_hw_start:
2666 case sdma_event_e15_hw_halt_done:
2668 case sdma_event_e25_hw_clean_up_done:
2669 sdma_hw_start_up(sde);
2670 sdma_set_state(sde, ss->go_s99_running ?
2671 sdma_state_s99_running :
2672 sdma_state_s20_idle);
2674 case sdma_event_e30_go_running:
2675 ss->go_s99_running = 1;
2677 case sdma_event_e40_sw_cleaned:
2679 case sdma_event_e50_hw_cleaned:
2681 case sdma_event_e60_hw_halted:
2683 case sdma_event_e70_go_idle:
2684 ss->go_s99_running = 0;
2686 case sdma_event_e80_hw_freeze:
2688 case sdma_event_e81_hw_frozen:
2690 case sdma_event_e82_hw_unfreeze:
2692 case sdma_event_e85_link_down:
2694 case sdma_event_e90_sw_halted:
2699 case sdma_state_s20_idle:
2701 case sdma_event_e00_go_hw_down:
2702 sdma_set_state(sde, sdma_state_s00_hw_down);
2703 sdma_sw_tear_down(sde);
2705 case sdma_event_e10_go_hw_start:
2707 case sdma_event_e15_hw_halt_done:
2709 case sdma_event_e25_hw_clean_up_done:
2711 case sdma_event_e30_go_running:
2712 sdma_set_state(sde, sdma_state_s99_running);
2713 ss->go_s99_running = 1;
2715 case sdma_event_e40_sw_cleaned:
2717 case sdma_event_e50_hw_cleaned:
2719 case sdma_event_e60_hw_halted:
2720 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2721 schedule_work(&sde->err_halt_worker);
2723 case sdma_event_e70_go_idle:
2725 case sdma_event_e85_link_down:
2727 case sdma_event_e80_hw_freeze:
2728 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2729 atomic_dec(&sde->dd->sdma_unfreeze_count);
2730 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2732 case sdma_event_e81_hw_frozen:
2734 case sdma_event_e82_hw_unfreeze:
2736 case sdma_event_e90_sw_halted:
2741 case sdma_state_s30_sw_clean_up_wait:
2743 case sdma_event_e00_go_hw_down:
2744 sdma_set_state(sde, sdma_state_s00_hw_down);
2746 case sdma_event_e10_go_hw_start:
2748 case sdma_event_e15_hw_halt_done:
2750 case sdma_event_e25_hw_clean_up_done:
2752 case sdma_event_e30_go_running:
2753 ss->go_s99_running = 1;
2755 case sdma_event_e40_sw_cleaned:
2756 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2757 sdma_start_hw_clean_up(sde);
2759 case sdma_event_e50_hw_cleaned:
2761 case sdma_event_e60_hw_halted:
2763 case sdma_event_e70_go_idle:
2764 ss->go_s99_running = 0;
2766 case sdma_event_e80_hw_freeze:
2768 case sdma_event_e81_hw_frozen:
2770 case sdma_event_e82_hw_unfreeze:
2772 case sdma_event_e85_link_down:
2773 ss->go_s99_running = 0;
2775 case sdma_event_e90_sw_halted:
2780 case sdma_state_s40_hw_clean_up_wait:
2782 case sdma_event_e00_go_hw_down:
2783 sdma_set_state(sde, sdma_state_s00_hw_down);
2784 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2786 case sdma_event_e10_go_hw_start:
2788 case sdma_event_e15_hw_halt_done:
2790 case sdma_event_e25_hw_clean_up_done:
2791 sdma_hw_start_up(sde);
2792 sdma_set_state(sde, ss->go_s99_running ?
2793 sdma_state_s99_running :
2794 sdma_state_s20_idle);
2796 case sdma_event_e30_go_running:
2797 ss->go_s99_running = 1;
2799 case sdma_event_e40_sw_cleaned:
2801 case sdma_event_e50_hw_cleaned:
2803 case sdma_event_e60_hw_halted:
2805 case sdma_event_e70_go_idle:
2806 ss->go_s99_running = 0;
2808 case sdma_event_e80_hw_freeze:
2810 case sdma_event_e81_hw_frozen:
2812 case sdma_event_e82_hw_unfreeze:
2814 case sdma_event_e85_link_down:
2815 ss->go_s99_running = 0;
2817 case sdma_event_e90_sw_halted:
2822 case sdma_state_s50_hw_halt_wait:
2824 case sdma_event_e00_go_hw_down:
2825 sdma_set_state(sde, sdma_state_s00_hw_down);
2826 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2828 case sdma_event_e10_go_hw_start:
2830 case sdma_event_e15_hw_halt_done:
2831 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2832 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2834 case sdma_event_e25_hw_clean_up_done:
2836 case sdma_event_e30_go_running:
2837 ss->go_s99_running = 1;
2839 case sdma_event_e40_sw_cleaned:
2841 case sdma_event_e50_hw_cleaned:
2843 case sdma_event_e60_hw_halted:
2844 schedule_work(&sde->err_halt_worker);
2846 case sdma_event_e70_go_idle:
2847 ss->go_s99_running = 0;
2849 case sdma_event_e80_hw_freeze:
2851 case sdma_event_e81_hw_frozen:
2853 case sdma_event_e82_hw_unfreeze:
2855 case sdma_event_e85_link_down:
2856 ss->go_s99_running = 0;
2858 case sdma_event_e90_sw_halted:
2863 case sdma_state_s60_idle_halt_wait:
2865 case sdma_event_e00_go_hw_down:
2866 sdma_set_state(sde, sdma_state_s00_hw_down);
2867 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2869 case sdma_event_e10_go_hw_start:
2871 case sdma_event_e15_hw_halt_done:
2872 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2873 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2875 case sdma_event_e25_hw_clean_up_done:
2877 case sdma_event_e30_go_running:
2878 ss->go_s99_running = 1;
2880 case sdma_event_e40_sw_cleaned:
2882 case sdma_event_e50_hw_cleaned:
2884 case sdma_event_e60_hw_halted:
2885 schedule_work(&sde->err_halt_worker);
2887 case sdma_event_e70_go_idle:
2888 ss->go_s99_running = 0;
2890 case sdma_event_e80_hw_freeze:
2892 case sdma_event_e81_hw_frozen:
2894 case sdma_event_e82_hw_unfreeze:
2896 case sdma_event_e85_link_down:
2898 case sdma_event_e90_sw_halted:
2903 case sdma_state_s80_hw_freeze:
2905 case sdma_event_e00_go_hw_down:
2906 sdma_set_state(sde, sdma_state_s00_hw_down);
2907 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2909 case sdma_event_e10_go_hw_start:
2911 case sdma_event_e15_hw_halt_done:
2913 case sdma_event_e25_hw_clean_up_done:
2915 case sdma_event_e30_go_running:
2916 ss->go_s99_running = 1;
2918 case sdma_event_e40_sw_cleaned:
2920 case sdma_event_e50_hw_cleaned:
2922 case sdma_event_e60_hw_halted:
2924 case sdma_event_e70_go_idle:
2925 ss->go_s99_running = 0;
2927 case sdma_event_e80_hw_freeze:
2929 case sdma_event_e81_hw_frozen:
2930 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2931 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2933 case sdma_event_e82_hw_unfreeze:
2935 case sdma_event_e85_link_down:
2937 case sdma_event_e90_sw_halted:
2942 case sdma_state_s82_freeze_sw_clean:
2944 case sdma_event_e00_go_hw_down:
2945 sdma_set_state(sde, sdma_state_s00_hw_down);
2946 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2948 case sdma_event_e10_go_hw_start:
2950 case sdma_event_e15_hw_halt_done:
2952 case sdma_event_e25_hw_clean_up_done:
2954 case sdma_event_e30_go_running:
2955 ss->go_s99_running = 1;
2957 case sdma_event_e40_sw_cleaned:
2958 /* notify caller this engine is done cleaning */
2959 atomic_dec(&sde->dd->sdma_unfreeze_count);
2960 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2962 case sdma_event_e50_hw_cleaned:
2964 case sdma_event_e60_hw_halted:
2966 case sdma_event_e70_go_idle:
2967 ss->go_s99_running = 0;
2969 case sdma_event_e80_hw_freeze:
2971 case sdma_event_e81_hw_frozen:
2973 case sdma_event_e82_hw_unfreeze:
2974 sdma_hw_start_up(sde);
2975 sdma_set_state(sde, ss->go_s99_running ?
2976 sdma_state_s99_running :
2977 sdma_state_s20_idle);
2979 case sdma_event_e85_link_down:
2981 case sdma_event_e90_sw_halted:
2986 case sdma_state_s99_running:
2988 case sdma_event_e00_go_hw_down:
2989 sdma_set_state(sde, sdma_state_s00_hw_down);
2990 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2992 case sdma_event_e10_go_hw_start:
2994 case sdma_event_e15_hw_halt_done:
2996 case sdma_event_e25_hw_clean_up_done:
2998 case sdma_event_e30_go_running:
3000 case sdma_event_e40_sw_cleaned:
3002 case sdma_event_e50_hw_cleaned:
3004 case sdma_event_e60_hw_halted:
3006 sdma_err_progress_check_schedule(sde);
3008 case sdma_event_e90_sw_halted:
3010 * SW initiated halt does not perform engines
3013 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
3014 schedule_work(&sde->err_halt_worker);
3016 case sdma_event_e70_go_idle:
3017 sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
3019 case sdma_event_e85_link_down:
3020 ss->go_s99_running = 0;
3022 case sdma_event_e80_hw_freeze:
3023 sdma_set_state(sde, sdma_state_s80_hw_freeze);
3024 atomic_dec(&sde->dd->sdma_unfreeze_count);
3025 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
3027 case sdma_event_e81_hw_frozen:
3029 case sdma_event_e82_hw_unfreeze:
	ss->last_event = event;
	if (need_progress)
		sdma_make_progress(sde, 0);
}
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors.  There doesn't seem to be
 * much point in an interim step.  The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where the input packet has more than MAX_DESC iovecs.
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
3058 if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
3062 } else if (!tx->coalesce_buf) {
3063 /* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				goto enomem;
			tx->coalesce_idx = 0;
		}
		return 0;
	}
	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;

3083 /* reserve last descriptor for coalescing */
3084 tx->desc_limit = MAX_DESC - 1;
3085 /* copy ones already built */
3086 for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	__sdma_txclean(dd, tx);
	return -ENOMEM;
}
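/*
 * Editor's worked example of the extension policy above: a txreq starts with
 * its small inline tx->descs[] array (tx->desc_limit entries).  The first
 * time that fills, the descriptors are moved to a kmalloc'ed array sized for
 * MAX_DESC entries, with one entry held back (desc_limit = MAX_DESC - 1) so
 * that a coalesce-buffer descriptor can always be appended.  Only when
 * num_desc reaches MAX_DESC - 1 is the coalesce buffer itself allocated,
 * sized tx->tlen + sizeof(u32) so dword padding never needs a second
 * allocation.
 */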
/*
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer.  If there is an allocated coalesce buffer, it
 * copies the input packet data into the coalesce buffer.  It also adds
 * the coalesce buffer descriptor once the whole packet is received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
3110 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
3111 int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		__sdma_txclean(dd, tx);
		return rval;
	}

3123 /* If coalesce buffer is allocated, copy data into it */
3124 if (tx->coalesce_buf) {
3125 if (type == SDMA_MAP_NONE) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

3130 if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			__sdma_txclean(dd, tx);
			return -EINVAL;
		}

3138 memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
3139 tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;
3147 /* Whole packet is received; add any padding */
3148 pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
3151 memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
3152 /* padding is taken care of for coalescing case */
3153 tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

3157 /* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			__sdma_txclean(dd, tx);
			return -ENOSPC;
		}

3168 /* Add descriptor for coalesce buffer */
3169 tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}
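/*
 * Editor's illustrative sketch of how a caller is expected to use the 0/1
 * return convention above (hypothetical fragment, error path elided):
 *
 *	rval = ext_coal_sdma_tx_descs(dd, tx, type, kvaddr, page, offset, len);
 *	if (rval <= 0)
 *		return rval;	// error, or fragment copied into coalesce buf
 *	// rval == 1: no coalescing; build a descriptor for this fragment
 *
 * Once a coalesce buffer exists, every remaining fragment is memcpy'ed into
 * it and a single SDMA descriptor (added via _sdma_txadd_daddr() above)
 * describes the whole coalesced payload.
 */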
3177 /* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	unsigned int i;
	u64 sreg;

3184 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
3185 SD(CHECK_SLID_MASK_SHIFT)) |
3186 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
3187 SD(CHECK_SLID_VALUE_SHIFT));
3189 for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
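/*
 * Editor's worked example for the sreg computation above (values are
 * illustrative only): with an LMC of 2 the caller would pass
 * mask = ~((1 << 2) - 1) = 0xfffc and, say, lid = 0x1005, so
 *
 *	CHECK_SLID_MASK  field <- mask		(0xfffc, low bits ignored)
 *	CHECK_SLID_VALUE field <- lid & mask	(0x1004, the base LID)
 *
 * after which every engine accepts source LIDs 0x1004 through 0x1007.
 */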
3197 /* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
3204 rval = _extend_sdma_tx_descs(dd, tx);
			__sdma_txclean(dd, tx);
			return rval;
		}
	}
3210 /* finish the one just added */
3215 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
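/*
 * Editor's worked example of the padding above: SDMA payloads must be a
 * whole number of dwords, so a packet_len of 61 bytes needs
 * sizeof(u32) - (61 & 3) = 3 pad bytes supplied by the extra descriptor,
 * while a packet that is already dword aligned never comes through this
 * path at all.
 */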
/*
 * Add ahg to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * the packet.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

3237 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
3241 else if (num_ahg <= 5)
3242 mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* FALLTHROUGH */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}

3260 tx->descs[0].qw[1] |=
3261 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
3262 << SDMA_DESC1_HEADER_INDEX_SHIFT) |
3263 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
3264 << SDMA_DESC1_HEADER_DWS_SHIFT) |
3265 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
3266 << SDMA_DESC1_HEADER_MODE_SHIFT) |
3267 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
3268 << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
3269 for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1]) << shift);
		shift = (shift + 32) & 63;
	}
}
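/*
 * Editor's worked example of the AHG update budget above: ahg[0] rides in
 * descriptor 0 (the HEADER_UPDATE1 field) and each additional descriptor
 * packs four more 32-bit updates (two per qword), so
 *
 *	num_ahg == 1     -> SDMA_AHG_APPLY_UPDATE1, no extra descriptors
 *	num_ahg in 2..5  -> SDMA_AHG_APPLY_UPDATE2, one extra descriptor
 *	num_ahg in 6..9  -> SDMA_AHG_APPLY_UPDATE3, two extra descriptors
 *
 * which is why the WARN_ON_ONCE() above rejects num_ahg > 9.
 */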
/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
3287 int sdma_ahg_alloc(struct sdma_engine *sde)
3293 trace_hfi1_ahg_allocate(sde, -EINVAL);
3297 nr = ffz(READ_ONCE(sde->ahg_bits));
3299 trace_hfi1_ahg_allocate(sde, -ENOSPC);
3302 oldbit = test_and_set_bit(nr, &sde->ahg_bits);
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}
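/*
 * Editor's illustrative usage sketch for the AHG entry allocator (the
 * caller-side bookkeeping shown is hypothetical):
 *
 *	int ahg_idx = sdma_ahg_alloc(sde);
 *
 *	if (ahg_idx < 0)
 *		// no entry free (or AHG disabled): build full headers instead
 *	...
 *	// use ahg_idx when building AHG header updates for this queue
 *	...
 *	sdma_ahg_free(sde, ahg_idx);
 *
 * Allocation is lock-free: ffz() proposes a clear bit in sde->ahg_bits and
 * test_and_set_bit() claims it, retrying if another CPU raced to the same
 * bit.
 */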
/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
3318 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
3322 trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled.  Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

3342 /* set up the wait but do not wait here */
3343 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3345 /* tell all engines to stop running and wait */
3346 for (i = 0; i < dd->num_sdma; i++)
3347 sdma_process_event(&dd->per_sdma[i], event);
	/* sdma_freeze() will wait for all engines to have stopped */
}
/*
 * SPC freeze handling for SDMA engines.  Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <=
				       0);
	/* interrupted or count is negative, then unloading - just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;
3372 /* set up the count for the next wait */
3373 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3375 /* tell all engines that the SPC is frozen, they can start cleaning */
3376 for (i = 0; i < dd->num_sdma; i++)
3377 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
3380 * Wait for everyone to finish software clean before exiting. The
3381 * software clean will read engine CSRs, so must be completed before
3382 * the next step, which will clear the engine CSRs.
3384 (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
3385 atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check results - done no matter what */
}
/*
 * SPC freeze handling for the SDMA engines.  Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined.  All
 * that is left is a software clean.  We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines start freeze clean up */
3402 for (i = 0; i < dd->num_sdma; i++)
3403 sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
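/*
 * Editor's note on the overall freeze choreography (ordering inferred from
 * the three routines above):
 *
 *	sdma_freeze_notify(dd, link_down);  // e80/e85: stop all engines
 *	sdma_freeze(dd);                    // wait, then e81: SW clean per engine
 *	// ... SPC unfrozen by the caller ...
 *	sdma_unfreeze(dd);                  // e82: restart engines that want to run
 *
 * sdma_unfreeze_count is re-armed to num_sdma before each wait so that every
 * engine's state machine decrements it exactly once per phase.
 */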
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 */
3412 void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);
}