// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "gaudiP.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v1_1.h"
#include "../include/gaudi/gaudi_masks.h"
#include "../include/gaudi/gaudi_fw_if.h"
#include "../include/gaudi/gaudi_reg_map.h"
#include "../include/gaudi/gaudi_async_ids_map_extended.h"

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>
/*
 * Gaudi security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers
 *        - MMU
 *
 * 2. DDR is protected by:
 *        - Range registers (protect the first 512MB)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * MMU is always enabled.
 *
 * QMAN DMA channels 0,1 (PCI DMA):
 *     - DMA is not secured.
 *     - PQ and CQ are secured.
 *     - CP is secured: The driver needs to parse CB but WREG should be allowed
 *                      because of TDMA (tensor DMA). Hence, WREG is always not
 *                      secured.
 *
 * When the driver needs to use DMA it will check that Gaudi is idle, set DMA
 * channel 0 to be secured, execute the DMA and change it back to not secured.
 * Currently, the driver doesn't use the DMA while there are compute jobs
 * running.
 *
 * The current use cases for the driver to use the DMA are:
 *     - Clear SRAM on context switch (happens on context switch when device is
 *       idle)
 *     - MMU page tables area clear (happens on init)
 *
 * QMAN DMA 2-7, TPC, MME, NIC:
 * PQ is secured and is located on the Host (HBM CON TPC3 bug)
 * CQ, CP and the engine are not secured
 *
 */
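/*
 * The "check idle, secure DMA channel 0, execute, restore" sequence described
 * above is what the driver's own internal submissions go through; see
 * gaudi_send_job_on_qman0(), declared further down in this file.
 */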
#define GAUDI_BOOT_FIT_FILE	"habanalabs/gaudi/gaudi-boot-fit.itb"
#define GAUDI_LINUX_FW_FILE	"habanalabs/gaudi/gaudi-fit.itb"
#define GAUDI_TPC_FW_FILE	"habanalabs/gaudi/gaudi_tpc.bin"

#define GAUDI_DMA_POOL_BLK_SIZE		0x100 /* 256 bytes */

#define GAUDI_RESET_TIMEOUT_MSEC	2000		/* 2000ms */
#define GAUDI_RESET_WAIT_MSEC		1		/* 1ms */
#define GAUDI_CPU_RESET_WAIT_MSEC	200		/* 200ms */
#define GAUDI_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */

#define GAUDI_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GAUDI_PLDM_HRESET_TIMEOUT_MSEC	20000		/* 20s */
#define GAUDI_PLDM_TEST_QUEUE_WAIT_USEC	1000000		/* 1s */
#define GAUDI_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC	4000000		/* 4s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC	4000000		/* 4s */
#define GAUDI_WAIT_FOR_BL_TIMEOUT_USEC	15000000	/* 15s */

#define GAUDI_QMAN0_FENCE_VAL	0x72E91AB9

#define GAUDI_MAX_STRING_LEN	20

#define GAUDI_CB_POOL_CB_CNT	512
#define GAUDI_CB_POOL_CB_SIZE	0x20000 /* 128KB */

#define GAUDI_ALLOC_CPU_MEM_RETRY_CNT	3

#define GAUDI_NUM_OF_TPC_INTR_CAUSE	20

#define GAUDI_NUM_OF_QM_ERR_CAUSE	16

#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE	3

#define GAUDI_ARB_WDT_TIMEOUT		0x1000000

#define GAUDI_CLK_GATE_DEBUGFS_MASK	(\
		BIT(GAUDI_ENGINE_ID_MME_0) |\
		BIT(GAUDI_ENGINE_ID_MME_2) |\
		GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))

#define HBM_SCRUBBING_TIMEOUT_US	1000000 /* 1s */

#define GAUDI_PLL_MAX 10

#define BIN_REG_STRING_SIZE	sizeof("0b10101010101010101010101010101010")

#define MONITOR_SOB_STRING_SIZE		256
static u32 gaudi_stream_master[GAUDI_STREAM_MASTER_ARR_SIZE] = {
	GAUDI_QUEUE_ID_DMA_0_0,
	GAUDI_QUEUE_ID_DMA_0_1,
	GAUDI_QUEUE_ID_DMA_0_2,
	GAUDI_QUEUE_ID_DMA_0_3,
	GAUDI_QUEUE_ID_DMA_1_0,
	GAUDI_QUEUE_ID_DMA_1_1,
	GAUDI_QUEUE_ID_DMA_1_2,
	GAUDI_QUEUE_ID_DMA_1_3
};
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
	"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
	"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
	"gaudi cq 5_0", "gaudi cq 5_1", "gaudi cq 5_2", "gaudi cq 5_3",
	"gaudi cpu eq"
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
	[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
	[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
	[GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
	[GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
	[GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
	[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_5,
	[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_6,
	[GAUDI_HBM_DMA_6] = GAUDI_ENGINE_ID_DMA_7
};
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
	[0] = GAUDI_QUEUE_ID_DMA_0_0,
	[1] = GAUDI_QUEUE_ID_DMA_0_1,
	[2] = GAUDI_QUEUE_ID_DMA_0_2,
	[3] = GAUDI_QUEUE_ID_DMA_0_3,
	[4] = GAUDI_QUEUE_ID_DMA_1_0,
	[5] = GAUDI_QUEUE_ID_DMA_1_1,
	[6] = GAUDI_QUEUE_ID_DMA_1_2,
	[7] = GAUDI_QUEUE_ID_DMA_1_3,
};
static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
	[PACKET_REPEAT]		= sizeof(struct packet_repeat),
	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
	[PACKET_FENCE]		= sizeof(struct packet_fence),
	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
	[PACKET_NOP]		= sizeof(struct packet_nop),
	[PACKET_STOP]		= sizeof(struct packet_stop),
	[PACKET_ARB_POINT]	= sizeof(struct packet_arb_point),
	[PACKET_WAIT]		= sizeof(struct packet_wait),
	[PACKET_LOAD_AND_EXE]	= sizeof(struct packet_load_and_exe)
};
static inline bool validate_packet_id(enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:
	case PACKET_WREG_BULK:
	case PACKET_MSG_LONG:
	case PACKET_MSG_SHORT:
	case PACKET_CP_DMA:
	case PACKET_REPEAT:
	case PACKET_MSG_PROT:
	case PACKET_FENCE:
	case PACKET_LIN_DMA:
	case PACKET_NOP:
	case PACKET_STOP:
	case PACKET_ARB_POINT:
	case PACKET_WAIT:
	case PACKET_LOAD_AND_EXE:
		return true;
	default:
		return false;
	}
}
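/*
 * A minimal sketch (not part of the driver's API) of how the two tables above
 * are typically combined when walking a command buffer: validate the opcode
 * first, then look up the nominal per-packet size. A real CB parser must also
 * handle variable-length packets such as PACKET_WREG_BULK.
 */
static inline u16 gaudi_example_packet_size(enum packet_id id)
{
	return validate_packet_id(id) ? gaudi_packet_sizes[id] : 0;
}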
static const char * const
gaudi_tpc_interrupts_cause[GAUDI_NUM_OF_TPC_INTR_CAUSE] = {
	"tpc_address_exceed_slm",
	"tpc_div_by_0",
	"tpc_spu_mac_overflow",
	"tpc_spu_addsub_overflow",
	"tpc_spu_abs_overflow",
	"tpc_spu_fp_dst_nan_inf",
	"tpc_spu_fp_dst_denorm",
	"tpc_vpu_mac_overflow",
	"tpc_vpu_addsub_overflow",
	"tpc_vpu_abs_overflow",
	"tpc_vpu_fp_dst_nan_inf",
	"tpc_vpu_fp_dst_denorm",
	"tpc_assertions",
	"tpc_illegal_instruction",
	"tpc_pc_wrap_around",
	"tpc_qm_sw_err",
	"tpc_hbw_rresp_err",
	"tpc_hbw_bresp_err",
	"tpc_lbw_rresp_err",
	"tpc_lbw_bresp_err"
};
static const char * const
gaudi_qman_error_cause[GAUDI_NUM_OF_QM_ERR_CAUSE] = {
	"PQ AXI HBW error",
	"CQ AXI HBW error",
	"CP AXI HBW error",
	"CP error due to undefined OPCODE",
	"CP encountered STOP OPCODE",
	"CP AXI LBW error",
	"CP WRREG32 or WRBULK returned error",
	"N/A",
	"FENCE 0 inc over max value and clipped",
	"FENCE 1 inc over max value and clipped",
	"FENCE 2 inc over max value and clipped",
	"FENCE 3 inc over max value and clipped",
	"FENCE 0 dec under min value and clipped",
	"FENCE 1 dec under min value and clipped",
	"FENCE 2 dec under min value and clipped",
	"FENCE 3 dec under min value and clipped"
};
static const char * const
gaudi_qman_arb_error_cause[GAUDI_NUM_OF_QM_ARB_ERR_CAUSE] = {
	"Choice push while full error",
	"Choice Q watchdog error",
	"MSG AXI LBW returned with error"
};
enum gaudi_sm_sei_cause {
	GAUDI_SM_SEI_SO_OVERFLOW,
	GAUDI_SM_SEI_LBW_4B_UNALIGNED,
	GAUDI_SM_SEI_AXI_RESPONSE_ERR
};
static enum hl_queue_type gaudi_queue_type[GAUDI_QUEUE_ID_SIZE] = {
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_0 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_1 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_2 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_0_3 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_0 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_1 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_2 */
	QUEUE_TYPE_EXT, /* GAUDI_QUEUE_ID_DMA_1_3 */
	QUEUE_TYPE_CPU, /* GAUDI_QUEUE_ID_CPU_PQ */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_2_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_3_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_4_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_5_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_6_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_DMA_7_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_0_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_MME_1_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_0_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_1_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_2_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_3_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_4_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_5_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_6_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_TPC_7_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_0_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_1_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_2_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_3_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_4_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_5_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_6_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_7_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_8_3 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_0 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_1 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_2 */
	QUEUE_TYPE_INT, /* GAUDI_QUEUE_ID_NIC_9_3 */
};
static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
	{ .id = 0,  .name = "SYNC_OBJ_DMA_DOWN_FEEDBACK" },
	{ .id = 1,  .name = "SYNC_OBJ_DMA_UP_FEEDBACK" },
	{ .id = 2,  .name = "SYNC_OBJ_DMA_STATIC_DRAM_SRAM_FEEDBACK" },
	{ .id = 3,  .name = "SYNC_OBJ_DMA_SRAM_DRAM_FEEDBACK" },
	{ .id = 4,  .name = "SYNC_OBJ_FIRST_COMPUTE_FINISH" },
	{ .id = 5,  .name = "SYNC_OBJ_HOST_DRAM_DONE" },
	{ .id = 6,  .name = "SYNC_OBJ_DBG_CTR_DEPRECATED" },
	{ .id = 7,  .name = "SYNC_OBJ_DMA_ACTIVATIONS_DRAM_SRAM_FEEDBACK" },
	{ .id = 8,  .name = "SYNC_OBJ_ENGINE_SEM_MME_0" },
	{ .id = 9,  .name = "SYNC_OBJ_ENGINE_SEM_MME_1" },
	{ .id = 10, .name = "SYNC_OBJ_ENGINE_SEM_TPC_0" },
	{ .id = 11, .name = "SYNC_OBJ_ENGINE_SEM_TPC_1" },
	{ .id = 12, .name = "SYNC_OBJ_ENGINE_SEM_TPC_2" },
	{ .id = 13, .name = "SYNC_OBJ_ENGINE_SEM_TPC_3" },
	{ .id = 14, .name = "SYNC_OBJ_ENGINE_SEM_TPC_4" },
	{ .id = 15, .name = "SYNC_OBJ_ENGINE_SEM_TPC_5" },
	{ .id = 16, .name = "SYNC_OBJ_ENGINE_SEM_TPC_6" },
	{ .id = 17, .name = "SYNC_OBJ_ENGINE_SEM_TPC_7" },
	{ .id = 18, .name = "SYNC_OBJ_ENGINE_SEM_DMA_1" },
	{ .id = 19, .name = "SYNC_OBJ_ENGINE_SEM_DMA_2" },
	{ .id = 20, .name = "SYNC_OBJ_ENGINE_SEM_DMA_3" },
	{ .id = 21, .name = "SYNC_OBJ_ENGINE_SEM_DMA_4" },
	{ .id = 22, .name = "SYNC_OBJ_ENGINE_SEM_DMA_5" },
	{ .id = 23, .name = "SYNC_OBJ_ENGINE_SEM_DMA_6" },
	{ .id = 24, .name = "SYNC_OBJ_ENGINE_SEM_DMA_7" },
	{ .id = 25, .name = "SYNC_OBJ_DBG_CTR_0" },
	{ .id = 26, .name = "SYNC_OBJ_DBG_CTR_1" },
};
static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
	{ .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
	{ .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
	{ .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
	{ .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
	{ .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
	{ .id = 206, .name = "MON_OBJ_TPC_2_CLK_GATE" },
	{ .id = 207, .name = "MON_OBJ_TPC_3_CLK_GATE" },
	{ .id = 208, .name = "MON_OBJ_TPC_4_CLK_GATE" },
	{ .id = 209, .name = "MON_OBJ_TPC_5_CLK_GATE" },
	{ .id = 210, .name = "MON_OBJ_TPC_6_CLK_GATE" },
	{ .id = 211, .name = "MON_OBJ_TPC_7_CLK_GATE" },
};
static s64 gaudi_state_dump_specs_props[] = {
	[SP_SYNC_OBJ_BASE_ADDR] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0,
	[SP_NEXT_SYNC_OBJ_ADDR] = NEXT_SYNC_OBJ_ADDR_INTERVAL,
	[SP_SYNC_OBJ_AMOUNT] = NUM_OF_SOB_IN_BLOCK,
	[SP_MON_OBJ_WR_ADDR_LOW] =
		mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0,
	[SP_MON_OBJ_WR_ADDR_HIGH] =
		mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0,
	[SP_MON_OBJ_WR_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_DATA_0,
	[SP_MON_OBJ_ARM_DATA] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_ARM_0,
	[SP_MON_OBJ_STATUS] = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0,
	[SP_MONITORS_AMOUNT] = NUM_OF_MONITORS_IN_BLOCK,
	[SP_TPC0_CMDQ] = mmTPC0_QM_GLBL_CFG0,
	[SP_TPC0_CFG_SO] = mmTPC0_CFG_QM_SYNC_OBJECT_ADDR,
	[SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
	[SP_MME_CMDQ] = mmMME0_QM_GLBL_CFG0,
	[SP_MME_CFG_SO] = mmMME0_CTRL_ARCH_DESC_SYNC_OBJECT_ADDR_LOW_LOCAL,
	[SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
	[SP_DMA_CMDQ] = mmDMA0_QM_GLBL_CFG0,
	[SP_DMA_CFG_SO] = mmDMA0_CORE_WR_COMP_ADDR_LO,
	[SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
	[SP_NUM_OF_MME_ENGINES] = NUM_OF_MME_ENGINES,
	[SP_SUB_MME_ENG_NUM] = NUM_OF_MME_SUB_ENGINES,
	[SP_NUM_OF_DMA_ENGINES] = NUM_OF_DMA_ENGINES,
	[SP_NUM_OF_TPC_ENGINES] = NUM_OF_TPC_ENGINES,
	[SP_ENGINE_NUM_OF_QUEUES] = NUM_OF_QUEUES,
	[SP_ENGINE_NUM_OF_STREAMS] = NUM_OF_STREAMS,
	[SP_ENGINE_NUM_OF_FENCES] = NUM_OF_FENCES,
	[SP_FENCE0_CNT_OFFSET] =
		mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
	[SP_FENCE0_RDATA_OFFSET] =
		mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
	[SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
};
/* The order here is opposite to the order of the indexing in the h/w.
 * i.e. SYNC_MGR_W_S is actually 0, SYNC_MGR_E_S is 1, etc.
 */
static const char * const gaudi_sync_manager_names[] = {
	"SYNC_MGR_E_N",
	"SYNC_MGR_E_S",
	"SYNC_MGR_W_N",
	"SYNC_MGR_W_S",
	NULL
};
struct ecc_info_extract_params {
	u64 block_address;
	u32 num_memories;
	bool derr;
};
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
								u64 phys_addr);
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
					struct hl_cs_job *job);
static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
					u32 size, u64 val);
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
					u32 num_regs, u32 val);
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
				u32 tpc_id);
static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev);
static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
				u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
				struct hl_gen_wait_properties *prop);
static inline enum hl_collective_mode
get_collective_mode(struct hl_device *hdev, u32 queue_id)
{
	if (gaudi_queue_type[queue_id] == QUEUE_TYPE_EXT)
		return HL_COLLECTIVE_MASTER;

	if (queue_id >= GAUDI_QUEUE_ID_DMA_5_0 &&
			queue_id <= GAUDI_QUEUE_ID_DMA_5_3)
		return HL_COLLECTIVE_SLAVE;

	if (queue_id >= GAUDI_QUEUE_ID_TPC_7_0 &&
			queue_id <= GAUDI_QUEUE_ID_TPC_7_3)
		return HL_COLLECTIVE_SLAVE;

	if (queue_id >= GAUDI_QUEUE_ID_NIC_0_0 &&
			queue_id <= GAUDI_QUEUE_ID_NIC_9_3)
		return HL_COLLECTIVE_SLAVE;

	return HL_COLLECTIVE_NOT_SUPPORTED;
}
static inline void set_default_power_values(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (hdev->card_type == cpucp_card_type_pmc) {
		prop->max_power_default = MAX_POWER_DEFAULT_PMC;

		if (prop->fw_security_enabled)
			prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC;
		else
			prop->dc_power_default = DC_POWER_DEFAULT_PMC;
	} else {
		prop->max_power_default = MAX_POWER_DEFAULT_PCI;
		prop->dc_power_default = DC_POWER_DEFAULT_PCI;
	}
}
static int gaudi_set_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 num_sync_stream_queues = 0;
	int i;

	prop->max_queues = GAUDI_QUEUE_ID_SIZE;
	prop->hw_queues_props = kcalloc(prop->max_queues,
			sizeof(struct hw_queue_properties),
			GFP_KERNEL);

	if (!prop->hw_queues_props)
		return -ENOMEM;

	for (i = 0 ; i < prop->max_queues ; i++) {
		if (gaudi_queue_type[i] == QUEUE_TYPE_EXT) {
			prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
			prop->hw_queues_props[i].driver_only = 0;
			prop->hw_queues_props[i].supports_sync_stream = 1;
			prop->hw_queues_props[i].cb_alloc_flags =
				CB_ALLOC_KERNEL;
			num_sync_stream_queues++;
		} else if (gaudi_queue_type[i] == QUEUE_TYPE_CPU) {
			prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
			prop->hw_queues_props[i].driver_only = 1;
			prop->hw_queues_props[i].supports_sync_stream = 0;
			prop->hw_queues_props[i].cb_alloc_flags =
				CB_ALLOC_KERNEL;
		} else if (gaudi_queue_type[i] == QUEUE_TYPE_INT) {
			prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
			prop->hw_queues_props[i].driver_only = 0;
			prop->hw_queues_props[i].supports_sync_stream = 0;
			prop->hw_queues_props[i].cb_alloc_flags =
				CB_ALLOC_USER;
		}
		prop->hw_queues_props[i].collective_mode =
				get_collective_mode(hdev, i);
	}

	prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
	prop->collective_first_sob = 0;
	prop->collective_first_mon = 0;

	/* 2 SOBs per internal queue stream are reserved for collective */
	prop->sync_stream_first_sob =
			ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR)
			* QMAN_STREAMS * HL_RSVD_SOBS;

	/* 1 monitor per internal queue stream is reserved for collective
	 * 2 monitors per external queue stream are reserved for collective
	 */
	prop->sync_stream_first_mon =
			(NUMBER_OF_COLLECTIVE_QUEUES * QMAN_STREAMS) +
			(NUMBER_OF_EXT_HW_QUEUES * 2);
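	/*
	 * As a worked example of the SOB formula above, assuming
	 * NUMBER_OF_SOBS_IN_GRP is 11 (ten NIC engines plus one reduction
	 * engine) and HL_MAX_SOBS_PER_MONITOR is 8: each collective group is
	 * padded to ALIGN(11, 8) = 16 SOBs, so the first sync-stream SOB sits
	 * at 16 * QMAN_STREAMS * HL_RSVD_SOBS.
	 */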
	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = GAUDI_HBM_SIZE_32GB;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
					SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
	prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;
	prop->dram_supports_virtual_memory = false;

	prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT;
	prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT;
	prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT;
	prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT;
	prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT;
	prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK;
	prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK;
	prop->pmmu.hop2_mask = MMU_V1_1_HOP2_MASK;
	prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK;
	prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK;
	prop->pmmu.start_addr = VA_HOST_SPACE_START;
	prop->pmmu.end_addr =
			(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
	prop->pmmu.page_size = PAGE_SIZE_4KB;
	prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
	prop->pmmu.last_mask = LAST_MASK;

	/* PMMU and HPMMU are the same except for the page size */
	memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
	prop->pmmu_huge.page_size = PAGE_SIZE_2MB;

	/* shifts and masks are the same in PMMU and DMMU */
	memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu));
	prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
	prop->dmmu.end_addr = VA_HOST_SPACE_END;
	prop->dmmu.page_size = PAGE_SIZE_2MB;
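	/*
	 * Note the even split of the host VA range above: the PMMU (and its
	 * 2MB pmmu_huge alias) covers the lower half, while the DMMU
	 * properties cover the upper half up to VA_HOST_SPACE_END.
	 */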
	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GAUDI_EVENT_SIZE;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;

	set_default_power_values(hdev);

	prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;

	prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
	prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;

	strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
					CARD_NAME_MAX_LEN);

	prop->max_pending_cs = GAUDI_MAX_PENDING_CS;

	prop->first_available_user_sob[HL_GAUDI_WS_DCORE] =
			prop->sync_stream_first_sob +
			(num_sync_stream_queues * HL_RSVD_SOBS);
	prop->first_available_user_mon[HL_GAUDI_WS_DCORE] =
			prop->sync_stream_first_mon +
			(num_sync_stream_queues * HL_RSVD_MONS);

	prop->first_available_user_msix_interrupt = USHRT_MAX;

	for (i = 0 ; i < HL_MAX_DCORES ; i++)
		prop->first_available_cq[i] = USHRT_MAX;

	prop->fw_cpu_boot_dev_sts0_valid = false;
	prop->fw_cpu_boot_dev_sts1_valid = false;
	prop->hard_reset_done_by_fw = false;
	prop->gic_interrupts_enable = true;

	prop->server_type = HL_SERVER_TYPE_UNKNOWN;

	prop->clk_pll_index = HL_GAUDI_MME_PLL;
	prop->max_freq_value = GAUDI_MAX_CLK_FREQ;

	prop->use_get_power_for_reset_history = true;

	return 0;
}
static int gaudi_pci_bars_map(struct hl_device *hdev)
{
	static const char * const name[] = {"SRAM", "CFG", "HBM"};
	bool is_wc[3] = {false, false, true};
	int rc;

	rc = hl_pci_bars_map(hdev, name, is_wc);
	if (rc)
		return rc;

	hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] +
			(CFG_BASE - SPI_FLASH_BASE_ADDR);

	return 0;
}
static u64 gaudi_set_hbm_bar_base(struct hl_device *hdev, u64 addr)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct hl_inbound_pci_region pci_region;
	u64 old_addr = addr;
	int rc;

	if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr))
		return old_addr;

	if (hdev->asic_prop.iatu_done_by_fw)
		return U64_MAX;

	/* Inbound Region 2 - Bar 4 - Point to HBM */
	pci_region.mode = PCI_BAR_MATCH_MODE;
	pci_region.bar = HBM_BAR_ID;
	pci_region.addr = addr;
	rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
	if (rc)
		return U64_MAX;

	if (gaudi) {
		old_addr = gaudi->hbm_bar_cur_addr;
		gaudi->hbm_bar_cur_addr = addr;
	}

	return old_addr;
}
static int gaudi_init_iatu(struct hl_device *hdev)
{
	struct hl_inbound_pci_region inbound_region;
	struct hl_outbound_pci_region outbound_region;
	int rc;

	if (hdev->asic_prop.iatu_done_by_fw)
		return 0;

	/* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = SRAM_BAR_ID;
	inbound_region.addr = SRAM_BASE_ADDR;
	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
	if (rc)
		goto done;

	/* Inbound Region 1 - Bar 2 - Point to SPI FLASH */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = CFG_BAR_ID;
	inbound_region.addr = SPI_FLASH_BASE_ADDR;
	rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
	if (rc)
		goto done;

	/* Inbound Region 2 - Bar 4 - Point to HBM */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = HBM_BAR_ID;
	inbound_region.addr = DRAM_PHYS_BASE;
	rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
	if (rc)
		goto done;

	hdev->asic_funcs->set_dma_mask_from_fw(hdev);

	/* Outbound Region 0 - Point to Host */
	outbound_region.addr = HOST_PHYS_BASE;
	outbound_region.size = HOST_PHYS_SIZE;
	rc = hl_pci_set_outbound_region(hdev, &outbound_region);

done:
	return rc;
}
static enum hl_device_hw_state gaudi_get_hw_state(struct hl_device *hdev)
{
	return RREG32(mmHW_STATE);
}
static int gaudi_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 fw_boot_status;
	int rc;

	rc = gaudi_set_fixed_properties(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed setting fixed properties\n");
		return rc;
	}

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_BAR_ID) != SRAM_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_BAR_ID),
			SRAM_BAR_SIZE);
		rc = -ENODEV;
		goto free_queue_props;
	}

	if (pci_resource_len(pdev, CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
								CFG_BAR_ID),
			CFG_BAR_SIZE);
		rc = -ENODEV;
		goto free_queue_props;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
	hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID);

	/* If FW security is enabled at this point it means no access to ELBI */
	if (hdev->asic_prop.fw_security_enabled) {
		hdev->asic_prop.iatu_done_by_fw = true;
		/*
		 * GIC-security-bit can ONLY be set by CPUCP, so in this stage
		 * decision can only be taken based on PCI ID security.
		 */
		hdev->asic_prop.gic_interrupts_enable = false;
		goto pci_init;
	}

	rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
				&fw_boot_status);
	if (rc)
		goto free_queue_props;

	/* Check whether FW is configuring iATU */
	if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
			(fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
		hdev->asic_prop.iatu_done_by_fw = true;

pci_init:
	rc = hl_pci_init(hdev);
	if (rc)
		goto free_queue_props;

	/* Before continuing in the initialization, we need to read the preboot
	 * version to determine whether we run with a security-enabled firmware
	 */
	rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
					mmCPU_BOOT_DEV_STS0,
					mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
					mmCPU_BOOT_ERR1,
					GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC);
	if (rc) {
		if (hdev->reset_on_preboot_fail)
			hdev->asic_funcs->hw_fini(hdev, true, false);
		goto pci_fini;
	}

	if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
		dev_info(hdev->dev,
			"H/W state is dirty, must reset before initializing\n");
		hdev->asic_funcs->hw_fini(hdev, true, false);
	}

	return 0;

pci_fini:
	hl_pci_fini(hdev);
free_queue_props:
	kfree(hdev->asic_prop.hw_queues_props);
	return rc;
}
static int gaudi_early_fini(struct hl_device *hdev)
{
	kfree(hdev->asic_prop.hw_queues_props);

	hl_pci_fini(hdev);

	return 0;
}
/**
 * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 */
static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
	u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
	int rc;

	if (hdev->asic_prop.fw_security_enabled) {
		struct gaudi_device *gaudi = hdev->asic_specific;

		if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
			return 0;

		rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
		if (rc)
			return rc;

		freq = pll_freq_arr[2];
	} else {
		/* Backward compatibility */
		div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
		div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
		nr = RREG32(mmPSOC_CPU_PLL_NR);
		nf = RREG32(mmPSOC_CPU_PLL_NF);
		od = RREG32(mmPSOC_CPU_PLL_OD);

		if (div_sel == DIV_SEL_REF_CLK ||
				div_sel == DIV_SEL_DIVIDED_REF) {
			if (div_sel == DIV_SEL_REF_CLK)
				freq = PLL_REF_CLK;
			else
				freq = PLL_REF_CLK / (div_fctr + 1);
		} else if (div_sel == DIV_SEL_PLL_CLK ||
				div_sel == DIV_SEL_DIVIDED_PLL) {
			pll_clk = PLL_REF_CLK * (nf + 1) /
					((nr + 1) * (od + 1));
			if (div_sel == DIV_SEL_PLL_CLK)
				freq = pll_clk;
			else
				freq = pll_clk / (div_fctr + 1);
		} else {
			dev_warn(hdev->dev,
				"Received invalid div select value: %d",
				div_sel);
			freq = 0;
		}
	}

	prop->psoc_timestamp_frequency = freq;
	prop->psoc_pci_pll_nr = nr;
	prop->psoc_pci_pll_nf = nf;
	prop->psoc_pci_pll_od = od;
	prop->psoc_pci_pll_div_factor = div_fctr;

	return 0;
}
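/*
 * A worked example of the backward-compatibility path above, assuming a
 * 50 MHz reference clock (PLL_REF_CLK) and illustrative register values
 * nf = 31, nr = 0, od = 1, div_fctr = 1 in DIV_SEL_DIVIDED_PLL mode:
 *
 *	pll_clk = 50 * (31 + 1) / ((0 + 1) * (1 + 1)) = 800 MHz
 *	freq    = 800 / (1 + 1)                       = 400 MHz
 */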
static int _gaudi_init_tpc_mem(struct hl_device *hdev,
		dma_addr_t tpc_kernel_src_addr, u32 tpc_kernel_size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct packet_lin_dma *init_tpc_mem_pkt;
	struct hl_cs_job *job;
	struct hl_cb *cb;
	u64 dst_addr;
	u32 cb_size, ctl;
	u8 tpc_id;
	int rc;

	cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
	if (!cb)
		return -EFAULT;

	init_tpc_mem_pkt = cb->kernel_address;
	cb_size = sizeof(*init_tpc_mem_pkt);
	memset(init_tpc_mem_pkt, 0, cb_size);

	init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size);

	ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
	ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);

	init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);
	dst_addr = (prop->sram_user_base_address &
			GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
			GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
	init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);

	job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
	if (!job) {
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto release_cb;
	}

	job->id = 0;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);

	hl_debugfs_add_job(hdev, job);

	rc = gaudi_send_job_on_qman0(hdev, job);
	if (rc)
		goto free_job;

	for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
		rc = gaudi_run_tpc_kernel(hdev, dst_addr, tpc_id);
		if (rc)
			break;
	}

free_job:
	hl_userptr_delete_list(hdev, &job->userptr_list);
	hl_debugfs_remove_job(hdev, job);
	kfree(job);
	atomic_dec(&cb->cs_cnt);

release_cb:
	hl_cb_put(cb);
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	return rc;
}
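/*
 * The packet header above is assembled with FIELD_PREP() from
 * <linux/bitfield.h>, which shifts a value into the bit position described
 * by a mask. As a standalone illustration (the mask here is made up for the
 * example and is not a real Gaudi definition):
 *
 *	#define EXAMPLE_OPCODE_MASK	GENMASK(28, 24)
 *
 *	u32 ctl = FIELD_PREP(EXAMPLE_OPCODE_MASK, PACKET_LIN_DMA);
 *
 * is equivalent to (PACKET_LIN_DMA << 24) & EXAMPLE_OPCODE_MASK.
 */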
/**
 * gaudi_init_tpc_mem() - Initialize TPC memories.
 * @hdev: Pointer to hl_device structure.
 *
 * Copy TPC kernel fw from firmware file and run it to initialize TPC memories.
 *
 * Return: 0 for success, negative value for error.
 */
static int gaudi_init_tpc_mem(struct hl_device *hdev)
{
	const struct firmware *fw;
	size_t fw_size;
	void *cpu_addr;
	dma_addr_t dma_handle;
	int rc, count = 5;

again:
	rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
	if (rc == -EINTR && count-- > 0) {
		msleep(50);
		goto again;
	}

	if (rc) {
		dev_err(hdev->dev, "Failed to load firmware file %s\n",
				GAUDI_TPC_FW_FILE);
		goto out;
	}

	fw_size = fw->size;
	cpu_addr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, fw_size,
			&dma_handle, GFP_KERNEL | __GFP_ZERO);
	if (!cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate %zu of dma memory for TPC kernel\n",
			fw_size);
		rc = -ENOMEM;
		goto out;
	}

	memcpy(cpu_addr, fw->data, fw_size);

	rc = _gaudi_init_tpc_mem(hdev, dma_handle, fw_size);

	hdev->asic_funcs->asic_dma_free_coherent(hdev, fw->size, cpu_addr,
			dma_handle);

out:
	release_firmware(fw);
	return rc;
}
static void gaudi_collective_map_sobs(struct hl_device *hdev, u32 stream)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_collective_properties *prop = &gaudi->collective_props;
	struct hl_hw_queue *q;
	u32 i, sob_id, sob_group_id, queue_id;

	/* Iterate through SOB groups and assign a SOB for each slave queue */
	sob_group_id =
		stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream];
	sob_id = prop->hw_sob_group[sob_group_id].base_sob_id;

	queue_id = GAUDI_QUEUE_ID_NIC_0_0 + stream;
	for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
		q = &hdev->kernel_queues[queue_id + (4 * i)];
		q->sync_stream_prop.collective_sob_id = sob_id + i;
	}

	/* Both DMA5 and TPC7 use the same resources since only a single
	 * engine needs to participate in the reduction process
	 */
	queue_id = GAUDI_QUEUE_ID_DMA_5_0 + stream;
	q = &hdev->kernel_queues[queue_id];
	q->sync_stream_prop.collective_sob_id =
			sob_id + NIC_NUMBER_OF_ENGINES;

	queue_id = GAUDI_QUEUE_ID_TPC_7_0 + stream;
	q = &hdev->kernel_queues[queue_id];
	q->sync_stream_prop.collective_sob_id =
			sob_id + NIC_NUMBER_OF_ENGINES;
}
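/*
 * The "queue_id + (4 * i)" stride above works because every engine exposes
 * four streams, i.e. four consecutive queue ids; stepping by 4 from
 * GAUDI_QUEUE_ID_NIC_0_0 + stream therefore visits the same stream on each
 * successive NIC engine, as laid out in gaudi_queue_type[] above.
 */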
static void gaudi_sob_group_hw_reset(struct kref *ref)
{
	struct gaudi_hw_sob_group *hw_sob_group =
		container_of(ref, struct gaudi_hw_sob_group, kref);
	struct hl_device *hdev = hw_sob_group->hdev;
	int i;

	for (i = 0 ; i < NUMBER_OF_SOBS_IN_GRP ; i++)
		WREG32((mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
			(hw_sob_group->base_sob_id * 4) + (i * 4)), 0);

	kref_init(&hw_sob_group->kref);
}
static void gaudi_sob_group_reset_error(struct kref *ref)
{
	struct gaudi_hw_sob_group *hw_sob_group =
		container_of(ref, struct gaudi_hw_sob_group, kref);
	struct hl_device *hdev = hw_sob_group->hdev;

	dev_crit(hdev->dev,
		"SOB release shouldn't be called here, base_sob_id: %d\n",
		hw_sob_group->base_sob_id);
}
static void gaudi_collective_mstr_sob_mask_set(struct gaudi_device *gaudi)
{
	struct gaudi_collective_properties *prop;
	int i;

	prop = &gaudi->collective_props;

	memset(prop->mstr_sob_mask, 0, sizeof(prop->mstr_sob_mask));

	for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++)
		if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
			prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=
					BIT(i % HL_MAX_SOBS_PER_MONITOR);

	/* Set collective engine bit */
	prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=
				BIT(i % HL_MAX_SOBS_PER_MONITOR);
}
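/*
 * For example, assuming ten NIC engines and HL_MAX_SOBS_PER_MONITOR = 8:
 * enabled NICs 0-7 populate bits 0-7 of mstr_sob_mask[0], NICs 8-9 populate
 * bits 0-1 of mstr_sob_mask[1], and the unconditional store after the loop
 * (i == 10) sets bit 2 of mstr_sob_mask[1] for the DMA5/TPC7 reduction
 * engine, whose SOB is mapped right after the NIC SOBs in
 * gaudi_collective_map_sobs().
 */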
static int gaudi_collective_init(struct hl_device *hdev)
{
	u32 i, sob_id, reserved_sobs_per_group;
	struct gaudi_collective_properties *prop;
	struct gaudi_device *gaudi;

	gaudi = hdev->asic_specific;
	prop = &gaudi->collective_props;
	sob_id = hdev->asic_prop.collective_first_sob;

	/* First sob in group must be aligned to HL_MAX_SOBS_PER_MONITOR */
	reserved_sobs_per_group =
		ALIGN(NUMBER_OF_SOBS_IN_GRP, HL_MAX_SOBS_PER_MONITOR);

	/* Init SOB groups */
	for (i = 0 ; i < NUM_SOB_GROUPS; i++) {
		prop->hw_sob_group[i].hdev = hdev;
		prop->hw_sob_group[i].base_sob_id = sob_id;
		sob_id += reserved_sobs_per_group;
		gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref);
	}

	for (i = 0 ; i < QMAN_STREAMS; i++) {
		prop->next_sob_group_val[i] = 1;
		prop->curr_sob_group_idx[i] = 0;
		gaudi_collective_map_sobs(hdev, i);
	}

	gaudi_collective_mstr_sob_mask_set(gaudi);

	return 0;
}
static void gaudi_reset_sob_group(struct hl_device *hdev, u16 sob_group)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_collective_properties *cprop = &gaudi->collective_props;

	kref_put(&cprop->hw_sob_group[sob_group].kref,
					gaudi_sob_group_hw_reset);
}
static void gaudi_collective_master_init_job(struct hl_device *hdev,
		struct hl_cs_job *job, u32 stream, u32 sob_group_offset)
{
	u32 master_sob_base, master_monitor, queue_id, cb_size = 0;
	struct gaudi_collective_properties *cprop;
	struct hl_gen_wait_properties wait_prop;
	struct hl_sync_stream_properties *prop;
	struct gaudi_device *gaudi;

	gaudi = hdev->asic_specific;
	cprop = &gaudi->collective_props;
	queue_id = job->hw_queue_id;
	prop = &hdev->kernel_queues[queue_id].sync_stream_prop;

	master_sob_base =
		cprop->hw_sob_group[sob_group_offset].base_sob_id;
	master_monitor = prop->collective_mstr_mon_id[0];

	cprop->hw_sob_group[sob_group_offset].queue_id = queue_id;

	dev_dbg(hdev->dev,
		"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
		master_sob_base, cprop->mstr_sob_mask[0],
		cprop->next_sob_group_val[stream],
		master_monitor, queue_id);

	wait_prop.data = (void *) job->patched_cb;
	wait_prop.sob_base = master_sob_base;
	wait_prop.sob_mask = cprop->mstr_sob_mask[0];
	wait_prop.sob_val = cprop->next_sob_group_val[stream];
	wait_prop.mon_id = master_monitor;
	wait_prop.q_idx = queue_id;
	wait_prop.size = cb_size;
	cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);

	master_sob_base += HL_MAX_SOBS_PER_MONITOR;
	master_monitor = prop->collective_mstr_mon_id[1];

	dev_dbg(hdev->dev,
		"Generate master wait CBs, sob %d (mask %#x), val:0x%x, mon %u, q %d\n",
		master_sob_base, cprop->mstr_sob_mask[1],
		cprop->next_sob_group_val[stream],
		master_monitor, queue_id);

	wait_prop.sob_base = master_sob_base;
	wait_prop.sob_mask = cprop->mstr_sob_mask[1];
	wait_prop.mon_id = master_monitor;
	wait_prop.size = cb_size;
	cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);
}
static void gaudi_collective_slave_init_job(struct hl_device *hdev,
		struct hl_cs_job *job, struct hl_cs_compl *cs_cmpl)
{
	struct hl_gen_wait_properties wait_prop;
	struct hl_sync_stream_properties *prop;
	u32 queue_id, cb_size = 0;

	queue_id = job->hw_queue_id;
	prop = &hdev->kernel_queues[queue_id].sync_stream_prop;

	if (job->cs->encaps_signals) {
		/* use the encaps signal handle stored earlier in the flow
		 * and set the SOB information from the encaps
		 * signals handle
		 */
		hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job,
						cs_cmpl);

		dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n",
				job->cs->sequence,
				cs_cmpl->hw_sob->sob_id,
				cs_cmpl->sob_val);
	}

	/* Add to wait CBs using slave monitor */
	wait_prop.data = (void *) job->user_cb;
	wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
	wait_prop.sob_mask = 0x1;
	wait_prop.sob_val = cs_cmpl->sob_val;
	wait_prop.mon_id = prop->collective_slave_mon_id;
	wait_prop.q_idx = queue_id;
	wait_prop.size = cb_size;

	dev_dbg(hdev->dev,
		"Generate slave wait CB, sob %d, val:%x, mon %d, q %d\n",
		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
		prop->collective_slave_mon_id, queue_id);

	cb_size += gaudi_gen_wait_cb(hdev, &wait_prop);

	dev_dbg(hdev->dev,
		"generate signal CB, sob_id: %d, sob val: 1, q_idx: %d\n",
		prop->collective_sob_id, queue_id);

	cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
			prop->collective_sob_id, cb_size, false);
}
static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
{
	struct hl_cs_compl *signal_cs_cmpl =
		container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
	struct hl_cs_compl *cs_cmpl =
		container_of(cs->fence, struct hl_cs_compl, base_fence);
	struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
	struct gaudi_collective_properties *cprop;
	u32 stream, queue_id, sob_group_offset;
	struct gaudi_device *gaudi;
	struct hl_device *hdev;
	struct hl_cs_job *job;
	struct hl_ctx *ctx;

	ctx = cs->ctx;
	hdev = ctx->hdev;
	gaudi = hdev->asic_specific;
	cprop = &gaudi->collective_props;

	if (cs->encaps_signals) {
		cs_cmpl->hw_sob = handle->hw_sob;
		/* at this checkpoint we only need the hw_sob pointer
		 * for the completion check before start going over the jobs
		 * of the master/slaves, the sob_value will be taken later on
		 * in gaudi_collective_slave_init_job depending on each
		 * job's wait offset value.
		 */
		cs_cmpl->sob_val = 0;
	} else {
		/* copy the SOB id and value of the signal CS */
		cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
		cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
	}

	/* check again if the signal cs already completed.
	 * if yes then don't send any wait cs since the hw_sob
	 * could be in reset already. if signal is not completed
	 * then get refcount to hw_sob to prevent resetting the sob
	 * while wait cs is not submitted.
	 * note that this check is protected by two locks,
	 * hw queue lock and completion object lock,
	 * and the same completion object lock also protects
	 * the hw_sob reset handler function.
	 * The hw_queue lock prevents out-of-sync of the hw_sob
	 * refcount value, changed by signal/wait flows.
	 */
	spin_lock(&signal_cs_cmpl->lock);

	if (completion_done(&cs->signal_fence->completion)) {
		spin_unlock(&signal_cs_cmpl->lock);
		return -EINVAL;
	}

	/* Increment kref since all slave queues are now waiting on it */
	kref_get(&cs_cmpl->hw_sob->kref);

	spin_unlock(&signal_cs_cmpl->lock);

	/* Calculate the stream from collective master queue (1st job) */
	job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
	stream = job->hw_queue_id % 4;
	sob_group_offset =
		stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream];

	list_for_each_entry(job, &cs->job_list, cs_node) {
		queue_id = job->hw_queue_id;

		if (hdev->kernel_queues[queue_id].collective_mode ==
				HL_COLLECTIVE_MASTER)
			gaudi_collective_master_init_job(hdev, job, stream,
						sob_group_offset);
		else
			gaudi_collective_slave_init_job(hdev, job, cs_cmpl);
	}

	cs_cmpl->sob_group = sob_group_offset;

	/* Handle sob group kref and wraparound */
	kref_get(&cprop->hw_sob_group[sob_group_offset].kref);
	cprop->next_sob_group_val[stream]++;

	if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) {
		/*
		 * Decrement as we reached the max value.
		 * The release function won't be called here as we've
		 * just incremented the refcount.
		 */
		kref_put(&cprop->hw_sob_group[sob_group_offset].kref,
				gaudi_sob_group_reset_error);
		cprop->next_sob_group_val[stream] = 1;
		/* only two SOBs are currently in use */
		cprop->curr_sob_group_idx[stream] =
			(cprop->curr_sob_group_idx[stream] + 1) &
							(HL_RSVD_SOBS - 1);

		gaudi_collective_map_sobs(hdev, stream);

		dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n",
				cprop->curr_sob_group_idx[stream], stream);
	}

	hl_fence_put(cs->signal_fence);
	cs->signal_fence = NULL;

	return 0;
}
static int gaudi_collective_wait_create_job(struct hl_device *hdev,
		struct hl_ctx *ctx, struct hl_cs *cs,
		enum hl_collective_mode mode, u32 queue_id, u32 wait_queue_id,
		u32 encaps_signal_offset)
{
	struct hw_queue_properties *hw_queue_prop;
	struct hl_cs_counters_atomic *cntr;
	struct hl_cs_job *job;
	struct hl_cb *cb;
	u32 cb_size;
	bool patched_cb;

	cntr = &hdev->aggregated_cs_counters;

	if (mode == HL_COLLECTIVE_MASTER) {
		/* CB size of collective master queue contains
		 * 4 msg short packets for monitor 1 configuration
		 * 1 fence packet
		 * 4 msg short packets for monitor 2 configuration
		 * 1 fence packet
		 * 2 msg prot packets for completion and MSI-X
		 */
		cb_size = sizeof(struct packet_msg_short) * 8 +
				sizeof(struct packet_fence) * 2 +
				sizeof(struct packet_msg_prot) * 2;
		patched_cb = true;
	} else {
		/* CB size of collective slave queues contains
		 * 4 msg short packets for monitor configuration
		 * 1 fence packet
		 * 1 additional msg short packet for sob signal
		 */
		cb_size = sizeof(struct packet_msg_short) * 5 +
				sizeof(struct packet_fence);
		patched_cb = false;
	}

	hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id];
	job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);
	if (!job) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		return -ENOMEM;
	}

	/* Allocate internal mapped CB for non patched CBs */
	cb = hl_cb_kernel_create(hdev, cb_size,
			hdev->mmu_enable && !patched_cb);
	if (!cb) {
		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		kfree(job);
		return -EFAULT;
	}

	job->id = 0;
	job->cs = cs;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = queue_id;

	/* since it's guaranteed to have only one chunk in the collective wait
	 * cs, we can use this chunk to set the encapsulated signal offset
	 * in the jobs.
	 */
	if (cs->encaps_signals)
		job->encaps_sig_wait_offset = encaps_signal_offset;

	/*
	 * No need in parsing, user CB is the patched CB.
	 * We call hl_cb_destroy() out of two reasons - we don't need
	 * the CB in the CB idr anymore and to decrement its refcount as
	 * it was incremented inside hl_cb_kernel_create().
	 */
	if (patched_cb)
		job->patched_cb = job->user_cb;
	else
		job->patched_cb = NULL;

	job->job_cb_size = job->user_cb_size;
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	/* increment refcount as for external queues we get completion */
	if (hw_queue_prop->type == QUEUE_TYPE_EXT)
		cs_get(cs);

	cs->jobs_in_queue_cnt[job->hw_queue_id]++;

	list_add_tail(&job->cs_node, &cs->job_list);

	hl_debugfs_add_job(hdev, job);

	return 0;
}
static int gaudi_collective_wait_create_jobs(struct hl_device *hdev,
		struct hl_ctx *ctx, struct hl_cs *cs,
		u32 wait_queue_id, u32 collective_engine_id,
		u32 encaps_signal_offset)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct hw_queue_properties *hw_queue_prop;
	u32 queue_id, collective_queue, num_jobs;
	u32 stream, nic_queue, nic_idx = 0;
	bool skip;
	int i, rc = 0;

	/* Verify wait queue id is configured as master */
	hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id];
	if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
		dev_err(hdev->dev,
			"Queue %d is not configured as collective master\n",
			wait_queue_id);
		return -EINVAL;
	}

	/* Verify engine id is supported */
	if (collective_engine_id != GAUDI_ENGINE_ID_DMA_5 &&
			collective_engine_id != GAUDI_ENGINE_ID_TPC_7) {
		dev_err(hdev->dev,
			"Collective wait does not support engine %u\n",
			collective_engine_id);
		return -EINVAL;
	}

	stream = wait_queue_id % 4;

	if (collective_engine_id == GAUDI_ENGINE_ID_DMA_5)
		collective_queue = GAUDI_QUEUE_ID_DMA_5_0 + stream;
	else
		collective_queue = GAUDI_QUEUE_ID_TPC_7_0 + stream;

	num_jobs = NUMBER_OF_SOBS_IN_GRP + 1;
	nic_queue = GAUDI_QUEUE_ID_NIC_0_0 + stream;

	/* First job goes to the collective master queue, it will wait for
	 * the collective slave queues to finish execution.
	 * The synchronization is done using two monitors:
	 * First monitor for NICs 0-7, second monitor for NICs 8-9 and the
	 * reduction engine (DMA5/TPC7).
	 *
	 * Rest of the jobs goes to the collective slave queues which will
	 * all wait for the user to signal sob 'cs_cmpl->sob_val'.
	 */
	for (i = 0 ; i < num_jobs ; i++) {
		if (i == 0) {
			queue_id = wait_queue_id;
			rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
				HL_COLLECTIVE_MASTER, queue_id,
				wait_queue_id, encaps_signal_offset);
		} else {
			if (nic_idx < NIC_NUMBER_OF_ENGINES) {
				if (gaudi->hw_cap_initialized &
						BIT(HW_CAP_NIC_SHIFT + nic_idx))
					skip = false;
				else
					skip = true;

				queue_id = nic_queue;
				nic_queue += 4;
				nic_idx++;

				if (skip)
					continue;
			} else {
				queue_id = collective_queue;
			}

			rc = gaudi_collective_wait_create_job(hdev, ctx, cs,
				HL_COLLECTIVE_SLAVE, queue_id,
				wait_queue_id, encaps_signal_offset);
		}

		if (rc)
			return rc;
	}

	return rc;
}
static int gaudi_late_init(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int rc;

	rc = gaudi->cpucp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get cpucp info\n");
		return rc;
	}

	if ((hdev->card_type == cpucp_card_type_pci) &&
			(hdev->nic_ports_mask & 0x3)) {
		dev_info(hdev->dev,
			"PCI card detected, only 8 ports are enabled\n");
		hdev->nic_ports_mask &= ~0x3;

		/* Stop and disable unused NIC QMANs */
		WREG32(mmNIC0_QM0_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
					NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
					NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

		WREG32(mmNIC0_QM1_GLBL_CFG1, NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
					NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
					NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

		WREG32(mmNIC0_QM0_GLBL_CFG0, 0);
		WREG32(mmNIC0_QM1_GLBL_CFG0, 0);

		gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1);
	}

	rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
		return rc;
	}

	/* Scrub both SRAM and DRAM */
	rc = hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
	if (rc)
		goto disable_pci_access;

	rc = gaudi_fetch_psoc_frequency(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to fetch psoc frequency\n");
		goto disable_pci_access;
	}

	rc = gaudi_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
		goto disable_pci_access;
	}

	rc = gaudi_init_tpc_mem(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize TPC memories\n");
		goto disable_pci_access;
	}

	rc = gaudi_collective_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to init collective\n");
		goto disable_pci_access;
	}

	/* We only support a single ASID for the user, so for the sake of optimization, just
	 * initialize the ASID one time during device initialization with the fixed value of 1
	 */
	gaudi_mmu_prepare(hdev, 1);

	hl_fw_set_pll_profile(hdev);

	return 0;

disable_pci_access:
	hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);

	return rc;
}
static void gaudi_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}
static int gaudi_alloc_cpu_accessible_dma_mem(struct hl_device *hdev)
{
	dma_addr_t dma_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr;
	void *virt_addr_arr[GAUDI_ALLOC_CPU_MEM_RETRY_CNT] = {};
	int i, j, rc = 0;

	/*
	 * The device CPU works with 40-bits addresses, while bit 39 must be set
	 * to '1' when accessing the host.
	 * Bits 49:39 of the full host address are saved for a later
	 * configuration of the HW to perform extension to 50 bits.
	 * Because there is a single HW register that holds the extension bits,
	 * these bits must be identical across the entire allocated range.
	 */

	for (i = 0 ; i < GAUDI_ALLOC_CPU_MEM_RETRY_CNT ; i++) {
		virt_addr_arr[i] =
			hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
						HL_CPU_ACCESSIBLE_MEM_SIZE,
						&dma_addr_arr[i],
						GFP_KERNEL | __GFP_ZERO);
		if (!virt_addr_arr[i]) {
			rc = -ENOMEM;
			goto free_dma_mem_arr;
		}

		end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;
		if (GAUDI_CPU_PCI_MSB_ADDR(dma_addr_arr[i]) ==
				GAUDI_CPU_PCI_MSB_ADDR(end_addr))
			break;
	}

	if (i == GAUDI_ALLOC_CPU_MEM_RETRY_CNT) {
		dev_err(hdev->dev,
			"MSB of CPU accessible DMA memory are not identical in all range\n");
		rc = -EFAULT;
		goto free_dma_mem_arr;
	}

	hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
	hdev->cpu_accessible_dma_address = dma_addr_arr[i];
	hdev->cpu_pci_msb_addr =
		GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);

	if (!hdev->asic_prop.fw_security_enabled)
		GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);

free_dma_mem_arr:
	for (j = 0 ; j < i ; j++)
		hdev->asic_funcs->asic_dma_free_coherent(hdev,
						HL_CPU_ACCESSIBLE_MEM_SIZE,
						virt_addr_arr[j],
						dma_addr_arr[j]);

	return rc;
}
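/*
 * The retry loop above is a "window fitting" pattern: an allocation is only
 * acceptable if its first and last byte share the same upper address bits
 * (the GAUDI_CPU_PCI_MSB_ADDR() extension bits), since a single HW register
 * extends all device CPU accesses with one fixed MSB value. Unsuitable
 * attempts are kept allocated until the end so the allocator cannot hand
 * back the same range again, and are then all freed by the fall-through
 * free_dma_mem_arr loop.
 */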
static void gaudi_free_internal_qmans_pq_mem(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u32 i;

	for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
		q = &gaudi->internal_qmans[i];
		if (!q->pq_kernel_addr)
			continue;
		hdev->asic_funcs->asic_dma_free_coherent(hdev, q->pq_size,
							q->pq_kernel_addr,
							q->pq_dma_addr);
	}
}
static int gaudi_alloc_internal_qmans_pq_mem(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	int rc;
	u32 i;

	for (i = 0 ; i < GAUDI_QUEUE_ID_SIZE ; i++) {
		if (gaudi_queue_type[i] != QUEUE_TYPE_INT)
			continue;

		q = &gaudi->internal_qmans[i];

		switch (i) {
		case GAUDI_QUEUE_ID_DMA_2_0 ... GAUDI_QUEUE_ID_DMA_7_3:
			q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;
			break;
		case GAUDI_QUEUE_ID_MME_0_0 ... GAUDI_QUEUE_ID_MME_1_3:
			q->pq_size = MME_QMAN_SIZE_IN_BYTES;
			break;
		case GAUDI_QUEUE_ID_TPC_0_0 ... GAUDI_QUEUE_ID_TPC_7_3:
			q->pq_size = TPC_QMAN_SIZE_IN_BYTES;
			break;
		case GAUDI_QUEUE_ID_NIC_0_0 ... GAUDI_QUEUE_ID_NIC_9_3:
			q->pq_size = NIC_QMAN_SIZE_IN_BYTES;
			break;
		default:
			dev_err(hdev->dev, "Bad internal queue index %d", i);
			rc = -EINVAL;
			goto free_internal_qmans_pq_mem;
		}

		q->pq_kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
						hdev, q->pq_size,
						&q->pq_dma_addr,
						GFP_KERNEL | __GFP_ZERO);
		if (!q->pq_kernel_addr) {
			rc = -ENOMEM;
			goto free_internal_qmans_pq_mem;
		}
	}

	return 0;

free_internal_qmans_pq_mem:
	gaudi_free_internal_qmans_pq_mem(hdev);
	return rc;
}
static void gaudi_set_pci_memory_regions(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_mem_region *region;

	/* CFG */
	region = &hdev->pci_mem_region[PCI_REGION_CFG];
	region->region_base = CFG_BASE;
	region->region_size = CFG_SIZE;
	region->offset_in_bar = CFG_BASE - SPI_FLASH_BASE_ADDR;
	region->bar_size = CFG_BAR_SIZE;
	region->bar_id = CFG_BAR_ID;
	region->used = 1;

	/* SRAM */
	region = &hdev->pci_mem_region[PCI_REGION_SRAM];
	region->region_base = SRAM_BASE_ADDR;
	region->region_size = SRAM_SIZE;
	region->offset_in_bar = 0;
	region->bar_size = SRAM_BAR_SIZE;
	region->bar_id = SRAM_BAR_ID;
	region->used = 1;

	/* DRAM */
	region = &hdev->pci_mem_region[PCI_REGION_DRAM];
	region->region_base = DRAM_PHYS_BASE;
	region->region_size = hdev->asic_prop.dram_size;
	region->offset_in_bar = 0;
	region->bar_size = prop->dram_pci_bar_size;
	region->bar_id = HBM_BAR_ID;
	region->used = 1;

	/* SP SRAM */
	region = &hdev->pci_mem_region[PCI_REGION_SP_SRAM];
	region->region_base = PSOC_SCRATCHPAD_ADDR;
	region->region_size = PSOC_SCRATCHPAD_SIZE;
	region->offset_in_bar = PSOC_SCRATCHPAD_ADDR - SPI_FLASH_BASE_ADDR;
	region->bar_size = CFG_BAR_SIZE;
	region->bar_id = CFG_BAR_ID;
	region->used = 1;
}
static int gaudi_sw_init(struct hl_device *hdev)
{
	struct gaudi_device *gaudi;
	u32 i, event_id = 0;
	int rc;

	/* Allocate device structure */
	gaudi = kzalloc(sizeof(*gaudi), GFP_KERNEL);
	if (!gaudi)
		return -ENOMEM;

	for (i = 0 ; i < ARRAY_SIZE(gaudi_irq_map_table) ; i++) {
		if (gaudi_irq_map_table[i].valid) {
			if (event_id == GAUDI_EVENT_SIZE) {
				dev_err(hdev->dev,
					"Event array exceeds the limit of %u events\n",
					GAUDI_EVENT_SIZE);
				rc = -EINVAL;
				goto free_gaudi_device;
			}

			gaudi->events[event_id++] =
					gaudi_irq_map_table[i].fc_id;
		}
	}

	gaudi->cpucp_info_get = gaudi_cpucp_info_get;

	hdev->asic_specific = gaudi;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_gaudi_device;
	}

	rc = gaudi_alloc_cpu_accessible_dma_mem(hdev);
	if (rc)
		goto free_dma_pool;

	hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_accessible_dma_pool;
	}

	rc = gaudi_alloc_internal_qmans_pq_mem(hdev);
	if (rc)
		goto free_cpu_accessible_dma_pool;

	spin_lock_init(&gaudi->hw_queues_lock);

	hdev->supports_sync_stream = true;
	hdev->supports_coresight = true;
	hdev->supports_staged_submission = true;
	hdev->supports_wait_for_multi_cs = true;

	hdev->asic_funcs->set_pci_memory_regions(hdev);
	hdev->stream_master_qid_arr =
				hdev->asic_funcs->get_stream_master_qid_arr();
	hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE;

	return 0;

free_cpu_accessible_dma_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
	if (!hdev->asic_prop.fw_security_enabled)
		GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
					hdev->cpu_pci_msb_addr);
	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_gaudi_device:
	kfree(gaudi);
	return rc;
}
static int gaudi_sw_fini(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	gaudi_free_internal_qmans_pq_mem(hdev);

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	if (!hdev->asic_prop.fw_security_enabled)
		GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
					hdev->cpu_pci_msb_addr);

	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(gaudi);

	return 0;
}

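/*
 * In single MSI mode all interrupt sources share one vector, so the handler
 * below cannot tell which source fired and simply polls every completion
 * queue and then the event queue.
 */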
static irqreturn_t gaudi_irq_handler_single(int irq, void *arg)
{
	struct hl_device *hdev = arg;
	int i;

	if (hdev->disabled)
		return IRQ_NONE;

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_irq_handler_cq(irq, &hdev->completion_queue[i]);

	hl_irq_handler_eq(irq, &hdev->event_queue);

	return IRQ_HANDLED;
}

/*
 * For backward compatibility, new MSI interrupts should be set after the
 * existing CPU and NIC interrupts.
 */
static int gaudi_pci_irq_vector(struct hl_device *hdev, unsigned int nr,
				bool cpu_eq)
{
	int msi_vec;

	if ((nr != GAUDI_EVENT_QUEUE_MSI_IDX) && (cpu_eq))
		dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n",
				GAUDI_EVENT_QUEUE_MSI_IDX);

	msi_vec = ((nr < GAUDI_EVENT_QUEUE_MSI_IDX) || (cpu_eq)) ? nr :
			(nr + NIC_NUMBER_OF_ENGINES + 1);

	return pci_irq_vector(hdev->pdev, msi_vec);
}

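/*
 * Resulting vector layout: indices below GAUDI_EVENT_QUEUE_MSI_IDX and the
 * CPU event queue map 1:1 to their index, while any newer interrupt is
 * pushed past the NIC vectors (nr + NIC_NUMBER_OF_ENGINES + 1).
 */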
static int gaudi_enable_msi_single(struct hl_device *hdev)
{
	int rc, irq;

	dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n");

	irq = gaudi_pci_irq_vector(hdev, 0, false);
	rc = request_irq(irq, gaudi_irq_handler_single, 0,
			"gaudi single msi", hdev);
	if (rc)
		dev_err(hdev->dev,
			"Failed to request single MSI IRQ\n");

	return rc;
}

static int gaudi_enable_msi_multi(struct hl_device *hdev)
{
	int cq_cnt = hdev->asic_prop.completion_queues_count;
	int rc, i, irq_cnt_init, irq;

	for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
		irq = gaudi_pci_irq_vector(hdev, i, false);
		rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi_irq_name[i],
				&hdev->completion_queue[i]);
		if (rc) {
			dev_err(hdev->dev, "Failed to request IRQ %d", irq);
			goto free_irqs;
		}
	}

	irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX, true);
	rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi_irq_name[cq_cnt],
				&hdev->event_queue);
	if (rc) {
		dev_err(hdev->dev, "Failed to request IRQ %d", irq);
		goto free_irqs;
	}

	return 0;

free_irqs:
	for (i = 0 ; i < irq_cnt_init ; i++)
		free_irq(gaudi_pci_irq_vector(hdev, i, false),
				&hdev->completion_queue[i]);
	return rc;
}

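/*
 * gaudi_enable_msi() picks the working mode by the number of vectors the
 * PCI core actually granted; with min_vecs and max_vecs both set to 1 here,
 * the driver currently always ends up in single MSI mode.
 */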
static int gaudi_enable_msi(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int rc;

	if (gaudi->hw_cap_initialized & HW_CAP_MSI)
		return 0;

	rc = pci_alloc_irq_vectors(hdev->pdev, 1, 1, PCI_IRQ_MSI);
	if (rc < 0) {
		dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);
		return rc;
	}

	if (rc < NUMBER_OF_INTERRUPTS) {
		gaudi->multi_msi_mode = false;
		rc = gaudi_enable_msi_single(hdev);
	} else {
		gaudi->multi_msi_mode = true;
		rc = gaudi_enable_msi_multi(hdev);
	}

	if (rc)
		goto free_pci_irq_vectors;

	gaudi->hw_cap_initialized |= HW_CAP_MSI;

	return 0;

free_pci_irq_vectors:
	pci_free_irq_vectors(hdev->pdev);
	return rc;
}

static void gaudi_sync_irqs(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int i, cq_cnt = hdev->asic_prop.completion_queues_count;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
		return;

	/* Wait for all pending IRQs to be finished */
	if (gaudi->multi_msi_mode) {
		for (i = 0 ; i < cq_cnt ; i++)
			synchronize_irq(gaudi_pci_irq_vector(hdev, i, false));

		synchronize_irq(gaudi_pci_irq_vector(hdev,
						GAUDI_EVENT_QUEUE_MSI_IDX,
						true));
	} else {
		synchronize_irq(gaudi_pci_irq_vector(hdev, 0, false));
	}
}

static void gaudi_disable_msi(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int i, irq, cq_cnt = hdev->asic_prop.completion_queues_count;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))
		return;

	gaudi_sync_irqs(hdev);

	if (gaudi->multi_msi_mode) {
		irq = gaudi_pci_irq_vector(hdev, GAUDI_EVENT_QUEUE_MSI_IDX,
						true);
		free_irq(irq, &hdev->event_queue);

		for (i = 0 ; i < cq_cnt ; i++) {
			irq = gaudi_pci_irq_vector(hdev, i, false);
			free_irq(irq, &hdev->completion_queue[i]);
		}
	} else {
		free_irq(gaudi_pci_irq_vector(hdev, 0, false), hdev);
	}

	pci_free_irq_vectors(hdev->pdev);

	gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
}

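/*
 * The scrambler and E2E setup routines below bail out early when the
 * firmware owns the configuration (security enabled or the relevant
 * CPU_BOOT_DEV_STS0 bit set), or when the setup was already done.
 */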
static void gaudi_init_scrambler_sram(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (hdev->asic_prop.fw_security_enabled)
		return;

	if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
					CPU_BOOT_DEV_STS0_SRAM_SCR_EN)
		return;

	if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)
		return;

	if (!hdev->sram_scrambler_enable)
		return;

	WREG32(mmNIF_RTR_CTRL_0_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_1_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_2_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_3_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_4_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_5_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_6_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_7_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_0_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_1_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_2_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_3_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_4_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_5_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_6_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_7_SCRAM_SRAM_EN,
			1 << IF_RTR_CTRL_SCRAM_SRAM_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_SRAM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_SRAM_EN_VAL_SHIFT);

	gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER;
}

static void gaudi_init_scrambler_hbm(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (hdev->asic_prop.fw_security_enabled)
		return;

	if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
					CPU_BOOT_DEV_STS0_DRAM_SCR_EN)
		return;

	if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)
		return;

	if (!hdev->dram_scrambler_enable)
		return;

	WREG32(mmNIF_RTR_CTRL_0_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_1_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_2_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_3_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_4_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_5_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_6_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_7_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_0_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_1_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_2_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_3_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_4_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_5_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_6_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_7_SCRAM_HBM_EN,
			1 << IF_RTR_CTRL_SCRAM_HBM_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_SCRAM_HBM_EN,
			1 << DMA_IF_DOWN_CHX_SCRAM_HBM_EN_VAL_SHIFT);

	gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER;
}

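/*
 * gaudi_init_e2e() programs the E2E HBM/PCI write and read size registers
 * of the SIF/NIF routers and the DMA_IF down channels, then sets the
 * matching E2E_HBM_EN/E2E_PCI_EN bits. The raw HBM byte counts are divided
 * by 8 (the ">> 3") before being written.
 */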
static void gaudi_init_e2e(struct hl_device *hdev)
{
	if (hdev->asic_prop.fw_security_enabled)
		return;

	if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
					CPU_BOOT_DEV_STS0_E2E_CRED_EN)
		return;

	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 247 >> 3);
	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 785 >> 3);
	WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 49);
	WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 101);

	WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);

	WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
	WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
	WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);

	WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
	WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);

	WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 297 >> 3);
	WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 908 >> 3);
	WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 19);
	WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 19);

	WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_WR_SIZE, 318 >> 3);
	WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_RD_SIZE, 956 >> 3);
	WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_WR_SIZE, 79);
	WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_RD_SIZE, 163);

	WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_RD_SIZE, 39);

	WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_RD_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_WR_SIZE, 19);
	WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_WR_SIZE, 176 >> 3);
	WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_RD_SIZE, 32 >> 3);
	WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_WR_SIZE, 19);
	WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_RD_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_RD_SIZE, 32);

	WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_WR_SIZE, 275 >> 3);
	WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_RD_SIZE, 614 >> 3);
	WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_WR_SIZE, 1);
	WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_RD_SIZE, 39);

	WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_WR_SIZE, 318 >> 3);
	WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_RD_SIZE, 956 >> 3);
	WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_WR_SIZE, 79);
	WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_RD_SIZE, 79);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_RD_SIZE, 338);

	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_WR_SIZE, 344 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_RD_SIZE, 1000 >> 3);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_WR_SIZE, 162);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_RD_SIZE, 338);

	if (!hdev->dram_scrambler_enable) {
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmSIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_2_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_3_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_4_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_5_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_6_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_0, 0x21);
		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_SEL_1, 0x22);
		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmNIF_RTR_CTRL_7_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_E_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_N_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_N_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_S_DOWN_CH0_NL_HBM_PC_SEL_3, 0x20);

		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_0, 0x21);
		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_SEL_1, 0x22);
		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_OFFSET_18, 0x1F);
		WREG32(mmDMA_IF_W_S_DOWN_CH1_NL_HBM_PC_SEL_3, 0x20);
	}

	WREG32(mmSIF_RTR_CTRL_0_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_0_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_1_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_1_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_2_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_2_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_3_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_3_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_4_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_4_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_5_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_5_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_6_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_6_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmSIF_RTR_CTRL_7_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmSIF_RTR_CTRL_7_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_0_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_0_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_1_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_1_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_2_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_2_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_3_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_3_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_4_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_4_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_5_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_5_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_6_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_6_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmNIF_RTR_CTRL_7_E2E_HBM_EN,
			1 << IF_RTR_CTRL_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmNIF_RTR_CTRL_7_E2E_PCI_EN,
			1 << IF_RTR_CTRL_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_N_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_E_S_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_N_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH0_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);

	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_HBM_EN,
			1 << DMA_IF_DOWN_CHX_E2E_HBM_EN_VAL_SHIFT);
	WREG32(mmDMA_IF_W_S_DOWN_CH1_E2E_PCI_EN,
			1 << DMA_IF_DOWN_CHX_E2E_PCI_EN_VAL_SHIFT);
}

static void gaudi_init_hbm_cred(struct hl_device *hdev)
{
	u32 hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;

	if (hdev->asic_prop.fw_security_enabled)
		return;

	if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &
					CPU_BOOT_DEV_STS0_HBM_CRED_EN)
		return;

	hbm0_wr = 0x33333333;
	hbm0_rd = 0x77777777;
	hbm1_wr = 0x55555555;
	hbm1_rd = 0xDDDDDDDD;

	WREG32(mmDMA_IF_E_N_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_E_N_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_E_N_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_E_N_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_E_S_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_E_S_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_E_S_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_E_S_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_W_N_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_W_N_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_W_N_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_W_N_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_W_S_HBM0_WR_CRED_CNT, hbm0_wr);
	WREG32(mmDMA_IF_W_S_HBM1_WR_CRED_CNT, hbm1_wr);
	WREG32(mmDMA_IF_W_S_HBM0_RD_CRED_CNT, hbm0_rd);
	WREG32(mmDMA_IF_W_S_HBM1_RD_CRED_CNT, hbm1_rd);

	WREG32(mmDMA_IF_E_N_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_E_S_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_N_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_S_HBM_CRED_EN_0,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));

	WREG32(mmDMA_IF_E_N_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_E_S_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_N_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
	WREG32(mmDMA_IF_W_S_HBM_CRED_EN_1,
			(1 << DMA_IF_HBM_CRED_EN_READ_CREDIT_EN_SHIFT) |
			(1 << DMA_IF_HBM_CRED_EN_WRITE_CREDIT_EN_SHIFT));
}

static void gaudi_init_golden_registers(struct hl_device *hdev)
{
	u32 tpc_offset;
	int tpc_id, i;

	gaudi_init_e2e(hdev);
	gaudi_init_hbm_cred(hdev);

	for (tpc_id = 0, tpc_offset = 0;
			tpc_id < TPC_NUMBER_OF_ENGINES;
			tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
		/* Mask all arithmetic interrupts from TPC */
		WREG32(mmTPC0_CFG_TPC_INTR_MASK + tpc_offset, 0x8FFE);
		/* Set 16 cache lines */
		WREG32_FIELD(TPC0_CFG_MSS_CONFIG, tpc_offset,
				ICACHE_FETCH_LINE_NUM, 2);
	}

	/* Make sure 1st 128 bytes in SRAM are 0 for Tensor DMA */
	for (i = 0 ; i < 128 ; i += 8)
		writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i);

	WREG32(mmMME0_CTRL_EUS_ROLLUP_CNT_ADD, 3);
	WREG32(mmMME1_CTRL_EUS_ROLLUP_CNT_ADD, 3);
	WREG32(mmMME2_CTRL_EUS_ROLLUP_CNT_ADD, 3);
	WREG32(mmMME3_CTRL_EUS_ROLLUP_CNT_ADD, 3);
}

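/*
 * Each QMAN exposes four streams, and the per-stream registers are spaced
 * four bytes apart; this is why the queue offset in the QMAN init routines
 * below is "qman_id * 4" on top of the QMAN block offset.
 */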
static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
				int qman_id, dma_addr_t qman_pq_addr)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
	u32 q_off, dma_qm_offset;
	u32 dma_qm_err_cfg, irq_handler_offset;

	dma_qm_offset = dma_id * DMA_QMAN_OFFSET;

	mtr_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = dma_qm_offset + qman_id * 4;

	WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_pq_addr));
	WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_pq_addr));

	WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
	WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);

	WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off, QMAN_LDMA_SIZE_OFFSET);
	WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
			QMAN_LDMA_SRC_OFFSET);
	WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
			QMAN_LDMA_DST_OFFSET);

	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);

	WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);

	/* The following configuration is needed only once per QMAN */
	if (qman_id == 0) {
		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
				le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);

		/* Configure RAZWI IRQ */
		dma_qm_err_cfg = PCI_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err)
			dma_qm_err_cfg |=
				PCI_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;

		WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);

		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
			lower_32_bits(CFG_BASE + irq_handler_offset));
		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
			upper_32_bits(CFG_BASE + irq_handler_offset));

		WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
			gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
					dma_id);

		WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
				QMAN_EXTERNAL_MAKE_TRUSTED);

		WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
	}
}

static void gaudi_init_dma_core(struct hl_device *hdev, int dma_id)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 dma_err_cfg = 1 << DMA0_CORE_ERR_CFG_ERR_MSG_EN_SHIFT;
	u32 dma_offset = dma_id * DMA_CORE_OFFSET;
	u32 irq_handler_offset;

	/* Set to maximum possible according to physical size */
	WREG32(mmDMA0_CORE_RD_MAX_OUTSTAND + dma_offset, 0);
	WREG32(mmDMA0_CORE_RD_MAX_SIZE + dma_offset, 0);

	/* WA for H/W bug H3-2116 */
	WREG32(mmDMA0_CORE_LBW_MAX_OUTSTAND + dma_offset, 15);

	/* STOP_ON bit implies no completion to operation in case of RAZWI */
	if (hdev->stop_on_err)
		dma_err_cfg |= 1 << DMA0_CORE_ERR_CFG_STOP_ON_ERR_SHIFT;

	WREG32(mmDMA0_CORE_ERR_CFG + dma_offset, dma_err_cfg);

	irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
			mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
			le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl);

	WREG32(mmDMA0_CORE_ERRMSG_ADDR_LO + dma_offset,
		lower_32_bits(CFG_BASE + irq_handler_offset));
	WREG32(mmDMA0_CORE_ERRMSG_ADDR_HI + dma_offset,
		upper_32_bits(CFG_BASE + irq_handler_offset));

	WREG32(mmDMA0_CORE_ERRMSG_WDATA + dma_offset,
		gaudi_irq_map_table[GAUDI_EVENT_DMA0_CORE].cpu_id + dma_id);
	WREG32(mmDMA0_CORE_PROT + dma_offset,
			1 << DMA0_CORE_PROT_ERR_VAL_SHIFT);
	/* If the channel is secured, it should be in MMU bypass mode */
	WREG32(mmDMA0_CORE_SECURE_PROPS + dma_offset,
			1 << DMA0_CORE_SECURE_PROPS_MMBP_SHIFT);
	WREG32(mmDMA0_CORE_CFG_0 + dma_offset, 1 << DMA0_CORE_CFG_0_EN_SHIFT);
}

static void gaudi_enable_qman(struct hl_device *hdev, int dma_id,
		u32 enable_mask)
{
	u32 dma_qm_offset = dma_id * DMA_QMAN_OFFSET;

	WREG32(mmDMA0_QM_GLBL_CFG0 + dma_qm_offset, enable_mask);
}

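/*
 * Kernel queue bookkeeping for the PCI DMA QMANs: queues that live after
 * the CPU queue are shifted by one slot (cpu_skip), and their MSI vectors
 * must also skip the CPU EQ and NIC vectors (nic_skip) to hit the right
 * MSI register.
 */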
static void gaudi_init_pci_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct hl_hw_queue *q;
	int i, j, dma_id, cpu_skip, nic_skip, cq_id = 0, q_idx, msi_vec = 0;

	if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)
		return;

	for (i = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
		dma_id = gaudi_dma_assignment[i];
		/*
		 * For queues after the CPU Q we need to add 1 to get the
		 * correct queue. In addition, we need to add the CPU EQ and
		 * NIC IRQs in order to get the correct MSI register.
		 */
		if (dma_id > 1) {
			cpu_skip = 1;
			nic_skip = NIC_NUMBER_OF_ENGINES;
		} else {
			cpu_skip = 0;
			nic_skip = 0;
		}

		for (j = 0 ; j < QMAN_STREAMS ; j++) {
			q_idx = 4 * dma_id + j + cpu_skip;
			q = &hdev->kernel_queues[q_idx];
			q->cq_id = cq_id++;
			q->msi_vec = nic_skip + cpu_skip + msi_vec++;
			gaudi_init_pci_dma_qman(hdev, dma_id, j,
						q->bus_address);
		}

		gaudi_init_dma_core(hdev, dma_id);

		gaudi_enable_qman(hdev, dma_id, PCI_DMA_QMAN_ENABLE);
	}

	gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA;
}

static void gaudi_init_hbm_dma_qman(struct hl_device *hdev, int dma_id,
					int qman_id, u64 qman_base_addr)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
	u32 dma_qm_err_cfg, irq_handler_offset;
	u32 q_off, dma_qm_offset;

	dma_qm_offset = dma_id * DMA_QMAN_OFFSET;

	mtr_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = dma_qm_offset + qman_id * 4;

	if (qman_id < 4) {
		WREG32(mmDMA0_QM_PQ_BASE_LO_0 + q_off,
				lower_32_bits(qman_base_addr));
		WREG32(mmDMA0_QM_PQ_BASE_HI_0 + q_off,
				upper_32_bits(qman_base_addr));

		WREG32(mmDMA0_QM_PQ_SIZE_0 + q_off, ilog2(HBM_DMA_QMAN_LENGTH));
		WREG32(mmDMA0_QM_PQ_PI_0 + q_off, 0);
		WREG32(mmDMA0_QM_PQ_CI_0 + q_off, 0);

		WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
				QMAN_CPDMA_SIZE_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
				QMAN_CPDMA_SRC_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
				QMAN_CPDMA_DST_OFFSET);
	} else {
		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
				le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);

		WREG32(mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
				QMAN_LDMA_SIZE_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
				QMAN_LDMA_SRC_OFFSET);
		WREG32(mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
				QMAN_LDMA_DST_OFFSET);

		/* Configure RAZWI IRQ */
		dma_qm_err_cfg = HBM_DMA_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err)
			dma_qm_err_cfg |=
				HBM_DMA_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;

		WREG32(mmDMA0_QM_GLBL_ERR_CFG + dma_qm_offset, dma_qm_err_cfg);

		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_LO + dma_qm_offset,
			lower_32_bits(CFG_BASE + irq_handler_offset));
		WREG32(mmDMA0_QM_GLBL_ERR_ADDR_HI + dma_qm_offset,
			upper_32_bits(CFG_BASE + irq_handler_offset));

		WREG32(mmDMA0_QM_GLBL_ERR_WDATA + dma_qm_offset,
			gaudi_irq_map_table[GAUDI_EVENT_DMA0_QM].cpu_id +
					dma_id);

		WREG32(mmDMA0_QM_ARB_ERR_MSG_EN + dma_qm_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmDMA0_QM_ARB_SLV_CHOISE_WDT + dma_qm_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmDMA0_QM_GLBL_CFG1 + dma_qm_offset, 0);
		WREG32(mmDMA0_QM_GLBL_PROT + dma_qm_offset,
				QMAN_INTERNAL_MAKE_TRUSTED);
	}

	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
	WREG32(mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);

	/* Configure DMA5 CP_MSG_BASE 2/3 for sync stream collective */
	if (gaudi_dma_assignment[dma_id] == GAUDI_ENGINE_ID_DMA_5) {
		WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off,
				mtr_base_ws_lo);
		WREG32(mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off,
				mtr_base_ws_hi);
		WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off,
				so_base_ws_lo);
		WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off,
				so_base_ws_hi);
	}
}

static void gaudi_init_hbm_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u64 qman_base_addr;
	int i, j, dma_id, internal_q_index;

	if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)
		return;

	for (i = 0 ; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1 + i];

		for (j = 0 ; j < QMAN_STREAMS ; j++) {
			/*
			 * Add the CPU queue in order to get the correct queue
			 * number, as all internal queues are placed after it
			 */
			internal_q_index = dma_id * QMAN_STREAMS + j + 1;

			q = &gaudi->internal_qmans[internal_q_index];
			qman_base_addr = (u64) q->pq_dma_addr;
			gaudi_init_hbm_dma_qman(hdev, dma_id, j,
						qman_base_addr);
		}

		/* Initializing lower CP for HBM DMA QMAN */
		gaudi_init_hbm_dma_qman(hdev, dma_id, 4, 0);

		gaudi_init_dma_core(hdev, dma_id);

		gaudi_enable_qman(hdev, dma_id, HBM_DMA_QMAN_ENABLE);
	}

	gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA;
}

static void gaudi_init_mme_qman(struct hl_device *hdev, u32 mme_offset,
					int qman_id, u64 qman_base_addr)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 irq_handler_offset;
	u32 q_off, mme_id;
	u32 mme_qm_err_cfg;

	mtr_base_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = mme_offset + qman_id * 4;

	if (qman_id < 4) {
		WREG32(mmMME0_QM_PQ_BASE_LO_0 + q_off,
				lower_32_bits(qman_base_addr));
		WREG32(mmMME0_QM_PQ_BASE_HI_0 + q_off,
				upper_32_bits(qman_base_addr));

		WREG32(mmMME0_QM_PQ_SIZE_0 + q_off, ilog2(MME_QMAN_LENGTH));
		WREG32(mmMME0_QM_PQ_PI_0 + q_off, 0);
		WREG32(mmMME0_QM_PQ_CI_0 + q_off, 0);

		WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
				QMAN_CPDMA_SIZE_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
				QMAN_CPDMA_SRC_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
				QMAN_CPDMA_DST_OFFSET);
	} else {
		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
				le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl);

		WREG32(mmMME0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
				QMAN_LDMA_SIZE_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
				QMAN_LDMA_SRC_OFFSET);
		WREG32(mmMME0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
				QMAN_LDMA_DST_OFFSET);

		/* Configure RAZWI IRQ */
		mme_id = mme_offset /
				(mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2;

		mme_qm_err_cfg = MME_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err)
			mme_qm_err_cfg |=
				MME_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;

		WREG32(mmMME0_QM_GLBL_ERR_CFG + mme_offset, mme_qm_err_cfg);

		WREG32(mmMME0_QM_GLBL_ERR_ADDR_LO + mme_offset,
			lower_32_bits(CFG_BASE + irq_handler_offset));
		WREG32(mmMME0_QM_GLBL_ERR_ADDR_HI + mme_offset,
			upper_32_bits(CFG_BASE + irq_handler_offset));

		WREG32(mmMME0_QM_GLBL_ERR_WDATA + mme_offset,
			gaudi_irq_map_table[GAUDI_EVENT_MME0_QM].cpu_id +
					mme_id);

		WREG32(mmMME0_QM_ARB_ERR_MSG_EN + mme_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmMME0_QM_ARB_SLV_CHOISE_WDT + mme_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmMME0_QM_GLBL_CFG1 + mme_offset, 0);
		WREG32(mmMME0_QM_GLBL_PROT + mme_offset,
				QMAN_INTERNAL_MAKE_TRUSTED);
	}

	WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_lo);
	WREG32(mmMME0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_hi);
	WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_lo);
	WREG32(mmMME0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_hi);
}

static void gaudi_init_mme_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u64 qman_base_addr;
	u32 mme_offset;
	int i, internal_q_index;

	if (gaudi->hw_cap_initialized & HW_CAP_MME)
		return;

	/*
	 * map GAUDI_QUEUE_ID_MME_0_X to the N_W_MME (mmMME2_QM_BASE)
	 * and GAUDI_QUEUE_ID_MME_1_X to the S_W_MME (mmMME0_QM_BASE)
	 */

	mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;

	for (i = 0 ; i < MME_NUMBER_OF_QMANS ; i++) {
		internal_q_index = GAUDI_QUEUE_ID_MME_0_0 + i;
		q = &gaudi->internal_qmans[internal_q_index];
		qman_base_addr = (u64) q->pq_dma_addr;
		gaudi_init_mme_qman(hdev, mme_offset, (i & 0x3),
					qman_base_addr);
		if (i == 3)
			mme_offset = 0;
	}

	/* Initializing lower CP for MME QMANs */
	mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;
	gaudi_init_mme_qman(hdev, mme_offset, 4, 0);
	gaudi_init_mme_qman(hdev, 0, 4, 0);

	WREG32(mmMME2_QM_GLBL_CFG0, QMAN_MME_ENABLE);
	WREG32(mmMME0_QM_GLBL_CFG0, QMAN_MME_ENABLE);

	gaudi->hw_cap_initialized |= HW_CAP_MME;
}

static void gaudi_init_tpc_qman(struct hl_device *hdev, u32 tpc_offset,
				int qman_id, u64 qman_base_addr)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
	u32 tpc_qm_err_cfg, irq_handler_offset;
	u32 q_off, tpc_id;

	mtr_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = tpc_offset + qman_id * 4;

	tpc_id = tpc_offset /
			(mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);

	if (qman_id < 4) {
		WREG32(mmTPC0_QM_PQ_BASE_LO_0 + q_off,
				lower_32_bits(qman_base_addr));
		WREG32(mmTPC0_QM_PQ_BASE_HI_0 + q_off,
				upper_32_bits(qman_base_addr));

		WREG32(mmTPC0_QM_PQ_SIZE_0 + q_off, ilog2(TPC_QMAN_LENGTH));
		WREG32(mmTPC0_QM_PQ_PI_0 + q_off, 0);
		WREG32(mmTPC0_QM_PQ_CI_0 + q_off, 0);

		WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
				QMAN_CPDMA_SIZE_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
				QMAN_CPDMA_SRC_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
				QMAN_CPDMA_DST_OFFSET);
	} else {
		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
				le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl);

		WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET_0 + q_off,
				QMAN_LDMA_SIZE_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
				QMAN_LDMA_SRC_OFFSET);
		WREG32(mmTPC0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
				QMAN_LDMA_DST_OFFSET);

		/* Configure RAZWI IRQ */
		tpc_qm_err_cfg = TPC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err)
			tpc_qm_err_cfg |=
				TPC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;

		WREG32(mmTPC0_QM_GLBL_ERR_CFG + tpc_offset, tpc_qm_err_cfg);

		WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + tpc_offset,
			lower_32_bits(CFG_BASE + irq_handler_offset));
		WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + tpc_offset,
			upper_32_bits(CFG_BASE + irq_handler_offset));

		WREG32(mmTPC0_QM_GLBL_ERR_WDATA + tpc_offset,
			gaudi_irq_map_table[GAUDI_EVENT_TPC0_QM].cpu_id +
					tpc_id);

		WREG32(mmTPC0_QM_ARB_ERR_MSG_EN + tpc_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmTPC0_QM_ARB_SLV_CHOISE_WDT + tpc_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmTPC0_QM_GLBL_CFG1 + tpc_offset, 0);
		WREG32(mmTPC0_QM_GLBL_PROT + tpc_offset,
				QMAN_INTERNAL_MAKE_TRUSTED);
	}

	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);

	/* Configure TPC7 CP_MSG_BASE 2/3 for sync stream collective */
	if (tpc_id == 6) {
		WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_LO_0 + q_off,
				mtr_base_ws_lo);
		WREG32(mmTPC0_QM_CP_MSG_BASE2_ADDR_HI_0 + q_off,
				mtr_base_ws_hi);
		WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off,
				so_base_ws_lo);
		WREG32(mmTPC0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off,
				so_base_ws_hi);
	}
}

static void gaudi_init_tpc_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u64 qman_base_addr;
	u32 so_base_hi, tpc_offset = 0;
	u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH -
			mmTPC0_CFG_SM_BASE_ADDRESS_HIGH;
	int i, tpc_id, internal_q_index;

	if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)
		return;

	so_base_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);

	for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
		for (i = 0 ; i < QMAN_STREAMS ; i++) {
			internal_q_index = GAUDI_QUEUE_ID_TPC_0_0 +
						tpc_id * QMAN_STREAMS + i;
			q = &gaudi->internal_qmans[internal_q_index];
			qman_base_addr = (u64) q->pq_dma_addr;
			gaudi_init_tpc_qman(hdev, tpc_offset, i,
						qman_base_addr);
		}

		/* Initializing lower CP for TPC QMAN */
		gaudi_init_tpc_qman(hdev, tpc_offset, 4, 0);

		/* Enable the QMAN and TPC channel */
		WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset,
				QMAN_TPC_ENABLE);

		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + tpc_id * tpc_delta,
				so_base_hi);

		tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;

		gaudi->hw_cap_initialized |=
				FIELD_PREP(HW_CAP_TPC_MASK, 1 << tpc_id);
	}
}

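/*
 * NIC QMANs are configured per stream like the other internal QMANs, but
 * the global error reporting of each QMAN is programmed only once, when
 * stream 0 is initialized.
 */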
static void gaudi_init_nic_qman(struct hl_device *hdev, u32 nic_offset,
				int qman_id, u64 qman_base_addr, int nic_id)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 mtr_base_en_lo, mtr_base_en_hi, mtr_base_ws_lo, mtr_base_ws_hi;
	u32 so_base_en_lo, so_base_en_hi, so_base_ws_lo, so_base_ws_hi;
	u32 nic_qm_err_cfg, irq_handler_offset;
	u32 q_off;

	mtr_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_en_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_en_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0);
	mtr_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	mtr_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0);
	so_base_ws_lo = lower_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);
	so_base_ws_hi = upper_32_bits(CFG_BASE +
			mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0);

	q_off = nic_offset + qman_id * 4;

	WREG32(mmNIC0_QM0_PQ_BASE_LO_0 + q_off, lower_32_bits(qman_base_addr));
	WREG32(mmNIC0_QM0_PQ_BASE_HI_0 + q_off, upper_32_bits(qman_base_addr));

	WREG32(mmNIC0_QM0_PQ_SIZE_0 + q_off, ilog2(NIC_QMAN_LENGTH));
	WREG32(mmNIC0_QM0_PQ_PI_0 + q_off, 0);
	WREG32(mmNIC0_QM0_PQ_CI_0 + q_off, 0);

	WREG32(mmNIC0_QM0_CP_LDMA_TSIZE_OFFSET_0 + q_off,
			QMAN_LDMA_SIZE_OFFSET);
	WREG32(mmNIC0_QM0_CP_LDMA_SRC_BASE_LO_OFFSET_0 + q_off,
			QMAN_LDMA_SRC_OFFSET);
	WREG32(mmNIC0_QM0_CP_LDMA_DST_BASE_LO_OFFSET_0 + q_off,
			QMAN_LDMA_DST_OFFSET);

	WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_LO_0 + q_off, mtr_base_en_lo);
	WREG32(mmNIC0_QM0_CP_MSG_BASE0_ADDR_HI_0 + q_off, mtr_base_en_hi);
	WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_LO_0 + q_off, so_base_en_lo);
	WREG32(mmNIC0_QM0_CP_MSG_BASE1_ADDR_HI_0 + q_off, so_base_en_hi);

	/* Configure NIC CP_MSG_BASE 2/3 for sync stream collective */
	WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_LO_0 + q_off, mtr_base_ws_lo);
	WREG32(mmNIC0_QM0_CP_MSG_BASE2_ADDR_HI_0 + q_off, mtr_base_ws_hi);
	WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
	WREG32(mmNIC0_QM0_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);

	if (qman_id == 0) {
		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
				le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl);

		/* Configure RAZWI IRQ */
		nic_qm_err_cfg = NIC_QMAN_GLBL_ERR_CFG_MSG_EN_MASK;
		if (hdev->stop_on_err)
			nic_qm_err_cfg |=
				NIC_QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK;

		WREG32(mmNIC0_QM0_GLBL_ERR_CFG + nic_offset, nic_qm_err_cfg);

		WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_LO + nic_offset,
			lower_32_bits(CFG_BASE + irq_handler_offset));
		WREG32(mmNIC0_QM0_GLBL_ERR_ADDR_HI + nic_offset,
			upper_32_bits(CFG_BASE + irq_handler_offset));

		WREG32(mmNIC0_QM0_GLBL_ERR_WDATA + nic_offset,
			gaudi_irq_map_table[GAUDI_EVENT_NIC0_QM0].cpu_id +
					nic_id);

		WREG32(mmNIC0_QM0_ARB_ERR_MSG_EN + nic_offset,
				QM_ARB_ERR_MSG_EN_MASK);

		/* Increase ARB WDT to support streams architecture */
		WREG32(mmNIC0_QM0_ARB_SLV_CHOISE_WDT + nic_offset,
				GAUDI_ARB_WDT_TIMEOUT);

		WREG32(mmNIC0_QM0_GLBL_CFG1 + nic_offset, 0);
		WREG32(mmNIC0_QM0_GLBL_PROT + nic_offset,
				QMAN_INTERNAL_MAKE_TRUSTED);
	}
}

static void gaudi_init_nic_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;
	u64 qman_base_addr;
	u32 nic_offset = 0;
	u32 nic_delta_between_qmans =
			mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
	u32 nic_delta_between_nics =
			mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
	int i, nic_id, internal_q_index;

	if (!hdev->nic_ports_mask)
		return;

	if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK)
		return;

	dev_dbg(hdev->dev, "Initializing NIC QMANs\n");

	for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) {
		if (!(hdev->nic_ports_mask & (1 << nic_id))) {
			nic_offset += nic_delta_between_qmans;
			if (nic_id & 1) {
				nic_offset -= (nic_delta_between_qmans * 2);
				nic_offset += nic_delta_between_nics;
			}
			continue;
		}

		for (i = 0 ; i < QMAN_STREAMS ; i++) {
			internal_q_index = GAUDI_QUEUE_ID_NIC_0_0 +
						nic_id * QMAN_STREAMS + i;
			q = &gaudi->internal_qmans[internal_q_index];
			qman_base_addr = (u64) q->pq_dma_addr;
			gaudi_init_nic_qman(hdev, nic_offset, (i & 0x3),
						qman_base_addr, nic_id);
		}

		/* Enable the QMAN */
		WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, NIC_QMAN_ENABLE);

		nic_offset += nic_delta_between_qmans;
		if (nic_id & 1) {
			nic_offset -= (nic_delta_between_qmans * 2);
			nic_offset += nic_delta_between_nics;
		}

		gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id);
	}
}

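/*
 * Teardown is split in two: the disable routines clear GLBL_CFG0 to turn
 * the QMANs off completely, while the stop routines that follow only set
 * the PQF/CQF/CP stop bits in GLBL_CFG1 so in-flight work is halted first.
 */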
static void gaudi_disable_pci_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
		return;

	WREG32(mmDMA0_QM_GLBL_CFG0, 0);
	WREG32(mmDMA1_QM_GLBL_CFG0, 0);
	WREG32(mmDMA5_QM_GLBL_CFG0, 0);
}

static void gaudi_disable_hbm_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
		return;

	WREG32(mmDMA2_QM_GLBL_CFG0, 0);
	WREG32(mmDMA3_QM_GLBL_CFG0, 0);
	WREG32(mmDMA4_QM_GLBL_CFG0, 0);
	WREG32(mmDMA6_QM_GLBL_CFG0, 0);
	WREG32(mmDMA7_QM_GLBL_CFG0, 0);
}

static void gaudi_disable_mme_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
		return;

	WREG32(mmMME2_QM_GLBL_CFG0, 0);
	WREG32(mmMME0_QM_GLBL_CFG0, 0);
}

static void gaudi_disable_tpc_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 tpc_offset = 0;
	int tpc_id;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
		return;

	for (tpc_id = 0 ; tpc_id < TPC_NUMBER_OF_ENGINES ; tpc_id++) {
		WREG32(mmTPC0_QM_GLBL_CFG0 + tpc_offset, 0);
		tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
	}
}

static void gaudi_disable_nic_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 nic_mask, nic_offset = 0;
	u32 nic_delta_between_qmans =
			mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
	u32 nic_delta_between_nics =
			mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
	int nic_id;

	for (nic_id = 0 ; nic_id < NIC_NUMBER_OF_ENGINES ; nic_id++) {
		nic_mask = 1 << (HW_CAP_NIC_SHIFT + nic_id);

		if (gaudi->hw_cap_initialized & nic_mask)
			WREG32(mmNIC0_QM0_GLBL_CFG0 + nic_offset, 0);

		nic_offset += nic_delta_between_qmans;
		if (nic_id & 1) {
			nic_offset -= (nic_delta_between_qmans * 2);
			nic_offset += nic_delta_between_nics;
		}
	}
}

static void gaudi_stop_pci_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
		return;

	/* Stop upper CPs of QMANs 0.0 to 1.3 and 5.0 to 5.3 */
	WREG32(mmDMA0_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA1_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA5_QM_GLBL_CFG1, 0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_stop_hbm_dma_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
		return;

	/* Stop CPs of HBM DMA QMANs */

	WREG32(mmDMA2_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA3_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA4_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA6_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmDMA7_QM_GLBL_CFG1, 0x1F << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_stop_mme_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
		return;

	/* Stop CPs of MME QMANs */
	WREG32(mmMME2_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmMME0_QM_GLBL_CFG1, 0x1F << MME0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_stop_tpc_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
		return;

	WREG32(mmTPC0_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC1_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC2_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC3_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC4_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC5_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC6_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
	WREG32(mmTPC7_QM_GLBL_CFG1, 0x1F << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
}

static void gaudi_stop_nic_qmans(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	/* Stop upper CPs of QMANs */

	if (gaudi->hw_cap_initialized & HW_CAP_NIC0)
		WREG32(mmNIC0_QM0_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC1)
		WREG32(mmNIC0_QM1_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC2)
		WREG32(mmNIC1_QM0_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC3)
		WREG32(mmNIC1_QM1_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC4)
		WREG32(mmNIC2_QM0_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC5)
		WREG32(mmNIC2_QM1_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC6)
		WREG32(mmNIC3_QM0_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC7)
		WREG32(mmNIC3_QM1_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC8)
		WREG32(mmNIC4_QM0_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC9)
		WREG32(mmNIC4_QM1_GLBL_CFG1,
				NIC0_QM0_GLBL_CFG1_PQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CQF_STOP_MASK |
				NIC0_QM0_GLBL_CFG1_CP_STOP_MASK);
}

static void gaudi_pci_dma_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))
		return;

	WREG32(mmDMA0_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA1_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA5_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
}

static void gaudi_hbm_dma_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))
		return;

	WREG32(mmDMA2_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA3_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA4_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA6_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
	WREG32(mmDMA7_CORE_CFG_1, 1 << DMA0_CORE_CFG_1_HALT_SHIFT);
}

static void gaudi_mme_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))
		return;

	/* WA for H3-1800 bug: do ACC and SBAB writes twice */
	WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME0_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME0_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME1_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME1_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME2_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME2_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME3_ACC_ACC_STALL, 1 << MME_ACC_ACC_STALL_R_SHIFT);
	WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
	WREG32(mmMME3_SBAB_SB_STALL, 1 << MME_SBAB_SB_STALL_R_SHIFT);
}

static void gaudi_tpc_stall(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))
		return;

	WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}

static void gaudi_disable_clock_gating(struct hl_device *hdev)
{
	u32 qman_offset;
	int i;

	if (hdev->asic_prop.fw_security_enabled)
		return;

	for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
		WREG32(mmDMA0_QM_CGM_CFG + qman_offset, 0);
		WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, 0);

		qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG);
	}

	WREG32(mmMME0_QM_CGM_CFG, 0);
	WREG32(mmMME0_QM_CGM_CFG1, 0);
	WREG32(mmMME2_QM_CGM_CFG, 0);
	WREG32(mmMME2_QM_CGM_CFG1, 0);

	for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
		WREG32(mmTPC0_QM_CGM_CFG + qman_offset, 0);
		WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset, 0);

		qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG);
	}
}

static void gaudi_enable_timestamp(struct hl_device *hdev)
{
	/* Disable the timestamp counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);

	/* Zero the lower/upper parts of the 64-bit counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);

	/* Enable the counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
}

static void gaudi_disable_timestamp(struct hl_device *hdev)
{
	/* Disable the timestamp counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
}

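/*
 * Engines are halted in three phases with a settle delay between them:
 * first the QMAN command processors are stopped so no new work is
 * fetched, then the engine cores themselves are stalled, and finally
 * the QMANs are disabled altogether.
 */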
static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
	u32 wait_timeout_ms;

	dev_info(hdev->dev,
		"Halting compute engines and disabling interrupts\n");

	if (hdev->pldm)
		wait_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
	else
		wait_timeout_ms = GAUDI_RESET_WAIT_MSEC;

	if (fw_reset)
		goto skip_engines;

	gaudi_stop_nic_qmans(hdev);
	gaudi_stop_mme_qmans(hdev);
	gaudi_stop_tpc_qmans(hdev);
	gaudi_stop_hbm_dma_qmans(hdev);
	gaudi_stop_pci_dma_qmans(hdev);

	msleep(wait_timeout_ms);

	gaudi_pci_dma_stall(hdev);
	gaudi_hbm_dma_stall(hdev);
	gaudi_tpc_stall(hdev);
	gaudi_mme_stall(hdev);

	msleep(wait_timeout_ms);

	gaudi_disable_nic_qmans(hdev);
	gaudi_disable_mme_qmans(hdev);
	gaudi_disable_tpc_qmans(hdev);
	gaudi_disable_hbm_dma_qmans(hdev);
	gaudi_disable_pci_dma_qmans(hdev);

	gaudi_disable_timestamp(hdev);

skip_engines:
	gaudi_disable_msi(hdev);
}

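/*
 * MMU init points every ASID at its own hop-0 page table inside the
 * reserved page-tables region, then primes the STLB cache-management
 * page and enables the MMU. Note that the first cache-invalidation PI
 * after init must be 1.
 */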
static int gaudi_mmu_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gaudi_device *gaudi = hdev->asic_specific;
	u64 hop0_addr;
	int rc, i;

	if (!hdev->mmu_enable)
		return 0;

	if (gaudi->hw_cap_initialized & HW_CAP_MMU)
		return 0;

	for (i = 0 ; i < prop->max_asid ; i++) {
		hop0_addr = prop->mmu_pgt_addr +
				(i * prop->mmu_hop_table_size);

		rc = gaudi_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
		if (rc) {
			dev_err(hdev->dev,
				"failed to set hop0 addr for asid %d\n", i);
			goto err;
		}
	}

	/* init MMU cache manage page */
	WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8);
	WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);

	/* mem cache invalidation */
	WREG32(mmSTLB_MEM_CACHE_INVALIDATION, 1);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, 0);

	WREG32(mmMMU_UP_MMU_ENABLE, 1);
	WREG32(mmMMU_UP_SPI_MASK, 0xF);

	WREG32(mmSTLB_HOP_CONFIGURATION,
			hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);

	/*
	 * The H/W expects the first PI after init to be 1. After wraparound
	 * we'll write 0.
	 */
	gaudi->mmu_cache_inv_pi = 1;

	gaudi->hw_cap_initialized |= HW_CAP_MMU;

	return 0;

err:
	return rc;
}

static int gaudi_load_firmware_to_device(struct hl_device *hdev)
{
	void __iomem *dst;

	dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;

	return hl_fw_load_fw_to_device(hdev, GAUDI_LINUX_FW_FILE, dst, 0, 0);
}

static int gaudi_load_boot_fit_to_device(struct hl_device *hdev)
{
	void __iomem *dst;

	dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET;

	return hl_fw_load_fw_to_device(hdev, GAUDI_BOOT_FIT_FILE, dst, 0, 0);
}

static void gaudi_init_dynamic_firmware_loader(struct hl_device *hdev)
{
	struct dynamic_fw_load_mgr *dynamic_loader;
	struct cpu_dyn_regs *dyn_regs;

	dynamic_loader = &hdev->fw_loader.dynamic_loader;

	/*
	 * Set initial values for a few specific dynamic registers. Before
	 * the first descriptor is read from the FW, these values must be
	 * hard-coded. In later stages of the protocol they are updated
	 * automatically by reading the FW descriptor, so the data there is
	 * always up-to-date.
	 */
	dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
	dyn_regs->kmd_msg_to_cpu =
				cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
	dyn_regs->cpu_cmd_status_to_host =
				cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);

	dynamic_loader->wait_for_bl_timeout = GAUDI_WAIT_FOR_BL_TIMEOUT_USEC;
}

static void gaudi_init_static_firmware_loader(struct hl_device *hdev)
{
	struct static_fw_load_mgr *static_loader;

	static_loader = &hdev->fw_loader.static_loader;

	static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
	static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
	static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
	static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
	static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
	static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
	static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
	static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
	static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
	static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
	static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
	static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
	static_loader->cpu_reset_wait_msec = hdev->pldm ?
			GAUDI_PLDM_RESET_WAIT_MSEC :
			GAUDI_CPU_RESET_WAIT_MSEC;
}

static void gaudi_init_firmware_loader(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct fw_load_mgr *fw_loader = &hdev->fw_loader;

	/* fill common fields */
	fw_loader->fw_comp_loaded = FW_TYPE_NONE;
	fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE;
	fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE;
	fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC;
	fw_loader->boot_fit_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC;
	fw_loader->skip_bmc = !hdev->bmc_enable;
	fw_loader->sram_bar_id = SRAM_BAR_ID;
	fw_loader->dram_bar_id = HBM_BAR_ID;

	if (prop->dynamic_fw_load)
		gaudi_init_dynamic_firmware_loader(hdev);
	else
		gaudi_init_static_firmware_loader(hdev);
}

static int gaudi_init_cpu(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int rc;

	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
		return 0;

	if (gaudi->hw_cap_initialized & HW_CAP_CPU)
		return 0;

	/*
	 * The device CPU works with 40 bits addresses.
	 * This register sets the extension to 50 bits.
	 */
	if (!hdev->asic_prop.fw_security_enabled)
		WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);

	rc = hl_fw_init_cpu(hdev);

	if (rc)
		return rc;

	gaudi->hw_cap_initialized |= HW_CAP_CPU;

	return 0;
}

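/*
 * The CPU queues handshake: the driver publishes the PQ/EQ/CQ base
 * addresses and lengths to the CPU_IF registers, signals readiness
 * through CPU_IF_QUEUE_INIT, kicks the embedded CPU via the PI-update
 * interrupt and then polls until the FW reports READY_FOR_HOST.
 */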
static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 status, irq_handler_offset;
	struct hl_eq *eq;
	struct hl_hw_queue *cpu_pq =
			&hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

	WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
	WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));

	WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
	WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));

	WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW,
			lower_32_bits(hdev->cpu_accessible_dma_address));
	WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH,
			upper_32_bits(hdev->cpu_accessible_dma_address));

	WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmCPU_IF_EQ_RD_OFFS, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	if (gaudi->multi_msi_mode)
		WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP);
	else
		WREG32(mmCPU_IF_QUEUE_INIT,
			PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI);

	irq_handler_offset = prop->gic_interrupts_enable ?
			mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
			le32_to_cpu(dyn_regs->gic_host_pi_upd_irq);

	WREG32(irq_handler_offset,
		gaudi_irq_map_table[GAUDI_EVENT_PI_UPDATE].cpu_id);

	err = hl_poll_timeout(
		hdev,
		mmCPU_IF_QUEUE_INIT,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		cpu_timeout);

	if (err) {
		dev_err(hdev->dev,
			"Failed to communicate with Device CPU (CPU-CP timeout)\n");
		return -EIO;
	}

	/* update FW application security bits */
	if (prop->fw_cpu_boot_dev_sts0_valid)
		prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
	if (prop->fw_cpu_boot_dev_sts1_valid)
		prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);

	gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}

static void gaudi_pre_hw_init(struct hl_device *hdev)
{
	/* Perform read from the device to make sure device is up */
	RREG32(mmHW_STATE);

	if (!hdev->asic_prop.fw_security_enabled) {
		/* Set the access through PCI bars (Linux driver only) as
		 * secured
		 */
		WREG32(mmPCIE_WRAP_LBW_PROT_OVR,
				(PCIE_WRAP_LBW_PROT_OVR_RD_EN_MASK |
				PCIE_WRAP_LBW_PROT_OVR_WR_EN_MASK));

		/* Perform read to flush the waiting writes to ensure
		 * configuration was set in the device
		 */
		RREG32(mmPCIE_WRAP_LBW_PROT_OVR);
	}

	/*
	 * Let's mark in the H/W that we have reached this point. We check
	 * this value in the reset_before_init function to understand whether
	 * we need to reset the chip before doing H/W init. This register is
	 * cleared by the H/W upon H/W reset
	 */
	WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
}

static int gaudi_hw_init(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	int rc;

	gaudi_pre_hw_init(hdev);

	/* If iATU is done by FW, the HBM bar ALWAYS points to DRAM_PHYS_BASE.
	 * So we set it here and if anyone tries to move it later to
	 * a different address, there will be an error
	 */
	if (hdev->asic_prop.iatu_done_by_fw)
		gaudi->hbm_bar_cur_addr = DRAM_PHYS_BASE;

	/*
	 * Before pushing u-boot/linux to device, need to set the hbm bar to
	 * base address of dram
	 */
	if (gaudi_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
		dev_err(hdev->dev,
			"failed to map HBM bar to DRAM base address\n");
		return -EIO;
	}

	rc = gaudi_init_cpu(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CPU\n");
		return rc;
	}

	/* In case the clock gating was enabled in preboot we need to disable
	 * it here before touching the MME/TPC registers.
	 */
	gaudi_disable_clock_gating(hdev);

	/* SRAM scrambler must be initialized after CPU is running from HBM */
	gaudi_init_scrambler_sram(hdev);

	/* This is here just in case we are working without CPU */
	gaudi_init_scrambler_hbm(hdev);

	gaudi_init_golden_registers(hdev);

	rc = gaudi_mmu_init(hdev);
	if (rc)
		return rc;

	gaudi_init_security(hdev);

	gaudi_init_pci_dma_qmans(hdev);

	gaudi_init_hbm_dma_qmans(hdev);

	gaudi_init_mme_qmans(hdev);

	gaudi_init_tpc_qmans(hdev);

	gaudi_init_nic_qmans(hdev);

	gaudi_enable_timestamp(hdev);

	/* MSI must be enabled before CPU queues and NIC are initialized */
	rc = gaudi_enable_msi(hdev);
	if (rc)
		goto disable_queues;

	/* must be called after MSI was enabled */
	rc = gaudi_init_cpu_queues(hdev, GAUDI_CPU_TIMEOUT_USEC);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
			rc);
		goto disable_msi;
	}

	/* Perform read from the device to flush all configuration */
	RREG32(mmHW_STATE);

	return 0;

disable_msi:
	gaudi_disable_msi(hdev);
disable_queues:
	gaudi_disable_mme_qmans(hdev);
	gaudi_disable_pci_dma_qmans(hdev);

	return rc;
}

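/*
 * Hard reset is either driven by the driver (unsecured parts with no
 * FW-assisted reset) through the PSOC SW_ALL_RST register, or delegated
 * to the firmware. Either way the device CPU is halted first, and the
 * BTM_FSM register is sampled after the wait to verify that the reset
 * actually took place.
 */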
static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 status, reset_timeout_ms, cpu_timeout_ms, irq_handler_offset;
	struct gaudi_device *gaudi = hdev->asic_specific;
	bool driver_performs_reset;

	if (!hard_reset) {
		dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");
		return;
	}

	if (hdev->pldm) {
		reset_timeout_ms = GAUDI_PLDM_HRESET_TIMEOUT_MSEC;
		cpu_timeout_ms = GAUDI_PLDM_RESET_WAIT_MSEC;
	} else {
		reset_timeout_ms = GAUDI_RESET_TIMEOUT_MSEC;
		cpu_timeout_ms = GAUDI_CPU_RESET_WAIT_MSEC;
	}

	if (fw_reset) {
		dev_info(hdev->dev,
			"Firmware performs HARD reset, going to wait %dms\n",
			reset_timeout_ms);

		goto skip_reset;
	}

	driver_performs_reset = !!(!hdev->asic_prop.fw_security_enabled &&
					!hdev->asic_prop.hard_reset_done_by_fw);

	/* Set device to handle FLR by H/W as we will put the device CPU to
	 * halt mode
	 */
	if (driver_performs_reset)
		WREG32(mmPCIE_AUX_FLR_CTRL, (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK |
					PCIE_AUX_FLR_CTRL_INT_MASK_MASK));

	/* If linux is loaded in the device CPU we need to communicate with it
	 * via the GIC. Otherwise, we need to use COMMS or the MSG_TO_CPU
	 * registers in case of old F/Ws
	 */
	if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) {
		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
				le32_to_cpu(dyn_regs->gic_host_halt_irq);

		WREG32(irq_handler_offset,
			gaudi_irq_map_table[GAUDI_EVENT_HALT_MACHINE].cpu_id);

		/* This is a hail-mary attempt to revive the card in the small chance that the
		 * f/w has experienced a watchdog event, which caused it to return back to preboot.
		 * In that case, triggering reset through GIC won't help. We need to trigger the
		 * reset as if Linux wasn't loaded.
		 *
		 * We do it only if the reset cause was HB, because that would be the indication
		 * of such an event.
		 *
		 * In case watchdog hasn't expired but we still got HB, then this won't do any
		 * damage.
		 */
		if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) {
			if (hdev->asic_prop.hard_reset_done_by_fw)
				hl_fw_ask_hard_reset_without_linux(hdev);
			else
				hl_fw_ask_halt_machine_without_linux(hdev);
		}
	} else {
		if (hdev->asic_prop.hard_reset_done_by_fw)
			hl_fw_ask_hard_reset_without_linux(hdev);
		else
			hl_fw_ask_halt_machine_without_linux(hdev);
	}

	if (driver_performs_reset) {

		/* Configure the reset registers. Must be done as early as
		 * possible in case we fail during H/W initialization
		 */
		WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_H,
						(CFG_RST_H_DMA_MASK |
						CFG_RST_H_MME_MASK |
						CFG_RST_H_SM_MASK |
						CFG_RST_H_TPC_7_MASK));

		WREG32(mmPSOC_GLOBAL_CONF_SOFT_RST_CFG_L, CFG_RST_L_TPC_MASK);

		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_H,
						(CFG_RST_H_HBM_MASK |
						CFG_RST_H_TPC_7_MASK |
						CFG_RST_H_NIC_MASK |
						CFG_RST_H_SM_MASK |
						CFG_RST_H_DMA_MASK |
						CFG_RST_H_MME_MASK |
						CFG_RST_H_CPU_MASK |
						CFG_RST_H_MMU_MASK));

		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG_L,
						(CFG_RST_L_IF_MASK |
						CFG_RST_L_PSOC_MASK |
						CFG_RST_L_TPC_MASK));

		msleep(cpu_timeout_ms);

		/* Tell ASIC not to re-initialize PCIe */
		WREG32(mmPREBOOT_PCIE_EN, LKD_HARD_RESET_MAGIC);

		/* Restart BTL/BLR upon hard-reset */
		WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START, 1);

		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
			1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);

		dev_info(hdev->dev,
			"Issued HARD reset command, going to wait %dms\n",
			reset_timeout_ms);
	} else {
		dev_info(hdev->dev,
			"Firmware performs HARD reset, going to wait %dms\n",
			reset_timeout_ms);
	}

skip_reset:
	/*
	 * After hard reset, we can't poll the BTM_FSM register because the PSOC
	 * itself is in reset. Need to wait until the reset is deasserted
	 */
	msleep(reset_timeout_ms);

	status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
	if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
		dev_err(hdev->dev,
			"Timeout while waiting for device to reset 0x%x\n",
			status);

	gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM |
					HW_CAP_PCI_DMA | HW_CAP_MME | HW_CAP_TPC_MASK |
					HW_CAP_HBM_DMA | HW_CAP_PLL | HW_CAP_NIC_MASK |
					HW_CAP_MMU | HW_CAP_SRAM_SCRAMBLER |
					HW_CAP_HBM_SCRAMBLER);

	memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));

	hdev->device_cpu_is_halted = false;
}

static int gaudi_suspend(struct hl_device *hdev)
{
	int rc;

	rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
	if (rc)
		dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");

	return rc;
}

static int gaudi_resume(struct hl_device *hdev)
{
	return gaudi_init_iatu(hdev);
}

static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
			VM_DONTCOPY | VM_NORESERVE;

	rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
				(dma_addr - HOST_PHYS_BASE), size);
	if (rc)
		dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);

	return rc;
}

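/*
 * Ringing a doorbell translates the logical queue ID into the matching
 * QMAN PQ_PI register and writes the new producer index there. The CPU
 * queue is special: after the PI write, the embedded CPU must also be
 * interrupted (via the GIC or the dynamic FW register) so that it
 * notices the update.
 */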
static void gaudi_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
	struct cpu_dyn_regs *dyn_regs =
			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	u32 db_reg_offset, db_value, dma_qm_offset, q_off, irq_handler_offset;
	struct gaudi_device *gaudi = hdev->asic_specific;
	bool invalid_queue = false;
	int dma_id;

	switch (hw_queue_id) {
	case GAUDI_QUEUE_ID_DMA_0_0...GAUDI_QUEUE_ID_DMA_0_3:
		dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_1_0...GAUDI_QUEUE_ID_DMA_1_3:
		dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + (hw_queue_id & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_2_0...GAUDI_QUEUE_ID_DMA_2_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_1];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_3_0...GAUDI_QUEUE_ID_DMA_3_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_2];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_4_0...GAUDI_QUEUE_ID_DMA_4_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_3];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_5_0...GAUDI_QUEUE_ID_DMA_5_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_4];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_6_0...GAUDI_QUEUE_ID_DMA_6_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_5];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_DMA_7_0...GAUDI_QUEUE_ID_DMA_7_3:
		dma_id = gaudi_dma_assignment[GAUDI_HBM_DMA_6];
		dma_qm_offset = dma_id * DMA_QMAN_OFFSET;
		q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmDMA0_QM_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_CPU_PQ:
		if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)
			db_reg_offset = mmCPU_IF_PF_PQ_PI;
		else
			invalid_queue = true;
		break;

	case GAUDI_QUEUE_ID_MME_0_0:
		db_reg_offset = mmMME2_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_MME_0_1:
		db_reg_offset = mmMME2_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_MME_0_2:
		db_reg_offset = mmMME2_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_MME_0_3:
		db_reg_offset = mmMME2_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_MME_1_0:
		db_reg_offset = mmMME0_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_MME_1_1:
		db_reg_offset = mmMME0_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_MME_1_2:
		db_reg_offset = mmMME0_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_MME_1_3:
		db_reg_offset = mmMME0_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_0_0:
		db_reg_offset = mmTPC0_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_0_1:
		db_reg_offset = mmTPC0_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_0_2:
		db_reg_offset = mmTPC0_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_0_3:
		db_reg_offset = mmTPC0_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_1_0:
		db_reg_offset = mmTPC1_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_1_1:
		db_reg_offset = mmTPC1_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_1_2:
		db_reg_offset = mmTPC1_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_1_3:
		db_reg_offset = mmTPC1_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_2_0:
		db_reg_offset = mmTPC2_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_2_1:
		db_reg_offset = mmTPC2_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_2_2:
		db_reg_offset = mmTPC2_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_2_3:
		db_reg_offset = mmTPC2_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_3_0:
		db_reg_offset = mmTPC3_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_3_1:
		db_reg_offset = mmTPC3_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_3_2:
		db_reg_offset = mmTPC3_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_3_3:
		db_reg_offset = mmTPC3_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_4_0:
		db_reg_offset = mmTPC4_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_4_1:
		db_reg_offset = mmTPC4_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_4_2:
		db_reg_offset = mmTPC4_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_4_3:
		db_reg_offset = mmTPC4_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_5_0:
		db_reg_offset = mmTPC5_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_5_1:
		db_reg_offset = mmTPC5_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_5_2:
		db_reg_offset = mmTPC5_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_5_3:
		db_reg_offset = mmTPC5_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_6_0:
		db_reg_offset = mmTPC6_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_6_1:
		db_reg_offset = mmTPC6_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_6_2:
		db_reg_offset = mmTPC6_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_6_3:
		db_reg_offset = mmTPC6_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_TPC_7_0:
		db_reg_offset = mmTPC7_QM_PQ_PI_0;
		break;

	case GAUDI_QUEUE_ID_TPC_7_1:
		db_reg_offset = mmTPC7_QM_PQ_PI_1;
		break;

	case GAUDI_QUEUE_ID_TPC_7_2:
		db_reg_offset = mmTPC7_QM_PQ_PI_2;
		break;

	case GAUDI_QUEUE_ID_TPC_7_3:
		db_reg_offset = mmTPC7_QM_PQ_PI_3;
		break;

	case GAUDI_QUEUE_ID_NIC_0_0...GAUDI_QUEUE_ID_NIC_0_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC0))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC0_QM0_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_1_0...GAUDI_QUEUE_ID_NIC_1_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC1))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC0_QM1_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_2_0...GAUDI_QUEUE_ID_NIC_2_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC2))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC1_QM0_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_3_0...GAUDI_QUEUE_ID_NIC_3_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC3))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC1_QM1_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_4_0...GAUDI_QUEUE_ID_NIC_4_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC4))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC2_QM0_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_5_0...GAUDI_QUEUE_ID_NIC_5_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC5))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC2_QM1_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_6_0...GAUDI_QUEUE_ID_NIC_6_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC6))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC3_QM0_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_7_0...GAUDI_QUEUE_ID_NIC_7_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC7))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC3_QM1_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_8_0...GAUDI_QUEUE_ID_NIC_8_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC8))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC4_QM0_PQ_PI_0 + q_off;
		break;

	case GAUDI_QUEUE_ID_NIC_9_0...GAUDI_QUEUE_ID_NIC_9_3:
		if (!(gaudi->hw_cap_initialized & HW_CAP_NIC9))
			invalid_queue = true;

		q_off = ((hw_queue_id - 1) & 0x3) * 4;
		db_reg_offset = mmNIC4_QM1_PQ_PI_0 + q_off;
		break;

	default:
		invalid_queue = true;
	}

	if (invalid_queue) {
		/* Should never get here */
		dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
			hw_queue_id);
		return;
	}

	db_value = pi;

	/* ring the doorbell */
	WREG32(db_reg_offset, db_value);

	if (hw_queue_id == GAUDI_QUEUE_ID_CPU_PQ) {
		/* make sure device CPU will read latest data from host */
		mb();

		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
				mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
				le32_to_cpu(dyn_regs->gic_host_pi_upd_irq);

		WREG32(irq_handler_offset,
			gaudi_irq_map_table[GAUDI_EVENT_PI_UPDATE].cpu_id);
	}
}

static void gaudi_pqe_write(struct hl_device *hdev, __le64 *pqe,
				struct hl_bd *bd)
{
	__le64 *pbd = (__le64 *) bd;

	/* The QMANs are on the host memory so a simple copy suffices */
	pqe[0] = pbd[0];
	pqe[1] = pbd[1];
}

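/*
 * Host memory is exposed to the device at a fixed offset
 * (HOST_PHYS_BASE) in the device address space, so every DMA address
 * handed to the ASIC is shifted up by that base on allocation/mapping
 * and shifted back down before being returned to the kernel DMA API.
 */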
static void *gaudi_dma_alloc_coherent(struct hl_device *hdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
						dma_handle, flags);

	/* Shift to the device's base physical address of host memory */
	if (kernel_addr)
		*dma_handle += HOST_PHYS_BASE;

	return kernel_addr;
}

static void gaudi_dma_free_coherent(struct hl_device *hdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	/* Cancel the device's base physical address of host memory */
	dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;

	dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
}

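/*
 * HBM scrubbing drives all DMA cores in parallel: each core is given a
 * LIN/MEM_SET commit that fills up to a 2GB chunk with a fixed pattern,
 * and once the whole range has been dispatched the cores are polled
 * until their BUSY bit clears (or the scrub timeout expires).
 */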
static int gaudi_hbm_scrubbing(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 cur_addr = DRAM_BASE_ADDR_USER;
	u32 val;
	u32 chunk_size;
	int rc, dma_id;

	while (cur_addr < prop->dram_end_address) {
		for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) {
			u32 dma_offset = dma_id * DMA_CORE_OFFSET;

			chunk_size =
			min((u64)SZ_2G, prop->dram_end_address - cur_addr);

			dev_dbg(hdev->dev,
				"Doing HBM scrubbing for 0x%09llx - 0x%09llx\n",
				cur_addr, cur_addr + chunk_size);

			WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, 0xdeadbeaf);
			WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, 0xdeadbeaf);
			WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset,
						lower_32_bits(cur_addr));
			WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset,
						upper_32_bits(cur_addr));
			WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset,
					chunk_size);
			WREG32(mmDMA0_CORE_COMMIT + dma_offset,
					((1 << DMA0_CORE_COMMIT_LIN_SHIFT) |
					(1 << DMA0_CORE_COMMIT_MEM_SET_SHIFT)));

			cur_addr += chunk_size;

			if (cur_addr == prop->dram_end_address)
				break;
		}

		for (dma_id = 0 ; dma_id < DMA_NUMBER_OF_CHANNELS ; dma_id++) {
			u32 dma_offset = dma_id * DMA_CORE_OFFSET;

			rc = hl_poll_timeout(
				hdev,
				mmDMA0_CORE_STS0 + dma_offset,
				val,
				((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
				1000,
				HBM_SCRUBBING_TIMEOUT_US);

			if (rc) {
				dev_err(hdev->dev,
					"DMA Timeout during HBM scrubbing of DMA #%d\n",
					dma_id);
				return -EIO;
			}
		}
	}

	return 0;
}

static int gaudi_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc = 0;
	u64 val = 0;

	if (!hdev->memory_scrub)
		return 0;

	if (!addr && !size) {
		/* Wait till device is idle */
		rc = hl_poll_timeout(
				hdev,
				mmDMA0_CORE_STS0/* dummy */,
				val/* dummy */,
				(hdev->asic_funcs->is_device_idle(hdev, NULL,
						0, NULL)),
						1000,
						HBM_SCRUBBING_TIMEOUT_US);
		if (rc) {
			dev_err(hdev->dev, "waiting for idle timeout\n");
			return -EIO;
		}

		/* Scrub SRAM */
		addr = prop->sram_user_base_address;
		size = hdev->pldm ? 0x10000 :
				(prop->sram_size - SRAM_USER_BASE_OFFSET);
		val = 0x7777777777777777ull;

		rc = gaudi_memset_device_memory(hdev, addr, size, val);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to clear SRAM in mem scrub all\n");
			return rc;
		}

		/* Scrub HBM using all DMA channels in parallel */
		rc = gaudi_hbm_scrubbing(hdev);
		if (rc)
			dev_err(hdev->dev,
				"Failed to clear HBM in mem scrub all\n");
	}

	return rc;
}

static void *gaudi_get_int_queue_base(struct hl_device *hdev,
				u32 queue_id, dma_addr_t *dma_handle,
				u16 *queue_len)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_internal_qman_info *q;

	if (queue_id >= GAUDI_QUEUE_ID_SIZE ||
			gaudi_queue_type[queue_id] != QUEUE_TYPE_INT) {
		dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
		return NULL;
	}

	q = &gaudi->internal_qmans[queue_id];
	*dma_handle = q->pq_dma_addr;
	*queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE;

	return q->pq_kernel_addr;
}

static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
				u16 len, u32 timeout, u64 *result)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) {
		if (result)
			*result = 0;
		return 0;
	}

	if (!timeout)
		timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;

	return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
						timeout, result);
}

static int gaudi_test_queue(struct hl_device *hdev, u32 hw_queue_id)
{
	struct packet_msg_prot *fence_pkt;
	dma_addr_t pkt_dma_addr;
	u32 fence_val, tmp, timeout_usec;
	dma_addr_t fence_dma_addr;
	u32 *fence_ptr;
	int rc;

	if (hdev->pldm)
		timeout_usec = GAUDI_PLDM_TEST_QUEUE_WAIT_USEC;
	else
		timeout_usec = GAUDI_TEST_QUEUE_WAIT_USEC;

	fence_val = GAUDI_QMAN0_FENCE_VAL;

	fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
							&fence_dma_addr);
	if (!fence_ptr) {
		dev_err(hdev->dev,
			"Failed to allocate memory for H/W queue %d testing\n",
			hw_queue_id);
		return -ENOMEM;
	}

	*fence_ptr = 0;

	fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
					sizeof(struct packet_msg_prot),
					GFP_KERNEL, &pkt_dma_addr);
	if (!fence_pkt) {
		dev_err(hdev->dev,
			"Failed to allocate packet for H/W queue %d testing\n",
			hw_queue_id);
		rc = -ENOMEM;
		goto free_fence_ptr;
	}

	tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	fence_pkt->ctl = cpu_to_le32(tmp);
	fence_pkt->value = cpu_to_le32(fence_val);
	fence_pkt->addr = cpu_to_le64(fence_dma_addr);

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
					sizeof(struct packet_msg_prot),
					pkt_dma_addr);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to send fence packet to H/W queue %d\n",
			hw_queue_id);
		goto free_pkt;
	}

	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
					1000, timeout_usec, true);

	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev,
			"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
			hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
		rc = -EIO;
	}

free_pkt:
	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
					pkt_dma_addr);
free_fence_ptr:
	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
					fence_dma_addr);

	return rc;
}

static int gaudi_test_cpu_queue(struct hl_device *hdev)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	/*
	 * check capability here as send_cpu_message() won't update the result
	 * value if no capability
	 */
	if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	return hl_fw_test_cpu_queue(hdev);
}

static int gaudi_test_queues(struct hl_device *hdev)
{
	int i, rc, ret_val = 0;

	for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) {
		if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) {
			rc = gaudi_test_queue(hdev, i);
			if (rc)
				ret_val = -EINVAL;
		}
	}

	rc = gaudi_test_cpu_queue(hdev);
	if (rc)
		ret_val = -EINVAL;

	return ret_val;
}

static void *gaudi_dma_pool_zalloc(struct hl_device *hdev, size_t size,
		gfp_t mem_flags, dma_addr_t *dma_handle)
{
	void *kernel_addr;

	if (size > GAUDI_DMA_POOL_BLK_SIZE)
		return NULL;

	kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);

	/* Shift to the device's base physical address of host memory */
	if (kernel_addr)
		*dma_handle += HOST_PHYS_BASE;

	return kernel_addr;
}

static void gaudi_dma_pool_free(struct hl_device *hdev, void *vaddr,
			dma_addr_t dma_addr)
{
	/* Cancel the device's base physical address of host memory */
	dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;

	dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
}

static void *gaudi_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
					size_t size, dma_addr_t *dma_handle)
{
	return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
}

static void gaudi_cpu_accessible_dma_pool_free(struct hl_device *hdev,
						size_t size, void *vaddr)
{
	hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
}

static int gaudi_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
		return -ENOMEM;

	/* Shift to the device's base physical address of host memory */
	for_each_sg(sgl, sg, nents, i)
		sg->dma_address += HOST_PHYS_BASE;

	return 0;
}

static void gaudi_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	/* Cancel the device's base physical address of host memory */
	for_each_sg(sgl, sg, nents, i)
		sg->dma_address -= HOST_PHYS_BASE;

	dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
}

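/*
 * The patched CB needs one LIN_DMA packet per descriptor. Adjacent
 * scatter-gather entries that are physically contiguous are folded
 * into a single descriptor as long as the combined length stays within
 * DMA_MAX_TRANSFER_SIZE, so this walk computes the number of packets
 * the patched CB will need for a given sg table.
 */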
static u32 gaudi_get_dma_desc_list_size(struct hl_device *hdev,
					struct sg_table *sgt)
{
	struct scatterlist *sg, *sg_next_iter;
	u32 count, dma_desc_cnt;
	u64 len, len_next;
	dma_addr_t addr, addr_next;

	dma_desc_cnt = 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, count) {

		len = sg_dma_len(sg);
		addr = sg_dma_address(sg);

		if (len == 0)
			break;

		while ((count + 1) < sgt->nents) {
			sg_next_iter = sg_next(sg);
			len_next = sg_dma_len(sg_next_iter);
			addr_next = sg_dma_address(sg_next_iter);

			if (len_next == 0)
				break;

			if ((addr + len == addr_next) &&
				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
				len += len_next;
				count++;
				sg = sg_next_iter;
			} else {
				break;
			}
		}

		dma_desc_cnt++;
	}

	return dma_desc_cnt * sizeof(struct packet_lin_dma);
}

static int gaudi_pin_memory_before_cs(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				u64 addr, enum dma_data_direction dir)
{
	struct hl_userptr *userptr;
	int rc;

	if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
			parser->job_userptr_list, &userptr))
		goto already_pinned;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr)
		return -ENOMEM;

	rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
				userptr);
	if (rc)
		goto free_userptr;

	list_add_tail(&userptr->job_node, parser->job_userptr_list);

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
					userptr->sgt->nents, dir);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto unpin_memory;
	}

	userptr->dma_mapped = true;
	userptr->dir = dir;

already_pinned:
	parser->patched_cb_size +=
			gaudi_get_dma_desc_list_size(hdev, userptr->sgt);

	return 0;

unpin_memory:
	list_del(&userptr->job_node);
	hl_unpin_host_memory(hdev, userptr);
free_userptr:
	kfree(userptr);
	return rc;
}

static int gaudi_validate_dma_pkt_host(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				bool src_in_host)
{
	enum dma_data_direction dir;
	bool skip_host_mem_pin = false, user_memset;
	u64 addr;
	int rc = 0;

	user_memset = (le32_to_cpu(user_dma_pkt->ctl) &
			GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
			GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;

	if (src_in_host) {
		if (user_memset)
			skip_host_mem_pin = true;

		dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n");
		dir = DMA_TO_DEVICE;
		addr = le64_to_cpu(user_dma_pkt->src_addr);
	} else {
		dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n");
		dir = DMA_FROM_DEVICE;
		addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
				GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
				GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;
	}

	if (skip_host_mem_pin)
		parser->patched_cb_size += sizeof(*user_dma_pkt);
	else
		rc = gaudi_pin_memory_before_cs(hdev, parser, user_dma_pkt,
						addr, dir);

	return rc;
}

static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	bool src_in_host = false;
	u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) &
			GAUDI_PKT_LIN_DMA_DST_ADDR_MASK) >>
			GAUDI_PKT_LIN_DMA_DST_ADDR_SHIFT;

	dev_dbg(hdev->dev, "DMA packet details:\n");
	dev_dbg(hdev->dev, "source == 0x%llx\n",
		le64_to_cpu(user_dma_pkt->src_addr));
	dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr);
	dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));

	/*
	 * Special handling for DMA with size 0. Bypass all validations
	 * because no transactions will be done except for WR_COMP, which
	 * is not a security issue
	 */
	if (!le32_to_cpu(user_dma_pkt->tsize)) {
		parser->patched_cb_size += sizeof(*user_dma_pkt);
		return 0;
	}

	if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
		src_in_host = true;

	return gaudi_validate_dma_pkt_host(hdev, parser, user_dma_pkt,
						src_in_host);
}

static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
					struct hl_cs_parser *parser,
					struct packet_load_and_exe *user_pkt)
{
	u32 cfg;

	cfg = le32_to_cpu(user_pkt->cfg);

	if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
		dev_err(hdev->dev,
			"User not allowed to use Load and Execute\n");
		return -EPERM;
	}

	parser->patched_cb_size += sizeof(struct packet_load_and_exe);

	return 0;
}

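/*
 * CB parsing is a two-pass scheme: gaudi_validate_cb() walks the user
 * CB once to reject forbidden packets and accumulate the size of the
 * patched CB, and gaudi_patch_cb() then walks it again to emit the
 * patched packets into the kernel-owned CB.
 */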
static int gaudi_validate_cb(struct hl_device *hdev,
			struct hl_cs_parser *parser, bool is_mmu)
{
	u32 cb_parsed_length = 0;
	int rc = 0;

	parser->patched_cb_size = 0;

	/* cb_user_size is more than 0 so loop will always be executed */
	while (cb_parsed_length < parser->user_cb_size) {
		enum packet_id pkt_id;
		u16 pkt_size;
		struct gaudi_packet *user_pkt;

		user_pkt = parser->user_cb->kernel_address + cb_parsed_length;

		pkt_id = (enum packet_id) (
				(le64_to_cpu(user_pkt->header) &
				PACKET_HEADER_PACKET_ID_MASK) >>
					PACKET_HEADER_PACKET_ID_SHIFT);

		if (!validate_packet_id(pkt_id)) {
			dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		pkt_size = gaudi_packet_sizes[pkt_id];
		cb_parsed_length += pkt_size;
		if (cb_parsed_length > parser->user_cb_size) {
			dev_err(hdev->dev,
				"packet 0x%x is out of CB boundary\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		switch (pkt_id) {
		case PACKET_MSG_PROT:
			dev_err(hdev->dev,
				"User not allowed to use MSG_PROT\n");
			rc = -EPERM;
			break;

		case PACKET_CP_DMA:
			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
			rc = -EPERM;
			break;

		case PACKET_STOP:
			dev_err(hdev->dev, "User not allowed to use STOP\n");
			rc = -EPERM;
			break;

		case PACKET_WREG_BULK:
			dev_err(hdev->dev,
				"User not allowed to use WREG_BULK\n");
			rc = -EPERM;
			break;

		case PACKET_LOAD_AND_EXE:
			rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
				(struct packet_load_and_exe *) user_pkt);
			break;

		case PACKET_LIN_DMA:
			parser->contains_dma_pkt = true;
			if (is_mmu)
				parser->patched_cb_size += pkt_size;
			else
				rc = gaudi_validate_dma_pkt_no_mmu(hdev, parser,
					(struct packet_lin_dma *) user_pkt);
			break;

		case PACKET_WREG_32:
		case PACKET_MSG_LONG:
		case PACKET_MSG_SHORT:
		case PACKET_REPEAT:
		case PACKET_FENCE:
		case PACKET_NOP:
		case PACKET_ARB_POINT:
			parser->patched_cb_size += pkt_size;
			break;

		default:
			dev_err(hdev->dev, "Invalid packet header 0x%x\n",
				pkt_id);
			rc = -EINVAL;
			break;
		}

		if (rc)
			break;
	}

	/*
	 * The new CB should have space at the end for two MSG_PROT packets:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI-X interrupt
	 */
	if (parser->completion)
		parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;

	return rc;
}

static int gaudi_patch_dma_packet(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				struct packet_lin_dma *new_dma_pkt,
				u32 *new_dma_pkt_size)
{
	struct hl_userptr *userptr;
	struct scatterlist *sg, *sg_next_iter;
	u32 count, dma_desc_cnt, user_wrcomp_en_mask, ctl;
	u64 len, len_next;
	dma_addr_t dma_addr, dma_addr_next;
	u64 device_memory_addr, addr;
	enum dma_data_direction dir;
	struct sg_table *sgt;
	bool src_in_host = false;
	bool skip_host_mem_pin = false;
	bool user_memset;

	ctl = le32_to_cpu(user_dma_pkt->ctl);

	if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3)
		src_in_host = true;

	user_memset = (ctl & GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
			GAUDI_PKT_LIN_DMA_CTL_MEMSET_SHIFT;

	if (src_in_host) {
		addr = le64_to_cpu(user_dma_pkt->src_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
		dir = DMA_TO_DEVICE;
		if (user_memset)
			skip_host_mem_pin = true;
	} else {
		addr = le64_to_cpu(user_dma_pkt->dst_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		dir = DMA_FROM_DEVICE;
	}

	if ((!skip_host_mem_pin) &&
		(!hl_userptr_is_pinned(hdev, addr,
					le32_to_cpu(user_dma_pkt->tsize),
					parser->job_userptr_list, &userptr))) {
		dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
				addr, user_dma_pkt->tsize);
		return -EFAULT;
	}

	if ((user_memset) && (dir == DMA_TO_DEVICE)) {
		memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
		*new_dma_pkt_size = sizeof(*user_dma_pkt);
		return 0;
	}

	user_wrcomp_en_mask = ctl & GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;

	sgt = userptr->sgt;
	dma_desc_cnt = 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg_dma_len(sg);
		dma_addr = sg_dma_address(sg);

		if (len == 0)
			break;

		while ((count + 1) < sgt->nents) {
			sg_next_iter = sg_next(sg);
			len_next = sg_dma_len(sg_next_iter);
			dma_addr_next = sg_dma_address(sg_next_iter);

			if (len_next == 0)
				break;

			if ((dma_addr + len == dma_addr_next) &&
				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
				len += len_next;
				count++;
				sg = sg_next_iter;
			} else {
				break;
			}
		}

		ctl = le32_to_cpu(user_dma_pkt->ctl);
		if (likely(dma_desc_cnt))
			ctl &= ~GAUDI_PKT_CTL_EB_MASK;
		ctl &= ~GAUDI_PKT_LIN_DMA_CTL_WRCOMP_EN_MASK;
		new_dma_pkt->ctl = cpu_to_le32(ctl);
		new_dma_pkt->tsize = cpu_to_le32(len);

		if (dir == DMA_TO_DEVICE) {
			new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
			new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
		} else {
			new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
			new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
		}

		if (!user_memset)
			device_memory_addr += len;
		dma_desc_cnt++;
		new_dma_pkt++;
	}

	if (!dma_desc_cnt) {
		dev_err(hdev->dev,
			"Error of 0 SG entries when patching DMA packet\n");
		return -EFAULT;
	}

	/* Fix the last dma packet - wrcomp must be as user set it */
	new_dma_pkt--;
	new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask);

	*new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);

	return 0;
}

static int gaudi_patch_cb(struct hl_device *hdev,
				struct hl_cs_parser *parser)
{
	u32 cb_parsed_length = 0;
	u32 cb_patched_cur_length = 0;
	int rc = 0;

	/* cb_user_size is more than 0 so loop will always be executed */
	while (cb_parsed_length < parser->user_cb_size) {
		enum packet_id pkt_id;
		u16 pkt_size;
		u32 new_pkt_size = 0;
		struct gaudi_packet *user_pkt, *kernel_pkt;

		user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
		kernel_pkt = parser->patched_cb->kernel_address +
					cb_patched_cur_length;

		pkt_id = (enum packet_id) (
				(le64_to_cpu(user_pkt->header) &
				PACKET_HEADER_PACKET_ID_MASK) >>
					PACKET_HEADER_PACKET_ID_SHIFT);

		if (!validate_packet_id(pkt_id)) {
			dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		pkt_size = gaudi_packet_sizes[pkt_id];
		cb_parsed_length += pkt_size;
		if (cb_parsed_length > parser->user_cb_size) {
			dev_err(hdev->dev,
				"packet 0x%x is out of CB boundary\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		switch (pkt_id) {
		case PACKET_LIN_DMA:
			rc = gaudi_patch_dma_packet(hdev, parser,
					(struct packet_lin_dma *) user_pkt,
					(struct packet_lin_dma *) kernel_pkt,
					&new_pkt_size);
			cb_patched_cur_length += new_pkt_size;
			break;

		case PACKET_MSG_PROT:
			dev_err(hdev->dev,
				"User not allowed to use MSG_PROT\n");
			rc = -EPERM;
			break;

		case PACKET_CP_DMA:
			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
			rc = -EPERM;
			break;

		case PACKET_STOP:
			dev_err(hdev->dev, "User not allowed to use STOP\n");
			rc = -EPERM;
			break;

		case PACKET_WREG_32:
		case PACKET_WREG_BULK:
		case PACKET_MSG_LONG:
		case PACKET_MSG_SHORT:
		case PACKET_REPEAT:
		case PACKET_FENCE:
		case PACKET_NOP:
		case PACKET_ARB_POINT:
		case PACKET_LOAD_AND_EXE:
			memcpy(kernel_pkt, user_pkt, pkt_size);
			cb_patched_cur_length += pkt_size;
			break;

		default:
			dev_err(hdev->dev, "Invalid packet header 0x%x\n",
				pkt_id);
			rc = -EINVAL;
			break;
		}

		if (rc)
			break;
	}

	return rc;
}

static int gaudi_parse_cb_mmu(struct hl_device *hdev,
		struct hl_cs_parser *parser)
{
	u64 patched_cb_handle;
	u32 patched_cb_size;
	struct hl_cb *user_cb;
	int rc;

	/*
	 * The new CB should have space at the end for two MSG_PROT pkt:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI interrupt
	 */
	if (parser->completion)
		parser->patched_cb_size = parser->user_cb_size +
				sizeof(struct packet_msg_prot) * 2;
	else
		parser->patched_cb_size = parser->user_cb_size;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
				parser->patched_cb_size, false, false,
				&patched_cb_handle);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n",
			rc);
		return rc;
	}

	patched_cb_handle >>= PAGE_SHIFT;
	parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
				(u32) patched_cb_handle);
	/* hl_cb_get should never fail */
	if (!parser->patched_cb) {
		dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
			(u32) patched_cb_handle);
		rc = -EFAULT;
		goto out;
	}

	/*
	 * The check that parser->user_cb_size <= parser->user_cb->size was done
	 * in validate_queue_index().
	 */
	memcpy(parser->patched_cb->kernel_address,
		parser->user_cb->kernel_address,
		parser->user_cb_size);

	patched_cb_size = parser->patched_cb_size;

	/* Validate patched CB instead of user CB */
	user_cb = parser->user_cb;
	parser->user_cb = parser->patched_cb;
	rc = gaudi_validate_cb(hdev, parser, true);
	parser->user_cb = user_cb;

	if (rc) {
		hl_cb_put(parser->patched_cb);
		goto out;
	}

	if (patched_cb_size != parser->patched_cb_size) {
		dev_err(hdev->dev, "user CB size mismatch\n");
		hl_cb_put(parser->patched_cb);
		rc = -EINVAL;
	}

out:
	/*
	 * Always call cb destroy here because we still have 1 reference
	 * to it by calling cb_get earlier. After the job is completed,
	 * cb_put will release it, but here we want to remove it from the
	 * idr
	 */
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
			patched_cb_handle << PAGE_SHIFT);

	return rc;
}

static int gaudi_parse_cb_no_mmu(struct hl_device *hdev,
		struct hl_cs_parser *parser)
{
	u64 patched_cb_handle;
	int rc;

	rc = gaudi_validate_cb(hdev, parser, false);

	if (rc)
		goto free_userptr;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
				parser->patched_cb_size, false, false,
				&patched_cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n", rc);
		goto free_userptr;
	}

	patched_cb_handle >>= PAGE_SHIFT;
	parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
				(u32) patched_cb_handle);
	/* hl_cb_get should never fail here */
	if (!parser->patched_cb) {
		dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
			(u32) patched_cb_handle);
		rc = -EFAULT;
		goto out;
	}

	rc = gaudi_patch_cb(hdev, parser);

	if (rc)
		hl_cb_put(parser->patched_cb);

out:
	/*
	 * Always call cb destroy here because we still have 1 reference
	 * to it by calling cb_get earlier. After the job is completed,
	 * cb_put will release it, but here we want to remove it from the
	 * idr
	 */
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
			patched_cb_handle << PAGE_SHIFT);

free_userptr:
	if (rc)
		hl_userptr_delete_list(hdev, parser->job_userptr_list);

	return rc;
}

static int gaudi_parse_cb_no_ext_queue(struct hl_device *hdev,
					struct hl_cs_parser *parser)
{
	struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 nic_mask_q_id = 1 << (HW_CAP_NIC_SHIFT +
		((parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2));

	if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) &&
			(parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3) &&
			(!(gaudi->hw_cap_initialized & nic_mask_q_id))) {
		dev_err(hdev->dev, "h/w queue %d is disabled\n",
				parser->hw_queue_id);
		return -EINVAL;
	}

	/* For internal queue jobs just check if CB address is valid */
	if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
					parser->user_cb_size,
					asic_prop->sram_user_base_address,
					asic_prop->sram_end_address))
		return 0;

	if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
					parser->user_cb_size,
					asic_prop->dram_user_base_address,
					asic_prop->dram_end_address))
		return 0;

	/* PMMU and HPMMU addresses are equal, check only one of them */
	if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
					parser->user_cb_size,
					asic_prop->pmmu.start_addr,
					asic_prop->pmmu.end_addr))
		return 0;

	dev_err(hdev->dev,
		"CB address 0x%px + 0x%x for internal QMAN is not valid\n",
		parser->user_cb, parser->user_cb_size);

	return -EFAULT;
}

static int gaudi_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (parser->queue_type == QUEUE_TYPE_INT)
		return gaudi_parse_cb_no_ext_queue(hdev, parser);

	if (gaudi->hw_cap_initialized & HW_CAP_MMU)
		return gaudi_parse_cb_mmu(hdev, parser);
	else
		return gaudi_parse_cb_no_mmu(hdev, parser);
}

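/*
 * Two MSG_PROT packets are appended at the pre-reserved tail of the
 * CB: the first writes the completion value to the CQ, the second
 * writes to the MSI registers to raise the completion interrupt.
 */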
static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
					void *kernel_address, u32 len,
					u64 cq_addr, u32 cq_val, u32 msi_vec,
					bool eb)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct packet_msg_prot *cq_pkt;
	u64 msi_addr;
	u32 tmp;

	cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);

	tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	if (eb)
		tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);

	cq_pkt->ctl = cpu_to_le32(tmp);
	cq_pkt->value = cpu_to_le32(cq_val);
	cq_pkt->addr = cpu_to_le64(cq_addr);

	cq_pkt++;

	tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
	cq_pkt->ctl = cpu_to_le32(tmp);
	cq_pkt->value = cpu_to_le32(1);

	if (gaudi->multi_msi_mode)
		msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
	else
		msi_addr = mmPCIE_CORE_MSI_REQ;

	cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
}

static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
{
	WREG32(mmCPU_IF_EQ_RD_OFFS, val);
}

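/*
 * Device memory is memset through a single LIN_DMA packet with the
 * MEMSET bit set: the 64-bit fill value travels in the packet's
 * src_addr field. The packet is submitted as a kernel job on QMAN0
 * (DMA 0.0).
 */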
static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
					u64 size, u64 val)
{
	struct packet_lin_dma *lin_dma_pkt;
	struct hl_cs_job *job;
	u32 cb_size, ctl, err_cause;
	struct hl_cb *cb;
	u64 id;
	int rc;

	cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
	if (!cb)
		return -EFAULT;

	lin_dma_pkt = cb->kernel_address;
	memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
	cb_size = sizeof(*lin_dma_pkt);

	ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA);
	ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_MEMSET_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_LIN_DMA_CTL_LIN_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);

	lin_dma_pkt->ctl = cpu_to_le32(ctl);
	lin_dma_pkt->src_addr = cpu_to_le64(val);
	lin_dma_pkt->dst_addr |= cpu_to_le64(addr);
	lin_dma_pkt->tsize = cpu_to_le32(size);

	job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
	if (!job) {
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto release_cb;
	}

	/* Verify DMA is OK */
	err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
	if (err_cause && !hdev->init_done) {
		dev_dbg(hdev->dev,
			"Clearing DMA0 engine from errors (cause 0x%x)\n",
			err_cause);
		WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
	}

	job->id = 0;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);

	hl_debugfs_add_job(hdev, job);

	rc = gaudi_send_job_on_qman0(hdev, job);
	hl_debugfs_remove_job(hdev, job);
	kfree(job);
	atomic_dec(&cb->cs_cnt);

	/* Verify DMA is OK */
	err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE);
	if (err_cause) {
		dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
		rc = -EIO;
		if (!hdev->init_done) {
			dev_dbg(hdev->dev,
				"Clearing DMA0 engine from errors (cause 0x%x)\n",
				err_cause);
			WREG32(mmDMA0_CORE_ERR_CAUSE, err_cause);
		}
	}

release_cb:
	id = cb->id;
	hl_cb_put(cb);
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);

	return rc;
}

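/*
 * Blocks of registers are cleared by building one MSG_LONG packet per
 * register in a kernel CB and sending the whole batch as a single job
 * on QMAN0, which is presumably cheaper than issuing thousands of
 * individual WREG32 accesses over the configuration bar.
 */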
static int gaudi_memset_registers(struct hl_device *hdev, u64 reg_base,
					u32 num_regs, u32 val)
{
	struct packet_msg_long *pkt;
	struct hl_cs_job *job;
	u32 cb_size, ctl;
	struct hl_cb *cb;
	int i, rc;

	cb_size = (sizeof(*pkt) * num_regs) + sizeof(struct packet_msg_prot);

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size must be smaller than %uMB",
			SZ_2M / SZ_1M);
		return -ENOMEM;
	}

	cb = hl_cb_kernel_create(hdev, cb_size, false);
	if (!cb)
		return -EFAULT;

	pkt = cb->kernel_address;

	ctl = FIELD_PREP(GAUDI_PKT_LONG_CTL_OP_MASK, 0); /* write the value */
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_LONG);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
	ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	for (i = 0; i < num_regs ; i++, pkt++) {
		pkt->ctl = cpu_to_le32(ctl);
		pkt->value = cpu_to_le32(val);
		pkt->addr = cpu_to_le64(reg_base + (i * 4));
	}

	job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
	if (!job) {
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto release_cb;
	}

	job->id = 0;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
	job->patched_cb = job->user_cb;
	job->job_cb_size = cb_size;

	hl_debugfs_add_job(hdev, job);

	rc = gaudi_send_job_on_qman0(hdev, job);
	hl_debugfs_remove_job(hdev, job);
	kfree(job);
	atomic_dec(&cb->cs_cnt);

release_cb:
	hl_cb_put(cb);
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	return rc;
}

static int gaudi_restore_sm_registers(struct hl_device *hdev)
{
	u64 base_addr;
	u32 num_regs;
	int rc;

	base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
	num_regs = NUM_OF_SOB_IN_BLOCK;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_SOB_OBJ_0;
	num_regs = NUM_OF_SOB_IN_BLOCK;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
	num_regs = NUM_OF_SOB_IN_BLOCK;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	base_addr = CFG_BASE + mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_MON_STATUS_0;
	num_regs = NUM_OF_MONITORS_IN_BLOCK;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	base_addr = CFG_BASE + mmSYNC_MNGR_E_S_SYNC_MNGR_OBJS_MON_STATUS_0;
	num_regs = NUM_OF_MONITORS_IN_BLOCK;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	base_addr = CFG_BASE + mmSYNC_MNGR_W_N_SYNC_MNGR_OBJS_MON_STATUS_0;
	num_regs = NUM_OF_MONITORS_IN_BLOCK;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
			(GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT * 4);
	num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	base_addr = CFG_BASE + mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0 +
			(GAUDI_FIRST_AVAILABLE_W_S_MONITOR * 4);
	num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR;
	rc = gaudi_memset_registers(hdev, base_addr, num_regs, 0);
	if (rc) {
		dev_err(hdev->dev, "failed resetting SM registers\n");
		return rc;
	}

	return 0;
}
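/*
 * Re-point each DMA core's write-completion address at its dedicated sync
 * object. The completion WDATA value 0x80000001 appears to use the sync
 * manager's atomic-increment encoding (MSB set selects add mode, the low
 * bits carry the operand), so every completed transfer bumps the SOB by one.
 */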
static void gaudi_restore_dma_registers(struct hl_device *hdev)
{
	u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 -
			mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0;
	int i;

	for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
		u64 sob_addr = CFG_BASE +
				mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0 +
				(i * sob_delta);
		u32 dma_offset = i * DMA_CORE_OFFSET;

		WREG32(mmDMA0_CORE_WR_COMP_ADDR_LO + dma_offset,
				lower_32_bits(sob_addr));
		WREG32(mmDMA0_CORE_WR_COMP_ADDR_HI + dma_offset,
				upper_32_bits(sob_addr));
		WREG32(mmDMA0_CORE_WR_COMP_WDATA + dma_offset, 0x80000001);

		/* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be
		 * modified by the user for SRAM reduction
		 */
		if (i > 1)
			WREG32(mmDMA0_CORE_WR_AWUSER_31_11 + dma_offset,
					0x00000001);
	}
}
static void gaudi_restore_qm_registers(struct hl_device *hdev)
{
	u32 qman_offset;
	int i;

	for (i = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
		qman_offset = i * DMA_QMAN_OFFSET;
		WREG32(mmDMA0_QM_ARB_CFG_0 + qman_offset, 0);
	}

	for (i = 0 ; i < MME_NUMBER_OF_MASTER_ENGINES ; i++) {
		qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE);
		WREG32(mmMME0_QM_ARB_CFG_0 + qman_offset, 0);
	}

	for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
		qman_offset = i * TPC_QMAN_OFFSET;
		WREG32(mmTPC0_QM_ARB_CFG_0 + qman_offset, 0);
	}

	for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) {
		qman_offset = (i >> 1) * NIC_MACRO_QMAN_OFFSET +
				(i & 0x1) * NIC_ENGINE_QMAN_OFFSET;
		WREG32(mmNIC0_QM0_ARB_CFG_0 + qman_offset, 0);
	}
}

static int gaudi_restore_user_registers(struct hl_device *hdev)
{
	int rc;

	rc = gaudi_restore_sm_registers(hdev);
	if (rc)
		return rc;

	gaudi_restore_dma_registers(hdev);
	gaudi_restore_qm_registers(hdev);

	return 0;
}
static int gaudi_context_switch(struct hl_device *hdev, u32 asid)
{
	return 0;
}

static int gaudi_mmu_clear_pgt_range(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gaudi_device *gaudi = hdev->asic_specific;
	u64 addr = prop->mmu_pgt_addr;
	u32 size = prop->mmu_pgt_size + MMU_CACHE_MNG_SIZE;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	return gaudi_memset_device_memory(hdev, addr, size, 0);
}

static void gaudi_restore_phase_topology(struct hl_device *hdev)
{

}
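/*
 * The four debugfs accessors below share one layout: CFG space goes through
 * RREG32/WREG32, SRAM through the SRAM PCI BAR, HBM through a sliding HBM
 * BAR window (gaudi_set_hbm_bar_base() returns the previous base so it can
 * be restored afterwards, or U64_MAX on failure), and host physical memory
 * is touched directly only for user addresses when no IOMMU is present.
 */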
static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr,
			bool user_address, u32 *val)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 hbm_bar_addr, host_phys_end;
	int rc = 0;

	host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;

	if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {

		*val = RREG32(addr - CFG_BASE);

	} else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {

		*val = readl(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));

	} else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {

		u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));

		hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);

		if (hbm_bar_addr != U64_MAX) {
			*val = readl(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
			hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
		}

		if (hbm_bar_addr == U64_MAX)
			rc = -EIO;

	} else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
			user_address && !iommu_present(&pci_bus_type)) {

		*val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);

	} else {
		rc = -EFAULT;
	}

	return rc;
}

static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr,
			bool user_address, u32 val)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 hbm_bar_addr, host_phys_end;
	int rc = 0;

	host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;

	if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {

		WREG32(addr - CFG_BASE, val);

	} else if ((addr >= SRAM_BASE_ADDR) && (addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {

		writel(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));

	} else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {

		u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));

		hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);

		if (hbm_bar_addr != U64_MAX) {
			writel(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
			hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
		}

		if (hbm_bar_addr == U64_MAX)
			rc = -EIO;

	} else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
			user_address && !iommu_present(&pci_bus_type)) {

		*(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;

	} else {
		rc = -EFAULT;
	}

	return rc;
}

static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr,
				bool user_address, u64 *val)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 hbm_bar_addr, host_phys_end;
	int rc = 0;

	host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;

	if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {

		u32 val_l = RREG32(addr - CFG_BASE);
		u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);

		*val = (((u64) val_h) << 32) | val_l;

	} else if ((addr >= SRAM_BASE_ADDR) &&
			(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {

		*val = readq(hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));

	} else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {

		u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));

		hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);

		if (hbm_bar_addr != U64_MAX) {
			*val = readq(hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
			hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
		}

		if (hbm_bar_addr == U64_MAX)
			rc = -EIO;

	} else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
			user_address && !iommu_present(&pci_bus_type)) {

		*val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);

	} else {
		rc = -EFAULT;
	}

	return rc;
}

static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr,
				bool user_address, u64 val)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 hbm_bar_addr, host_phys_end;
	int rc = 0;

	host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;

	if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {

		WREG32(addr - CFG_BASE, lower_32_bits(val));
		WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));

	} else if ((addr >= SRAM_BASE_ADDR) &&
			(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {

		writeq(val, hdev->pcie_bar[SRAM_BAR_ID] + (addr - SRAM_BASE_ADDR));

	} else if (addr <= DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {

		u64 bar_base_addr = DRAM_PHYS_BASE + (addr & ~(prop->dram_pci_bar_size - 0x1ull));

		hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, bar_base_addr);

		if (hbm_bar_addr != U64_MAX) {
			writeq(val, hdev->pcie_bar[HBM_BAR_ID] + (addr - bar_base_addr));
			hbm_bar_addr = gaudi_set_hbm_bar_base(hdev, hbm_bar_addr);
		}

		if (hbm_bar_addr == U64_MAX)
			rc = -EIO;

	} else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
			user_address && !iommu_present(&pci_bus_type)) {

		*(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;

	} else {
		rc = -EFAULT;
	}

	return rc;
}
static int gaudi_dma_core_transfer(struct hl_device *hdev, int dma_id, u64 addr,
					u32 size_to_dma, dma_addr_t dma_addr)
{
	u32 err_cause, val;
	u64 dma_offset;
	int rc;

	dma_offset = dma_id * DMA_CORE_OFFSET;

	WREG32(mmDMA0_CORE_SRC_BASE_LO + dma_offset, lower_32_bits(addr));
	WREG32(mmDMA0_CORE_SRC_BASE_HI + dma_offset, upper_32_bits(addr));
	WREG32(mmDMA0_CORE_DST_BASE_LO + dma_offset, lower_32_bits(dma_addr));
	WREG32(mmDMA0_CORE_DST_BASE_HI + dma_offset, upper_32_bits(dma_addr));
	WREG32(mmDMA0_CORE_DST_TSIZE_0 + dma_offset, size_to_dma);
	WREG32(mmDMA0_CORE_COMMIT + dma_offset,
			(1 << DMA0_CORE_COMMIT_LIN_SHIFT));

	rc = hl_poll_timeout(
		hdev,
		mmDMA0_CORE_STS0 + dma_offset,
		val,
		((val & DMA0_CORE_STS0_BUSY_MASK) == 0),
		0,
		1000000);

	if (rc) {
		dev_err(hdev->dev,
			"DMA %d timed-out during reading of 0x%llx\n",
			dma_id, addr);
		return -EIO;
	}

	/* Verify DMA is OK */
	err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
	if (err_cause) {
		dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause);
		dev_dbg(hdev->dev,
			"Clearing DMA0 engine from errors (cause 0x%x)\n",
			err_cause);
		WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);

		return -EIO;
	}

	return 0;
}
static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
				void *blob_addr)
{
	u32 dma_core_sts0, err_cause, cfg1, size_left, pos, size_to_dma;
	u32 qm_glbl_sts0, qm_cgm_sts;
	u64 dma_offset, qm_offset;
	dma_addr_t dma_addr;
	void *kernel_addr;
	bool is_eng_idle;
	int rc = 0, dma_id;

	kernel_addr = hdev->asic_funcs->asic_dma_alloc_coherent(
						hdev, SZ_2M,
						&dma_addr,
						GFP_KERNEL | __GFP_ZERO);

	if (!kernel_addr)
		return -ENOMEM;

	hdev->asic_funcs->hw_queues_lock(hdev);

	dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_1];
	dma_offset = dma_id * DMA_CORE_OFFSET;
	qm_offset = dma_id * DMA_QMAN_OFFSET;
	dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
	qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
	qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
	is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
		      IS_DMA_IDLE(dma_core_sts0);

	if (!is_eng_idle) {
		dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
		dma_offset = dma_id * DMA_CORE_OFFSET;
		qm_offset = dma_id * DMA_QMAN_OFFSET;
		dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
		qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
		qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
		is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
			      IS_DMA_IDLE(dma_core_sts0);

		if (!is_eng_idle) {
			dev_err_ratelimited(hdev->dev,
				"Can't read via DMA because it is BUSY\n");
			rc = -EAGAIN;
			goto out;
		}
	}

	cfg1 = RREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset);
	WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset,
			0xF << DMA0_QM_GLBL_CFG1_CP_STOP_SHIFT);

	/* TODO: remove this by mapping the DMA temporary buffer to the MMU
	 * using the compute ctx ASID, if exists. If not, use the kernel ctx
	 * ASID
	 */
	WREG32_OR(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_VAL_SHIFT));

	/* Verify DMA is OK */
	err_cause = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
	if (err_cause) {
		dev_dbg(hdev->dev,
			"Clearing DMA0 engine from errors (cause 0x%x)\n",
			err_cause);
		WREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset, err_cause);
	}

	pos = 0;
	size_left = size;
	size_to_dma = SZ_2M;

	while (size_left > 0) {

		if (size_left < SZ_2M)
			size_to_dma = size_left;

		rc = gaudi_dma_core_transfer(hdev, dma_id, addr, size_to_dma,
						dma_addr);
		if (rc)
			break;

		memcpy(blob_addr + pos, kernel_addr, size_to_dma);

		if (size_left <= SZ_2M)
			break;

		pos += SZ_2M;
		addr += SZ_2M;
		size_left -= SZ_2M;
	}

	/* TODO: remove this by mapping the DMA temporary buffer to the MMU
	 * using the compute ctx ASID, if exists. If not, use the kernel ctx
	 * ASID
	 */
	WREG32_AND(mmDMA0_CORE_PROT + dma_offset,
			~BIT(DMA0_CORE_PROT_VAL_SHIFT));

	WREG32(mmDMA0_QM_GLBL_CFG1 + qm_offset, cfg1);

out:
	hdev->asic_funcs->hw_queues_unlock(hdev);

	hdev->asic_funcs->asic_dma_free_coherent(hdev, SZ_2M, kernel_addr,
						dma_addr);

	return rc;
}
static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (hdev->reset_info.hard_reset_pending)
		return U64_MAX;

	return readq(hdev->pcie_bar[HBM_BAR_ID] +
			(addr - gaudi->hbm_bar_cur_addr));
}

static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (hdev->reset_info.hard_reset_pending)
		return;

	writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
			(addr - gaudi->hbm_bar_cur_addr));
}
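/*
 * Each engine block exposes a user-attribute register whose low bits select
 * the ASID and, presumably one bit above them, the MMU-bypass (MMBP)
 * control; clearing the low 11 bits (~0x7FF) and OR-ing in the new ASID
 * re-targets that engine's transactions at the given context's page tables.
 */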
void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
{
	/* mask to zero the MMBP and ASID bits */
	WREG32_AND(reg, ~0x7FF);
	WREG32_OR(reg, asid);
}
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
{
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
		return;

	if (asid & ~DMA0_QM_GLBL_NON_SECURE_PROPS_0_ASID_MASK) {
		dev_crit(hdev->dev, "asid %u is too big\n", asid);
		return;
	}

	gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA0_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA1_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA2_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA3_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA4_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA5_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA6_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA7_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmDMA0_CORE_NON_SECURE_PROPS, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA1_CORE_NON_SECURE_PROPS, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA2_CORE_NON_SECURE_PROPS, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA3_CORE_NON_SECURE_PROPS, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA4_CORE_NON_SECURE_PROPS, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA5_CORE_NON_SECURE_PROPS, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA6_CORE_NON_SECURE_PROPS, asid);
	gaudi_mmu_prepare_reg(hdev, mmDMA7_CORE_NON_SECURE_PROPS, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC0_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC0_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC1_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC1_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC2_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC2_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC3_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC3_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC4_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC4_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC5_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC5_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC6_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC6_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC7_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_ARUSER_LO, asid);
	gaudi_mmu_prepare_reg(hdev, mmTPC7_CFG_AWUSER_LO, asid);

	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME0_QM_GLBL_NON_SECURE_PROPS_4, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_0, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_1, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_2, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_3, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_QM_GLBL_NON_SECURE_PROPS_4, asid);

	gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER0, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME0_SBAB_ARUSER1, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER0, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME1_SBAB_ARUSER1, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER0, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_SBAB_ARUSER1, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER0, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME3_SBAB_ARUSER1, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME0_ACC_WBC, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME1_ACC_WBC, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME2_ACC_WBC, asid);
	gaudi_mmu_prepare_reg(hdev, mmMME3_ACC_WBC, asid);

	if (gaudi->hw_cap_initialized & HW_CAP_NIC0) {
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM0_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC1) {
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC0_QM1_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC2) {
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM0_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC3) {
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC1_QM1_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC4) {
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM0_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC5) {
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC2_QM1_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC6) {
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM0_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC7) {
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC3_QM1_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC8) {
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM0_GLBL_NON_SECURE_PROPS_4, asid);
	}

	if (gaudi->hw_cap_initialized & HW_CAP_NIC9) {
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_0, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_1, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_2, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_3, asid);
		gaudi_mmu_prepare_reg(hdev, mmNIC4_QM1_GLBL_NON_SECURE_PROPS_4, asid);
	}

	gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
	gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
}
static int gaudi_send_job_on_qman0(struct hl_device *hdev,
					struct hl_cs_job *job)
{
	struct packet_msg_prot *fence_pkt;
	u32 *fence_ptr;
	dma_addr_t fence_dma_addr;
	struct hl_cb *cb;
	u32 tmp, timeout, dma_offset;
	int rc;

	if (hdev->pldm)
		timeout = GAUDI_PLDM_QMAN0_TIMEOUT_USEC;
	else
		timeout = HL_DEVICE_TIMEOUT_USEC;

	if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
		dev_err_ratelimited(hdev->dev,
			"Can't send driver job on QMAN0 because the device is not idle\n");
		return -EBUSY;
	}

	fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
							&fence_dma_addr);
	if (!fence_ptr) {
		dev_err(hdev->dev,
			"Failed to allocate fence memory for QMAN0\n");
		return -ENOMEM;
	}

	cb = job->patched_cb;

	fence_pkt = cb->kernel_address +
			job->job_cb_size - sizeof(struct packet_msg_prot);

	tmp = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_PROT);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 1);
	tmp |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

	fence_pkt->ctl = cpu_to_le32(tmp);
	fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL);
	fence_pkt->addr = cpu_to_le64(fence_dma_addr);

	dma_offset = gaudi_dma_assignment[GAUDI_PCI_DMA_1] * DMA_CORE_OFFSET;

	WREG32(mmDMA0_CORE_PROT + dma_offset,
		BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT) | BIT(DMA0_CORE_PROT_VAL_SHIFT));

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, GAUDI_QUEUE_ID_DMA_0_0,
					job->job_cb_size, cb->bus_address);
	if (rc) {
		dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
		goto free_fence_ptr;
	}

	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
				(tmp == GAUDI_QMAN0_FENCE_VAL), 1000,
				timeout, true);

	hl_hw_queue_inc_ci_kernel(hdev, GAUDI_QUEUE_ID_DMA_0_0);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
		goto free_fence_ptr;
	}

free_fence_ptr:
	WREG32(mmDMA0_CORE_PROT + dma_offset, BIT(DMA0_CORE_PROT_ERR_VAL_SHIFT));

	hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
					fence_dma_addr);
	return rc;
}
static void gaudi_get_event_desc(u16 event_type, char *desc, size_t size)
{
	if (event_type >= GAUDI_EVENT_SIZE)
		goto event_not_supported;

	if (!gaudi_irq_map_table[event_type].valid)
		goto event_not_supported;

	snprintf(desc, size, "%s", gaudi_irq_map_table[event_type].name);

	return;

event_not_supported:
	snprintf(desc, size, "N/A");
}
static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y,
							bool is_write, u32 *engine_id_1,
							u32 *engine_id_2)
{
	u32 dma_id[2], dma_offset, err_cause[2], mask, i;

	mask = is_write ? DMA0_CORE_ERR_CAUSE_HBW_WR_ERR_MASK :
				DMA0_CORE_ERR_CAUSE_HBW_RD_ERR_MASK;

	switch (x_y) {
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
		dma_id[0] = 0;
		dma_id[1] = 2;
		break;
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
		dma_id[0] = 1;
		dma_id[1] = 3;
		break;
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
		dma_id[0] = 4;
		dma_id[1] = 6;
		break;
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
		dma_id[0] = 5;
		dma_id[1] = 7;
		break;
	default:
		goto unknown_initiator;
	}

	for (i = 0 ; i < 2 ; i++) {
		dma_offset = dma_id[i] * DMA_CORE_OFFSET;
		err_cause[i] = RREG32(mmDMA0_CORE_ERR_CAUSE + dma_offset);
	}

	switch (x_y) {
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
		if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
			return "DMA0";
		} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_2;
			return "DMA2";
		} else {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
			*engine_id_2 = GAUDI_ENGINE_ID_DMA_2;
			return "DMA0 or DMA2";
		}
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
		if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
			return "DMA1";
		} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_3;
			return "DMA3";
		} else {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
			*engine_id_2 = GAUDI_ENGINE_ID_DMA_3;
			return "DMA1 or DMA3";
		}
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
		if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
			return "DMA4";
		} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_6;
			return "DMA6";
		} else {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
			*engine_id_2 = GAUDI_ENGINE_ID_DMA_6;
			return "DMA4 or DMA6";
		}
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
		if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
			return "DMA5";
		} else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_7;
			return "DMA7";
		} else {
			*engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
			*engine_id_2 = GAUDI_ENGINE_ID_DMA_7;
			return "DMA5 or DMA7";
		}
	}

unknown_initiator:
	return "unknown initiator";
}
static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write,
						u32 *engine_id_1, u32 *engine_id_2)
{
	u32 val, x_y, axi_id;

	val = is_write ? RREG32(mmMMU_UP_RAZWI_WRITE_ID) :
				RREG32(mmMMU_UP_RAZWI_READ_ID);
	x_y = val & ((RAZWI_INITIATOR_Y_MASK << RAZWI_INITIATOR_Y_SHIFT) |
			(RAZWI_INITIATOR_X_MASK << RAZWI_INITIATOR_X_SHIFT));
	axi_id = val & (RAZWI_INITIATOR_AXI_ID_MASK <<
			RAZWI_INITIATOR_AXI_ID_SHIFT);

	switch (x_y) {
	case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0:
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
			*engine_id_1 = GAUDI_ENGINE_ID_TPC_0;
			return "TPC0";
		}
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
			*engine_id_1 = GAUDI_ENGINE_ID_NIC_0;
			return "NIC0";
		}
		break;
	case RAZWI_INITIATOR_ID_X_Y_TPC1:
		*engine_id_1 = GAUDI_ENGINE_ID_TPC_1;
		return "TPC1";
	case RAZWI_INITIATOR_ID_X_Y_MME0_0:
	case RAZWI_INITIATOR_ID_X_Y_MME0_1:
		*engine_id_1 = GAUDI_ENGINE_ID_MME_0;
		return "MME0";
	case RAZWI_INITIATOR_ID_X_Y_MME1_0:
	case RAZWI_INITIATOR_ID_X_Y_MME1_1:
		*engine_id_1 = GAUDI_ENGINE_ID_MME_1;
		return "MME1";
	case RAZWI_INITIATOR_ID_X_Y_TPC2:
		*engine_id_1 = GAUDI_ENGINE_ID_TPC_2;
		return "TPC2";
	case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC:
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
			*engine_id_1 = GAUDI_ENGINE_ID_TPC_3;
			return "TPC3";
		}
		/* PCI, CPU or PSOC does not have engine id */
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI))
			return "PCI";
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU))
			return "CPU";
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PSOC))
			return "PSOC";
		break;
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
	case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
		return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write,
				engine_id_1, engine_id_2);
	case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2:
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
			*engine_id_1 = GAUDI_ENGINE_ID_TPC_4;
			return "TPC4";
		}
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
			*engine_id_1 = GAUDI_ENGINE_ID_NIC_1;
			return "NIC1";
		}
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
			*engine_id_1 = GAUDI_ENGINE_ID_NIC_2;
			return "NIC2";
		}
		break;
	case RAZWI_INITIATOR_ID_X_Y_TPC5:
		*engine_id_1 = GAUDI_ENGINE_ID_TPC_5;
		return "TPC5";
	case RAZWI_INITIATOR_ID_X_Y_MME2_0:
	case RAZWI_INITIATOR_ID_X_Y_MME2_1:
		*engine_id_1 = GAUDI_ENGINE_ID_MME_2;
		return "MME2";
	case RAZWI_INITIATOR_ID_X_Y_MME3_0:
	case RAZWI_INITIATOR_ID_X_Y_MME3_1:
		*engine_id_1 = GAUDI_ENGINE_ID_MME_3;
		return "MME3";
	case RAZWI_INITIATOR_ID_X_Y_TPC6:
		*engine_id_1 = GAUDI_ENGINE_ID_TPC_6;
		return "TPC6";
	case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5:
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
			*engine_id_1 = GAUDI_ENGINE_ID_TPC_7;
			return "TPC7";
		}
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
			*engine_id_1 = GAUDI_ENGINE_ID_NIC_4;
			return "NIC4";
		}
		if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
			*engine_id_1 = GAUDI_ENGINE_ID_NIC_5;
			return "NIC5";
		}
		break;
	default:
		break;
	}

	dev_err(hdev->dev,
		"Unknown RAZWI initiator ID 0x%x [Y=%d, X=%d, AXI_ID=%d]\n",
		val,
		(val >> RAZWI_INITIATOR_Y_SHIFT) & RAZWI_INITIATOR_Y_MASK,
		(val >> RAZWI_INITIATOR_X_SHIFT) & RAZWI_INITIATOR_X_MASK,
		(val >> RAZWI_INITIATOR_AXI_ID_SHIFT) &
			RAZWI_INITIATOR_AXI_ID_MASK);

	return "unknown initiator";
}
static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_id_1,
						u32 *engine_id_2)
{
	if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
		dev_err_ratelimited(hdev->dev,
			"RAZWI event caused by illegal write of %s\n",
			gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2));
		WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
	}

	if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
		dev_err_ratelimited(hdev->dev,
			"RAZWI event caused by illegal read of %s\n",
			gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2));
		WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
	}
}
static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u8 *type)
{
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 val;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
		return;

	val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE);
	if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
		*addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
		*addr <<= 32;
		*addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);

		dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr);
		*type = HL_RAZWI_PAGE_FAULT;

		WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
	}

	val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE);
	if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) {
		*addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
		*addr <<= 32;
		*addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);

		dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr);
		*type = HL_RAZWI_MMU_ACCESS_ERROR;

		WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
	}
}
/*
 * +-------------------+------------------------------------------------------+
 * | Configuration Reg |                      Description                     |
 * +-------------------+------------------------------------------------------+
 * |  0xF30 - 0xF3F    |ECC single error indication (1 bit per memory wrapper)|
 * |                   |0xF30 memory wrappers 31:0 (MSB to LSB)               |
 * |                   |0xF34 memory wrappers 63:32                           |
 * |                   |0xF38 memory wrappers 95:64                           |
 * |                   |0xF3C memory wrappers 127:96                          |
 * +-------------------+------------------------------------------------------+
 * |  0xF40 - 0xF4F    |ECC double error indication (1 bit per memory wrapper)|
 * |                   |0xF40 memory wrappers 31:0 (MSB to LSB)               |
 * |                   |0xF44 memory wrappers 63:32                           |
 * |                   |0xF48 memory wrappers 95:64                           |
 * |                   |0xF4C memory wrappers 127:96                          |
 * +-------------------+------------------------------------------------------+
 */
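/*
 * Example: a block with num_memories = 90 needs 90/32 + 1 = 3 status
 * registers; if the second register (wrappers 63:32) has bit 5 set, the
 * failing wrapper index resolves to 5 + 32 = 37.
 */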
static int gaudi_extract_ecc_info(struct hl_device *hdev,
		struct ecc_info_extract_params *params, u64 *ecc_address,
		u64 *ecc_syndrom, u8 *memory_wrapper_idx)
{
	u32 i, num_mem_regs, reg, err_bit;
	u64 err_addr, err_word = 0;

	num_mem_regs = params->num_memories / 32 +
			((params->num_memories % 32) ? 1 : 0);

	if (params->block_address >= CFG_BASE)
		params->block_address -= CFG_BASE;

	if (params->derr)
		err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET;
	else
		err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET;

	/* Set invalid wrapper index */
	*memory_wrapper_idx = 0xFF;

	/* Iterate through memory wrappers, a single bit must be set */
	for (i = 0 ; i < num_mem_regs ; i++) {
		err_word = RREG32(err_addr + i * 4);
		if (err_word) {
			err_bit = __ffs(err_word);
			*memory_wrapper_idx = err_bit + (32 * i);
			break;
		}
	}

	if (*memory_wrapper_idx == 0xFF) {
		dev_err(hdev->dev, "ECC error information cannot be found\n");
		return -EINVAL;
	}

	WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET,
			*memory_wrapper_idx);

	*ecc_address =
		RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET);
	*ecc_syndrom =
		RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET);

	/* Clear error indication */
	reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET);
	if (params->derr)
		reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_DERR_MASK, 1);
	else
		reg |= FIELD_PREP(GAUDI_ECC_MEM_INFO_CLR_SERR_MASK, 1);

	WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg);

	return 0;
}
/**
 * gaudi_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
 *
 * @idx: the current pi/ci value
 * @q_len: the queue length (power of 2)
 *
 * @return the cyclically decremented index
 */
static inline u32 gaudi_queue_idx_dec(u32 idx, u32 q_len)
{
	u32 mask = q_len - 1;

	/*
	 * modular decrement is equivalent to adding (queue_len - 1);
	 * later we take LSBs to make sure the value is in the
	 * range [0, queue_len - 1]
	 */
	return (idx + q_len - 1) & mask;
}
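/*
 * Example: for a 1024-entry queue, gaudi_queue_idx_dec(0, 1024) yields
 * (0 + 1023) & 1023 = 1023, i.e. the index wraps backwards to the last
 * entry, while gaudi_queue_idx_dec(17, 1024) simply yields 16.
 */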
/**
 * gaudi_print_sw_config_stream_data - print SW config stream data
 *
 * @hdev: pointer to the habanalabs device structure
 * @stream: the QMAN's stream
 * @qman_base: base address of QMAN registers block
 */
static void gaudi_print_sw_config_stream_data(struct hl_device *hdev, u32 stream,
						u64 qman_base)
{
	u64 cq_ptr_lo, cq_ptr_hi, cq_tsize, cq_ptr;
	u32 cq_ptr_lo_off, size;

	cq_ptr_lo_off = mmTPC0_QM_CQ_PTR_LO_1 - mmTPC0_QM_CQ_PTR_LO_0;

	cq_ptr_lo = qman_base + (mmTPC0_QM_CQ_PTR_LO_0 - mmTPC0_QM_BASE) +
						stream * cq_ptr_lo_off;
	cq_ptr_hi = cq_ptr_lo +
			(mmTPC0_QM_CQ_PTR_HI_0 - mmTPC0_QM_CQ_PTR_LO_0);
	cq_tsize = cq_ptr_lo +
			(mmTPC0_QM_CQ_TSIZE_0 - mmTPC0_QM_CQ_PTR_LO_0);

	cq_ptr = (((u64) RREG32(cq_ptr_hi)) << 32) | RREG32(cq_ptr_lo);
	size = RREG32(cq_tsize);
	dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n",
		stream, cq_ptr, size);
}
/**
 * gaudi_print_last_pqes_on_err - print last PQEs on error
 *
 * @hdev: pointer to the habanalabs device structure
 * @qid_base: first QID of the QMAN (out of 4 streams)
 * @stream: the QMAN's stream
 * @qman_base: base address of QMAN registers block
 * @pr_sw_conf: if true print the SW config stream data (CQ PTR and SIZE)
 */
static void gaudi_print_last_pqes_on_err(struct hl_device *hdev, u32 qid_base,
						u32 stream, u64 qman_base,
						bool pr_sw_conf)
{
	u32 ci, qm_ci_stream_off, queue_len;
	struct hl_hw_queue *q;
	u64 pq_ci;
	int i;

	q = &hdev->kernel_queues[qid_base + stream];

	qm_ci_stream_off = mmTPC0_QM_PQ_CI_1 - mmTPC0_QM_PQ_CI_0;
	pq_ci = qman_base + (mmTPC0_QM_PQ_CI_0 - mmTPC0_QM_BASE) +
						stream * qm_ci_stream_off;

	queue_len = (q->queue_type == QUEUE_TYPE_INT) ?
					q->int_queue_len : HL_QUEUE_LENGTH;

	hdev->asic_funcs->hw_queues_lock(hdev);

	if (pr_sw_conf)
		gaudi_print_sw_config_stream_data(hdev, stream, qman_base);

	ci = RREG32(pq_ci);

	/* we should start printing from ci - 1 */
	ci = gaudi_queue_idx_dec(ci, queue_len);

	for (i = 0; i < PQ_FETCHER_CACHE_SIZE; i++) {
		struct hl_bd *bd;
		u64 addr;
		u32 len;

		bd = q->kernel_address;
		bd += ci;

		len = le32_to_cpu(bd->len);
		/* len 0 means uninitialized entry - break */
		if (!len)
			break;

		addr = le64_to_cpu(bd->ptr);

		dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n",
			stream, ci, addr, len);

		/* get previous ci, wrap if needed */
		ci = gaudi_queue_idx_dec(ci, queue_len);
	}

	hdev->asic_funcs->hw_queues_unlock(hdev);
}
/**
 * print_qman_data_on_err - extract QMAN data on error
 *
 * @hdev: pointer to the habanalabs device structure
 * @qid_base: first QID of the QMAN (out of 4 streams)
 * @stream: the QMAN's stream
 * @qman_base: base address of QMAN registers block
 *
 * This function attempts to extract as much data as possible on a QMAN error.
 * On the upper CP, print the SW config stream data and the last 8 PQEs.
 * On the lower CP, print the SW config data and the last PQEs of all 4 upper CPs.
 */
static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base,
					u32 stream, u64 qman_base)
{
	u32 i;

	if (stream != QMAN_STREAMS) {
		gaudi_print_last_pqes_on_err(hdev, qid_base, stream, qman_base,
						true);
		return;
	}

	gaudi_print_sw_config_stream_data(hdev, stream, qman_base);

	for (i = 0; i < QMAN_STREAMS; i++)
		gaudi_print_last_pqes_on_err(hdev, qid_base, i, qman_base,
						false);
}
static void gaudi_handle_qman_err_generic(struct hl_device *hdev,
					  const char *qm_name,
					  u64 qman_base,
					  u32 qid_base)
{
	u32 i, j, glbl_sts_val, arb_err_val, glbl_sts_clr_val;
	u64 glbl_sts_addr, arb_err_addr;
	char reg_desc[32];

	glbl_sts_addr = qman_base + (mmTPC0_QM_GLBL_STS1_0 - mmTPC0_QM_BASE);
	arb_err_addr = qman_base + (mmTPC0_QM_ARB_ERR_CAUSE - mmTPC0_QM_BASE);

	/* Iterate through all stream GLBL_STS1 registers + Lower CP */
	for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) {
		glbl_sts_clr_val = 0;
		glbl_sts_val = RREG32(glbl_sts_addr + 4 * i);

		if (!glbl_sts_val)
			continue;

		if (i == QMAN_STREAMS)
			snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerCP");
		else
			snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);

		for (j = 0 ; j < GAUDI_NUM_OF_QM_ERR_CAUSE ; j++) {
			if (glbl_sts_val & BIT(j)) {
				dev_err_ratelimited(hdev->dev,
						"%s %s. err cause: %s\n",
						qm_name, reg_desc,
						gaudi_qman_error_cause[j]);
				glbl_sts_clr_val |= BIT(j);
			}
		}

		/* Write 1 clear errors */
		if (!hdev->stop_on_err)
			WREG32(glbl_sts_addr + 4 * i, glbl_sts_clr_val);
		else
			print_qman_data_on_err(hdev, qid_base, i, qman_base);
	}

	arb_err_val = RREG32(arb_err_addr);

	if (!arb_err_val)
		return;

	for (j = 0 ; j < GAUDI_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
		if (arb_err_val & BIT(j)) {
			dev_err_ratelimited(hdev->dev,
					"%s ARB_ERR. err cause: %s\n",
					qm_name,
					gaudi_qman_arb_error_cause[j]);
		}
	}
}
static void gaudi_print_sm_sei_info(struct hl_device *hdev, u16 event_type,
					struct hl_eq_sm_sei_data *sei_data)
{
	u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0;

	/* Flip the bits as the enum is ordered in the opposite way
	 * (index 0 maps to 3, 1 to 2, and vice versa)
	 */
	index = (index ^ 0x3) & 0x3;

	switch (sei_data->sei_cause) {
	case SM_SEI_SO_OVERFLOW:
		dev_err_ratelimited(hdev->dev,
			"%s SEI Error: SOB Group %u overflow/underflow",
			gaudi_sync_manager_names[index],
			le32_to_cpu(sei_data->sei_log));
		break;
	case SM_SEI_LBW_4B_UNALIGNED:
		dev_err_ratelimited(hdev->dev,
			"%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x",
			gaudi_sync_manager_names[index],
			le32_to_cpu(sei_data->sei_log));
		break;
	case SM_SEI_AXI_RESPONSE_ERR:
		dev_err_ratelimited(hdev->dev,
			"%s SEI Error: AXI ID %u response error",
			gaudi_sync_manager_names[index],
			le32_to_cpu(sei_data->sei_log));
		break;
	default:
		dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u",
				le32_to_cpu(sei_data->sei_log));
		break;
	}
}
static void gaudi_handle_ecc_event(struct hl_device *hdev, u16 event_type,
		struct hl_eq_ecc_data *ecc_data)
{
	struct ecc_info_extract_params params;
	u64 ecc_address = 0, ecc_syndrom = 0;
	u8 index, memory_wrapper_idx = 0;
	bool extract_info_from_fw;
	int rc;

	if (hdev->asic_prop.fw_security_enabled) {
		extract_info_from_fw = true;
		goto extract_ecc_info;
	}

	switch (event_type) {
	case GAUDI_EVENT_PCIE_CORE_SERR ... GAUDI_EVENT_PCIE_PHY_DERR:
	case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_MMU_DERR:
		extract_info_from_fw = true;
		break;
	case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
		index = event_type - GAUDI_EVENT_TPC0_SERR;
		params.block_address = mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
		params.num_memories = 90;
		params.derr = false;
		extract_info_from_fw = false;
		break;
	case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
		index = event_type - GAUDI_EVENT_TPC0_DERR;
		params.block_address =
			mmTPC0_CFG_BASE + index * TPC_CFG_OFFSET;
		params.num_memories = 90;
		params.derr = true;
		extract_info_from_fw = false;
		break;
	case GAUDI_EVENT_MME0_ACC_SERR:
	case GAUDI_EVENT_MME1_ACC_SERR:
	case GAUDI_EVENT_MME2_ACC_SERR:
	case GAUDI_EVENT_MME3_ACC_SERR:
		index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4;
		params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
		params.num_memories = 128;
		params.derr = false;
		extract_info_from_fw = false;
		break;
	case GAUDI_EVENT_MME0_ACC_DERR:
	case GAUDI_EVENT_MME1_ACC_DERR:
	case GAUDI_EVENT_MME2_ACC_DERR:
	case GAUDI_EVENT_MME3_ACC_DERR:
		index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4;
		params.block_address = mmMME0_ACC_BASE + index * MME_ACC_OFFSET;
		params.num_memories = 128;
		params.derr = true;
		extract_info_from_fw = false;
		break;
	case GAUDI_EVENT_MME0_SBAB_SERR:
	case GAUDI_EVENT_MME1_SBAB_SERR:
	case GAUDI_EVENT_MME2_SBAB_SERR:
	case GAUDI_EVENT_MME3_SBAB_SERR:
		index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4;
		params.block_address =
			mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
		params.num_memories = 33;
		params.derr = false;
		extract_info_from_fw = false;
		break;
	case GAUDI_EVENT_MME0_SBAB_DERR:
	case GAUDI_EVENT_MME1_SBAB_DERR:
	case GAUDI_EVENT_MME2_SBAB_DERR:
	case GAUDI_EVENT_MME3_SBAB_DERR:
		index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4;
		params.block_address =
			mmMME0_SBAB_BASE + index * MME_ACC_OFFSET;
		params.num_memories = 33;
		params.derr = true;
		extract_info_from_fw = false;
		break;
	default:
		return;
	}

extract_ecc_info:
	if (extract_info_from_fw) {
		ecc_address = le64_to_cpu(ecc_data->ecc_address);
		ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
		memory_wrapper_idx = ecc_data->memory_wrapper_idx;
	} else {
		rc = gaudi_extract_ecc_info(hdev, &params, &ecc_address,
				&ecc_syndrom, &memory_wrapper_idx);
		if (rc)
			return;
	}

	dev_err(hdev->dev,
		"ECC error detected. address: %#llx. Syndrome: %#llx. block id %u\n",
		ecc_address, ecc_syndrom, memory_wrapper_idx);
}
static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
{
	u64 qman_base;
	char desc[32];
	u32 qid_base;
	u8 index;

	switch (event_type) {
	case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
		index = event_type - GAUDI_EVENT_TPC0_QM;
		qid_base = GAUDI_QUEUE_ID_TPC_0_0 + index * QMAN_STREAMS;
		qman_base = mmTPC0_QM_BASE + index * TPC_QMAN_OFFSET;
		snprintf(desc, ARRAY_SIZE(desc), "%s%d", "TPC_QM", index);
		break;
	case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
		index = event_type - GAUDI_EVENT_MME0_QM;
		qid_base = GAUDI_QUEUE_ID_MME_0_0 + index * QMAN_STREAMS;
		qman_base = mmMME0_QM_BASE + index * MME_QMAN_OFFSET;
		snprintf(desc, ARRAY_SIZE(desc), "%s%d", "MME_QM", index);
		break;
	case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
		index = event_type - GAUDI_EVENT_DMA0_QM;
		qid_base = GAUDI_QUEUE_ID_DMA_0_0 + index * QMAN_STREAMS;
		/* skip GAUDI_QUEUE_ID_CPU_PQ if necessary */
		if (index > 1)
			qid_base++;
		qman_base = mmDMA0_QM_BASE + index * DMA_QMAN_OFFSET;
		snprintf(desc, ARRAY_SIZE(desc), "%s%d", "DMA_QM", index);
		break;
	case GAUDI_EVENT_NIC0_QM0:
		qid_base = GAUDI_QUEUE_ID_NIC_0_0;
		qman_base = mmNIC0_QM0_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM0");
		break;
	case GAUDI_EVENT_NIC0_QM1:
		qid_base = GAUDI_QUEUE_ID_NIC_1_0;
		qman_base = mmNIC0_QM1_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC0_QM1");
		break;
	case GAUDI_EVENT_NIC1_QM0:
		qid_base = GAUDI_QUEUE_ID_NIC_2_0;
		qman_base = mmNIC1_QM0_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM0");
		break;
	case GAUDI_EVENT_NIC1_QM1:
		qid_base = GAUDI_QUEUE_ID_NIC_3_0;
		qman_base = mmNIC1_QM1_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC1_QM1");
		break;
	case GAUDI_EVENT_NIC2_QM0:
		qid_base = GAUDI_QUEUE_ID_NIC_4_0;
		qman_base = mmNIC2_QM0_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM0");
		break;
	case GAUDI_EVENT_NIC2_QM1:
		qid_base = GAUDI_QUEUE_ID_NIC_5_0;
		qman_base = mmNIC2_QM1_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC2_QM1");
		break;
	case GAUDI_EVENT_NIC3_QM0:
		qid_base = GAUDI_QUEUE_ID_NIC_6_0;
		qman_base = mmNIC3_QM0_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM0");
		break;
	case GAUDI_EVENT_NIC3_QM1:
		qid_base = GAUDI_QUEUE_ID_NIC_7_0;
		qman_base = mmNIC3_QM1_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC3_QM1");
		break;
	case GAUDI_EVENT_NIC4_QM0:
		qid_base = GAUDI_QUEUE_ID_NIC_8_0;
		qman_base = mmNIC4_QM0_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM0");
		break;
	case GAUDI_EVENT_NIC4_QM1:
		qid_base = GAUDI_QUEUE_ID_NIC_9_0;
		qman_base = mmNIC4_QM1_BASE;
		snprintf(desc, ARRAY_SIZE(desc), "NIC4_QM1");
		break;
	default:
		return;
	}

	gaudi_handle_qman_err_generic(hdev, desc, qman_base, qid_base);
}
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
					bool razwi)
{
	u32 engine_id_1, engine_id_2;
	char desc[64] = "";
	u64 razwi_addr = 0;
	u8 razwi_type;
	int rc;

	/*
	 * Init engine id by default as not valid and only if razwi initiated from engine with
	 * engine id it will get valid value.
	 * Init razwi type to default, will be changed only if razwi caused by page fault or
	 * MMU access error
	 */
	engine_id_1 = U16_MAX;
	engine_id_2 = U16_MAX;
	razwi_type = U8_MAX;

	gaudi_get_event_desc(event_type, desc, sizeof(desc));
	dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
		event_type, desc);

	if (razwi) {
		gaudi_print_and_get_razwi_info(hdev, &engine_id_1, &engine_id_2);
		gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);

		/* In case it's the first razwi, save its parameters */
		rc = atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1);
		if (!rc) {
			hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
			hdev->last_error.razwi_timestamp = ktime_get();
			hdev->last_error.razwi_addr = razwi_addr;
			hdev->last_error.razwi_engine_id_1 = engine_id_1;
			hdev->last_error.razwi_engine_id_2 = engine_id_2;
			/*
			 * If the first engine id holds a non valid value the razwi initiator
			 * does not have an engine id
			 */
			hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX);
			hdev->last_error.razwi_type = razwi_type;
		}
	}
}
static void gaudi_print_out_of_sync_info(struct hl_device *hdev,
					struct cpucp_pkt_sync_err *sync_err)
{
	struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];

	dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
		sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
}

static void gaudi_print_fw_alive_info(struct hl_device *hdev,
					struct hl_eq_fw_alive *fw_alive)
{
	dev_err(hdev->dev,
		"FW alive report: severity=%s, process_id=%u, thread_id=%u, uptime=%llu seconds\n",
		(fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ?
		"Minor" : "Critical", fw_alive->process_id,
		fw_alive->thread_id, fw_alive->uptime_seconds);
}
static int gaudi_non_hard_reset_late_init(struct hl_device *hdev)
{
	/* GAUDI doesn't support any reset except hard-reset */
	return 0;
}
static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
			struct hl_eq_hbm_ecc_data *hbm_ecc_data)
{
	u32 base, val, val2, wr_par, rd_par, ca_par, derr, serr, type, ch;
	int rc = 0;

	if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
					CPU_BOOT_DEV_STS0_HBM_ECC_EN) {
		if (!hbm_ecc_data) {
			dev_err(hdev->dev, "No FW ECC data");
			return 0;
		}

		wr_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK,
				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
		rd_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK,
				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
		ca_par = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK,
				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
		derr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_DERR_MASK,
				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
		serr = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_SERR_MASK,
				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
		type = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK,
				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));
		ch = FIELD_GET(CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK,
				le32_to_cpu(hbm_ecc_data->hbm_ecc_info));

		dev_err_ratelimited(hdev->dev,
			"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
			device, ch, wr_par, rd_par, ca_par, serr, derr);
		dev_err_ratelimited(hdev->dev,
			"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%u, SEC_CNT=%d, DEC_CNT=%d\n",
			device, ch, hbm_ecc_data->first_addr, type,
			hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt,
			hbm_ecc_data->dec_cnt);
		return 0;
	}

	if (hdev->asic_prop.fw_security_enabled) {
		dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n");
		return 0;
	}

	base = GAUDI_HBM_CFG_BASE + device * GAUDI_HBM_CFG_OFFSET;
	for (ch = 0 ; ch < GAUDI_HBM_CHANNELS ; ch++) {
		val = RREG32_MASK(base + ch * 0x1000 + 0x06C, 0x0000FFFF);
		val = (val & 0xFF) | ((val >> 8) & 0xFF);
		if (val) {
			rc = -EIO;
			dev_err_ratelimited(hdev->dev,
				"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
				device, ch * 2, val & 0x1, (val >> 1) & 0x1,
				(val >> 2) & 0x1, (val >> 3) & 0x1,
				(val >> 4) & 0x1);

			val2 = RREG32(base + ch * 0x1000 + 0x060);
			dev_err_ratelimited(hdev->dev,
				"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n",
				device, ch * 2,
				RREG32(base + ch * 0x1000 + 0x064),
				(val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
				(val2 & 0xFF0000) >> 16,
				(val2 & 0xFF000000) >> 24);
		}

		val = RREG32_MASK(base + ch * 0x1000 + 0x07C, 0x0000FFFF);
		val = (val & 0xFF) | ((val >> 8) & 0xFF);
		if (val) {
			rc = -EIO;
			dev_err_ratelimited(hdev->dev,
				"HBM%d pc%d interrupts info: WR_PAR=%d, RD_PAR=%d, CA_PAR=%d, SERR=%d, DERR=%d\n",
				device, ch * 2 + 1, val & 0x1, (val >> 1) & 0x1,
				(val >> 2) & 0x1, (val >> 3) & 0x1,
				(val >> 4) & 0x1);

			val2 = RREG32(base + ch * 0x1000 + 0x070);
			dev_err_ratelimited(hdev->dev,
				"HBM%d pc%d ECC info: 1ST_ERR_ADDR=0x%x, 1ST_ERR_TYPE=%d, SEC_CONT_CNT=%d, SEC_CNT=%d, DEC_CNT=%d\n",
				device, ch * 2 + 1,
				RREG32(base + ch * 0x1000 + 0x074),
				(val2 & 0x200) >> 9, (val2 & 0xFC00) >> 10,
				(val2 & 0xFF0000) >> 16,
				(val2 & 0xFF000000) >> 24);
		}

		/* Clear interrupts */
		RMWREG32(base + (ch * 0x1000) + 0x060, 0x1C8, 0x1FF);
		RMWREG32(base + (ch * 0x1000) + 0x070, 0x1C8, 0x1FF);
		WREG32(base + (ch * 0x1000) + 0x06C, 0x1F1F);
		WREG32(base + (ch * 0x1000) + 0x07C, 0x1F1F);
		RMWREG32(base + (ch * 0x1000) + 0x060, 0x0, 0xF);
		RMWREG32(base + (ch * 0x1000) + 0x070, 0x0, 0xF);
	}

	val = RREG32(base + 0x8F30);
	val2 = RREG32(base + 0x8F34);
	if (val | val2) {
		rc = -EIO;
		dev_err_ratelimited(hdev->dev,
			"HBM %d MC SRAM SERR info: Reg 0x8F30=0x%x, Reg 0x8F34=0x%x\n",
			device, val, val2);
	}
	val = RREG32(base + 0x8F40);
	val2 = RREG32(base + 0x8F44);
	if (val | val2) {
		rc = -EIO;
		dev_err_ratelimited(hdev->dev,
			"HBM %d MC SRAM DERR info: Reg 0x8F40=0x%x, Reg 0x8F44=0x%x\n",
			device, val, val2);
	}

	return rc;
}
static int gaudi_hbm_event_to_dev(u16 hbm_event_type)
{
	switch (hbm_event_type) {
	case GAUDI_EVENT_HBM0_SPI_0:
	case GAUDI_EVENT_HBM0_SPI_1:
		return 0;
	case GAUDI_EVENT_HBM1_SPI_0:
	case GAUDI_EVENT_HBM1_SPI_1:
		return 1;
	case GAUDI_EVENT_HBM2_SPI_0:
	case GAUDI_EVENT_HBM2_SPI_1:
		return 2;
	case GAUDI_EVENT_HBM3_SPI_0:
	case GAUDI_EVENT_HBM3_SPI_1:
		return 3;
	default:
		break;
	}

	/* Should never happen */
	return 0;
}
static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
					char *interrupt_name)
{
	u32 tpc_offset = tpc_id * TPC_CFG_OFFSET, tpc_interrupts_cause, i;
	bool soft_reset_required = false;

	tpc_interrupts_cause = RREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset) &
				TPC0_CFG_TPC_INTR_CAUSE_CAUSE_MASK;

	for (i = 0 ; i < GAUDI_NUM_OF_TPC_INTR_CAUSE ; i++)
		if (tpc_interrupts_cause & BIT(i)) {
			dev_err_ratelimited(hdev->dev,
					"TPC%d_%s interrupt cause: %s\n",
					tpc_id, interrupt_name,
					gaudi_tpc_interrupts_cause[i]);
			/* If this is QM error, we need to soft-reset */
			if (i == 15)
				soft_reset_required = true;
		}

	/* Clear interrupts */
	WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);

	return soft_reset_required;
}
static int tpc_dec_event_to_tpc_id(u16 tpc_dec_event_type)
{
    return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1;
}

static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
{
    return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
}
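/*
 * Editor's note (illustrative, not driver code): both helpers above rely
 * on the spacing of the per-TPC entries in the event map: DEC events are
 * two entries apart per TPC, KRN_ERR events six entries apart. Assuming
 * that spacing, for example:
 *
 *	tpc_dec_event_to_tpc_id(GAUDI_EVENT_TPC0_DEC + 2 * 3) == 3
 *	tpc_krn_event_to_tpc_id(GAUDI_EVENT_TPC0_KRN_ERR + 6 * 5) == 5
 */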
static void gaudi_print_clk_change_info(struct hl_device *hdev,
                    u16 event_type)
{
    ktime_t zero_time = ktime_set(0, 0);

    mutex_lock(&hdev->clk_throttling.lock);

    switch (event_type) {
    case GAUDI_EVENT_FIX_POWER_ENV_S:
        hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
        hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
        hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
        hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
        dev_info_ratelimited(hdev->dev,
            "Clock throttling due to power consumption\n");
        break;

    case GAUDI_EVENT_FIX_POWER_ENV_E:
        hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
        hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
        dev_info_ratelimited(hdev->dev,
            "Power envelope is safe, back to optimal clock\n");
        break;

    case GAUDI_EVENT_FIX_THERMAL_ENV_S:
        hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
        hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
        hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
        hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
        dev_info_ratelimited(hdev->dev,
            "Clock throttling due to overheating\n");
        break;

    case GAUDI_EVENT_FIX_THERMAL_ENV_E:
        hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
        hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
        dev_info_ratelimited(hdev->dev,
            "Thermal envelope is safe, back to optimal clock\n");
        break;

    default:
        dev_err(hdev->dev, "Received invalid clock change event %d\n",
            event_type);
        break;
    }

    mutex_unlock(&hdev->clk_throttling.lock);
}
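/*
 * Editor's note (illustrative sketch, not driver code): the start/end
 * pair recorded above lets a consumer compute how long a throttling
 * episode lasted once it has ended, using the standard ktime helpers:
 *
 *	s64 us = ktime_us_delta(
 *		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end,
 *		hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start);
 *
 * An all-zero end timestamp marks an episode still in progress.
 */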
static void gaudi_handle_eqe(struct hl_device *hdev,
                struct hl_eq_entry *eq_entry)
{
    struct gaudi_device *gaudi = hdev->asic_specific;
    u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
    u32 fw_fatal_err_flag = 0;
    u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
            >> EQ_CTL_EVENT_TYPE_SHIFT);
    bool reset_required;
    u8 cause;
    int rc;

    if (event_type >= GAUDI_EVENT_SIZE) {
        dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
                event_type, GAUDI_EVENT_SIZE - 1);
        return;
    }

    gaudi->events_stat[event_type]++;
    gaudi->events_stat_aggregate[event_type]++;
    switch (event_type) {
    case GAUDI_EVENT_PCIE_CORE_DERR:
    case GAUDI_EVENT_PCIE_IF_DERR:
    case GAUDI_EVENT_PCIE_PHY_DERR:
    case GAUDI_EVENT_TPC0_DERR ... GAUDI_EVENT_TPC7_DERR:
    case GAUDI_EVENT_MME0_ACC_DERR:
    case GAUDI_EVENT_MME0_SBAB_DERR:
    case GAUDI_EVENT_MME1_ACC_DERR:
    case GAUDI_EVENT_MME1_SBAB_DERR:
    case GAUDI_EVENT_MME2_ACC_DERR:
    case GAUDI_EVENT_MME2_SBAB_DERR:
    case GAUDI_EVENT_MME3_ACC_DERR:
    case GAUDI_EVENT_MME3_SBAB_DERR:
    case GAUDI_EVENT_DMA0_DERR_ECC ... GAUDI_EVENT_DMA7_DERR_ECC:
        fallthrough;
    case GAUDI_EVENT_CPU_IF_ECC_DERR:
    case GAUDI_EVENT_PSOC_MEM_DERR:
    case GAUDI_EVENT_PSOC_CORESIGHT_DERR:
    case GAUDI_EVENT_SRAM0_DERR ... GAUDI_EVENT_SRAM28_DERR:
    case GAUDI_EVENT_DMA_IF0_DERR ... GAUDI_EVENT_DMA_IF3_DERR:
    case GAUDI_EVENT_HBM_0_DERR ... GAUDI_EVENT_HBM_3_DERR:
    case GAUDI_EVENT_MMU_DERR:
    case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
        gaudi_print_irq_info(hdev, event_type, true);
        gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
        fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
        goto reset_device;

    case GAUDI_EVENT_GIC500:
    case GAUDI_EVENT_AXI_ECC:
    case GAUDI_EVENT_L2_RAM_ECC:
    case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
        gaudi_print_irq_info(hdev, event_type, false);
        fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
        goto reset_device;

    case GAUDI_EVENT_HBM0_SPI_0:
    case GAUDI_EVENT_HBM1_SPI_0:
    case GAUDI_EVENT_HBM2_SPI_0:
    case GAUDI_EVENT_HBM3_SPI_0:
        gaudi_print_irq_info(hdev, event_type, false);
        gaudi_hbm_read_interrupts(hdev,
                gaudi_hbm_event_to_dev(event_type),
                &eq_entry->hbm_ecc_data);
        fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
        goto reset_device;

    case GAUDI_EVENT_HBM0_SPI_1:
    case GAUDI_EVENT_HBM1_SPI_1:
    case GAUDI_EVENT_HBM2_SPI_1:
    case GAUDI_EVENT_HBM3_SPI_1:
        gaudi_print_irq_info(hdev, event_type, false);
        gaudi_hbm_read_interrupts(hdev,
                gaudi_hbm_event_to_dev(event_type),
                &eq_entry->hbm_ecc_data);
        hl_fw_unmask_irq(hdev, event_type);
        break;
    case GAUDI_EVENT_TPC0_DEC:
    case GAUDI_EVENT_TPC1_DEC:
    case GAUDI_EVENT_TPC2_DEC:
    case GAUDI_EVENT_TPC3_DEC:
    case GAUDI_EVENT_TPC4_DEC:
    case GAUDI_EVENT_TPC5_DEC:
    case GAUDI_EVENT_TPC6_DEC:
    case GAUDI_EVENT_TPC7_DEC:
        gaudi_print_irq_info(hdev, event_type, true);
        reset_required = gaudi_tpc_read_interrupts(hdev,
                    tpc_dec_event_to_tpc_id(event_type),
                    "AXI_SLV_DEC_Error");
        if (reset_required) {
            dev_err(hdev->dev, "reset required due to %s\n",
                gaudi_irq_map_table[event_type].name);

            hl_device_reset(hdev, 0);
        } else {
            hl_fw_unmask_irq(hdev, event_type);
        }
        break;

    case GAUDI_EVENT_TPC0_KRN_ERR:
    case GAUDI_EVENT_TPC1_KRN_ERR:
    case GAUDI_EVENT_TPC2_KRN_ERR:
    case GAUDI_EVENT_TPC3_KRN_ERR:
    case GAUDI_EVENT_TPC4_KRN_ERR:
    case GAUDI_EVENT_TPC5_KRN_ERR:
    case GAUDI_EVENT_TPC6_KRN_ERR:
    case GAUDI_EVENT_TPC7_KRN_ERR:
        gaudi_print_irq_info(hdev, event_type, true);
        reset_required = gaudi_tpc_read_interrupts(hdev,
                    tpc_krn_event_to_tpc_id(event_type),
                    "KRN_ERR");
        if (reset_required) {
            dev_err(hdev->dev, "reset required due to %s\n",
                gaudi_irq_map_table[event_type].name);

            hl_device_reset(hdev, 0);
        } else {
            hl_fw_unmask_irq(hdev, event_type);
        }
        break;
    case GAUDI_EVENT_PCIE_CORE_SERR:
    case GAUDI_EVENT_PCIE_IF_SERR:
    case GAUDI_EVENT_PCIE_PHY_SERR:
    case GAUDI_EVENT_TPC0_SERR ... GAUDI_EVENT_TPC7_SERR:
    case GAUDI_EVENT_MME0_ACC_SERR:
    case GAUDI_EVENT_MME0_SBAB_SERR:
    case GAUDI_EVENT_MME1_ACC_SERR:
    case GAUDI_EVENT_MME1_SBAB_SERR:
    case GAUDI_EVENT_MME2_ACC_SERR:
    case GAUDI_EVENT_MME2_SBAB_SERR:
    case GAUDI_EVENT_MME3_ACC_SERR:
    case GAUDI_EVENT_MME3_SBAB_SERR:
    case GAUDI_EVENT_DMA0_SERR_ECC ... GAUDI_EVENT_DMA7_SERR_ECC:
    case GAUDI_EVENT_CPU_IF_ECC_SERR:
    case GAUDI_EVENT_PSOC_MEM_SERR:
    case GAUDI_EVENT_PSOC_CORESIGHT_SERR:
    case GAUDI_EVENT_SRAM0_SERR ... GAUDI_EVENT_SRAM28_SERR:
    case GAUDI_EVENT_DMA_IF0_SERR ... GAUDI_EVENT_DMA_IF3_SERR:
    case GAUDI_EVENT_HBM_0_SERR ... GAUDI_EVENT_HBM_3_SERR:
        fallthrough;
    case GAUDI_EVENT_MMU_SERR:
        gaudi_print_irq_info(hdev, event_type, true);
        gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
        hl_fw_unmask_irq(hdev, event_type);
        break;

    case GAUDI_EVENT_PCIE_DEC:
    case GAUDI_EVENT_MME0_WBC_RSP:
    case GAUDI_EVENT_MME0_SBAB0_RSP:
    case GAUDI_EVENT_MME1_WBC_RSP:
    case GAUDI_EVENT_MME1_SBAB0_RSP:
    case GAUDI_EVENT_MME2_WBC_RSP:
    case GAUDI_EVENT_MME2_SBAB0_RSP:
    case GAUDI_EVENT_MME3_WBC_RSP:
    case GAUDI_EVENT_MME3_SBAB0_RSP:
    case GAUDI_EVENT_CPU_AXI_SPLITTER:
    case GAUDI_EVENT_PSOC_AXI_DEC:
    case GAUDI_EVENT_PSOC_PRSTN_FALL:
    case GAUDI_EVENT_MMU_PAGE_FAULT:
    case GAUDI_EVENT_MMU_WR_PERM:
    case GAUDI_EVENT_RAZWI_OR_ADC:
    case GAUDI_EVENT_TPC0_QM ... GAUDI_EVENT_TPC7_QM:
    case GAUDI_EVENT_MME0_QM ... GAUDI_EVENT_MME2_QM:
    case GAUDI_EVENT_DMA0_QM ... GAUDI_EVENT_DMA7_QM:
        fallthrough;
    case GAUDI_EVENT_NIC0_QM0:
    case GAUDI_EVENT_NIC0_QM1:
    case GAUDI_EVENT_NIC1_QM0:
    case GAUDI_EVENT_NIC1_QM1:
    case GAUDI_EVENT_NIC2_QM0:
    case GAUDI_EVENT_NIC2_QM1:
    case GAUDI_EVENT_NIC3_QM0:
    case GAUDI_EVENT_NIC3_QM1:
    case GAUDI_EVENT_NIC4_QM0:
    case GAUDI_EVENT_NIC4_QM1:
    case GAUDI_EVENT_DMA0_CORE ... GAUDI_EVENT_DMA7_CORE:
        gaudi_print_irq_info(hdev, event_type, true);
        gaudi_handle_qman_err(hdev, event_type);
        hl_fw_unmask_irq(hdev, event_type);
        break;
    case GAUDI_EVENT_RAZWI_OR_ADC_SW:
        gaudi_print_irq_info(hdev, event_type, true);
        goto reset_device;

    case GAUDI_EVENT_TPC0_BMON_SPMU:
    case GAUDI_EVENT_TPC1_BMON_SPMU:
    case GAUDI_EVENT_TPC2_BMON_SPMU:
    case GAUDI_EVENT_TPC3_BMON_SPMU:
    case GAUDI_EVENT_TPC4_BMON_SPMU:
    case GAUDI_EVENT_TPC5_BMON_SPMU:
    case GAUDI_EVENT_TPC6_BMON_SPMU:
    case GAUDI_EVENT_TPC7_BMON_SPMU:
    case GAUDI_EVENT_DMA_BM_CH0 ... GAUDI_EVENT_DMA_BM_CH7:
        gaudi_print_irq_info(hdev, event_type, false);
        hl_fw_unmask_irq(hdev, event_type);
        break;

    case GAUDI_EVENT_DMA_IF_SEI_0 ... GAUDI_EVENT_DMA_IF_SEI_3:
        gaudi_print_irq_info(hdev, event_type, false);
        gaudi_print_sm_sei_info(hdev, event_type,
                    &eq_entry->sm_sei_data);
        rc = hl_state_dump(hdev);
        if (rc)
            dev_err(hdev->dev,
                "Error during system state dump %d\n", rc);
        hl_fw_unmask_irq(hdev, event_type);
        break;

    case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
        gaudi_print_clk_change_info(hdev, event_type);
        hl_fw_unmask_irq(hdev, event_type);
        break;

    case GAUDI_EVENT_PSOC_GPIO_U16_0:
        cause = le64_to_cpu(eq_entry->data[0]) & 0xFF;
        dev_err(hdev->dev,
            "Received high temp H/W interrupt %d (cause %d)\n",
            event_type, cause);
        break;

    case GAUDI_EVENT_DEV_RESET_REQ:
        gaudi_print_irq_info(hdev, event_type, false);
        goto reset_device;

    case GAUDI_EVENT_PKT_QUEUE_OUT_SYNC:
        gaudi_print_irq_info(hdev, event_type, false);
        gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
        goto reset_device;

    case GAUDI_EVENT_FW_ALIVE_S:
        gaudi_print_irq_info(hdev, event_type, false);
        gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive);
        goto reset_device;

    default:
        dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
                event_type);
        break;
    }

    return;

reset_device:
    if (hdev->asic_prop.fw_security_enabled)
        hl_device_reset(hdev, HL_DRV_RESET_HARD
                | HL_DRV_RESET_BYPASS_REQ_TO_FW
                | fw_fatal_err_flag);
    else if (hdev->hard_reset_on_fw_events)
        hl_device_reset(hdev, HL_DRV_RESET_HARD | fw_fatal_err_flag);
    else
        hl_fw_unmask_irq(hdev, event_type);
}
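/*
 * Editor's note (illustrative sketch, not driver code): every fatal
 * event above funnels to the reset_device label, where the reset flags
 * are OR-ed together before a single hl_device_reset() call. The
 * secured-firmware path is equivalent to:
 *
 *	u32 flags = HL_DRV_RESET_HARD | HL_DRV_RESET_BYPASS_REQ_TO_FW;
 *
 *	flags |= fw_fatal_err_flag;	// zero unless the event was fatal
 *	hl_device_reset(hdev, flags);
 */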
static void *gaudi_get_events_stat(struct hl_device *hdev, bool aggregate,
                    u32 *size)
{
    struct gaudi_device *gaudi = hdev->asic_specific;

    if (aggregate) {
        *size = (u32) sizeof(gaudi->events_stat_aggregate);
        return gaudi->events_stat_aggregate;
    }

    *size = (u32) sizeof(gaudi->events_stat);
    return gaudi->events_stat;
}
static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
                    u32 flags)
{
    struct gaudi_device *gaudi = hdev->asic_specific;
    u32 status, timeout_usec;
    int rc;

    if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
        hdev->reset_info.hard_reset_pending)
        return 0;

    if (hdev->pldm)
        timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
    else
        timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

    /* L0 & L1 invalidation */
    WREG32(mmSTLB_INV_PS, 3);
    WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
    WREG32(mmSTLB_INV_PS, 2);

    rc = hl_poll_timeout(
        hdev,
        mmSTLB_INV_PS,
        status,
        !status,
        1000,
        timeout_usec);

    WREG32(mmSTLB_INV_SET, 0);

    return rc;
}
static int gaudi_mmu_invalidate_cache_range(struct hl_device *hdev,
                        bool is_hard, u32 flags,
                        u32 asid, u64 va, u64 size)
{
    /* Treat as invalidate all because there is no range invalidation
     * in Gaudi
     */
    return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
}
static int gaudi_mmu_update_asid_hop0_addr(struct hl_device *hdev,
                        u32 asid, u64 phys_addr)
{
    u32 status, timeout_usec;
    int rc;

    if (hdev->pldm)
        timeout_usec = GAUDI_PLDM_MMU_TIMEOUT_USEC;
    else
        timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

    WREG32(MMU_ASID, asid);
    WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
    WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
    WREG32(MMU_BUSY, 0x80000000);

    rc = hl_poll_timeout(
        hdev,
        MMU_BUSY,
        status,
        !(status & 0x80000000),
        1000,
        timeout_usec);

    if (rc) {
        dev_err(hdev->dev,
            "Timeout during MMU hop0 config of asid %d\n", asid);
        return rc;
    }

    return 0;
}
static int gaudi_send_heartbeat(struct hl_device *hdev)
{
    struct gaudi_device *gaudi = hdev->asic_specific;

    if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
        return 0;

    return hl_fw_send_heartbeat(hdev);
}
static int gaudi_cpucp_info_get(struct hl_device *hdev)
{
    struct gaudi_device *gaudi = hdev->asic_specific;
    struct asic_fixed_properties *prop = &hdev->asic_prop;
    int rc;

    if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
        return 0;

    rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0,
                    mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
                    mmCPU_BOOT_ERR1);
    if (rc)
        return rc;

    if (!strlen(prop->cpucp_info.card_name))
        strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,
                CARD_NAME_MAX_LEN);

    hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type);

    set_default_power_values(hdev);

    hdev->max_power = prop->max_power_default;

    return 0;
}
static bool gaudi_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
                    u8 mask_len, struct seq_file *s)
{
    struct gaudi_device *gaudi = hdev->asic_specific;
    const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n";
    const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n";
    const char *nic_fmt = "%-5d%-9s%#-14x%#x\n";
    unsigned long *mask = (unsigned long *)mask_arr;
    u32 qm_glbl_sts0, qm_cgm_sts, dma_core_sts0, tpc_cfg_sts, mme_arch_sts;
    bool is_idle = true, is_eng_idle, is_slave;
    u64 offset;
    int i, dma_id, port;

    if (s)
        seq_puts(s,
            "\nDMA  is_idle  QM_GLBL_STS0  QM_CGM_STS  DMA_CORE_STS0\n"
            "---  -------  ------------  ----------  -------------\n");

    for (i = 0 ; i < DMA_NUMBER_OF_CHNLS ; i++) {
        dma_id = gaudi_dma_assignment[i];
        offset = dma_id * DMA_QMAN_OFFSET;

        qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + offset);
        qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + offset);
        dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + offset);
        is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
                IS_DMA_IDLE(dma_core_sts0);
        is_idle &= is_eng_idle;

        if (mask && !is_eng_idle)
            set_bit(GAUDI_ENGINE_ID_DMA_0 + dma_id, mask);
        if (s)
            seq_printf(s, fmt, dma_id,
                is_eng_idle ? "Y" : "N", qm_glbl_sts0,
                qm_cgm_sts, dma_core_sts0);
    }

    if (s)
        seq_puts(s,
            "\nTPC  is_idle  QM_GLBL_STS0  QM_CGM_STS  CFG_STATUS\n"
            "---  -------  ------------  ----------  ----------\n");

    for (i = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
        offset = i * TPC_QMAN_OFFSET;
        qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + offset);
        qm_cgm_sts = RREG32(mmTPC0_QM_CGM_STS + offset);
        tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + offset);
        is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
                IS_TPC_IDLE(tpc_cfg_sts);
        is_idle &= is_eng_idle;

        if (mask && !is_eng_idle)
            set_bit(GAUDI_ENGINE_ID_TPC_0 + i, mask);
        if (s)
            seq_printf(s, fmt, i,
                is_eng_idle ? "Y" : "N",
                qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts);
    }

    if (s)
        seq_puts(s,
            "\nMME  is_idle  QM_GLBL_STS0  QM_CGM_STS  ARCH_STATUS\n"
            "---  -------  ------------  ----------  -----------\n");

    for (i = 0 ; i < MME_NUMBER_OF_ENGINES ; i++) {
        offset = i * MME_QMAN_OFFSET;
        mme_arch_sts = RREG32(mmMME0_CTRL_ARCH_STATUS + offset);
        is_eng_idle = IS_MME_IDLE(mme_arch_sts);

        /* MME 1 & 3 are slaves, no need to check their QMANs */
        is_slave = i % 2;
        if (!is_slave) {
            qm_glbl_sts0 = RREG32(mmMME0_QM_GLBL_STS0 + offset);
            qm_cgm_sts = RREG32(mmMME0_QM_CGM_STS + offset);
            is_eng_idle &= IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
        }

        is_idle &= is_eng_idle;

        if (mask && !is_eng_idle)
            set_bit(GAUDI_ENGINE_ID_MME_0 + i, mask);
        if (s) {
            if (!is_slave)
                seq_printf(s, fmt, i,
                    is_eng_idle ? "Y" : "N",
                    qm_glbl_sts0, qm_cgm_sts, mme_arch_sts);
            else
                seq_printf(s, mme_slave_fmt, i,
                    is_eng_idle ? "Y" : "N", "-",
                    "-", mme_arch_sts);
        }
    }

    if (s)
        seq_puts(s, "\nNIC  is_idle  QM_GLBL_STS0  QM_CGM_STS\n"
                "---  -------  ------------  ----------\n");

    for (i = 0 ; i < (NIC_NUMBER_OF_ENGINES / 2) ; i++) {
        offset = i * NIC_MACRO_QMAN_OFFSET;
        port = 2 * i;
        if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) {
            qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset);
            qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset);
            is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
            is_idle &= is_eng_idle;

            if (mask && !is_eng_idle)
                set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
            if (s)
                seq_printf(s, nic_fmt, port,
                        is_eng_idle ? "Y" : "N",
                        qm_glbl_sts0, qm_cgm_sts);
        }

        port = 2 * i + 1;
        if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) {
            qm_glbl_sts0 = RREG32(mmNIC0_QM1_GLBL_STS0 + offset);
            qm_cgm_sts = RREG32(mmNIC0_QM1_CGM_STS + offset);
            is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts);
            is_idle &= is_eng_idle;

            if (mask && !is_eng_idle)
                set_bit(GAUDI_ENGINE_ID_NIC_0 + port, mask);
            if (s)
                seq_printf(s, nic_fmt, port,
                        is_eng_idle ? "Y" : "N",
                        qm_glbl_sts0, qm_cgm_sts);
        }
    }

    if (s)
        seq_puts(s, "\n");

    return is_idle;
}
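/*
 * Editor's note (illustrative sketch, not driver code): callers that
 * pass a mask array can test individual engines after the scan, using
 * the same bitmap helpers the function uses to set the bits:
 *
 *	u64 mask_arr[2] = {0};
 *
 *	if (!gaudi_is_device_idle(hdev, mask_arr, ARRAY_SIZE(mask_arr), NULL) &&
 *	    test_bit(GAUDI_ENGINE_ID_TPC_0, (unsigned long *)mask_arr))
 *		dev_dbg(hdev->dev, "TPC0 is busy\n");
 */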
static void gaudi_hw_queues_lock(struct hl_device *hdev)
    __acquires(&gaudi->hw_queues_lock)
{
    struct gaudi_device *gaudi = hdev->asic_specific;

    spin_lock(&gaudi->hw_queues_lock);
}

static void gaudi_hw_queues_unlock(struct hl_device *hdev)
    __releases(&gaudi->hw_queues_lock)
{
    struct gaudi_device *gaudi = hdev->asic_specific;

    spin_unlock(&gaudi->hw_queues_lock);
}
static u32 gaudi_get_pci_id(struct hl_device *hdev)
{
    return hdev->pdev->device;
}

static int gaudi_get_eeprom_data(struct hl_device *hdev, void *data,
                size_t max_size)
{
    struct gaudi_device *gaudi = hdev->asic_specific;

    if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
        return 0;

    return hl_fw_get_eeprom_data(hdev, data, max_size);
}
/*
 * this function should be used only during initialization and/or after reset,
 * when there are no active users.
 */
static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel, u32 tpc_id)
{
    u64 kernel_timeout;
    u32 status, offset;
    int rc;

    offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS);

    if (hdev->pldm)
        kernel_timeout = GAUDI_PLDM_TPC_KERNEL_WAIT_USEC;
    else
        kernel_timeout = HL_DEVICE_TIMEOUT_USEC;

    WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_LOW + offset,
            lower_32_bits(tpc_kernel));
    WREG32(mmTPC0_CFG_QM_KERNEL_BASE_ADDRESS_HIGH + offset,
            upper_32_bits(tpc_kernel));

    WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_LOW + offset,
            lower_32_bits(tpc_kernel));
    WREG32(mmTPC0_CFG_ICACHE_BASE_ADDERESS_HIGH + offset,
            upper_32_bits(tpc_kernel));
    /* set a valid LUT pointer, content is of no significance */
    WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_LO + offset,
            lower_32_bits(tpc_kernel));
    WREG32(mmTPC0_CFG_LUT_FUNC256_BASE_ADDR_HI + offset,
            upper_32_bits(tpc_kernel));

    WREG32(mmTPC0_CFG_QM_SYNC_OBJECT_ADDR + offset,
            lower_32_bits(CFG_BASE +
                mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0));

    WREG32(mmTPC0_CFG_TPC_CMD + offset,
            (1 << TPC0_CFG_TPC_CMD_ICACHE_INVALIDATE_SHIFT |
            1 << TPC0_CFG_TPC_CMD_ICACHE_PREFETCH_64KB_SHIFT));
    /* wait a bit for the engine to start executing */
    usleep_range(1000, 1500);

    /* wait until engine has finished executing */
    rc = hl_poll_timeout(
        hdev,
        mmTPC0_CFG_STATUS + offset,
        status,
        (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
            TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
        1000,
        kernel_timeout);

    if (rc) {
        dev_err(hdev->dev,
            "Timeout while waiting for TPC%d icache prefetch\n",
            tpc_id);
        return -EIO;
    }

    WREG32(mmTPC0_CFG_TPC_EXECUTE + offset,
            1 << TPC0_CFG_TPC_EXECUTE_V_SHIFT);

    /* wait a bit for the engine to start executing */
    usleep_range(1000, 1500);

    /* wait until engine has finished executing */
    rc = hl_poll_timeout(
        hdev,
        mmTPC0_CFG_STATUS + offset,
        status,
        (status & TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK) ==
            TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK,
        1000,
        kernel_timeout);

    if (rc) {
        dev_err(hdev->dev,
            "Timeout while waiting for TPC%d vector pipe\n",
            tpc_id);
        return -EIO;
    }

    rc = hl_poll_timeout(
        hdev,
        mmTPC0_CFG_WQ_INFLIGHT_CNTR + offset,
        status,
        (status == 0),
        1000,
        kernel_timeout);

    if (rc) {
        dev_err(hdev->dev,
            "Timeout while waiting for TPC%d kernel to execute\n",
            tpc_id);
        return -EIO;
    }

    return 0;
}
static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
                    struct hl_ctx *ctx)
{
    struct gaudi_device *gaudi = hdev->asic_specific;
    int min_alloc_order, rc, collective_cb_size;

    if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
        return 0;

    hdev->internal_cb_pool_virt_addr =
            hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
                    HOST_SPACE_INTERNAL_CB_SZ,
                    &hdev->internal_cb_pool_dma_addr,
                    GFP_KERNEL | __GFP_ZERO);

    if (!hdev->internal_cb_pool_virt_addr)
        return -ENOMEM;

    collective_cb_size = sizeof(struct packet_msg_short) * 5 +
            sizeof(struct packet_fence);
    min_alloc_order = ilog2(collective_cb_size);

    hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1);
    if (!hdev->internal_cb_pool) {
        dev_err(hdev->dev,
            "Failed to create internal CB pool\n");
        rc = -ENOMEM;
        goto free_internal_cb_pool;
    }

    rc = gen_pool_add(hdev->internal_cb_pool,
                (uintptr_t) hdev->internal_cb_pool_virt_addr,
                HOST_SPACE_INTERNAL_CB_SZ, -1);
    if (rc) {
        dev_err(hdev->dev,
            "Failed to add memory to internal CB pool\n");
        rc = -EFAULT;
        goto destroy_internal_cb_pool;
    }

    hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx,
            HL_VA_RANGE_TYPE_HOST, HOST_SPACE_INTERNAL_CB_SZ,
            HL_MMU_VA_ALIGNMENT_NOT_NEEDED);

    if (!hdev->internal_cb_va_base) {
        rc = -ENOMEM;
        goto destroy_internal_cb_pool;
    }

    mutex_lock(&ctx->mmu_lock);
    rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base,
            hdev->internal_cb_pool_dma_addr,
            HOST_SPACE_INTERNAL_CB_SZ);

    hdev->asic_funcs->mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
    mutex_unlock(&ctx->mmu_lock);

    if (rc)
        goto unreserve_internal_cb_pool;

    return 0;

unreserve_internal_cb_pool:
    hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
            HOST_SPACE_INTERNAL_CB_SZ);
destroy_internal_cb_pool:
    gen_pool_destroy(hdev->internal_cb_pool);
free_internal_cb_pool:
    hdev->asic_funcs->asic_dma_free_coherent(hdev,
            HOST_SPACE_INTERNAL_CB_SZ,
            hdev->internal_cb_pool_virt_addr,
            hdev->internal_cb_pool_dma_addr);

    return rc;
}
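/*
 * Editor's note (illustrative sketch, not driver code): once initialized,
 * the genalloc pool hands out internal CB space in collective_cb_size
 * granules. A consumer would typically do, with the standard
 * <linux/genalloc.h> helpers (cb_size is a hypothetical caller-chosen
 * size):
 *
 *	unsigned long va = gen_pool_alloc(hdev->internal_cb_pool, cb_size);
 *
 *	if (va) {
 *		// ... build and submit the CB ...
 *		gen_pool_free(hdev->internal_cb_pool, va, cb_size);
 *	}
 */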
static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
                    struct hl_ctx *ctx)
{
    struct gaudi_device *gaudi = hdev->asic_specific;

    if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
        return;

    mutex_lock(&ctx->mmu_lock);
    hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base,
            HOST_SPACE_INTERNAL_CB_SZ);
    hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
            HOST_SPACE_INTERNAL_CB_SZ);
    hdev->asic_funcs->mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
    mutex_unlock(&ctx->mmu_lock);

    gen_pool_destroy(hdev->internal_cb_pool);

    hdev->asic_funcs->asic_dma_free_coherent(hdev,
            HOST_SPACE_INTERNAL_CB_SZ,
            hdev->internal_cb_pool_virt_addr,
            hdev->internal_cb_pool_dma_addr);
}
static int gaudi_ctx_init(struct hl_ctx *ctx)
{
    int rc;

    if (ctx->asid == HL_KERNEL_ASID_ID)
        return 0;

    rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx);
    if (rc)
        return rc;

    rc = gaudi_restore_user_registers(ctx->hdev);
    if (rc)
        gaudi_internal_cb_pool_fini(ctx->hdev, ctx);

    return rc;
}

static void gaudi_ctx_fini(struct hl_ctx *ctx)
{
    if (ctx->asid == HL_KERNEL_ASID_ID)
        return;

    gaudi_internal_cb_pool_fini(ctx->hdev, ctx);
}

static u32 gaudi_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
{
    return gaudi_cq_assignment[cq_idx];
}
static u32 gaudi_get_signal_cb_size(struct hl_device *hdev)
{
    return sizeof(struct packet_msg_short) +
            sizeof(struct packet_msg_prot) * 2;
}

static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
{
    return sizeof(struct packet_msg_short) * 4 +
            sizeof(struct packet_fence) +
            sizeof(struct packet_msg_prot) * 2;
}

static u32 gaudi_get_sob_addr(struct hl_device *hdev, u32 sob_id)
{
    return mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + (sob_id * 4);
}
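/*
 * Editor's note (illustrative, not driver code): the wait CB size above
 * matches the packets gaudi_gen_wait_cb() emits below: three MSG_SHORT
 * packets configuring the monitor, one MSG_SHORT arming it, and one
 * FENCE packet; the two MSG_PROT packets are presumably appended by
 * common queue code. SOB registers are 4 bytes apart, e.g.:
 *
 *	u32 addr = gaudi_get_sob_addr(hdev, 12);
 *	// == mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 + 48
 */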
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
                u32 size, bool eb)
{
    struct hl_cb *cb = (struct hl_cb *) data;
    struct packet_msg_short *pkt;
    u32 value, ctl, pkt_size = sizeof(*pkt);

    pkt = cb->kernel_address + size;
    memset(pkt, 0, pkt_size);

    /* Inc by 1, Mode ADD */
    value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1);
    value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_SOB_MOD_MASK, 1);

    ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4);
    ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
    ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, eb);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

    pkt->value = cpu_to_le32(value);
    pkt->ctl = cpu_to_le32(ctl);

    return size + pkt_size;
}
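/*
 * Editor's note (illustrative sketch, not driver code): FIELD_PREP()
 * from <linux/bitfield.h> shifts a value into the bit positions named
 * by a mask, so packing and unpacking are symmetric:
 *
 *	u32 ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
 *
 *	WARN_ON(FIELD_GET(GAUDI_PKT_CTL_OPCODE_MASK, ctl) !=
 *		PACKET_MSG_SHORT);
 */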
static u32 gaudi_add_mon_msg_short(struct packet_msg_short *pkt, u32 value,
                    u16 addr)
{
    u32 ctl, pkt_size = sizeof(*pkt);

    memset(pkt, 0, pkt_size);

    ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, addr);
    ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 0); /* last pkt MB */

    pkt->value = cpu_to_le32(value);
    pkt->ctl = cpu_to_le32(ctl);

    return pkt_size;
}
static u32 gaudi_add_arm_monitor_pkt(struct hl_device *hdev,
        struct packet_msg_short *pkt, u16 sob_base, u8 sob_mask,
        u16 sob_val, u16 mon_id)
{
    u64 monitor_base;
    u32 ctl, value, pkt_size = sizeof(*pkt);
    u16 msg_addr_offset;
    u8 mask;

    if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) {
        dev_err(hdev->dev,
            "sob_base %u (mask %#x) is not valid\n",
            sob_base, sob_mask);
        return 0;
    }

    /*
     * monitor_base should be the content of the base0 address registers,
     * so it will be added to the msg short offsets
     */
    monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;

    msg_addr_offset =
        (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) -
                monitor_base;

    memset(pkt, 0, pkt_size);

    /* Monitor config packet: bind the monitor to a sync object */
    value = FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8);
    value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val);
    value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MODE_MASK,
            0); /* GREATER OR EQUAL*/
    value |= FIELD_PREP(GAUDI_PKT_SHORT_VAL_MON_MASK_MASK, mask);

    ctl = FIELD_PREP(GAUDI_PKT_SHORT_CTL_ADDR_MASK, msg_addr_offset);
    ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
    ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 2); /* W_S MON base */
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

    pkt->value = cpu_to_le32(value);
    pkt->ctl = cpu_to_le32(ctl);

    return pkt_size;
}
static u32 gaudi_add_fence_pkt(struct packet_fence *pkt)
{
    u32 ctl, cfg, pkt_size = sizeof(*pkt);

    memset(pkt, 0, pkt_size);

    cfg = FIELD_PREP(GAUDI_PKT_FENCE_CFG_DEC_VAL_MASK, 1);
    cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_TARGET_VAL_MASK, 1);
    cfg |= FIELD_PREP(GAUDI_PKT_FENCE_CFG_ID_MASK, 2);

    ctl = FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_FENCE);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_EB_MASK, 0);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_RB_MASK, 1);
    ctl |= FIELD_PREP(GAUDI_PKT_CTL_MB_MASK, 1);

    pkt->cfg = cpu_to_le32(cfg);
    pkt->ctl = cpu_to_le32(ctl);

    return pkt_size;
}
static int gaudi_get_fence_addr(struct hl_device *hdev, u32 queue_id, u64 *addr)
{
    u32 offset, nic_index;

    switch (queue_id) {
    case GAUDI_QUEUE_ID_DMA_0_0:
        offset = mmDMA0_QM_CP_FENCE2_RDATA_0;
        break;
    case GAUDI_QUEUE_ID_DMA_0_1:
        offset = mmDMA0_QM_CP_FENCE2_RDATA_1;
        break;
    case GAUDI_QUEUE_ID_DMA_0_2:
        offset = mmDMA0_QM_CP_FENCE2_RDATA_2;
        break;
    case GAUDI_QUEUE_ID_DMA_0_3:
        offset = mmDMA0_QM_CP_FENCE2_RDATA_3;
        break;
    case GAUDI_QUEUE_ID_DMA_1_0:
        offset = mmDMA1_QM_CP_FENCE2_RDATA_0;
        break;
    case GAUDI_QUEUE_ID_DMA_1_1:
        offset = mmDMA1_QM_CP_FENCE2_RDATA_1;
        break;
    case GAUDI_QUEUE_ID_DMA_1_2:
        offset = mmDMA1_QM_CP_FENCE2_RDATA_2;
        break;
    case GAUDI_QUEUE_ID_DMA_1_3:
        offset = mmDMA1_QM_CP_FENCE2_RDATA_3;
        break;
    case GAUDI_QUEUE_ID_DMA_5_0:
        offset = mmDMA5_QM_CP_FENCE2_RDATA_0;
        break;
    case GAUDI_QUEUE_ID_DMA_5_1:
        offset = mmDMA5_QM_CP_FENCE2_RDATA_1;
        break;
    case GAUDI_QUEUE_ID_DMA_5_2:
        offset = mmDMA5_QM_CP_FENCE2_RDATA_2;
        break;
    case GAUDI_QUEUE_ID_DMA_5_3:
        offset = mmDMA5_QM_CP_FENCE2_RDATA_3;
        break;
    case GAUDI_QUEUE_ID_TPC_7_0:
        offset = mmTPC7_QM_CP_FENCE2_RDATA_0;
        break;
    case GAUDI_QUEUE_ID_TPC_7_1:
        offset = mmTPC7_QM_CP_FENCE2_RDATA_1;
        break;
    case GAUDI_QUEUE_ID_TPC_7_2:
        offset = mmTPC7_QM_CP_FENCE2_RDATA_2;
        break;
    case GAUDI_QUEUE_ID_TPC_7_3:
        offset = mmTPC7_QM_CP_FENCE2_RDATA_3;
        break;
    case GAUDI_QUEUE_ID_NIC_0_0:
    case GAUDI_QUEUE_ID_NIC_1_0:
    case GAUDI_QUEUE_ID_NIC_2_0:
    case GAUDI_QUEUE_ID_NIC_3_0:
    case GAUDI_QUEUE_ID_NIC_4_0:
    case GAUDI_QUEUE_ID_NIC_5_0:
    case GAUDI_QUEUE_ID_NIC_6_0:
    case GAUDI_QUEUE_ID_NIC_7_0:
    case GAUDI_QUEUE_ID_NIC_8_0:
    case GAUDI_QUEUE_ID_NIC_9_0:
        nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2;
        offset = mmNIC0_QM0_CP_FENCE2_RDATA_0 +
                (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
                (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
        break;
    case GAUDI_QUEUE_ID_NIC_0_1:
    case GAUDI_QUEUE_ID_NIC_1_1:
    case GAUDI_QUEUE_ID_NIC_2_1:
    case GAUDI_QUEUE_ID_NIC_3_1:
    case GAUDI_QUEUE_ID_NIC_4_1:
    case GAUDI_QUEUE_ID_NIC_5_1:
    case GAUDI_QUEUE_ID_NIC_6_1:
    case GAUDI_QUEUE_ID_NIC_7_1:
    case GAUDI_QUEUE_ID_NIC_8_1:
    case GAUDI_QUEUE_ID_NIC_9_1:
        nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2;
        offset = mmNIC0_QM0_CP_FENCE2_RDATA_1 +
                (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
                (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
        break;
    case GAUDI_QUEUE_ID_NIC_0_2:
    case GAUDI_QUEUE_ID_NIC_1_2:
    case GAUDI_QUEUE_ID_NIC_2_2:
    case GAUDI_QUEUE_ID_NIC_3_2:
    case GAUDI_QUEUE_ID_NIC_4_2:
    case GAUDI_QUEUE_ID_NIC_5_2:
    case GAUDI_QUEUE_ID_NIC_6_2:
    case GAUDI_QUEUE_ID_NIC_7_2:
    case GAUDI_QUEUE_ID_NIC_8_2:
    case GAUDI_QUEUE_ID_NIC_9_2:
        nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2;
        offset = mmNIC0_QM0_CP_FENCE2_RDATA_2 +
                (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
                (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
        break;
    case GAUDI_QUEUE_ID_NIC_0_3:
    case GAUDI_QUEUE_ID_NIC_1_3:
    case GAUDI_QUEUE_ID_NIC_2_3:
    case GAUDI_QUEUE_ID_NIC_3_3:
    case GAUDI_QUEUE_ID_NIC_4_3:
    case GAUDI_QUEUE_ID_NIC_5_3:
    case GAUDI_QUEUE_ID_NIC_6_3:
    case GAUDI_QUEUE_ID_NIC_7_3:
    case GAUDI_QUEUE_ID_NIC_8_3:
    case GAUDI_QUEUE_ID_NIC_9_3:
        nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2;
        offset = mmNIC0_QM0_CP_FENCE2_RDATA_3 +
                (nic_index >> 1) * NIC_MACRO_QMAN_OFFSET +
                (nic_index & 0x1) * NIC_ENGINE_QMAN_OFFSET;
        break;
    default:
        return -EINVAL;
    }

    *addr = CFG_BASE + offset;

    return 0;
}
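/*
 * Editor's note (illustrative, not driver code): the NIC cases above
 * recover the engine index from the queue id, then split it into a
 * macro (pair) offset and an engine-within-macro offset. For example,
 * for GAUDI_QUEUE_ID_NIC_3_0, assuming four queues per NIC engine in
 * the queue id enumeration:
 *
 *	nic_index = (GAUDI_QUEUE_ID_NIC_3_0 - GAUDI_QUEUE_ID_NIC_0_0) >> 2;
 *	// nic_index == 3: macro 1 (nic_index >> 1), engine 1 (nic_index & 1)
 */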
static u32 gaudi_add_mon_pkts(void *buf, u16 mon_id, u64 fence_addr)
{
    u32 size = 0;
    u64 monitor_base;
    u16 msg_addr_offset;

    /*
     * monitor_base should be the content of the base0 address registers,
     * so it will be added to the msg short offsets
     */
    monitor_base = mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0;

    /* First monitor config packet: low address of the sync */
    msg_addr_offset =
        (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) -
                monitor_base;
    size += gaudi_add_mon_msg_short(buf + size, (u32) fence_addr,
                    msg_addr_offset);

    /* Second monitor config packet: high address of the sync */
    msg_addr_offset =
        (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) -
                monitor_base;
    size += gaudi_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32),
                    msg_addr_offset);

    /*
     * Third monitor config packet: the payload, i.e. what to write when the
     * sync triggers
     */
    msg_addr_offset =
        (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) -
                monitor_base;
    size += gaudi_add_mon_msg_short(buf + size, 1, msg_addr_offset);

    return size;
}
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
                struct hl_gen_wait_properties *prop)
{
    struct hl_cb *cb = (struct hl_cb *) prop->data;
    void *buf = cb->kernel_address;
    u64 fence_addr;
    u32 size = prop->size;

    if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) {
        dev_crit(hdev->dev, "wrong queue id %d for wait packet\n",
                prop->q_idx);
        return 0;
    }

    size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr);
    size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base,
            prop->sob_mask, prop->sob_val, prop->mon_id);
    size += gaudi_add_fence_pkt(buf + size);

    return size;
}
static void gaudi_reset_sob(struct hl_device *hdev, void *data)
{
    struct hl_hw_sob *hw_sob = (struct hl_hw_sob *) data;

    dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx,
        hw_sob->sob_id);

    WREG32(mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_SOB_OBJ_0 +
            hw_sob->sob_id * 4, 0);

    kref_init(&hw_sob->kref);
}
static void gaudi_set_dma_mask_from_fw(struct hl_device *hdev)
{
    if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
            HL_POWER9_HOST_MAGIC) {
        hdev->power9_64bit_dma_enable = 1;
        hdev->dma_mask = 64;
    } else {
        hdev->power9_64bit_dma_enable = 0;
        hdev->dma_mask = 48;
    }
}
static u64 gaudi_get_device_time(struct hl_device *hdev)
{
    u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;

    return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
}
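/*
 * Editor's note (illustrative sketch, not driver code): the 64-bit
 * timestamp is assembled from two 32-bit reads (high word, then low
 * word). A tear-proof variant of the same read, assuming the counter
 * could carry between the two accesses, is the classic high/low/high
 * loop:
 *
 *	u32 hi, lo;
 *
 *	do {
 *		hi = RREG32(mmPSOC_TIMESTAMP_CNTCVU);
 *		lo = RREG32(mmPSOC_TIMESTAMP_CNTCVL);
 *	} while (hi != RREG32(mmPSOC_TIMESTAMP_CNTCVU));
 *	return ((u64)hi << 32) | lo;
 */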
static int gaudi_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
            u32 *block_size, u32 *block_id)
{
    return -EPERM;
}

static int gaudi_block_mmap(struct hl_device *hdev,
                struct vm_area_struct *vma,
                u32 block_id, u32 block_size)
{
    return -EPERM;
}
static void gaudi_enable_events_from_fw(struct hl_device *hdev)
{
    struct cpu_dyn_regs *dyn_regs =
            &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
    u32 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
            mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
            le32_to_cpu(dyn_regs->gic_host_ints_irq);

    WREG32(irq_handler_offset,
        gaudi_irq_map_table[GAUDI_EVENT_INTS_REGISTER].cpu_id);
}
static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
{
    switch (pll_idx) {
    case HL_GAUDI_CPU_PLL: return CPU_PLL;
    case HL_GAUDI_PCI_PLL: return PCI_PLL;
    case HL_GAUDI_NIC_PLL: return NIC_PLL;
    case HL_GAUDI_DMA_PLL: return DMA_PLL;
    case HL_GAUDI_MESH_PLL: return MESH_PLL;
    case HL_GAUDI_MME_PLL: return MME_PLL;
    case HL_GAUDI_TPC_PLL: return TPC_PLL;
    case HL_GAUDI_IF_PLL: return IF_PLL;
    case HL_GAUDI_SRAM_PLL: return SRAM_PLL;
    case HL_GAUDI_HBM_PLL: return HBM_PLL;
    default: return -EINVAL;
    }
}
static int gaudi_add_sync_to_engine_map_entry(
    struct hl_sync_to_engine_map *map, u32 reg_value,
    enum hl_sync_engine_type engine_type, u32 engine_id)
{
    struct hl_sync_to_engine_map_entry *entry;

    /* Reg value represents a partial address of sync object,
     * it is used as unique identifier. For this we need to
     * clear the cutoff cfg base bits from the value.
     */
    if (reg_value == 0 || reg_value == 0xffffffff)
        return 0;
    reg_value -= (u32)CFG_BASE;

    /* create a new hash entry */
    entry = kzalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry)
        return -ENOMEM;
    entry->engine_type = engine_type;
    entry->engine_id = engine_id;
    entry->sync_id = reg_value;
    hash_add(map->tb, &entry->node, reg_value);

    return 0;
}
static int gaudi_gen_sync_to_engine_map(struct hl_device *hdev,
                struct hl_sync_to_engine_map *map)
{
    struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
    int i, j, rc;
    u32 reg_value;

    /* Iterate over TPC engines */
    for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) {

        reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] +
                    sds->props[SP_NEXT_TPC] * i);

        rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
                            ENGINE_TPC, i);
        if (rc)
            goto free_sync_to_engine_map;
    }

    /* Iterate over MME engines */
    for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) {
        for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) {

            reg_value = RREG32(sds->props[SP_MME_CFG_SO] +
                        sds->props[SP_NEXT_MME] * i +
                        j * sizeof(u32));

            rc = gaudi_add_sync_to_engine_map_entry(
                map, reg_value, ENGINE_MME,
                i * sds->props[SP_SUB_MME_ENG_NUM] + j);
            if (rc)
                goto free_sync_to_engine_map;
        }
    }

    /* Iterate over DMA engines */
    for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) {
        reg_value = RREG32(sds->props[SP_DMA_CFG_SO] +
                    sds->props[SP_DMA_QUEUES_OFFSET] * i);
        rc = gaudi_add_sync_to_engine_map_entry(map, reg_value,
                            ENGINE_DMA, i);
        if (rc)
            goto free_sync_to_engine_map;
    }

    return 0;

free_sync_to_engine_map:
    hl_state_dump_free_sync_to_engine_map(map);

    return rc;
}
static int gaudi_monitor_valid(struct hl_mon_state_dump *mon)
{
    return FIELD_GET(
        SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_VALID_MASK,
        mon->status);
}

static void gaudi_fill_sobs_from_mon(char *sobs, struct hl_mon_state_dump *mon)
{
    const size_t max_write = 10;
    u32 gid, mask, sob;
    int i, offset;

    /* Sync object ID is calculated as follows:
     * (8 * group_id + cleared bits in mask)
     */
    gid = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
            mon->arm_data);
    mask = FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
            mon->arm_data);

    for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE -
        max_write; mask >>= 1, i++) {
        if (!(mask & 1)) {
            sob = gid * MONITOR_MAX_SOBS + i;

            if (offset > 0)
                offset += snprintf(sobs + offset, max_write,
                            ", ");

            offset += snprintf(sobs + offset, max_write, "%u", sob);
        }
    }
}
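/*
 * Editor's note (illustrative, not driver code): a monitor arms against
 * a group of sync objects; each cleared mask bit selects one object in
 * the group. E.g. gid == 5 with mask 0b11111100 monitors SOBs 40 and 41,
 * assuming MONITOR_MAX_SOBS == 8 per the comment above:
 *
 *	sob = 5 * MONITOR_MAX_SOBS + 0;	// 40, from cleared bit 0
 *	sob = 5 * MONITOR_MAX_SOBS + 1;	// 41, from cleared bit 1
 */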
static int gaudi_print_single_monitor(char **buf, size_t *size, size_t *offset,
                    struct hl_device *hdev,
                    struct hl_mon_state_dump *mon)
{
    const char *name;
    char scratch_buf1[BIN_REG_STRING_SIZE],
        scratch_buf2[BIN_REG_STRING_SIZE];
    char monitored_sobs[MONITOR_SOB_STRING_SIZE] = {0};

    name = hl_state_dump_get_monitor_name(hdev, mon);
    if (!name)
        name = "";

    gaudi_fill_sobs_from_mon(monitored_sobs, mon);

    return hl_snprintf_resize(
        buf, size, offset,
        "Mon id: %u%s, wait for group id: %u mask %s to reach val: %u and write %u to address 0x%llx. Pending: %s. Means sync objects [%s] are being monitored.",
        mon->id, name,
        FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SID_MASK,
                mon->arm_data),
        hl_format_as_binary(
            scratch_buf1, sizeof(scratch_buf1),
            FIELD_GET(
                SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_MASK_MASK,
                mon->arm_data)),
        FIELD_GET(SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0_SOD_MASK,
                mon->arm_data),
        mon->wr_data,
        (((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low,
        hl_format_as_binary(
            scratch_buf2, sizeof(scratch_buf2),
            FIELD_GET(
                SYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_STATUS_0_PENDING_MASK,
                mon->status)),
        monitored_sobs);
}
static int gaudi_print_fences_single_engine(
    struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
    enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
    size_t *size, size_t *offset)
{
    struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
    int rc = -ENOMEM, i;
    u32 *statuses, *fences;

    statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES],
            sizeof(*statuses), GFP_KERNEL);
    if (!statuses)
        goto out;

    fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] *
                sds->props[SP_ENGINE_NUM_OF_QUEUES],
            sizeof(*fences), GFP_KERNEL);
    if (!fences)
        goto free_status;

    for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES]; ++i)
        statuses[i] = RREG32(status_base_offset + i * sizeof(u32));

    for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] *
            sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i)
        fences[i] = RREG32(base_offset + i * sizeof(u32));

    /* The actual print */
    for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) {
        u32 fence_id;
        u64 fence_cnt, fence_rdata;
        const char *engine_name;

        if (!FIELD_GET(TPC0_QM_CP_STS_0_FENCE_IN_PROGRESS_MASK,
            statuses[i]))
            continue;

        fence_id =
            FIELD_GET(TPC0_QM_CP_STS_0_FENCE_ID_MASK, statuses[i]);
        fence_cnt = base_offset + CFG_BASE +
            sizeof(u32) *
            (i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]);
        fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] +
                sds->props[SP_FENCE0_RDATA_OFFSET];
        engine_name = hl_sync_engine_to_string(engine_type);

        rc = hl_snprintf_resize(
            buf, size, offset,
            "%s%u, stream %u: fence id %u cnt = 0x%llx (%s%u_QM.CP_FENCE%u_CNT_%u) rdata = 0x%llx (%s%u_QM.CP_FENCE%u_RDATA_%u) value = %u, cp_status = %u\n",
            engine_name, engine_id,
            i, fence_id,
            fence_cnt, engine_name, engine_id, fence_id, i,
            fence_rdata, engine_name, engine_id, fence_id, i,
            fences[fence_id],
            statuses[i]);
        if (rc)
            goto free_fences;
    }

    rc = 0;

free_fences:
    kfree(fences);
free_status:
    kfree(statuses);
out:
    return rc;
}
static struct hl_state_dump_specs_funcs gaudi_state_dump_funcs = {
    .monitor_valid = gaudi_monitor_valid,
    .print_single_monitor = gaudi_print_single_monitor,
    .gen_sync_to_engine_map = gaudi_gen_sync_to_engine_map,
    .print_fences_single_engine = gaudi_print_fences_single_engine,
};

static void gaudi_state_dump_init(struct hl_device *hdev)
{
    struct hl_state_dump_specs *sds = &hdev->state_dump_specs;
    int i;

    for (i = 0; i < ARRAY_SIZE(gaudi_so_id_to_str); ++i)
        hash_add(sds->so_id_to_str_tb,
            &gaudi_so_id_to_str[i].node,
            gaudi_so_id_to_str[i].id);

    for (i = 0; i < ARRAY_SIZE(gaudi_monitor_id_to_str); ++i)
        hash_add(sds->monitor_id_to_str_tb,
            &gaudi_monitor_id_to_str[i].node,
            gaudi_monitor_id_to_str[i].id);

    sds->props = gaudi_state_dump_specs_props;

    sds->sync_namager_names = gaudi_sync_manager_names;

    sds->funcs = gaudi_state_dump_funcs;
}
static u32 *gaudi_get_stream_master_qid_arr(void)
{
    return gaudi_stream_master;
}

static const struct hl_asic_funcs gaudi_funcs = {
    .early_init = gaudi_early_init,
    .early_fini = gaudi_early_fini,
    .late_init = gaudi_late_init,
    .late_fini = gaudi_late_fini,
    .sw_init = gaudi_sw_init,
    .sw_fini = gaudi_sw_fini,
    .hw_init = gaudi_hw_init,
    .hw_fini = gaudi_hw_fini,
    .halt_engines = gaudi_halt_engines,
    .suspend = gaudi_suspend,
    .resume = gaudi_resume,
    .ring_doorbell = gaudi_ring_doorbell,
    .pqe_write = gaudi_pqe_write,
    .asic_dma_alloc_coherent = gaudi_dma_alloc_coherent,
    .asic_dma_free_coherent = gaudi_dma_free_coherent,
    .scrub_device_mem = gaudi_scrub_device_mem,
    .get_int_queue_base = gaudi_get_int_queue_base,
    .test_queues = gaudi_test_queues,
    .asic_dma_pool_zalloc = gaudi_dma_pool_zalloc,
    .asic_dma_pool_free = gaudi_dma_pool_free,
    .cpu_accessible_dma_pool_alloc = gaudi_cpu_accessible_dma_pool_alloc,
    .cpu_accessible_dma_pool_free = gaudi_cpu_accessible_dma_pool_free,
    .hl_dma_unmap_sg = gaudi_dma_unmap_sg,
    .cs_parser = gaudi_cs_parser,
    .asic_dma_map_sg = gaudi_dma_map_sg,
    .get_dma_desc_list_size = gaudi_get_dma_desc_list_size,
    .add_end_of_cb_packets = gaudi_add_end_of_cb_packets,
    .update_eq_ci = gaudi_update_eq_ci,
    .context_switch = gaudi_context_switch,
    .restore_phase_topology = gaudi_restore_phase_topology,
    .debugfs_read32 = gaudi_debugfs_read32,
    .debugfs_write32 = gaudi_debugfs_write32,
    .debugfs_read64 = gaudi_debugfs_read64,
    .debugfs_write64 = gaudi_debugfs_write64,
    .debugfs_read_dma = gaudi_debugfs_read_dma,
    .add_device_attr = hl_sysfs_add_dev_clk_attr,
    .handle_eqe = gaudi_handle_eqe,
    .get_events_stat = gaudi_get_events_stat,
    .read_pte = gaudi_read_pte,
    .write_pte = gaudi_write_pte,
    .mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
    .mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
    .send_heartbeat = gaudi_send_heartbeat,
    .debug_coresight = gaudi_debug_coresight,
    .is_device_idle = gaudi_is_device_idle,
    .non_hard_reset_late_init = gaudi_non_hard_reset_late_init,
    .hw_queues_lock = gaudi_hw_queues_lock,
    .hw_queues_unlock = gaudi_hw_queues_unlock,
    .get_pci_id = gaudi_get_pci_id,
    .get_eeprom_data = gaudi_get_eeprom_data,
    .send_cpu_message = gaudi_send_cpu_message,
    .pci_bars_map = gaudi_pci_bars_map,
    .init_iatu = gaudi_init_iatu,
    .rreg = hl_rreg,
    .wreg = hl_wreg,
    .halt_coresight = gaudi_halt_coresight,
    .ctx_init = gaudi_ctx_init,
    .ctx_fini = gaudi_ctx_fini,
    .get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
    .load_firmware_to_device = gaudi_load_firmware_to_device,
    .load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
    .get_signal_cb_size = gaudi_get_signal_cb_size,
    .get_wait_cb_size = gaudi_get_wait_cb_size,
    .gen_signal_cb = gaudi_gen_signal_cb,
    .gen_wait_cb = gaudi_gen_wait_cb,
    .reset_sob = gaudi_reset_sob,
    .reset_sob_group = gaudi_reset_sob_group,
    .set_dma_mask_from_fw = gaudi_set_dma_mask_from_fw,
    .get_device_time = gaudi_get_device_time,
    .collective_wait_init_cs = gaudi_collective_wait_init_cs,
    .collective_wait_create_jobs = gaudi_collective_wait_create_jobs,
    .scramble_addr = hl_mmu_scramble_addr,
    .descramble_addr = hl_mmu_descramble_addr,
    .ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
    .get_hw_block_id = gaudi_get_hw_block_id,
    .hw_block_mmap = gaudi_block_mmap,
    .enable_events_from_fw = gaudi_enable_events_from_fw,
    .map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx,
    .init_firmware_loader = gaudi_init_firmware_loader,
    .init_cpu_scrambler_dram = gaudi_init_scrambler_hbm,
    .state_dump_init = gaudi_state_dump_init,
    .get_sob_addr = gaudi_get_sob_addr,
    .set_pci_memory_regions = gaudi_set_pci_memory_regions,
    .get_stream_master_qid_arr = gaudi_get_stream_master_qid_arr
};

/**
 * gaudi_set_asic_funcs - set GAUDI function pointers
 *
 * @hdev: pointer to hl_device structure
 */
void gaudi_set_asic_funcs(struct hl_device *hdev)
{
    hdev->asic_funcs = &gaudi_funcs;
}