1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2022 HabanaLabs, Ltd.
9 #include "../include/hw_ip/mmu/mmu_general.h"
10 #include "../include/hw_ip/mmu/mmu_v1_0.h"
11 #include "../include/goya/asic_reg/goya_masks.h"
12 #include "../include/goya/goya_reg_map.h"
14 #include <linux/pci.h>
15 #include <linux/hwmon.h>
16 #include <linux/iommu.h>
17 #include <linux/seq_file.h>
20 * GOYA security scheme:
22 * 1. Host is protected by:
23 * - Range registers (When MMU is enabled, DMA RR does NOT protect host)
26 * 2. DRAM is protected by:
27 * - Range registers (protect the first 512MB)
28 * - MMU (isolation between users)
30 * 3. Configuration is protected by:
31 * - Range registers
32 * - Protection bits
34 * When MMU is disabled:
36 * QMAN DMA: PQ, CQ, CP, DMA are secured.
37 * PQ, CB and the data are on the host.
40 * QMAN TPC, QMAN MME: PQ, CQ and CP are not secured.
41 * PQ, CB and the data are on the SRAM/DRAM.
43 * Since QMAN DMA is secured, the driver parses the DMA CB:
44 * - it checks the DMA pointer
45 * - WREG, MSG_PROT are not allowed.
46 * - MSG_LONG/SHORT are allowed.
48 * A read/write transaction by the QMAN to a protected area will succeed if
49 * and only if the QMAN's CP is secured and MSG_PROT is used
52 * When MMU is enabled:
54 * QMAN DMA: PQ, CQ and CP are secured.
55 * MMU is set to bypass on the Secure props register of the QMAN.
56 * The reasons we don't enable MMU for PQ, CQ and CP are:
57 * - PQ entry is in kernel address space and the driver doesn't map it.
58 * - CP writes to MSIX register and to kernel address space (completion queue).
61 * The DMA itself is not secured, but because CP is secured, the driver still
62 * needs to parse the CB; it just doesn't need to check the DMA addresses.
64 * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
65 * the driver doesn't map memory in MMU.
67 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
69 * DMA RR does NOT protect host because DMA is not secured
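/*
 * Illustrative sketch (not part of the driver): one way the CB parsing rules
 * above could be expressed as a per-packet check for a QMAN DMA command
 * buffer when the MMU is disabled. The function name and the simplified flow
 * are assumptions made for this example; the packet IDs come from the Goya
 * packets header already pulled in by this file.
 */
static bool goya_example_dma_cb_pkt_allowed(enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:
	case PACKET_MSG_PROT:
		/* WREG and MSG_PROT are not allowed in a user DMA CB */
		return false;
	case PACKET_MSG_LONG:
	case PACKET_MSG_SHORT:
		/* MSG_LONG/SHORT are allowed */
		return true;
	case PACKET_LIN_DMA:
		/* allowed, but the DMA pointer must still be checked */
		return true;
	default:
		/* other packet types are outside the rules quoted above */
		return true;
	}
}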
73 #define GOYA_BOOT_FIT_FILE "habanalabs/goya/goya-boot-fit.itb"
74 #define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
76 #define GOYA_MMU_REGS_NUM 63
78 #define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
80 #define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
81 #define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
82 #define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
83 #define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
84 #define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
85 #define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
86 #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
87 #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
88 #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
89 #define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
90 #define GOYA_WAIT_FOR_BL_TIMEOUT_USEC 15000000 /* 15s */
92 #define GOYA_QMAN0_FENCE_VAL 0xD169B243
94 #define GOYA_MAX_STRING_LEN 20
96 #define GOYA_CB_POOL_CB_CNT 512
97 #define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
99 #define IS_QM_IDLE(engine, qm_glbl_sts0) \
100 (((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
101 #define IS_DMA_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(DMA, qm_glbl_sts0)
102 #define IS_TPC_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(TPC, qm_glbl_sts0)
103 #define IS_MME_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(MME, qm_glbl_sts0)
105 #define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
106 (((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
107 engine##_CMDQ_IDLE_MASK)
108 #define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
109 IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
110 #define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
111 IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)
113 #define IS_DMA_IDLE(dma_core_sts0) \
114 !((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)
116 #define IS_TPC_IDLE(tpc_cfg_sts) \
117 (((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)
119 #define IS_MME_IDLE(mme_arch_sts) \
120 (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
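/*
 * Illustrative sketch (assumed helper, not the driver's actual idle check):
 * how the macros above are typically combined to decide whether one DMA
 * engine is idle: both its QMAN and the DMA core itself must report idle.
 * The register strides mirror the per-channel offsets used elsewhere in this
 * file.
 */
static bool goya_example_dma_engine_is_idle(struct hl_device *hdev, int dma_id)
{
	u32 qm_glbl_sts0, dma_core_sts0, qm_off, ch_off;

	qm_off = dma_id * (mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0);
	ch_off = dma_id * (mmDMA_CH_1_STS0 - mmDMA_CH_0_STS0);

	qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + qm_off);
	dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + ch_off);

	/* both the QMAN and the DMA core itself must report idle */
	return IS_DMA_QM_IDLE(qm_glbl_sts0) && IS_DMA_IDLE(dma_core_sts0);
}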
122 static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
123 "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
124 "goya cq 4", "goya cpu eq"
127 static u16 goya_packet_sizes[MAX_PACKET_ID] = {
128 [PACKET_WREG_32] = sizeof(struct packet_wreg32),
129 [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
130 [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
131 [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
132 [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
133 [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
134 [PACKET_FENCE] = sizeof(struct packet_fence),
135 [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
136 [PACKET_NOP] = sizeof(struct packet_nop),
137 [PACKET_STOP] = sizeof(struct packet_stop)
140 static inline bool validate_packet_id(enum packet_id id)
141 {
142 switch (id) {
143 case PACKET_WREG_32:
144 case PACKET_WREG_BULK:
145 case PACKET_MSG_LONG:
146 case PACKET_MSG_SHORT:
147 case PACKET_CP_DMA:
148 case PACKET_MSG_PROT:
149 case PACKET_FENCE:
150 case PACKET_LIN_DMA:
151 case PACKET_NOP:
152 case PACKET_STOP:
153 return true;
154 default:
155 return false;
156 }
157 }
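/*
 * Illustrative sketch (assumed helper): how a CB parser would use
 * validate_packet_id() and goya_packet_sizes[] together to step from one
 * packet to the next. It returns the offset of the next packet, or 0 when
 * the current packet is invalid or would overrun the CB.
 */
static u32 goya_example_cb_next_pkt(enum packet_id pkt_id, u32 cb_parsed,
					u32 cb_size)
{
	u32 pkt_size;

	/* reject unknown opcodes before indexing the size table */
	if (!validate_packet_id(pkt_id))
		return 0;

	pkt_size = goya_packet_sizes[pkt_id];

	/* a packet must not run past the end of the CB */
	if (cb_parsed + pkt_size > cb_size)
		return 0;

	return cb_parsed + pkt_size;
}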
159 static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
160 mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
161 mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
162 mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
163 mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
164 mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
165 mmTPC0_QM_GLBL_SECURE_PROPS,
166 mmTPC0_QM_GLBL_NON_SECURE_PROPS,
167 mmTPC0_CMDQ_GLBL_SECURE_PROPS,
168 mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
171 mmTPC1_QM_GLBL_SECURE_PROPS,
172 mmTPC1_QM_GLBL_NON_SECURE_PROPS,
173 mmTPC1_CMDQ_GLBL_SECURE_PROPS,
174 mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
177 mmTPC2_QM_GLBL_SECURE_PROPS,
178 mmTPC2_QM_GLBL_NON_SECURE_PROPS,
179 mmTPC2_CMDQ_GLBL_SECURE_PROPS,
180 mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
183 mmTPC3_QM_GLBL_SECURE_PROPS,
184 mmTPC3_QM_GLBL_NON_SECURE_PROPS,
185 mmTPC3_CMDQ_GLBL_SECURE_PROPS,
186 mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
189 mmTPC4_QM_GLBL_SECURE_PROPS,
190 mmTPC4_QM_GLBL_NON_SECURE_PROPS,
191 mmTPC4_CMDQ_GLBL_SECURE_PROPS,
192 mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
195 mmTPC5_QM_GLBL_SECURE_PROPS,
196 mmTPC5_QM_GLBL_NON_SECURE_PROPS,
197 mmTPC5_CMDQ_GLBL_SECURE_PROPS,
198 mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
201 mmTPC6_QM_GLBL_SECURE_PROPS,
202 mmTPC6_QM_GLBL_NON_SECURE_PROPS,
203 mmTPC6_CMDQ_GLBL_SECURE_PROPS,
204 mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
207 mmTPC7_QM_GLBL_SECURE_PROPS,
208 mmTPC7_QM_GLBL_NON_SECURE_PROPS,
209 mmTPC7_CMDQ_GLBL_SECURE_PROPS,
210 mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
213 mmMME_QM_GLBL_SECURE_PROPS,
214 mmMME_QM_GLBL_NON_SECURE_PROPS,
215 mmMME_CMDQ_GLBL_SECURE_PROPS,
216 mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
217 mmMME_SBA_CONTROL_DATA,
218 mmMME_SBB_CONTROL_DATA,
219 mmMME_SBC_CONTROL_DATA,
220 mmMME_WBC_CONTROL_DATA,
221 mmPCIE_WRAP_PSOC_ARUSER,
222 mmPCIE_WRAP_PSOC_AWUSER
225 static u32 goya_all_events[] = {
226 GOYA_ASYNC_EVENT_ID_PCIE_IF,
227 GOYA_ASYNC_EVENT_ID_TPC0_ECC,
228 GOYA_ASYNC_EVENT_ID_TPC1_ECC,
229 GOYA_ASYNC_EVENT_ID_TPC2_ECC,
230 GOYA_ASYNC_EVENT_ID_TPC3_ECC,
231 GOYA_ASYNC_EVENT_ID_TPC4_ECC,
232 GOYA_ASYNC_EVENT_ID_TPC5_ECC,
233 GOYA_ASYNC_EVENT_ID_TPC6_ECC,
234 GOYA_ASYNC_EVENT_ID_TPC7_ECC,
235 GOYA_ASYNC_EVENT_ID_MME_ECC,
236 GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
237 GOYA_ASYNC_EVENT_ID_MMU_ECC,
238 GOYA_ASYNC_EVENT_ID_DMA_MACRO,
239 GOYA_ASYNC_EVENT_ID_DMA_ECC,
240 GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
241 GOYA_ASYNC_EVENT_ID_PSOC_MEM,
242 GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
243 GOYA_ASYNC_EVENT_ID_SRAM0,
244 GOYA_ASYNC_EVENT_ID_SRAM1,
245 GOYA_ASYNC_EVENT_ID_SRAM2,
246 GOYA_ASYNC_EVENT_ID_SRAM3,
247 GOYA_ASYNC_EVENT_ID_SRAM4,
248 GOYA_ASYNC_EVENT_ID_SRAM5,
249 GOYA_ASYNC_EVENT_ID_SRAM6,
250 GOYA_ASYNC_EVENT_ID_SRAM7,
251 GOYA_ASYNC_EVENT_ID_SRAM8,
252 GOYA_ASYNC_EVENT_ID_SRAM9,
253 GOYA_ASYNC_EVENT_ID_SRAM10,
254 GOYA_ASYNC_EVENT_ID_SRAM11,
255 GOYA_ASYNC_EVENT_ID_SRAM12,
256 GOYA_ASYNC_EVENT_ID_SRAM13,
257 GOYA_ASYNC_EVENT_ID_SRAM14,
258 GOYA_ASYNC_EVENT_ID_SRAM15,
259 GOYA_ASYNC_EVENT_ID_SRAM16,
260 GOYA_ASYNC_EVENT_ID_SRAM17,
261 GOYA_ASYNC_EVENT_ID_SRAM18,
262 GOYA_ASYNC_EVENT_ID_SRAM19,
263 GOYA_ASYNC_EVENT_ID_SRAM20,
264 GOYA_ASYNC_EVENT_ID_SRAM21,
265 GOYA_ASYNC_EVENT_ID_SRAM22,
266 GOYA_ASYNC_EVENT_ID_SRAM23,
267 GOYA_ASYNC_EVENT_ID_SRAM24,
268 GOYA_ASYNC_EVENT_ID_SRAM25,
269 GOYA_ASYNC_EVENT_ID_SRAM26,
270 GOYA_ASYNC_EVENT_ID_SRAM27,
271 GOYA_ASYNC_EVENT_ID_SRAM28,
272 GOYA_ASYNC_EVENT_ID_SRAM29,
273 GOYA_ASYNC_EVENT_ID_GIC500,
274 GOYA_ASYNC_EVENT_ID_PLL0,
275 GOYA_ASYNC_EVENT_ID_PLL1,
276 GOYA_ASYNC_EVENT_ID_PLL3,
277 GOYA_ASYNC_EVENT_ID_PLL4,
278 GOYA_ASYNC_EVENT_ID_PLL5,
279 GOYA_ASYNC_EVENT_ID_PLL6,
280 GOYA_ASYNC_EVENT_ID_AXI_ECC,
281 GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
282 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
283 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
284 GOYA_ASYNC_EVENT_ID_PCIE_DEC,
285 GOYA_ASYNC_EVENT_ID_TPC0_DEC,
286 GOYA_ASYNC_EVENT_ID_TPC1_DEC,
287 GOYA_ASYNC_EVENT_ID_TPC2_DEC,
288 GOYA_ASYNC_EVENT_ID_TPC3_DEC,
289 GOYA_ASYNC_EVENT_ID_TPC4_DEC,
290 GOYA_ASYNC_EVENT_ID_TPC5_DEC,
291 GOYA_ASYNC_EVENT_ID_TPC6_DEC,
292 GOYA_ASYNC_EVENT_ID_TPC7_DEC,
293 GOYA_ASYNC_EVENT_ID_MME_WACS,
294 GOYA_ASYNC_EVENT_ID_MME_WACSD,
295 GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
296 GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
297 GOYA_ASYNC_EVENT_ID_PSOC,
298 GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
299 GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
300 GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
301 GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
302 GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
303 GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
304 GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
305 GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
306 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
307 GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
308 GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
309 GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
310 GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
311 GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
312 GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
313 GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
314 GOYA_ASYNC_EVENT_ID_TPC0_QM,
315 GOYA_ASYNC_EVENT_ID_TPC1_QM,
316 GOYA_ASYNC_EVENT_ID_TPC2_QM,
317 GOYA_ASYNC_EVENT_ID_TPC3_QM,
318 GOYA_ASYNC_EVENT_ID_TPC4_QM,
319 GOYA_ASYNC_EVENT_ID_TPC5_QM,
320 GOYA_ASYNC_EVENT_ID_TPC6_QM,
321 GOYA_ASYNC_EVENT_ID_TPC7_QM,
322 GOYA_ASYNC_EVENT_ID_MME_QM,
323 GOYA_ASYNC_EVENT_ID_MME_CMDQ,
324 GOYA_ASYNC_EVENT_ID_DMA0_QM,
325 GOYA_ASYNC_EVENT_ID_DMA1_QM,
326 GOYA_ASYNC_EVENT_ID_DMA2_QM,
327 GOYA_ASYNC_EVENT_ID_DMA3_QM,
328 GOYA_ASYNC_EVENT_ID_DMA4_QM,
329 GOYA_ASYNC_EVENT_ID_DMA0_CH,
330 GOYA_ASYNC_EVENT_ID_DMA1_CH,
331 GOYA_ASYNC_EVENT_ID_DMA2_CH,
332 GOYA_ASYNC_EVENT_ID_DMA3_CH,
333 GOYA_ASYNC_EVENT_ID_DMA4_CH,
334 GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
335 GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
336 GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
337 GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
338 GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
339 GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
340 GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
341 GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
342 GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
343 GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
344 GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
345 GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
346 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
347 GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
348 GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
349 GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
350 GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
353 static s64 goya_state_dump_specs_props[SP_MAX] = {0};
355 static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
356 static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
357 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
358 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
360 int goya_set_fixed_properties(struct hl_device *hdev)
362 struct asic_fixed_properties *prop = &hdev->asic_prop;
365 prop->max_queues = GOYA_QUEUE_ID_SIZE;
366 prop->hw_queues_props = kcalloc(prop->max_queues,
367 sizeof(struct hw_queue_properties),
370 if (!prop->hw_queues_props)
373 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
374 prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
375 prop->hw_queues_props[i].driver_only = 0;
376 prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
379 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
380 prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
381 prop->hw_queues_props[i].driver_only = 1;
382 prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
385 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
386 NUMBER_OF_INT_HW_QUEUES; i++) {
387 prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
388 prop->hw_queues_props[i].driver_only = 0;
389 prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
392 prop->cfg_base_address = CFG_BASE;
393 prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
394 prop->host_base_address = HOST_PHYS_BASE;
395 prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
396 prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
397 prop->completion_mode = HL_COMPLETION_MODE_JOB;
398 prop->dram_base_address = DRAM_PHYS_BASE;
399 prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
400 prop->dram_end_address = prop->dram_base_address + prop->dram_size;
401 prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
403 prop->sram_base_address = SRAM_BASE_ADDR;
404 prop->sram_size = SRAM_SIZE;
405 prop->sram_end_address = prop->sram_base_address + prop->sram_size;
406 prop->sram_user_base_address = prop->sram_base_address +
407 SRAM_USER_BASE_OFFSET;
409 prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
410 prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
411 if (hdev->pldm)
412 prop->mmu_pgt_size = 0x800000; /* 8MB */
413 else
414 prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
415 prop->mmu_pte_size = HL_PTE_SIZE;
416 prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
417 prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
418 prop->dram_page_size = PAGE_SIZE_2MB;
419 prop->device_mem_alloc_default_page_size = prop->dram_page_size;
420 prop->dram_supports_virtual_memory = true;
422 prop->dmmu.hop_shifts[MMU_HOP0] = MMU_V1_0_HOP0_SHIFT;
423 prop->dmmu.hop_shifts[MMU_HOP1] = MMU_V1_0_HOP1_SHIFT;
424 prop->dmmu.hop_shifts[MMU_HOP2] = MMU_V1_0_HOP2_SHIFT;
425 prop->dmmu.hop_shifts[MMU_HOP3] = MMU_V1_0_HOP3_SHIFT;
426 prop->dmmu.hop_shifts[MMU_HOP4] = MMU_V1_0_HOP4_SHIFT;
427 prop->dmmu.hop_masks[MMU_HOP0] = MMU_V1_0_HOP0_MASK;
428 prop->dmmu.hop_masks[MMU_HOP1] = MMU_V1_0_HOP1_MASK;
429 prop->dmmu.hop_masks[MMU_HOP2] = MMU_V1_0_HOP2_MASK;
430 prop->dmmu.hop_masks[MMU_HOP3] = MMU_V1_0_HOP3_MASK;
431 prop->dmmu.hop_masks[MMU_HOP4] = MMU_V1_0_HOP4_MASK;
432 prop->dmmu.start_addr = VA_DDR_SPACE_START;
433 prop->dmmu.end_addr = VA_DDR_SPACE_END;
434 prop->dmmu.page_size = PAGE_SIZE_2MB;
435 prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
436 prop->dmmu.last_mask = LAST_MASK;
437 /* TODO: will be duplicated until implementing per-MMU props */
438 prop->dmmu.hop_table_size = prop->mmu_hop_table_size;
439 prop->dmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
441 /* shifts and masks are the same in PMMU and DMMU */
442 memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
443 prop->pmmu.start_addr = VA_HOST_SPACE_START;
444 prop->pmmu.end_addr = VA_HOST_SPACE_END;
445 prop->pmmu.page_size = PAGE_SIZE_4KB;
446 prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
447 prop->pmmu.last_mask = LAST_MASK;
448 /* TODO: will be duplicated until implementing per-MMU props */
449 prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
450 prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;
452 /* PMMU and HPMMU are the same except of page size */
453 memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
454 prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
456 prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
457 prop->cfg_size = CFG_SIZE;
458 prop->max_asid = MAX_ASID;
459 prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
460 prop->high_pll = PLL_HIGH_DEFAULT;
461 prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
462 prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
463 prop->max_power_default = MAX_POWER_DEFAULT;
464 prop->dc_power_default = DC_POWER_DEFAULT;
465 prop->tpc_enabled_mask = TPC_ENABLED_MASK;
466 prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
467 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
469 strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
472 prop->max_pending_cs = GOYA_MAX_PENDING_CS;
474 prop->first_available_user_interrupt = USHRT_MAX;
475 prop->tpc_interrupt_id = USHRT_MAX;
477 for (i = 0 ; i < HL_MAX_DCORES ; i++)
478 prop->first_available_cq[i] = USHRT_MAX;
480 prop->fw_cpu_boot_dev_sts0_valid = false;
481 prop->fw_cpu_boot_dev_sts1_valid = false;
482 prop->hard_reset_done_by_fw = false;
483 prop->gic_interrupts_enable = true;
485 prop->server_type = HL_SERVER_TYPE_UNKNOWN;
487 prop->clk_pll_index = HL_GOYA_MME_PLL;
489 prop->use_get_power_for_reset_history = true;
491 prop->configurable_stop_on_err = true;
493 prop->set_max_power_on_device_init = true;
501 * goya_pci_bars_map - Map PCI BARS of Goya device
503 * @hdev: pointer to hl_device structure
505 * Request PCI regions and map them to kernel virtual addresses.
506 * Returns 0 on success
509 static int goya_pci_bars_map(struct hl_device *hdev)
511 static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
512 bool is_wc[3] = {false, false, true};
515 rc = hl_pci_bars_map(hdev, name, is_wc);
519 hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
520 (CFG_BASE - SRAM_BASE_ADDR);
525 static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
527 struct goya_device *goya = hdev->asic_specific;
528 struct hl_inbound_pci_region pci_region;
532 if ((goya) && (goya->ddr_bar_cur_addr == addr))
535 /* Inbound Region 1 - Bar 4 - Point to DDR */
536 pci_region.mode = PCI_BAR_MATCH_MODE;
537 pci_region.bar = DDR_BAR_ID;
538 pci_region.addr = addr;
539 rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
544 old_addr = goya->ddr_bar_cur_addr;
545 goya->ddr_bar_cur_addr = addr;
552 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
554 * @hdev: pointer to hl_device structure
556 * This is needed in case the firmware doesn't initialize the iATU
559 static int goya_init_iatu(struct hl_device *hdev)
561 struct hl_inbound_pci_region inbound_region;
562 struct hl_outbound_pci_region outbound_region;
565 if (hdev->asic_prop.iatu_done_by_fw)
568 /* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
569 inbound_region.mode = PCI_BAR_MATCH_MODE;
570 inbound_region.bar = SRAM_CFG_BAR_ID;
571 inbound_region.addr = SRAM_BASE_ADDR;
572 rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
576 /* Inbound Region 1 - Bar 4 - Point to DDR */
577 inbound_region.mode = PCI_BAR_MATCH_MODE;
578 inbound_region.bar = DDR_BAR_ID;
579 inbound_region.addr = DRAM_PHYS_BASE;
580 rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
584 /* Outbound Region 0 - Point to Host */
585 outbound_region.addr = HOST_PHYS_BASE;
586 outbound_region.size = HOST_PHYS_SIZE;
587 rc = hl_pci_set_outbound_region(hdev, &outbound_region);
593 static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
595 return RREG32(mmHW_STATE);
599 * goya_early_init - GOYA early initialization code
601 * @hdev: pointer to hl_device structure
605 * PCI controller initialization
609 static int goya_early_init(struct hl_device *hdev)
611 struct asic_fixed_properties *prop = &hdev->asic_prop;
612 struct pci_dev *pdev = hdev->pdev;
613 resource_size_t pci_bar_size;
614 u32 fw_boot_status, val;
617 rc = goya_set_fixed_properties(hdev);
619 dev_err(hdev->dev, "Failed to get fixed properties\n");
623 /* Check BAR sizes */
624 pci_bar_size = pci_resource_len(pdev, SRAM_CFG_BAR_ID);
626 if (pci_bar_size != CFG_BAR_SIZE) {
627 dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
628 SRAM_CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE);
630 goto free_queue_props;
633 pci_bar_size = pci_resource_len(pdev, MSIX_BAR_ID);
635 if (pci_bar_size != MSIX_BAR_SIZE) {
636 dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
637 MSIX_BAR_ID, &pci_bar_size, MSIX_BAR_SIZE);
639 goto free_queue_props;
642 prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
643 hdev->dram_pci_bar_start = pci_resource_start(pdev, DDR_BAR_ID);
645 /* If FW security is enabled at this point it means no access to ELBI */
646 if (hdev->asic_prop.fw_security_enabled) {
647 hdev->asic_prop.iatu_done_by_fw = true;
651 rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
654 goto free_queue_props;
656 /* Check whether FW is configuring iATU */
657 if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
658 (fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
659 hdev->asic_prop.iatu_done_by_fw = true;
662 rc = hl_pci_init(hdev);
664 goto free_queue_props;
666 /* Before continuing in the initialization, we need to read the preboot
667 * version to determine whether we run with a security-enabled firmware
669 rc = hl_fw_read_preboot_status(hdev);
671 if (hdev->reset_on_preboot_fail)
672 hdev->asic_funcs->hw_fini(hdev, true, false);
676 if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
677 dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
678 hdev->asic_funcs->hw_fini(hdev, true, false);
682 val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
683 if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
685 "PCI strap is not configured correctly, PCI bus errors may occur\n");
693 kfree(hdev->asic_prop.hw_queues_props);
698 * goya_early_fini - GOYA early finalization code
700 * @hdev: pointer to hl_device structure
705 static int goya_early_fini(struct hl_device *hdev)
707 kfree(hdev->asic_prop.hw_queues_props);
713 static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
715 /* mask to zero the MMBP and ASID bits */
716 WREG32_AND(reg, ~0x7FF);
717 WREG32_OR(reg, asid);
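/*
 * Illustrative sketch: goya_mmu_prepare() (forward-declared above and defined
 * later in this file) essentially applies goya_mmu_prepare_reg() to every
 * entry of goya_mmu_regs[]. This assumed, simplified version only shows the
 * shape of that loop; the real function also performs additional checks
 * (e.g. that the MMU has been initialized).
 */
static void goya_example_mmu_prepare_all(struct hl_device *hdev, u32 asid)
{
	int i;

	for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
		goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}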
720 static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
722 struct goya_device *goya = hdev->asic_specific;
724 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
728 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
730 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
732 RREG32(mmDMA_QM_0_GLBL_PROT);
736 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
738 * @hdev: pointer to hl_device structure
741 static void goya_fetch_psoc_frequency(struct hl_device *hdev)
743 struct asic_fixed_properties *prop = &hdev->asic_prop;
744 u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
745 u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
748 if (hdev->asic_prop.fw_security_enabled) {
749 struct goya_device *goya = hdev->asic_specific;
751 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
754 rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
760 freq = pll_freq_arr[1];
762 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
763 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
764 nr = RREG32(mmPSOC_PCI_PLL_NR);
765 nf = RREG32(mmPSOC_PCI_PLL_NF);
766 od = RREG32(mmPSOC_PCI_PLL_OD);
768 if (div_sel == DIV_SEL_REF_CLK ||
769 div_sel == DIV_SEL_DIVIDED_REF) {
770 if (div_sel == DIV_SEL_REF_CLK)
771 freq = PLL_REF_CLK;
772 else
773 freq = PLL_REF_CLK / (div_fctr + 1);
774 } else if (div_sel == DIV_SEL_PLL_CLK ||
775 div_sel == DIV_SEL_DIVIDED_PLL) {
776 pll_clk = PLL_REF_CLK * (nf + 1) /
777 ((nr + 1) * (od + 1));
778 if (div_sel == DIV_SEL_PLL_CLK)
779 freq = pll_clk;
780 else
781 freq = pll_clk / (div_fctr + 1);
784 "Received invalid div select value: %d",
790 prop->psoc_timestamp_frequency = freq;
791 prop->psoc_pci_pll_nr = nr;
792 prop->psoc_pci_pll_nf = nf;
793 prop->psoc_pci_pll_od = od;
794 prop->psoc_pci_pll_div_factor = div_fctr;
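/*
 * Worked example for the computation above (illustrative register values
 * only, assuming a 50 MHz PLL reference clock): with nr = 0, nf = 39, od = 1
 * and div_fctr = 0, pll_clk = 50 * (39 + 1) / ((0 + 1) * (1 + 1)) = 1000 MHz,
 * and the divided-PLL output is 1000 / (0 + 1) = 1000 MHz, which is the value
 * stored in psoc_timestamp_frequency.
 */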
798 * goya_set_frequency - set the frequency of the device
800 * @hdev: pointer to habanalabs device structure
801 * @freq: the new frequency value
803 * Change the frequency if needed. This function has no protection against
804 * concurrency, therefore it is assumed that the calling function has protected
805 * itself against the case of calling this function from multiple threads with
806 * different values
808 * Returns 0 if no change was done, otherwise returns 1
810 int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
812 struct goya_device *goya = hdev->asic_specific;
814 if ((goya->pm_mng_profile == PM_MANUAL) ||
815 (goya->curr_pll_profile == freq))
818 dev_dbg(hdev->dev, "Changing device frequency to %s\n",
819 freq == PLL_HIGH ? "high" : "low");
821 goya_set_pll_profile(hdev, freq);
823 goya->curr_pll_profile = freq;
828 static void goya_set_freq_to_low_job(struct work_struct *work)
830 struct goya_work_freq *goya_work = container_of(work,
831 struct goya_work_freq,
833 struct hl_device *hdev = goya_work->hdev;
835 mutex_lock(&hdev->fpriv_list_lock);
837 if (!hdev->is_compute_ctx_active)
838 goya_set_frequency(hdev, PLL_LOW);
840 mutex_unlock(&hdev->fpriv_list_lock);
842 schedule_delayed_work(&goya_work->work_freq,
843 usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
846 int goya_late_init(struct hl_device *hdev)
848 struct asic_fixed_properties *prop = &hdev->asic_prop;
849 struct goya_device *goya = hdev->asic_specific;
852 goya_fetch_psoc_frequency(hdev);
854 rc = goya_mmu_clear_pgt_range(hdev);
857 "Failed to clear MMU page tables range %d\n", rc);
861 rc = goya_mmu_set_dram_default_page(hdev);
863 dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
867 rc = goya_mmu_add_mappings_for_device_cpu(hdev);
871 rc = goya_init_cpu_queues(hdev);
875 rc = goya_test_cpu_queue(hdev);
879 rc = goya_cpucp_info_get(hdev);
881 dev_err(hdev->dev, "Failed to get cpucp info %d\n", rc);
885 /* Now that we have the DRAM size in ASIC prop, we need to check
886 * its size and configure the DMA_IF DDR wrap protection (which is in
887 * the MMU block) accordingly. The value is the log2 of the DRAM size
889 WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
891 rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
894 "Failed to enable PCI access from CPU %d\n", rc);
898 /* force setting to low frequency */
899 goya->curr_pll_profile = PLL_LOW;
901 goya->pm_mng_profile = PM_AUTO;
903 goya_set_pll_profile(hdev, PLL_LOW);
905 schedule_delayed_work(&goya->goya_work->work_freq,
906 usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
912 * goya_late_fini - GOYA late tear-down code
914 * @hdev: pointer to hl_device structure
916 * Free the structures that were allocated for the sensors
918 void goya_late_fini(struct hl_device *hdev)
920 struct goya_device *goya = hdev->asic_specific;
922 cancel_delayed_work_sync(&goya->goya_work->work_freq);
924 hl_hwmon_release_resources(hdev);
927 static void goya_set_pci_memory_regions(struct hl_device *hdev)
929 struct asic_fixed_properties *prop = &hdev->asic_prop;
930 struct pci_mem_region *region;
933 region = &hdev->pci_mem_region[PCI_REGION_CFG];
934 region->region_base = CFG_BASE;
935 region->region_size = CFG_SIZE;
936 region->offset_in_bar = CFG_BASE - SRAM_BASE_ADDR;
937 region->bar_size = CFG_BAR_SIZE;
938 region->bar_id = SRAM_CFG_BAR_ID;
942 region = &hdev->pci_mem_region[PCI_REGION_SRAM];
943 region->region_base = SRAM_BASE_ADDR;
944 region->region_size = SRAM_SIZE;
945 region->offset_in_bar = 0;
946 region->bar_size = CFG_BAR_SIZE;
947 region->bar_id = SRAM_CFG_BAR_ID;
951 region = &hdev->pci_mem_region[PCI_REGION_DRAM];
952 region->region_base = DRAM_PHYS_BASE;
953 region->region_size = hdev->asic_prop.dram_size;
954 region->offset_in_bar = 0;
955 region->bar_size = prop->dram_pci_bar_size;
956 region->bar_id = DDR_BAR_ID;
961 * goya_sw_init - Goya software initialization code
963 * @hdev: pointer to hl_device structure
966 static int goya_sw_init(struct hl_device *hdev)
968 struct goya_device *goya;
971 /* Allocate device structure */
972 goya = kzalloc(sizeof(*goya), GFP_KERNEL);
976 /* according to goya_init_iatu */
977 goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
979 goya->mme_clk = GOYA_PLL_FREQ_LOW;
980 goya->tpc_clk = GOYA_PLL_FREQ_LOW;
981 goya->ic_clk = GOYA_PLL_FREQ_LOW;
983 hdev->asic_specific = goya;
985 /* Create DMA pool for small allocations */
986 hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
987 &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
988 if (!hdev->dma_pool) {
989 dev_err(hdev->dev, "failed to create DMA pool\n");
991 goto free_goya_device;
994 hdev->cpu_accessible_dma_mem = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE,
995 &hdev->cpu_accessible_dma_address,
996 GFP_KERNEL | __GFP_ZERO);
998 if (!hdev->cpu_accessible_dma_mem) {
1003 dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
1004 &hdev->cpu_accessible_dma_address);
1006 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
1007 if (!hdev->cpu_accessible_dma_pool) {
1009 "Failed to create CPU accessible DMA pool\n");
1011 goto free_cpu_dma_mem;
1014 rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
1015 (uintptr_t) hdev->cpu_accessible_dma_mem,
1016 HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
1019 "Failed to add memory to CPU accessible DMA pool\n");
1021 goto free_cpu_accessible_dma_pool;
1024 spin_lock_init(&goya->hw_queues_lock);
1025 hdev->supports_coresight = true;
1026 hdev->asic_prop.supports_compute_reset = true;
1027 hdev->asic_prop.allow_inference_soft_reset = true;
1028 hdev->supports_wait_for_multi_cs = false;
1029 hdev->supports_ctx_switch = true;
1031 hdev->asic_funcs->set_pci_memory_regions(hdev);
1033 goya->goya_work = kmalloc(sizeof(struct goya_work_freq), GFP_KERNEL);
1034 if (!goya->goya_work) {
1036 goto free_cpu_accessible_dma_pool;
1039 goya->goya_work->hdev = hdev;
1040 INIT_DELAYED_WORK(&goya->goya_work->work_freq, goya_set_freq_to_low_job);
1044 free_cpu_accessible_dma_pool:
1045 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
1047 hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
1048 hdev->cpu_accessible_dma_address);
1050 dma_pool_destroy(hdev->dma_pool);
1058 * goya_sw_fini - Goya software tear-down code
1060 * @hdev: pointer to hl_device structure
1063 static int goya_sw_fini(struct hl_device *hdev)
1065 struct goya_device *goya = hdev->asic_specific;
1067 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
1069 hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
1070 hdev->cpu_accessible_dma_address);
1072 dma_pool_destroy(hdev->dma_pool);
1074 kfree(goya->goya_work);
1080 static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
1081 dma_addr_t bus_address)
1083 struct goya_device *goya = hdev->asic_specific;
1084 u32 mtr_base_lo, mtr_base_hi;
1085 u32 so_base_lo, so_base_hi;
1086 u32 gic_base_lo, gic_base_hi;
1087 u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
1088 u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;
1090 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1091 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1092 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1093 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1096 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1098 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1100 WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
1101 WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
1103 WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
1104 WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
1105 WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
1107 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1108 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1109 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1110 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1111 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1112 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1113 WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
1114 GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
1116 /* PQ has buffer of 2 cache lines, while CQ has 8 lines */
1117 WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
1118 WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
1120 if (goya->hw_cap_initialized & HW_CAP_MMU)
1121 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
1123 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
1125 if (hdev->stop_on_err)
1126 dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;
1128 WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
1129 WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
1132 static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
1134 u32 gic_base_lo, gic_base_hi;
1136 u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
1139 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1141 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1143 WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
1144 WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
1145 WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
1146 GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
1148 if (dma_id)
1149 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
1150 (dma_id - 1) * 4;
1151 else
1152 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
1154 WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
1155 WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
1159 * goya_init_dma_qmans - Initialize QMAN DMA registers
1161 * @hdev: pointer to hl_device structure
1163 * Initialize the H/W registers of the QMAN DMA channels
1166 void goya_init_dma_qmans(struct hl_device *hdev)
1168 struct goya_device *goya = hdev->asic_specific;
1169 struct hl_hw_queue *q;
1172 if (goya->hw_cap_initialized & HW_CAP_DMA)
1175 q = &hdev->kernel_queues[0];
1177 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
1178 q->cq_id = q->msi_vec = i;
1179 goya_init_dma_qman(hdev, i, q->bus_address);
1180 goya_init_dma_ch(hdev, i);
1183 goya->hw_cap_initialized |= HW_CAP_DMA;
1187 * goya_disable_external_queues - Disable external queues
1189 * @hdev: pointer to hl_device structure
1192 static void goya_disable_external_queues(struct hl_device *hdev)
1194 struct goya_device *goya = hdev->asic_specific;
1196 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
1199 WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
1200 WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
1201 WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
1202 WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
1203 WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
1206 static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
1207 u32 cp_sts_reg, u32 glbl_sts0_reg)
1212 /* use the values of TPC0 as they are all the same */
1214 WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
1216 status = RREG32(cp_sts_reg);
1217 if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
1218 rc = hl_poll_timeout(
1222 !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
1224 QMAN_FENCE_TIMEOUT_USEC);
1226 /* if QMAN is stuck in fence no need to check for stop */
1231 rc = hl_poll_timeout(
1235 (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
1237 QMAN_STOP_TIMEOUT_USEC);
1241 "Timeout while waiting for QMAN to stop\n");
1249 * goya_stop_external_queues - Stop external queues
1251 * @hdev: pointer to hl_device structure
1253 * Returns 0 on success
1256 static int goya_stop_external_queues(struct hl_device *hdev)
1260 struct goya_device *goya = hdev->asic_specific;
1262 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
1265 rc = goya_stop_queue(hdev,
1266 mmDMA_QM_0_GLBL_CFG1,
1268 mmDMA_QM_0_GLBL_STS0);
1271 dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
1275 rc = goya_stop_queue(hdev,
1276 mmDMA_QM_1_GLBL_CFG1,
1278 mmDMA_QM_1_GLBL_STS0);
1281 dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
1285 rc = goya_stop_queue(hdev,
1286 mmDMA_QM_2_GLBL_CFG1,
1288 mmDMA_QM_2_GLBL_STS0);
1291 dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
1295 rc = goya_stop_queue(hdev,
1296 mmDMA_QM_3_GLBL_CFG1,
1298 mmDMA_QM_3_GLBL_STS0);
1301 dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
1305 rc = goya_stop_queue(hdev,
1306 mmDMA_QM_4_GLBL_CFG1,
1308 mmDMA_QM_4_GLBL_STS0);
1311 dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
1319 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1321 * @hdev: pointer to hl_device structure
1323 * Returns 0 on success
1326 int goya_init_cpu_queues(struct hl_device *hdev)
1328 struct goya_device *goya = hdev->asic_specific;
1329 struct asic_fixed_properties *prop = &hdev->asic_prop;
1332 struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
1335 if (!hdev->cpu_queues_enable)
1338 if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
1341 eq = &hdev->event_queue;
1343 WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
1344 WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
1346 WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
1347 WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
1349 WREG32(mmCPU_CQ_BASE_ADDR_LOW,
1350 lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1351 WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
1352 upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1354 WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
1355 WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
1356 WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
1358 /* Used for EQ CI */
1359 WREG32(mmCPU_EQ_CI, 0);
1361 WREG32(mmCPU_IF_PF_PQ_PI, 0);
1363 WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
1365 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
1366 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
1368 err = hl_poll_timeout(
1370 mmCPU_PQ_INIT_STATUS,
1372 (status == PQ_INIT_STATUS_READY_FOR_HOST),
1374 GOYA_CPU_TIMEOUT_USEC);
1378 "Failed to setup communication with device CPU\n");
1382 /* update FW application security bits */
1383 if (prop->fw_cpu_boot_dev_sts0_valid)
1384 prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
1386 if (prop->fw_cpu_boot_dev_sts1_valid)
1387 prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);
1389 goya->hw_cap_initialized |= HW_CAP_CPU_Q;
1393 static void goya_set_pll_refclk(struct hl_device *hdev)
1395 WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
1396 WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
1397 WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
1398 WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
1400 WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
1401 WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
1402 WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
1403 WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
1405 WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
1406 WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
1407 WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
1408 WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
1410 WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
1411 WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
1412 WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
1413 WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
1415 WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
1416 WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
1417 WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
1418 WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
1420 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
1421 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
1422 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
1423 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
1425 WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
1426 WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
1427 WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
1428 WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
1431 static void goya_disable_clk_rlx(struct hl_device *hdev)
1433 WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
1434 WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
1437 static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
1439 u64 tpc_eml_address;
1440 u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
1443 tpc_offset = tpc_id * 0x40000;
1444 tpc_eml_offset = tpc_id * 0x200000;
1445 tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
1446 tpc_slm_offset = tpc_eml_address + 0x100000;
1449 * Workaround for Bug H2 #2443 :
1450 * "TPC SB is not initialized on chip reset"
1453 val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
1454 if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
1455 dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
1458 WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
1460 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
1461 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
1462 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
1463 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
1464 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
1465 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
1466 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
1467 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
1468 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
1469 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
1471 WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1472 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
1474 err = hl_poll_timeout(
1476 mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1478 (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
1480 HL_DEVICE_TIMEOUT_USEC);
1484 "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
1486 WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1487 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
1489 msleep(GOYA_RESET_WAIT_MSEC);
1491 WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1492 ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
1494 msleep(GOYA_RESET_WAIT_MSEC);
1496 for (slm_index = 0 ; slm_index < 256 ; slm_index++)
1497 WREG32(tpc_slm_offset + (slm_index << 2), 0);
1499 val = RREG32(tpc_slm_offset);
1502 static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1504 struct goya_device *goya = hdev->asic_specific;
1510 if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1513 /* Workaround for H2 #2443 */
1515 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1516 _goya_tpc_mbist_workaround(hdev, i);
1518 goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1522 * goya_init_golden_registers - Initialize golden registers
1524 * @hdev: pointer to hl_device structure
1526 * Initialize the H/W registers of the device
1529 static void goya_init_golden_registers(struct hl_device *hdev)
1531 struct goya_device *goya = hdev->asic_specific;
1532 u32 polynom[10], tpc_intr_mask, offset;
1535 if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1538 polynom[0] = 0x00020080;
1539 polynom[1] = 0x00401000;
1540 polynom[2] = 0x00200800;
1541 polynom[3] = 0x00002000;
1542 polynom[4] = 0x00080200;
1543 polynom[5] = 0x00040100;
1544 polynom[6] = 0x00100400;
1545 polynom[7] = 0x00004000;
1546 polynom[8] = 0x00010000;
1547 polynom[9] = 0x00008000;
1549 /* Mask all arithmetic interrupts from TPC */
1550 tpc_intr_mask = 0x7FFF;
1552 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1553 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1554 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1555 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1556 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1557 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1559 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1560 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1561 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1562 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1563 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1566 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1567 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1568 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1569 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1570 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1572 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1573 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1574 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1575 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1576 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1578 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1579 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1580 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1581 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1582 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1584 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1585 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1586 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1587 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1588 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1591 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1592 WREG32(mmMME_AGU, 0x0f0f0f10);
1593 WREG32(mmMME_SEI_MASK, ~0x0);
1595 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1596 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1597 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1598 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1599 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1600 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1601 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1602 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1603 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1604 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1605 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1606 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1607 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1608 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1609 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1610 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1611 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1612 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1613 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1614 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1615 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1616 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1617 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1618 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1619 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1620 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1621 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1622 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1623 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1624 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1625 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1626 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1627 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1628 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1629 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1630 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1631 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1632 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1633 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1634 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1635 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1636 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1637 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1638 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1639 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1640 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1641 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1642 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1643 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1644 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1645 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1646 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1647 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1648 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1649 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1650 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1651 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1652 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1653 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1654 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1655 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1656 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1657 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1658 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1659 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1660 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1661 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1662 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1663 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1664 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1665 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1666 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1667 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1668 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1669 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1670 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1671 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1672 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1673 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1674 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1675 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1676 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1677 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1678 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1680 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1681 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1682 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1683 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1684 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1685 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1686 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1687 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1688 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1689 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1690 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1691 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1693 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1694 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1695 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1696 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1697 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1698 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1699 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1700 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1701 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1702 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1703 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1704 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1706 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1707 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1708 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1709 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1710 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1711 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1712 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1713 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1714 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1715 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1716 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1717 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1719 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1720 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1721 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1722 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1723 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1724 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1725 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1726 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1727 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1728 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1729 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1730 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1732 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1733 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1734 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1735 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1736 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1737 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1738 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1739 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1740 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1741 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1742 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1743 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1745 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1746 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1747 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1748 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1749 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1750 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1751 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1752 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1753 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1754 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1755 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1756 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1758 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1759 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1760 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1761 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1762 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1763 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1764 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1766 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1767 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1768 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1769 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1770 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1771 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1772 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1773 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1775 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1776 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1779 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1780 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1781 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1782 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1783 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1786 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1788 * Workaround for Bug H2 #2441 :
1789 * "ST.NOP set trace event illegal opcode"
1791 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1793 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1794 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1795 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1796 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1798 WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
1799 ICACHE_FETCH_LINE_NUM, 2);
1802 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1803 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1804 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1806 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1807 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1808 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1811 * Workaround for H2 #HW-23 bug
1812 * Set DMA max outstanding read requests to 240 on DMA CH 1.
1813 * This limitation is still large enough to not affect Gen4 bandwidth.
1814 * We need to only limit that DMA channel because the user can only read
1815 * from Host using DMA CH 1
1817 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
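/*
 * Note (assumption, for clarity): 240 decimal is 0xF0, matching the low byte
 * of the value written above; the remaining bit-fields in 0x0fff00F0 are
 * assumed here to keep the register's default configuration.
 */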
1819 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
1821 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1824 static void goya_init_mme_qman(struct hl_device *hdev)
1826 u32 mtr_base_lo, mtr_base_hi;
1827 u32 so_base_lo, so_base_hi;
1828 u32 gic_base_lo, gic_base_hi;
1831 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1832 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1833 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1834 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1837 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1839 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1841 qman_base_addr = hdev->asic_prop.sram_base_address +
1842 MME_QMAN_BASE_OFFSET;
1844 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1845 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1846 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1847 WREG32(mmMME_QM_PQ_PI, 0);
1848 WREG32(mmMME_QM_PQ_CI, 0);
1849 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1850 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1851 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1852 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1854 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1855 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1856 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1857 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1859 /* QMAN CQ has 8 cache lines */
1860 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1862 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1863 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1865 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1867 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1869 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1871 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1874 static void goya_init_mme_cmdq(struct hl_device *hdev)
1876 u32 mtr_base_lo, mtr_base_hi;
1877 u32 so_base_lo, so_base_hi;
1878 u32 gic_base_lo, gic_base_hi;
1880 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1881 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1882 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1883 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1886 gic_base_lo = lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1888 gic_base_hi = upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1890 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1891 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1892 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1893 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1895 /* CMDQ CQ has 20 cache lines */
1896 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1898 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1899 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1901 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1903 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1905 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1907 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1910 void goya_init_mme_qmans(struct hl_device *hdev)
1912 struct goya_device *goya = hdev->asic_specific;
1913 u32 so_base_lo, so_base_hi;
1915 if (goya->hw_cap_initialized & HW_CAP_MME)
1918 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1919 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1921 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1922 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1924 goya_init_mme_qman(hdev);
1925 goya_init_mme_cmdq(hdev);
1927 goya->hw_cap_initialized |= HW_CAP_MME;
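/*
 * goya_init_tpc_qman - Initialize a single TPC QMAN
 *
 * @hdev: pointer to hl_device structure
 * @base_off: offset of this QMAN's PQ inside the SRAM
 * @tpc_id: index of the TPC engine (0-7)
 *
 * reg_off translates the TPC0 register defines to the requested TPC block,
 * assuming all TPC QMAN blocks are laid out with the same stride.
 */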
1930 static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1932 u32 mtr_base_lo, mtr_base_hi;
1933 u32 so_base_lo, so_base_hi;
1934 u32 gic_base_lo, gic_base_hi;
1936 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1938 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1939 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1940 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1941 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1944 gic_base_lo = lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1946 gic_base_hi = upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1948 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1950 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1951 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1952 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1953 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1954 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1955 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1956 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1957 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1958 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1960 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1961 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1962 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1963 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1965 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1967 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1968 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1970 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1971 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1973 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1975 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1977 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1980 static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1982 u32 mtr_base_lo, mtr_base_hi;
1983 u32 so_base_lo, so_base_hi;
1984 u32 gic_base_lo, gic_base_hi;
1985 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1987 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1988 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1989 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1990 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1993 gic_base_lo = lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1995 gic_base_hi = upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1997 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1998 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1999 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
2000 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
2002 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
2004 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
2005 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
2007 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
2008 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
2010 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
2012 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
2014 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
2017 void goya_init_tpc_qmans(struct hl_device *hdev)
2019 struct goya_device *goya = hdev->asic_specific;
2020 u32 so_base_lo, so_base_hi;
2021 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
2022 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
2025 if (goya->hw_cap_initialized & HW_CAP_TPC)
2028 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
2029 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
2031 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
2032 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
2034 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
2038 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
2039 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
2040 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
2041 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
2042 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
2043 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
2044 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
2045 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
2047 for (i = 0 ; i < TPC_MAX_NUM ; i++)
2048 goya_init_tpc_cmdq(hdev, i);
2050 goya->hw_cap_initialized |= HW_CAP_TPC;
2054 * goya_disable_internal_queues - Disable internal queues
2056 * @hdev: pointer to hl_device structure
2059 static void goya_disable_internal_queues(struct hl_device *hdev)
2061 struct goya_device *goya = hdev->asic_specific;
2063 if (!(goya->hw_cap_initialized & HW_CAP_MME))
2066 WREG32(mmMME_QM_GLBL_CFG0, 0);
2067 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
2070 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2073 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
2074 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
2076 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
2077 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
2079 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
2080 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
2082 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
2083 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
2085 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
2086 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
2088 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
2089 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
2091 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
2092 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
2094 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
2095 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
2099 * goya_stop_internal_queues - Stop internal queues
2101 * @hdev: pointer to hl_device structure
2103 * Returns 0 on success
2106 static int goya_stop_internal_queues(struct hl_device *hdev)
2108 struct goya_device *goya = hdev->asic_specific;
2111 if (!(goya->hw_cap_initialized & HW_CAP_MME))
2115 * Each queue (QMAN) is a separate H/W logic. That means that each
2116 * QMAN can be stopped independently and a failure to stop one does NOT
2117 * prevent us from trying to stop the other QMANs
2120 rc = goya_stop_queue(hdev,
2123 mmMME_QM_GLBL_STS0);
2126 dev_err(hdev->dev, "failed to stop MME QMAN\n");
2130 rc = goya_stop_queue(hdev,
2131 mmMME_CMDQ_GLBL_CFG1,
2133 mmMME_CMDQ_GLBL_STS0);
2136 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
2141 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2144 rc = goya_stop_queue(hdev,
2145 mmTPC0_QM_GLBL_CFG1,
2147 mmTPC0_QM_GLBL_STS0);
2150 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
2154 rc = goya_stop_queue(hdev,
2155 mmTPC0_CMDQ_GLBL_CFG1,
2157 mmTPC0_CMDQ_GLBL_STS0);
2160 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
2164 rc = goya_stop_queue(hdev,
2165 mmTPC1_QM_GLBL_CFG1,
2167 mmTPC1_QM_GLBL_STS0);
2170 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
2174 rc = goya_stop_queue(hdev,
2175 mmTPC1_CMDQ_GLBL_CFG1,
2177 mmTPC1_CMDQ_GLBL_STS0);
2180 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
2184 rc = goya_stop_queue(hdev,
2185 mmTPC2_QM_GLBL_CFG1,
2187 mmTPC2_QM_GLBL_STS0);
2190 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2194 rc = goya_stop_queue(hdev,
2195 mmTPC2_CMDQ_GLBL_CFG1,
2197 mmTPC2_CMDQ_GLBL_STS0);
2200 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2204 rc = goya_stop_queue(hdev,
2205 mmTPC3_QM_GLBL_CFG1,
2207 mmTPC3_QM_GLBL_STS0);
2210 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2214 rc = goya_stop_queue(hdev,
2215 mmTPC3_CMDQ_GLBL_CFG1,
2217 mmTPC3_CMDQ_GLBL_STS0);
2220 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2224 rc = goya_stop_queue(hdev,
2225 mmTPC4_QM_GLBL_CFG1,
2227 mmTPC4_QM_GLBL_STS0);
2230 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2234 rc = goya_stop_queue(hdev,
2235 mmTPC4_CMDQ_GLBL_CFG1,
2237 mmTPC4_CMDQ_GLBL_STS0);
2240 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2244 rc = goya_stop_queue(hdev,
2245 mmTPC5_QM_GLBL_CFG1,
2247 mmTPC5_QM_GLBL_STS0);
2250 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2254 rc = goya_stop_queue(hdev,
2255 mmTPC5_CMDQ_GLBL_CFG1,
2257 mmTPC5_CMDQ_GLBL_STS0);
2260 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2264 rc = goya_stop_queue(hdev,
2265 mmTPC6_QM_GLBL_CFG1,
2267 mmTPC6_QM_GLBL_STS0);
2270 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2274 rc = goya_stop_queue(hdev,
2275 mmTPC6_CMDQ_GLBL_CFG1,
2277 mmTPC6_CMDQ_GLBL_STS0);
2280 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2284 rc = goya_stop_queue(hdev,
2285 mmTPC7_QM_GLBL_CFG1,
2287 mmTPC7_QM_GLBL_STS0);
2290 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2294 rc = goya_stop_queue(hdev,
2295 mmTPC7_CMDQ_GLBL_CFG1,
2297 mmTPC7_CMDQ_GLBL_STS0);
2300 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2307 static void goya_dma_stall(struct hl_device *hdev)
2309 struct goya_device *goya = hdev->asic_specific;
2311 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
2314 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2315 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2316 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2317 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2318 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2321 static void goya_tpc_stall(struct hl_device *hdev)
2323 struct goya_device *goya = hdev->asic_specific;
2325 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2328 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2329 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2330 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2331 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2332 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2333 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2334 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2335 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2338 static void goya_mme_stall(struct hl_device *hdev)
2340 struct goya_device *goya = hdev->asic_specific;
2342 if (!(goya->hw_cap_initialized & HW_CAP_MME))
2345 WREG32(mmMME_STALL, 0xFFFFFFFF);
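/*
 * goya_enable_msix - Enable MSI-X and request the IRQ handlers
 *
 * @hdev: pointer to hl_device structure
 *
 * One vector is requested per completion queue plus a dedicated vector for
 * the event queue. On failure, the IRQs that were already requested are
 * freed and the MSI-X vectors are released.
 */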
2348 static int goya_enable_msix(struct hl_device *hdev)
2350 struct goya_device *goya = hdev->asic_specific;
2351 int cq_cnt = hdev->asic_prop.completion_queues_count;
2352 int rc, i, irq_cnt_init, irq;
2354 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2357 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2358 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2361 "MSI-X: Failed to enable support -- %d/%d\n",
2362 GOYA_MSIX_ENTRIES, rc);
2366 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2367 irq = pci_irq_vector(hdev->pdev, i);
2368 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2369 &hdev->completion_queue[i]);
2371 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2376 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2378 rc = request_irq(irq, hl_irq_handler_eq, 0,
2379 goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
2380 &hdev->event_queue);
2382 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2386 goya->hw_cap_initialized |= HW_CAP_MSIX;
2390 for (i = 0 ; i < irq_cnt_init ; i++)
2391 free_irq(pci_irq_vector(hdev->pdev, i),
2392 &hdev->completion_queue[i]);
2394 pci_free_irq_vectors(hdev->pdev);
2398 static void goya_sync_irqs(struct hl_device *hdev)
2400 struct goya_device *goya = hdev->asic_specific;
2403 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2406 /* Wait for all pending IRQs to be finished */
2407 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2408 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2410 synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2413 static void goya_disable_msix(struct hl_device *hdev)
2415 struct goya_device *goya = hdev->asic_specific;
2418 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2421 goya_sync_irqs(hdev);
2423 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2424 free_irq(irq, &hdev->event_queue);
2426 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2427 irq = pci_irq_vector(hdev->pdev, i);
2428 free_irq(irq, &hdev->completion_queue[i]);
2431 pci_free_irq_vectors(hdev->pdev);
2433 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2436 static void goya_enable_timestamp(struct hl_device *hdev)
2438 /* Disable the timestamp counter */
2439 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2441 /* Zero the lower/upper parts of the 64-bit counter */
2442 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
2443 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
2445 /* Enable the counter */
2446 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
2449 static void goya_disable_timestamp(struct hl_device *hdev)
2451 /* Disable the timestamp counter */
2452 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
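/*
 * goya_halt_engines - Stop all queues and engines before reset
 *
 * @hdev: pointer to hl_device structure
 *
 * The external and internal queues are stopped first so no new work is
 * fetched, then the DMA/TPC/MME engines are stalled, and only after both
 * waits are the queues actually disabled and the timestamp counter stopped.
 */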
2455 static void goya_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset)
2457 u32 wait_timeout_ms;
2460 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2462 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2464 goya_stop_external_queues(hdev);
2465 goya_stop_internal_queues(hdev);
2467 msleep(wait_timeout_ms);
2469 goya_dma_stall(hdev);
2470 goya_tpc_stall(hdev);
2471 goya_mme_stall(hdev);
2473 msleep(wait_timeout_ms);
2475 goya_disable_external_queues(hdev);
2476 goya_disable_internal_queues(hdev);
2478 goya_disable_timestamp(hdev);
2481 goya_disable_msix(hdev);
2482 goya_mmu_remove_device_cpu_mappings(hdev);
2484 goya_sync_irqs(hdev);
2489 * goya_load_firmware_to_device() - Load LINUX FW code to device.
2490 * @hdev: Pointer to hl_device structure.
2492 * Copy LINUX fw code from firmware file to HBM BAR.
2494 * Return: 0 on success, non-zero for failure.
2496 static int goya_load_firmware_to_device(struct hl_device *hdev)
2500 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2502 return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0);
2506 * goya_load_boot_fit_to_device() - Load boot fit to device.
2507 * @hdev: Pointer to hl_device structure.
2509 * Copy boot fit file to SRAM BAR.
2511 * Return: 0 on success, non-zero for failure.
2513 static int goya_load_boot_fit_to_device(struct hl_device *hdev)
2517 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
2519 return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0);
2522 static void goya_init_dynamic_firmware_loader(struct hl_device *hdev)
2524 struct dynamic_fw_load_mgr *dynamic_loader;
2525 struct cpu_dyn_regs *dyn_regs;
2527 dynamic_loader = &hdev->fw_loader.dynamic_loader;
2530 * Here we set initial values for a few specific dynamic regs (before
2531 * reading the first descriptor from FW, these values have to be
2532 * hard-coded). In later stages of the protocol these values will be
2533 * updated automatically by reading the FW descriptor, so the data there
2534 * will always be up-to-date
2536 dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
2537 dyn_regs->kmd_msg_to_cpu =
2538 cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU);
2539 dyn_regs->cpu_cmd_status_to_host =
2540 cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST);
2542 dynamic_loader->wait_for_bl_timeout = GOYA_WAIT_FOR_BL_TIMEOUT_USEC;
2545 static void goya_init_static_firmware_loader(struct hl_device *hdev)
2547 struct static_fw_load_mgr *static_loader;
2549 static_loader = &hdev->fw_loader.static_loader;
2551 static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
2552 static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
2553 static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
2554 static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
2555 static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
2556 static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
2557 static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
2558 static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
2559 static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
2560 static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
2561 static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
2562 static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
2565 static void goya_init_firmware_preload_params(struct hl_device *hdev)
2567 struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
2569 pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
2570 pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
2571 pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
2572 pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
2573 pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
2574 pre_fw_load->wait_for_preboot_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
2577 static void goya_init_firmware_loader(struct hl_device *hdev)
2579 struct asic_fixed_properties *prop = &hdev->asic_prop;
2580 struct fw_load_mgr *fw_loader = &hdev->fw_loader;
2582 /* fill common fields */
2583 fw_loader->fw_comp_loaded = FW_TYPE_NONE;
2584 fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
2585 fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
2586 fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
2587 fw_loader->boot_fit_timeout = GOYA_BOOT_FIT_REQ_TIMEOUT_USEC;
2588 fw_loader->skip_bmc = false;
2589 fw_loader->sram_bar_id = SRAM_CFG_BAR_ID;
2590 fw_loader->dram_bar_id = DDR_BAR_ID;
2592 if (prop->dynamic_fw_load)
2593 goya_init_dynamic_firmware_loader(hdev);
2595 goya_init_static_firmware_loader(hdev);
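/*
 * goya_init_cpu - Initialize the embedded CPU through the FW loader
 *
 * @hdev: pointer to hl_device structure
 *
 * Skipped when the preboot CPU FW component is not requested or when the
 * CPU was already brought up. The DDR BAR is pointed at the DRAM base
 * before the FW images are pushed to the device.
 */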
2598 static int goya_init_cpu(struct hl_device *hdev)
2600 struct goya_device *goya = hdev->asic_specific;
2603 if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
2606 if (goya->hw_cap_initialized & HW_CAP_CPU)
2610 * Before pushing u-boot/Linux to the device, we need to set the DDR bar to
2611 * the base address of DRAM
2613 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2615 "failed to map DDR bar to DRAM base address\n");
2619 rc = hl_fw_init_cpu(hdev);
2624 goya->hw_cap_initialized |= HW_CAP_CPU;
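/*
 * goya_mmu_update_asid_hop0_addr - Program the hop0 table address of an ASID
 *
 * The physical address is split across the PA43_12 and PA49_44 registers,
 * bit 31 of MMU_ASID_BUSY kicks the update for the given ASID, and the
 * function then polls until the H/W clears that busy bit.
 */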
2629 static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
2632 u32 status, timeout_usec;
2636 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
2638 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
2640 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
2641 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
2642 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
2644 rc = hl_poll_timeout(
2648 !(status & 0x80000000),
2654 "Timeout during MMU hop0 config of asid %d\n", asid);
2661 int goya_mmu_init(struct hl_device *hdev)
2663 struct asic_fixed_properties *prop = &hdev->asic_prop;
2664 struct goya_device *goya = hdev->asic_specific;
2668 if (!hdev->mmu_enable)
2671 if (goya->hw_cap_initialized & HW_CAP_MMU)
2674 hdev->dram_default_page_mapping = true;
2676 for (i = 0 ; i < prop->max_asid ; i++) {
2677 hop0_addr = prop->mmu_pgt_addr +
2678 (i * prop->mmu_hop_table_size);
2680 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2683 "failed to set hop0 addr for asid %d\n", i);
2688 goya->hw_cap_initialized |= HW_CAP_MMU;
2690 /* init MMU cache manage page */
2691 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2692 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2693 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2695 /* Remove follower feature due to performance bug */
2696 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2697 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2699 hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR | MMU_OP_PHYS_PACK);
2701 WREG32(mmMMU_MMU_ENABLE, 1);
2702 WREG32(mmMMU_SPI_MASK, 0xF);
2711 * goya_hw_init - Goya hardware initialization code
2713 * @hdev: pointer to hl_device structure
2715 * Returns 0 on success
2718 static int goya_hw_init(struct hl_device *hdev)
2720 struct asic_fixed_properties *prop = &hdev->asic_prop;
2723 /* Perform read from the device to make sure device is up */
2724 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2727 * Let's mark in the H/W that we have reached this point. We check
2728 * this value in the reset_before_init function to understand whether
2729 * we need to reset the chip before doing H/W init. This register is
2730 * cleared by the H/W upon H/W reset
2732 WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
2734 rc = goya_init_cpu(hdev);
2736 dev_err(hdev->dev, "failed to initialize CPU\n");
2740 goya_tpc_mbist_workaround(hdev);
2742 goya_init_golden_registers(hdev);
2745 * After CPU initialization is finished, change DDR bar mapping inside
2746 * iATU to point to the start address of the MMU page tables
2748 if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
2749 ~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
2751 "failed to map DDR bar to MMU page tables\n");
2755 rc = goya_mmu_init(hdev);
2759 goya_init_security(hdev);
2761 goya_init_dma_qmans(hdev);
2763 goya_init_mme_qmans(hdev);
2765 goya_init_tpc_qmans(hdev);
2767 goya_enable_timestamp(hdev);
2769 /* MSI-X must be enabled before CPU queues are initialized */
2770 rc = goya_enable_msix(hdev);
2772 goto disable_queues;
2774 /* Perform read from the device to flush all MSI-X configuration */
2775 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2780 goya_disable_internal_queues(hdev);
2781 goya_disable_external_queues(hdev);
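/*
 * goya_hw_fini - Reset the chip
 *
 * @hdev: pointer to hl_device structure
 *
 * On a hard reset the embedded CPU is told to go to WFE and halted, clock
 * relaxation is disabled and the PLLs are moved to the reference clock
 * before RESET_ALL is written; a soft reset only resets DMA, MME and TPC.
 */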
2786 static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
2788 struct goya_device *goya = hdev->asic_specific;
2789 u32 reset_timeout_ms, cpu_timeout_ms, status;
2792 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2793 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2795 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2796 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2800 /* We don't know the state of the CPU, so make sure it is
2801 * stopped by any means necessary
2803 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2804 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2805 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2807 msleep(cpu_timeout_ms);
2809 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2810 goya_disable_clk_rlx(hdev);
2811 goya_set_pll_refclk(hdev);
2813 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2815 "Issued HARD reset command, going to wait %dms\n",
2818 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2820 "Issued SOFT reset command, going to wait %dms\n",
2825 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2826 * itself is in reset. In either reset we need to wait until the reset is done
2829 msleep(reset_timeout_ms);
2831 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2832 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2834 "Timeout while waiting for device to reset 0x%x\n",
2837 if (!hard_reset && goya) {
2838 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2839 HW_CAP_GOLDEN | HW_CAP_TPC);
2840 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2841 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2845 /* Chicken bit to re-initiate boot sequencer flow */
2846 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2847 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2848 /* Move boot manager FSM to pre boot sequencer init state */
2849 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2850 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2853 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2854 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2855 HW_CAP_DMA | HW_CAP_MME |
2856 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2857 HW_CAP_GOLDEN | HW_CAP_TPC);
2859 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2864 int goya_suspend(struct hl_device *hdev)
2868 rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
2870 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2875 int goya_resume(struct hl_device *hdev)
2877 return goya_init_iatu(hdev);
2880 static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2881 void *cpu_addr, dma_addr_t dma_addr, size_t size)
2885 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2886 VM_DONTCOPY | VM_NORESERVE);
2888 rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
2889 (dma_addr - HOST_PHYS_BASE), size);
2891 dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
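/*
 * goya_ring_doorbell - Write the new producer index to the queue's doorbell
 *
 * The switch maps the H/W queue ID to its PQ_PI register. For the CPU queue,
 * an event is also sent through the GIC so the device CPU knows that new
 * data is waiting for it.
 */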
2896 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2898 u32 db_reg_offset, db_value;
2900 switch (hw_queue_id) {
2901 case GOYA_QUEUE_ID_DMA_0:
2902 db_reg_offset = mmDMA_QM_0_PQ_PI;
2905 case GOYA_QUEUE_ID_DMA_1:
2906 db_reg_offset = mmDMA_QM_1_PQ_PI;
2909 case GOYA_QUEUE_ID_DMA_2:
2910 db_reg_offset = mmDMA_QM_2_PQ_PI;
2913 case GOYA_QUEUE_ID_DMA_3:
2914 db_reg_offset = mmDMA_QM_3_PQ_PI;
2917 case GOYA_QUEUE_ID_DMA_4:
2918 db_reg_offset = mmDMA_QM_4_PQ_PI;
2921 case GOYA_QUEUE_ID_CPU_PQ:
2922 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2925 case GOYA_QUEUE_ID_MME:
2926 db_reg_offset = mmMME_QM_PQ_PI;
2929 case GOYA_QUEUE_ID_TPC0:
2930 db_reg_offset = mmTPC0_QM_PQ_PI;
2933 case GOYA_QUEUE_ID_TPC1:
2934 db_reg_offset = mmTPC1_QM_PQ_PI;
2937 case GOYA_QUEUE_ID_TPC2:
2938 db_reg_offset = mmTPC2_QM_PQ_PI;
2941 case GOYA_QUEUE_ID_TPC3:
2942 db_reg_offset = mmTPC3_QM_PQ_PI;
2945 case GOYA_QUEUE_ID_TPC4:
2946 db_reg_offset = mmTPC4_QM_PQ_PI;
2949 case GOYA_QUEUE_ID_TPC5:
2950 db_reg_offset = mmTPC5_QM_PQ_PI;
2953 case GOYA_QUEUE_ID_TPC6:
2954 db_reg_offset = mmTPC6_QM_PQ_PI;
2957 case GOYA_QUEUE_ID_TPC7:
2958 db_reg_offset = mmTPC7_QM_PQ_PI;
2962 /* Should never get here */
2963 dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
2970 /* ring the doorbell */
2971 WREG32(db_reg_offset, db_value);
2973 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) {
2974 /* make sure device CPU will read latest data from host */
2976 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2977 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2981 void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
2983 /* The QMANs are on the SRAM so need to copy to IO space */
2984 memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
2987 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2988 dma_addr_t *dma_handle, gfp_t flags)
2990 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2993 /* Shift to the device's base physical address of host memory */
2995 *dma_handle += HOST_PHYS_BASE;
3000 static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
3001 void *cpu_addr, dma_addr_t dma_handle)
3003 /* Cancel the device's base physical address of host memory */
3004 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
3006 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
3009 int goya_scrub_device_mem(struct hl_device *hdev)
3014 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3015 dma_addr_t *dma_handle, u16 *queue_len)
3020 *dma_handle = hdev->asic_prop.sram_base_address;
3022 base = (__force void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
3025 case GOYA_QUEUE_ID_MME:
3026 offset = MME_QMAN_BASE_OFFSET;
3027 *queue_len = MME_QMAN_LENGTH;
3029 case GOYA_QUEUE_ID_TPC0:
3030 offset = TPC0_QMAN_BASE_OFFSET;
3031 *queue_len = TPC_QMAN_LENGTH;
3033 case GOYA_QUEUE_ID_TPC1:
3034 offset = TPC1_QMAN_BASE_OFFSET;
3035 *queue_len = TPC_QMAN_LENGTH;
3037 case GOYA_QUEUE_ID_TPC2:
3038 offset = TPC2_QMAN_BASE_OFFSET;
3039 *queue_len = TPC_QMAN_LENGTH;
3041 case GOYA_QUEUE_ID_TPC3:
3042 offset = TPC3_QMAN_BASE_OFFSET;
3043 *queue_len = TPC_QMAN_LENGTH;
3045 case GOYA_QUEUE_ID_TPC4:
3046 offset = TPC4_QMAN_BASE_OFFSET;
3047 *queue_len = TPC_QMAN_LENGTH;
3049 case GOYA_QUEUE_ID_TPC5:
3050 offset = TPC5_QMAN_BASE_OFFSET;
3051 *queue_len = TPC_QMAN_LENGTH;
3053 case GOYA_QUEUE_ID_TPC6:
3054 offset = TPC6_QMAN_BASE_OFFSET;
3055 *queue_len = TPC_QMAN_LENGTH;
3057 case GOYA_QUEUE_ID_TPC7:
3058 offset = TPC7_QMAN_BASE_OFFSET;
3059 *queue_len = TPC_QMAN_LENGTH;
3062 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
3067 *dma_handle += offset;
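/*
 * goya_send_job_on_qman0 - Send a driver-owned CB on DMA queue 0
 *
 * A MSG_PROT fence packet at the end of the patched CB writes a known value
 * to host memory; completion is detected by polling that fence. QMAN0
 * security is raised around the submission since this queue carries driver
 * jobs only.
 */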
3072 static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
3074 struct packet_msg_prot *fence_pkt;
3076 dma_addr_t fence_dma_addr;
3082 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
3084 timeout = HL_DEVICE_TIMEOUT_USEC;
3086 if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
3087 dev_err_ratelimited(hdev->dev,
3088 "Can't send driver job on QMAN0 because the device is not idle\n");
3092 fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
3095 "Failed to allocate fence memory for QMAN0\n");
3099 goya_qman0_set_security(hdev, true);
3101 cb = job->patched_cb;
3103 fence_pkt = cb->kernel_address +
3104 job->job_cb_size - sizeof(struct packet_msg_prot);
3106 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3107 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3108 (1 << GOYA_PKT_CTL_MB_SHIFT);
3109 fence_pkt->ctl = cpu_to_le32(tmp);
3110 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
3111 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
3113 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3114 job->job_cb_size, cb->bus_address);
3116 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
3117 goto free_fence_ptr;
3120 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
3121 (tmp == GOYA_QMAN0_FENCE_VAL), 1000,
3124 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
3126 if (rc == -ETIMEDOUT) {
3127 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
3128 goto free_fence_ptr;
3132 hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
3134 goya_qman0_set_security(hdev, false);
3139 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3140 u32 timeout, u64 *result)
3142 struct goya_device *goya = hdev->asic_specific;
3144 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3151 timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
3153 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
3157 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3159 struct packet_msg_prot *fence_pkt;
3160 dma_addr_t pkt_dma_addr;
3162 dma_addr_t fence_dma_addr;
3166 fence_val = GOYA_QMAN0_FENCE_VAL;
3168 fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
3171 "Failed to allocate memory for H/W queue %d testing\n",
3178 fence_pkt = hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_prot), GFP_KERNEL,
3182 "Failed to allocate packet for H/W queue %d testing\n",
3185 goto free_fence_ptr;
3188 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3189 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3190 (1 << GOYA_PKT_CTL_MB_SHIFT);
3191 fence_pkt->ctl = cpu_to_le32(tmp);
3192 fence_pkt->value = cpu_to_le32(fence_val);
3193 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
3195 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3196 sizeof(struct packet_msg_prot),
3200 "Failed to send fence packet to H/W queue %d\n",
3205 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
3206 1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
3208 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3210 if (rc == -ETIMEDOUT) {
3212 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3213 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3218 hl_asic_dma_pool_free(hdev, (void *) fence_pkt, pkt_dma_addr);
3220 hl_asic_dma_pool_free(hdev, (void *) fence_ptr, fence_dma_addr);
3224 int goya_test_cpu_queue(struct hl_device *hdev)
3226 struct goya_device *goya = hdev->asic_specific;
3229 * check capability here as send_cpu_message() won't update the result
3230 * value if no capability
3232 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
3235 return hl_fw_test_cpu_queue(hdev);
3238 int goya_test_queues(struct hl_device *hdev)
3240 int i, rc, ret_val = 0;
3242 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3243 rc = goya_test_queue(hdev, i);
3251 static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3252 gfp_t mem_flags, dma_addr_t *dma_handle)
3256 if (size > GOYA_DMA_POOL_BLK_SIZE)
3259 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3261 /* Shift to the device's base physical address of host memory */
3263 *dma_handle += HOST_PHYS_BASE;
3268 static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3269 dma_addr_t dma_addr)
3271 /* Cancel the device's base physical address of host memory */
3272 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
3274 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
3277 void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3278 dma_addr_t *dma_handle)
3282 vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
3283 *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
3284 VA_CPU_ACCESSIBLE_MEM_ADDR;
3289 void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3292 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
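/*
 * goya_get_dma_desc_list_size - Size needed for the patched LIN_DMA packets
 *
 * Walks the DMA-mapped scatter-gather table and counts how many descriptors
 * are needed, merging entries that are physically contiguous as long as the
 * combined size stays within DMA_MAX_TRANSFER_SIZE. For example, three
 * contiguous 4KB entries would collapse into one 12KB descriptor, assuming
 * that total is below the limit.
 */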
3295 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3297 struct scatterlist *sg, *sg_next_iter;
3298 u32 count, dma_desc_cnt;
3300 dma_addr_t addr, addr_next;
3304 for_each_sgtable_dma_sg(sgt, sg, count) {
3305 len = sg_dma_len(sg);
3306 addr = sg_dma_address(sg);
3311 while ((count + 1) < sgt->nents) {
3312 sg_next_iter = sg_next(sg);
3313 len_next = sg_dma_len(sg_next_iter);
3314 addr_next = sg_dma_address(sg_next_iter);
3319 if ((addr + len == addr_next) &&
3320 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3332 return dma_desc_cnt * sizeof(struct packet_lin_dma);
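/*
 * goya_pin_memory_before_cs - Pin a user buffer referenced by a LIN_DMA packet
 *
 * If the address range is already pinned for this job it is reused.
 * Otherwise the host memory is pinned, DMA-mapped and added to the job's
 * userptr list so it can be released when the job completes. The patched CB
 * size is then grown by the descriptor list size this buffer will need.
 */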
3335 static int goya_pin_memory_before_cs(struct hl_device *hdev,
3336 struct hl_cs_parser *parser,
3337 struct packet_lin_dma *user_dma_pkt,
3338 u64 addr, enum dma_data_direction dir)
3340 struct hl_userptr *userptr;
3343 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3344 parser->job_userptr_list, &userptr))
3345 goto already_pinned;
3347 userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
3351 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3356 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3358 rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir);
3360 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3364 userptr->dma_mapped = true;
3368 parser->patched_cb_size +=
3369 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3374 list_del(&userptr->job_node);
3375 hl_unpin_host_memory(hdev, userptr);
3381 static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3382 struct hl_cs_parser *parser,
3383 struct packet_lin_dma *user_dma_pkt)
3385 u64 device_memory_addr, addr;
3386 enum dma_data_direction dir;
3387 enum hl_goya_dma_direction user_dir;
3388 bool sram_addr = true;
3389 bool skip_host_mem_pin = false;
3394 ctl = le32_to_cpu(user_dma_pkt->ctl);
3396 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3397 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3399 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3400 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3403 case HL_DMA_HOST_TO_DRAM:
3404 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3405 dir = DMA_TO_DEVICE;
3407 addr = le64_to_cpu(user_dma_pkt->src_addr);
3408 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3410 skip_host_mem_pin = true;
3413 case HL_DMA_DRAM_TO_HOST:
3414 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3415 dir = DMA_FROM_DEVICE;
3417 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3418 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3421 case HL_DMA_HOST_TO_SRAM:
3422 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3423 dir = DMA_TO_DEVICE;
3424 addr = le64_to_cpu(user_dma_pkt->src_addr);
3425 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3427 skip_host_mem_pin = true;
3430 case HL_DMA_SRAM_TO_HOST:
3431 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3432 dir = DMA_FROM_DEVICE;
3433 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3434 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3437 dev_err(hdev->dev, "DMA direction %d is unsupported/undefined\n", user_dir);
3442 if (!hl_mem_area_inside_range(device_memory_addr,
3443 le32_to_cpu(user_dma_pkt->tsize),
3444 hdev->asic_prop.sram_user_base_address,
3445 hdev->asic_prop.sram_end_address)) {
3448 "SRAM address 0x%llx + 0x%x is invalid\n",
3450 user_dma_pkt->tsize);
3454 if (!hl_mem_area_inside_range(device_memory_addr,
3455 le32_to_cpu(user_dma_pkt->tsize),
3456 hdev->asic_prop.dram_user_base_address,
3457 hdev->asic_prop.dram_end_address)) {
3460 "DRAM address 0x%llx + 0x%x is invalid\n",
3462 user_dma_pkt->tsize);
3467 if (skip_host_mem_pin)
3468 parser->patched_cb_size += sizeof(*user_dma_pkt);
3470 if ((dir == DMA_TO_DEVICE) &&
3471 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3473 "Can't DMA from host on queue other then 1\n");
3477 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3484 static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3485 struct hl_cs_parser *parser,
3486 struct packet_lin_dma *user_dma_pkt)
3488 u64 sram_memory_addr, dram_memory_addr;
3489 enum hl_goya_dma_direction user_dir;
3492 ctl = le32_to_cpu(user_dma_pkt->ctl);
3493 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3494 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3496 if (user_dir == HL_DMA_DRAM_TO_SRAM) {
3497 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3498 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3499 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3501 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3502 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3503 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3506 if (!hl_mem_area_inside_range(sram_memory_addr,
3507 le32_to_cpu(user_dma_pkt->tsize),
3508 hdev->asic_prop.sram_user_base_address,
3509 hdev->asic_prop.sram_end_address)) {
3510 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3511 sram_memory_addr, user_dma_pkt->tsize);
3515 if (!hl_mem_area_inside_range(dram_memory_addr,
3516 le32_to_cpu(user_dma_pkt->tsize),
3517 hdev->asic_prop.dram_user_base_address,
3518 hdev->asic_prop.dram_end_address)) {
3519 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3520 dram_memory_addr, user_dma_pkt->tsize);
3524 parser->patched_cb_size += sizeof(*user_dma_pkt);
3529 static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3530 struct hl_cs_parser *parser,
3531 struct packet_lin_dma *user_dma_pkt)
3533 enum hl_goya_dma_direction user_dir;
3537 dev_dbg(hdev->dev, "DMA packet details:\n");
3538 dev_dbg(hdev->dev, "source == 0x%llx\n",
3539 le64_to_cpu(user_dma_pkt->src_addr));
3540 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3541 le64_to_cpu(user_dma_pkt->dst_addr));
3542 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3544 ctl = le32_to_cpu(user_dma_pkt->ctl);
3545 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3546 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3549 * Special handling for DMA with size 0. The H/W has a bug where
3550 * this can cause the QMAN DMA to get stuck, so block it here.
3552 if (user_dma_pkt->tsize == 0) {
3554 "Got DMA with size 0, might reset the device\n");
3558 if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM))
3559 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3561 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3566 static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3567 struct hl_cs_parser *parser,
3568 struct packet_lin_dma *user_dma_pkt)
3570 dev_dbg(hdev->dev, "DMA packet details:\n");
3571 dev_dbg(hdev->dev, "source == 0x%llx\n",
3572 le64_to_cpu(user_dma_pkt->src_addr));
3573 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3574 le64_to_cpu(user_dma_pkt->dst_addr));
3575 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3579 * We can't allow user to read from Host using QMANs other than 1.
3580 * PMMU and HPMMU addresses are equal, check only one of them.
3582 if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
3583 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3584 le32_to_cpu(user_dma_pkt->tsize),
3585 hdev->asic_prop.pmmu.start_addr,
3586 hdev->asic_prop.pmmu.end_addr)) {
3588 "Can't DMA from host on queue other then 1\n");
3592 if (user_dma_pkt->tsize == 0) {
3594 "Got DMA with size 0, might reset the device\n");
3598 parser->patched_cb_size += sizeof(*user_dma_pkt);
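/*
 * goya_validate_wreg32 - Allow only safe user WREG32 packets
 *
 * The register offset must be the DMA channel 0 WR_COMP address. Without
 * MMU, the written value must also fall inside the sync objects range.
 */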
3603 static int goya_validate_wreg32(struct hl_device *hdev,
3604 struct hl_cs_parser *parser,
3605 struct packet_wreg32 *wreg_pkt)
3607 struct goya_device *goya = hdev->asic_specific;
3608 u32 sob_start_addr, sob_end_addr;
3611 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3612 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3614 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3615 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3616 dev_dbg(hdev->dev, "value == 0x%x\n",
3617 le32_to_cpu(wreg_pkt->value));
3619 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3620 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3626 * With MMU, DMA channels are not secured, so it doesn't matter where
3627 * the WR COMP will be written to, because the write will go out with the
3628 * non-secured property
3630 if (goya->hw_cap_initialized & HW_CAP_MMU)
3633 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3634 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3636 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3637 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
3639 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3647 static int goya_validate_cb(struct hl_device *hdev,
3648 struct hl_cs_parser *parser, bool is_mmu)
3650 u32 cb_parsed_length = 0;
3653 parser->patched_cb_size = 0;
3655 /* user_cb_size is more than 0 so the loop will always be executed */
3656 while (cb_parsed_length < parser->user_cb_size) {
3657 enum packet_id pkt_id;
3659 struct goya_packet *user_pkt;
3661 user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
3663 pkt_id = (enum packet_id) (
3664 (le64_to_cpu(user_pkt->header) &
3665 PACKET_HEADER_PACKET_ID_MASK) >>
3666 PACKET_HEADER_PACKET_ID_SHIFT);
3668 if (!validate_packet_id(pkt_id)) {
3669 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
3674 pkt_size = goya_packet_sizes[pkt_id];
3675 cb_parsed_length += pkt_size;
3676 if (cb_parsed_length > parser->user_cb_size) {
3678 "packet 0x%x is out of CB boundary\n", pkt_id);
3684 case PACKET_WREG_32:
3686 * Although it is validated after copy in patch_cb(),
3687 * need to validate here as well because patch_cb() is
3688 * not called in MMU path while this function is called
3690 rc = goya_validate_wreg32(hdev,
3691 parser, (struct packet_wreg32 *) user_pkt);
3692 parser->patched_cb_size += pkt_size;
3695 case PACKET_WREG_BULK:
3697 "User not allowed to use WREG_BULK\n");
3701 case PACKET_MSG_PROT:
3703 "User not allowed to use MSG_PROT\n");
3708 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3713 dev_err(hdev->dev, "User not allowed to use STOP\n");
3717 case PACKET_LIN_DMA:
3719 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3720 (struct packet_lin_dma *) user_pkt);
3722 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3723 (struct packet_lin_dma *) user_pkt);
3726 case PACKET_MSG_LONG:
3727 case PACKET_MSG_SHORT:
3730 parser->patched_cb_size += pkt_size;
3734 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3745 * The new CB should have space at the end for two MSG_PROT packets:
3746 * 1. A packet that will act as a completion packet
3747 * 2. A packet that will generate MSI-X interrupt
3749 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
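/*
 * goya_patch_dma_packet - Expand one user LIN_DMA packet into per-SG packets
 *
 * Host-side transfers are rewritten so that each (merged) scatter-gather
 * entry gets its own LIN_DMA packet carrying the real DMA address. EB is
 * kept only on the first generated packet, rdcomp/wrcomp are cleared on all
 * of them and the user's original rdcomp/wrcomp bits are restored on the
 * last packet so completion behaves as the user asked.
 */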
3754 static int goya_patch_dma_packet(struct hl_device *hdev,
3755 struct hl_cs_parser *parser,
3756 struct packet_lin_dma *user_dma_pkt,
3757 struct packet_lin_dma *new_dma_pkt,
3758 u32 *new_dma_pkt_size)
3760 struct hl_userptr *userptr;
3761 struct scatterlist *sg, *sg_next_iter;
3762 u32 count, dma_desc_cnt;
3764 dma_addr_t dma_addr, dma_addr_next;
3765 enum hl_goya_dma_direction user_dir;
3766 u64 device_memory_addr, addr;
3767 enum dma_data_direction dir;
3768 struct sg_table *sgt;
3769 bool skip_host_mem_pin = false;
3771 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
3773 ctl = le32_to_cpu(user_dma_pkt->ctl);
3775 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3776 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3778 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3779 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3781 if ((user_dir == HL_DMA_DRAM_TO_SRAM) || (user_dir == HL_DMA_SRAM_TO_DRAM) ||
3782 (user_dma_pkt->tsize == 0)) {
3783 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3784 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3788 if ((user_dir == HL_DMA_HOST_TO_DRAM) || (user_dir == HL_DMA_HOST_TO_SRAM)) {
3789 addr = le64_to_cpu(user_dma_pkt->src_addr);
3790 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3791 dir = DMA_TO_DEVICE;
3793 skip_host_mem_pin = true;
3795 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3796 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3797 dir = DMA_FROM_DEVICE;
3800 if ((!skip_host_mem_pin) &&
3801 (hl_userptr_is_pinned(hdev, addr,
3802 le32_to_cpu(user_dma_pkt->tsize),
3803 parser->job_userptr_list, &userptr) == false)) {
3804 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3805 addr, user_dma_pkt->tsize);
3809 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3810 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3811 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3815 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
3817 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
3822 for_each_sgtable_dma_sg(sgt, sg, count) {
3823 len = sg_dma_len(sg);
3824 dma_addr = sg_dma_address(sg);
3829 while ((count + 1) < sgt->nents) {
3830 sg_next_iter = sg_next(sg);
3831 len_next = sg_dma_len(sg_next_iter);
3832 dma_addr_next = sg_dma_address(sg_next_iter);
3837 if ((dma_addr + len == dma_addr_next) &&
3838 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3847 ctl = le32_to_cpu(user_dma_pkt->ctl);
3848 if (likely(dma_desc_cnt))
3849 ctl &= ~GOYA_PKT_CTL_EB_MASK;
3850 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3851 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3852 new_dma_pkt->ctl = cpu_to_le32(ctl);
3853 new_dma_pkt->tsize = cpu_to_le32((u32) len);
3855 if (dir == DMA_TO_DEVICE) {
3856 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3857 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
3859 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3860 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
3864 device_memory_addr += len;
3869 if (!dma_desc_cnt) {
3871 "Error of 0 SG entries when patching DMA packet\n");
3875 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
3877 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
3879 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
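/*
 * goya_patch_cb - Copy the user CB into the kernel-owned patched CB
 *
 * Packets are copied one by one: LIN_DMA packets are expanded, WREG32
 * packets are copied and re-validated, and forbidden packet types
 * (WREG_BULK, MSG_PROT, CP_DMA, STOP) fail the parse.
 */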
3884 static int goya_patch_cb(struct hl_device *hdev,
3885 struct hl_cs_parser *parser)
3887 u32 cb_parsed_length = 0;
3888 u32 cb_patched_cur_length = 0;
3891 /* user_cb_size is more than 0 so the loop will always be executed */
3892 while (cb_parsed_length < parser->user_cb_size) {
3893 enum packet_id pkt_id;
3895 u32 new_pkt_size = 0;
3896 struct goya_packet *user_pkt, *kernel_pkt;
3898 user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
3899 kernel_pkt = parser->patched_cb->kernel_address +
3900 cb_patched_cur_length;
3902 pkt_id = (enum packet_id) (
3903 (le64_to_cpu(user_pkt->header) &
3904 PACKET_HEADER_PACKET_ID_MASK) >>
3905 PACKET_HEADER_PACKET_ID_SHIFT);
3907 if (!validate_packet_id(pkt_id)) {
3908 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
3913 pkt_size = goya_packet_sizes[pkt_id];
3914 cb_parsed_length += pkt_size;
3915 if (cb_parsed_length > parser->user_cb_size) {
3917 "packet 0x%x is out of CB boundary\n", pkt_id);
3923 case PACKET_LIN_DMA:
3924 rc = goya_patch_dma_packet(hdev, parser,
3925 (struct packet_lin_dma *) user_pkt,
3926 (struct packet_lin_dma *) kernel_pkt,
3928 cb_patched_cur_length += new_pkt_size;
3931 case PACKET_WREG_32:
3932 memcpy(kernel_pkt, user_pkt, pkt_size);
3933 cb_patched_cur_length += pkt_size;
3934 rc = goya_validate_wreg32(hdev, parser,
3935 (struct packet_wreg32 *) kernel_pkt);
3938 case PACKET_WREG_BULK:
3940 "User not allowed to use WREG_BULK\n");
3944 case PACKET_MSG_PROT:
3946 "User not allowed to use MSG_PROT\n");
3951 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3956 dev_err(hdev->dev, "User not allowed to use STOP\n");
3960 case PACKET_MSG_LONG:
3961 case PACKET_MSG_SHORT:
3964 memcpy(kernel_pkt, user_pkt, pkt_size);
3965 cb_patched_cur_length += pkt_size;
3969 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3982 static int goya_parse_cb_mmu(struct hl_device *hdev,
3983 struct hl_cs_parser *parser)
3986 u32 patched_cb_size;
3987 struct hl_cb *user_cb;
3991 * The new CB should have space at the end for two MSG_PROT pkt:
3992 * 1. A packet that will act as a completion packet
3993 * 2. A packet that will generate MSI-X interrupt
3995 parser->patched_cb_size = parser->user_cb_size +
3996 sizeof(struct packet_msg_prot) * 2;
3998 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
3999 parser->patched_cb_size, false, false,
4004 "Failed to allocate patched CB for DMA CS %d\n",
4009 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
4010 /* hl_cb_get should never fail here */
4011 if (!parser->patched_cb) {
4012 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
4018 * The check that parser->user_cb_size <= parser->user_cb->size was done
4019 * in validate_queue_index().
4021 memcpy(parser->patched_cb->kernel_address,
4022 parser->user_cb->kernel_address,
4023 parser->user_cb_size);
4025 patched_cb_size = parser->patched_cb_size;
4027 /* validate patched CB instead of user CB */
4028 user_cb = parser->user_cb;
4029 parser->user_cb = parser->patched_cb;
4030 rc = goya_validate_cb(hdev, parser, true);
4031 parser->user_cb = user_cb;
4034 hl_cb_put(parser->patched_cb);
4038 if (patched_cb_size != parser->patched_cb_size) {
4039 dev_err(hdev->dev, "user CB size mismatch\n");
4040 hl_cb_put(parser->patched_cb);
4047 * Always call cb destroy here because we still have 1 reference
4048 * to it by calling cb_get earlier. After the job will be completed,
4049 * cb_put will release it, but here we want to remove it from the memory manager
4052 hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
4057 static int goya_parse_cb_no_mmu(struct hl_device *hdev,
4058 struct hl_cs_parser *parser)
4063 rc = goya_validate_cb(hdev, parser, false);
4068 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx,
4069 parser->patched_cb_size, false, false,
4073 "Failed to allocate patched CB for DMA CS %d\n", rc);
4077 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle);
4078 /* hl_cb_get should never fail here */
4079 if (!parser->patched_cb) {
4080 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle);
4085 rc = goya_patch_cb(hdev, parser);
4088 hl_cb_put(parser->patched_cb);
4092 * Always call cb destroy here because we still have 1 reference
4093 * to it by calling cb_get earlier. After the job will be completed,
4094 * cb_put will release it, but here we want to remove it from the memory manager
4097 hl_cb_destroy(&hdev->kernel_mem_mgr, handle);
4101 hl_userptr_delete_list(hdev, parser->job_userptr_list);
4105 static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
4106 struct hl_cs_parser *parser)
4108 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4109 struct goya_device *goya = hdev->asic_specific;
4111 if (goya->hw_cap_initialized & HW_CAP_MMU)
4114 /* For internal queue jobs, just check if CB address is valid */
4115 if (hl_mem_area_inside_range(
4116 (u64) (uintptr_t) parser->user_cb,
4117 parser->user_cb_size,
4118 asic_prop->sram_user_base_address,
4119 asic_prop->sram_end_address))
4122 if (hl_mem_area_inside_range(
4123 (u64) (uintptr_t) parser->user_cb,
4124 parser->user_cb_size,
4125 asic_prop->dram_user_base_address,
4126 asic_prop->dram_end_address))
4130 "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
4131 parser->user_cb, parser->user_cb_size);
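/*
 * goya_cs_parser - Entry point for command submission parsing
 *
 * Jobs on internal queues only get an address range check, while external
 * queue jobs go through the MMU or non-MMU CB parsing flow, depending on
 * whether the MMU has been initialized.
 */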
4136 int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4138 struct goya_device *goya = hdev->asic_specific;
4140 if (parser->queue_type == QUEUE_TYPE_INT)
4141 return goya_parse_cb_no_ext_queue(hdev, parser);
4143 if (goya->hw_cap_initialized & HW_CAP_MMU)
4144 return goya_parse_cb_mmu(hdev, parser);
4146 return goya_parse_cb_no_mmu(hdev, parser);
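/*
 * goya_add_end_of_cb_packets - Append the two trailing MSG_PROT packets
 *
 * The first packet writes cq_val to the completion queue address and the
 * second writes the MSI-X vector number to the PCIe MSI-X doorbell, which
 * raises the completion interrupt towards the host.
 */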
4149 void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
4150 u32 len, u32 original_len, u64 cq_addr, u32 cq_val,
4151 u32 msix_vec, bool eb)
4153 struct packet_msg_prot *cq_pkt;
4156 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
4158 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4159 (1 << GOYA_PKT_CTL_EB_SHIFT) |
4160 (1 << GOYA_PKT_CTL_MB_SHIFT);
4161 cq_pkt->ctl = cpu_to_le32(tmp);
4162 cq_pkt->value = cpu_to_le32(cq_val);
4163 cq_pkt->addr = cpu_to_le64(cq_addr);
4167 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4168 (1 << GOYA_PKT_CTL_MB_SHIFT);
4169 cq_pkt->ctl = cpu_to_le32(tmp);
4170 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
4171 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
4174 void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4176 WREG32(mmCPU_EQ_CI, val);
4179 void goya_restore_phase_topology(struct hl_device *hdev)
4184 static void goya_clear_sm_regs(struct hl_device *hdev)
4186 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4188 num_of_sob_in_longs =
4189 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
4191 num_of_mon_in_longs =
4192 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
4194 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
4195 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
4197 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
4198 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
4200 /* Flush all WREG to prevent race */
4201 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4204 static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr)
4206 dev_err(hdev->dev, "Reading via DMA is not implemented yet\n");
4210 static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4212 struct goya_device *goya = hdev->asic_specific;
4214 if (hdev->reset_info.hard_reset_pending)
4217 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4218 (addr - goya->ddr_bar_cur_addr));
4221 static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4223 struct goya_device *goya = hdev->asic_specific;
4225 if (hdev->reset_info.hard_reset_pending)
4228 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4229 (addr - goya->ddr_bar_cur_addr));
4232 static const char *_goya_get_event_desc(u16 event_type)
4234 switch (event_type) {
4235 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4237 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4238 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4239 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4240 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4241 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4242 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4243 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4244 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4246 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4248 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4249 return "MME_ecc_ext";
4250 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4252 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4254 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4256 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4257 return "CPU_if_ecc";
4258 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4260 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4261 return "PSOC_coresight";
4262 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4264 case GOYA_ASYNC_EVENT_ID_GIC500:
4266 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4268 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4270 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4271 return "L2_ram_ecc";
4272 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4273 return "PSOC_gpio_05_sw_reset";
4274 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4275 return "PSOC_gpio_10_vrhot_icrit";
4276 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4278 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4279 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4280 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4281 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4282 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4283 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4284 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4285 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4287 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4289 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4291 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4292 return "CPU_axi_splitter";
4293 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4294 return "PSOC_axi_dec";
4295 case GOYA_ASYNC_EVENT_ID_PSOC:
4297 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4298 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4299 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4300 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4301 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4302 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4303 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4304 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4305 return "TPC%d_krn_err";
4306 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4308 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4310 case GOYA_ASYNC_EVENT_ID_MME_QM:
4312 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4314 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4316 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4318 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4319 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4320 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4321 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4322 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4323 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4324 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4325 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4326 return "TPC%d_bmon_spmu";
4327 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4328 return "DMA_bm_ch%d";
4329 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4330 return "POWER_ENV_S";
4331 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4332 return "POWER_ENV_E";
4333 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4334 return "THERMAL_ENV_S";
4335 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4336 return "THERMAL_ENV_E";
4337 case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
4338 return "QUEUE_OUT_OF_SYNC";
4344 static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
4348 switch (event_type) {
4349 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4350 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4351 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4352 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4353 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4354 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4355 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4356 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4357 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
4358 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4360 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4361 index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
4362 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4364 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4365 index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
4366 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4368 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4369 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4370 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4371 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4372 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4373 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4374 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4375 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4376 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4377 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4379 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4380 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4381 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4382 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4383 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4384 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4385 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4386 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4387 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4388 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4390 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4391 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4392 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4394 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4395 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4396 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4398 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4399 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4400 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4402 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4403 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4404 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4406 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4407 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4408 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4409 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4410 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4411 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4412 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4413 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4414 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
4415 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4417 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4418 index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
4419 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4421 case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
4422 snprintf(desc, size, _goya_get_event_desc(event_type));
4425 snprintf(desc, size, _goya_get_event_desc(event_type));
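/*
 * goya_print_razwi_info() checks the DMA macro RAZWI capture bits for
 * illegal LBW/HBW reads and writes; every valid bit is reported
 * (rate-limited) and then cleared so the next violation can be latched.
 */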
4430 static void goya_print_razwi_info(struct hl_device *hdev)
4432 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4433 dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
4434 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4437 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4438 dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
4439 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4442 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4443 dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
4444 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4447 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4448 dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
4449 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
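/*
 * goya_print_mmu_error_info() reports MMU page faults: when the capture
 * entry is valid, the faulting VA is rebuilt from bits 49:32 held in
 * MMU_PAGE_ERROR_CAPTURE and the low 32 bits held in
 * MMU_PAGE_ERROR_CAPTURE_VA, and the capture register is cleared afterwards.
 */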
4453 static void goya_print_mmu_error_info(struct hl_device *hdev)
4455 struct goya_device *goya = hdev->asic_specific;
4459 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4462 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4463 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4464 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4466 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4468 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
4471 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
4475 static void goya_print_out_of_sync_info(struct hl_device *hdev,
4476 struct cpucp_pkt_sync_err *sync_err)
4478 struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
4480 dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
4481 le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
4484 static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
4489 goya_get_event_desc(event_type, desc, sizeof(desc));
4490 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4494 goya_print_razwi_info(hdev);
4495 goya_print_mmu_error_info(hdev);
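/*
 * goya_unmask_irq_arr() asks CPU-CP to re-enable a whole array of event
 * IRQs in one message: the packet carries the number of entries plus the
 * IDs converted to little endian, its size is rounded up to 8 bytes for
 * the CPU-CP copy, and it must fit in the u16 size field of the CPU
 * message.
 */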
4499 static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4500 size_t irq_arr_size)
4502 struct cpucp_unmask_irq_arr_packet *pkt;
4503 size_t total_pkt_size;
4506 int irq_num_entries, irq_arr_index;
4507 __le32 *goya_irq_arr;
4509 total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
4512 /* data should be aligned to 8 bytes so that CPU-CP can copy it */
4513 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4515 /* total_pkt_size is cast to u16 later on */
4516 if (total_pkt_size > USHRT_MAX) {
4517 dev_err(hdev->dev, "too many elements in IRQ array\n");
4521 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4525 irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
4526 pkt->length = cpu_to_le32(irq_num_entries);
4528 /* We must perform any necessary endianness conversion on the irq
4529 * array being passed to the goya hardware
4531 for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
4532 irq_arr_index < irq_num_entries ; irq_arr_index++)
4533 goya_irq_arr[irq_arr_index] =
4534 cpu_to_le32(irq_arr[irq_arr_index]);
4536 pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4537 CPUCP_PKT_CTL_OPCODE_SHIFT);
4539 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4540 total_pkt_size, 0, &result);
4543 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4550 static int goya_compute_reset_late_init(struct hl_device *hdev)
4553 * Unmask all IRQs since some could have been received
4554 * during the soft reset
4556 return goya_unmask_irq_arr(hdev, goya_all_events,
4557 sizeof(goya_all_events));
4560 static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4562 struct cpucp_packet pkt;
4566 memset(&pkt, 0, sizeof(pkt));
4568 pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
4569 CPUCP_PKT_CTL_OPCODE_SHIFT);
4570 pkt.value = cpu_to_le64(event_type);
4572 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4576 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
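/*
 * goya_print_clk_change_info() tracks clock throttling under
 * clk_throttling.lock: a *_ENV_S event sets the POWER/THERMAL reason bit
 * and stamps the start time, and the matching *_ENV_E event clears the bit
 * and stamps the end time.
 */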
4581 static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
4583 ktime_t zero_time = ktime_set(0, 0);
4585 mutex_lock(&hdev->clk_throttling.lock);
4587 switch (event_type) {
4588 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4589 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
4590 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
4591 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
4592 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
4593 dev_info_ratelimited(hdev->dev,
4594 "Clock throttling due to power consumption\n");
4597 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4598 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
4599 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
4600 dev_info_ratelimited(hdev->dev,
4601 "Power envelop is safe, back to optimal clock\n");
4604 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4605 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
4606 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
4607 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
4608 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
4609 dev_info_ratelimited(hdev->dev,
4610 "Clock throttling due to overheating\n");
4613 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4614 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
4615 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
4616 dev_info_ratelimited(hdev->dev,
4617 "Thermal envelop is safe, back to optimal clock\n");
4621 dev_err(hdev->dev, "Received invalid clock change event %d\n",
4626 mutex_unlock(&hdev->clk_throttling.lock);
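/*
 * goya_handle_eqe() is the event-queue handler: it extracts the event type
 * from the EQ entry header, updates the per-type statistics and acts per
 * event class - fatal errors (ECC, AXI, GIC500, ...) trigger a hard reset
 * when hard_reset_on_fw_events is set, recoverable engine errors are
 * printed and their IRQ is unmasked in CPU-CP, and the clock-change events
 * only update the throttling bookkeeping.
 */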
4629 void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4631 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4632 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4633 >> EQ_CTL_EVENT_TYPE_SHIFT);
4634 struct goya_device *goya = hdev->asic_specific;
4636 if (event_type >= GOYA_ASYNC_EVENT_ID_SIZE) {
4637 dev_err(hdev->dev, "Event type %u exceeds maximum of %u",
4638 event_type, GOYA_ASYNC_EVENT_ID_SIZE - 1);
4642 goya->events_stat[event_type]++;
4643 goya->events_stat_aggregate[event_type]++;
4645 switch (event_type) {
4646 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4647 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4648 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4649 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4650 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4651 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4652 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4653 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4654 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4655 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4656 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4657 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4658 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4659 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4660 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4661 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4662 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4663 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4664 case GOYA_ASYNC_EVENT_ID_GIC500:
4665 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4666 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4667 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4668 goya_print_irq_info(hdev, event_type, false);
4669 if (hdev->hard_reset_on_fw_events)
4670 hl_device_reset(hdev, (HL_DRV_RESET_HARD |
4671 HL_DRV_RESET_FW_FATAL_ERR));
4674 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4675 goya_print_irq_info(hdev, event_type, false);
4676 if (hdev->hard_reset_on_fw_events)
4677 hl_device_reset(hdev, HL_DRV_RESET_HARD);
4680 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4681 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4682 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4683 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4684 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4685 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4686 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4687 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4688 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4689 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4690 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4691 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4692 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4693 case GOYA_ASYNC_EVENT_ID_PSOC:
4694 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4695 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4696 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4697 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4698 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4699 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4700 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4701 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4702 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4703 case GOYA_ASYNC_EVENT_ID_MME_QM:
4704 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4705 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4706 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4707 goya_print_irq_info(hdev, event_type, true);
4708 goya_unmask_irq(hdev, event_type);
4711 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4712 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4713 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4714 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4715 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4716 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4717 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4718 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4719 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4720 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4721 goya_print_irq_info(hdev, event_type, false);
4722 goya_unmask_irq(hdev, event_type);
4725 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4726 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4727 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4728 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4729 goya_print_clk_change_info(hdev, event_type);
4730 goya_unmask_irq(hdev, event_type);
4733 case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
4734 goya_print_irq_info(hdev, event_type, false);
4735 goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
4736 if (hdev->hard_reset_on_fw_events)
4737 hl_device_reset(hdev, HL_DRV_RESET_HARD);
4739 hl_fw_unmask_irq(hdev, event_type);
4743 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4749 void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
4751 struct goya_device *goya = hdev->asic_specific;
4754 *size = (u32) sizeof(goya->events_stat_aggregate);
4755 return goya->events_stat_aggregate;
4758 *size = (u32) sizeof(goya->events_stat);
4759 return goya->events_stat;
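/*
 * goya_memset_device_memory() fills a device memory range by building a
 * kernel CB with one LIN_DMA "memset" packet per 2GB chunk of the range
 * (plus room for a trailing MSG_PROT packet) and sending it as a job on
 * DMA queue 0 through QMAN0.
 */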
4762 static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
4763 u64 val, bool is_dram)
4765 struct packet_lin_dma *lin_dma_pkt;
4766 struct hl_cs_job *job;
4769 int rc, lin_dma_pkts_cnt;
4771 lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
4772 cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
4773 sizeof(struct packet_msg_prot);
4774 cb = hl_cb_kernel_create(hdev, cb_size, false);
4778 lin_dma_pkt = cb->kernel_address;
4781 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4783 ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4784 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4785 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4786 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4787 (1 << GOYA_PKT_CTL_MB_SHIFT));
4788 ctl |= (is_dram ? HL_DMA_HOST_TO_DRAM : HL_DMA_HOST_TO_SRAM) <<
4789 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4790 lin_dma_pkt->ctl = cpu_to_le32(ctl);
4792 lin_dma_pkt->src_addr = cpu_to_le64(val);
4793 lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4794 if (lin_dma_pkts_cnt > 1)
4795 lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
4797 lin_dma_pkt->tsize = cpu_to_le32(size);
4802 } while (--lin_dma_pkts_cnt);
4804 job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
4806 dev_err(hdev->dev, "Failed to allocate a new job\n");
4813 atomic_inc(&job->user_cb->cs_cnt);
4814 job->user_cb_size = cb_size;
4815 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4816 job->patched_cb = job->user_cb;
4817 job->job_cb_size = job->user_cb_size;
4819 hl_debugfs_add_job(hdev, job);
4821 rc = goya_send_job_on_qman0(hdev, job);
4823 hl_debugfs_remove_job(hdev, job);
4825 atomic_dec(&cb->cs_cnt);
4829 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
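/*
 * goya_context_switch() brings the device back to a known state for a new
 * user context: the SRAM is scrubbed with a 0x77.. pattern (a much smaller
 * range on PLDM), the per-DMA-channel write-completion address is restored
 * to the driver-reserved SOB objects, and the sync manager registers are
 * cleared.
 */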
4834 int goya_context_switch(struct hl_device *hdev, u32 asid)
4836 struct asic_fixed_properties *prop = &hdev->asic_prop;
4837 u64 addr = prop->sram_base_address, sob_addr;
4838 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4839 u64 val = 0x7777777777777777ull;
4841 u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
4842 mmDMA_CH_0_WR_COMP_ADDR_LO;
4844 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4846 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4850 /* we need to reset registers that the user is allowed to change */
4851 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
4852 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));
4854 for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
4855 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
4857 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
4858 lower_32_bits(sob_addr));
4861 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
4863 goya_clear_sm_regs(hdev);
4868 static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4870 struct asic_fixed_properties *prop = &hdev->asic_prop;
4871 struct goya_device *goya = hdev->asic_specific;
4872 u64 addr = prop->mmu_pgt_addr;
4873 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4876 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4879 return goya_memset_device_memory(hdev, addr, size, 0, true);
4882 static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4884 struct goya_device *goya = hdev->asic_specific;
4885 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4886 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4887 u64 val = 0x9999999999999999ull;
4889 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4892 return goya_memset_device_memory(hdev, addr, size, val, true);
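/*
 * goya_mmu_add_mappings_for_device_cpu() creates the kernel-context
 * mappings the on-device CPU needs once the MMU is enabled: the F/W image
 * region in DRAM is mapped 1:1 with 2MB pages, and the CPU-accessible DMA
 * region is mapped at VA_CPU_ACCESSIBLE_MEM_ADDR either as a single 2MB
 * page (when its DMA address is 2MB aligned) or as 4KB pages. The error
 * paths below unwind whatever was already mapped.
 */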
4895 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
4897 struct asic_fixed_properties *prop = &hdev->asic_prop;
4898 struct goya_device *goya = hdev->asic_specific;
4902 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4905 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
4906 rc = hl_mmu_map_page(hdev->kernel_ctx,
4907 prop->dram_base_address + off,
4908 prop->dram_base_address + off, PAGE_SIZE_2MB,
4909 (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
4911 dev_err(hdev->dev, "Map failed for address 0x%llx\n",
4912 prop->dram_base_address + off);
4917 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4918 rc = hl_mmu_map_page(hdev->kernel_ctx,
4919 VA_CPU_ACCESSIBLE_MEM_ADDR,
4920 hdev->cpu_accessible_dma_address,
4921 PAGE_SIZE_2MB, true);
4925 "Map failed for CPU accessible memory\n");
4926 off -= PAGE_SIZE_2MB;
4930 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
4931 rc = hl_mmu_map_page(hdev->kernel_ctx,
4932 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4933 hdev->cpu_accessible_dma_address + cpu_off,
4934 PAGE_SIZE_4KB, true);
4937 "Map failed for CPU accessible memory\n");
4938 cpu_off -= PAGE_SIZE_4KB;
4944 goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
4945 goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
4946 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
4947 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);
4949 /* Make sure configuration is flushed to device */
4950 RREG32(mmCPU_IF_AWUSER_OVR_EN);
4952 goya->device_cpu_mmu_mappings_done = true;
4957 for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
4958 if (hl_mmu_unmap_page(hdev->kernel_ctx,
4959 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4960 PAGE_SIZE_4KB, true))
4961 dev_warn_ratelimited(hdev->dev,
4962 "failed to unmap address 0x%llx\n",
4963 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
4965 for (; off >= 0 ; off -= PAGE_SIZE_2MB)
4966 if (hl_mmu_unmap_page(hdev->kernel_ctx,
4967 prop->dram_base_address + off, PAGE_SIZE_2MB,
4969 dev_warn_ratelimited(hdev->dev,
4970 "failed to unmap address 0x%llx\n",
4971 prop->dram_base_address + off);
4976 void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
4978 struct asic_fixed_properties *prop = &hdev->asic_prop;
4979 struct goya_device *goya = hdev->asic_specific;
4982 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4985 if (!goya->device_cpu_mmu_mappings_done)
4988 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
4989 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
4991 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4992 if (hl_mmu_unmap_page(hdev->kernel_ctx,
4993 VA_CPU_ACCESSIBLE_MEM_ADDR,
4994 PAGE_SIZE_2MB, true))
4996 "Failed to unmap CPU accessible memory\n");
4998 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
4999 if (hl_mmu_unmap_page(hdev->kernel_ctx,
5000 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
5002 (cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
5003 dev_warn_ratelimited(hdev->dev,
5004 "failed to unmap address 0x%llx\n",
5005 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
5008 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
5009 if (hl_mmu_unmap_page(hdev->kernel_ctx,
5010 prop->dram_base_address + off, PAGE_SIZE_2MB,
5011 (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
5012 dev_warn_ratelimited(hdev->dev,
5013 "Failed to unmap address 0x%llx\n",
5014 prop->dram_base_address + off);
5016 goya->device_cpu_mmu_mappings_done = false;
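/*
 * goya_mmu_prepare() binds an ASID to the engines by rewriting each of the
 * GOYA_MMU_REGS_NUM secure-properties registers with the new ASID; it is
 * called from goya_ctx_init() for every non-kernel context.
 */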
5019 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
5021 struct goya_device *goya = hdev->asic_specific;
5024 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
5027 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
5028 dev_crit(hdev->dev, "asid %u is too big\n", asid);
5032 /* zero the MMBP and ASID bits and then set the ASID */
5033 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
5034 goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
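/*
 * goya_mmu_invalidate_cache() performs a full MMU cache invalidation by
 * kicking STLB_INV_ALL_START and polling it until completion, with a
 * longer timeout on PLDM. Goya has no per-range invalidation, so the
 * _range variant below falls back to this full invalidation.
 */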
5037 static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
5040 struct goya_device *goya = hdev->asic_specific;
5041 u32 status, timeout_usec;
5044 if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
5045 hdev->reset_info.hard_reset_pending)
5048 /* no need for L1-only invalidation in Goya */
5053 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5055 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5057 /* L0 & L1 invalidation */
5058 WREG32(mmSTLB_INV_ALL_START, 1);
5060 rc = hl_poll_timeout(
5062 mmSTLB_INV_ALL_START,
5071 static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
5072 bool is_hard, u32 flags,
5073 u32 asid, u64 va, u64 size)
5075 /* Treat as invalidate all because there is no range invalidation in Goya */
5078 return hl_mmu_invalidate_cache(hdev, is_hard, flags);
5081 int goya_send_heartbeat(struct hl_device *hdev)
5083 struct goya_device *goya = hdev->asic_specific;
5085 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5088 return hl_fw_send_heartbeat(hdev);
5091 int goya_cpucp_info_get(struct hl_device *hdev)
5093 struct goya_device *goya = hdev->asic_specific;
5094 struct asic_fixed_properties *prop = &hdev->asic_prop;
5098 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5101 rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0,
5102 mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0,
5107 dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
5109 if ((!is_power_of_2(dram_size)) ||
5110 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5112 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
5114 dram_size = DRAM_PHYS_DEFAULT_SIZE;
5117 prop->dram_size = dram_size;
5118 prop->dram_end_address = prop->dram_base_address + dram_size;
5121 if (!strlen(prop->cpucp_info.card_name))
5122 strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
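/*
 * goya_is_device_idle() samples the QM/CMDQ/engine status registers of
 * every DMA channel, every TPC and the MME; an engine that is not idle
 * gets its bit set in the caller's mask, and when an engines_data buffer
 * is provided a row is added to the textual idle report.
 */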
5128 static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len,
5129 struct engines_data *e)
5131 const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
5132 const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
5133 unsigned long *mask = (unsigned long *)mask_arr;
5134 u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
5136 bool is_idle = true, is_eng_idle;
5141 hl_engine_data_sprintf(e, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
5142 "--- ------- ------------ -------------\n");
5144 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5146 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5147 qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
5148 dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
5149 is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
5150 IS_DMA_IDLE(dma_core_sts0);
5151 is_idle &= is_eng_idle;
5153 if (mask && !is_eng_idle)
5154 set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
5156 hl_engine_data_sprintf(e, dma_fmt, i, is_eng_idle ? "Y" : "N",
5157 qm_glbl_sts0, dma_core_sts0);
5161 hl_engine_data_sprintf(e,
5162 "\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
5163 "--- ------- ------------ -------------- ----------\n");
5165 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5167 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5168 qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
5169 cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
5170 tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
5171 is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
5172 IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
5173 IS_TPC_IDLE(tpc_cfg_sts);
5174 is_idle &= is_eng_idle;
5176 if (mask && !is_eng_idle)
5177 set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
5179 hl_engine_data_sprintf(e, fmt, i, is_eng_idle ? "Y" : "N",
5180 qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
5184 hl_engine_data_sprintf(e,
5185 "\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
5186 "--- ------- ------------ -------------- -----------\n");
5188 qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
5189 cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
5190 mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
5191 is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
5192 IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
5193 IS_MME_IDLE(mme_arch_sts);
5194 is_idle &= is_eng_idle;
5196 if (mask && !is_eng_idle)
5197 set_bit(GOYA_ENGINE_ID_MME_0, mask);
5199 hl_engine_data_sprintf(e, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
5200 cmdq_glbl_sts0, mme_arch_sts);
5201 hl_engine_data_sprintf(e, "\n");
5207 static void goya_hw_queues_lock(struct hl_device *hdev)
5208 __acquires(&goya->hw_queues_lock)
5210 struct goya_device *goya = hdev->asic_specific;
5212 spin_lock(&goya->hw_queues_lock);
5215 static void goya_hw_queues_unlock(struct hl_device *hdev)
5216 __releases(&goya->hw_queues_lock)
5218 struct goya_device *goya = hdev->asic_specific;
5220 spin_unlock(&goya->hw_queues_lock);
5223 static u32 goya_get_pci_id(struct hl_device *hdev)
5225 return hdev->pdev->device;
5228 static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5231 struct goya_device *goya = hdev->asic_specific;
5233 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5236 return hl_fw_get_eeprom_data(hdev, data, max_size);
5239 static void goya_cpu_init_scrambler_dram(struct hl_device *hdev)
5244 static int goya_ctx_init(struct hl_ctx *ctx)
5246 if (ctx->asid != HL_KERNEL_ASID_ID)
5247 goya_mmu_prepare(ctx->hdev, ctx->asid);
5252 static int goya_pre_schedule_cs(struct hl_cs *cs)
5257 u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
5262 static u32 goya_get_signal_cb_size(struct hl_device *hdev)
5267 static u32 goya_get_wait_cb_size(struct hl_device *hdev)
5272 static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
5278 static u32 goya_gen_wait_cb(struct hl_device *hdev,
5279 struct hl_gen_wait_properties *prop)
5284 static void goya_reset_sob(struct hl_device *hdev, void *data)
5289 static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
5294 u64 goya_get_device_time(struct hl_device *hdev)
5296 u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
5298 return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
5301 static int goya_collective_wait_init_cs(struct hl_cs *cs)
5306 static int goya_collective_wait_create_jobs(struct hl_device *hdev,
5307 struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
5308 u32 collective_engine_id, u32 encaps_signal_offset)
5313 static void goya_ctx_fini(struct hl_ctx *ctx)
5318 static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
5319 u32 *block_size, u32 *block_id)
5324 static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
5325 u32 block_id, u32 block_size)
5330 static void goya_enable_events_from_fw(struct hl_device *hdev)
5332 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
5333 GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
5336 static int goya_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask)
5341 static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
5344 case HL_GOYA_CPU_PLL: return CPU_PLL;
5345 case HL_GOYA_PCI_PLL: return PCI_PLL;
5346 case HL_GOYA_MME_PLL: return MME_PLL;
5347 case HL_GOYA_TPC_PLL: return TPC_PLL;
5348 case HL_GOYA_IC_PLL: return IC_PLL;
5349 case HL_GOYA_MC_PLL: return MC_PLL;
5350 case HL_GOYA_EMMC_PLL: return EMMC_PLL;
5351 default: return -EINVAL;
5355 static int goya_gen_sync_to_engine_map(struct hl_device *hdev,
5356 struct hl_sync_to_engine_map *map)
5358 /* Not implemented */
5362 static int goya_monitor_valid(struct hl_mon_state_dump *mon)
5364 /* Not implemented */
5368 static int goya_print_single_monitor(char **buf, size_t *size, size_t *offset,
5369 struct hl_device *hdev,
5370 struct hl_mon_state_dump *mon)
5372 /* Not implemented */
5377 static int goya_print_fences_single_engine(
5378 struct hl_device *hdev, u64 base_offset, u64 status_base_offset,
5379 enum hl_sync_engine_type engine_type, u32 engine_id, char **buf,
5380 size_t *size, size_t *offset)
5382 /* Not implemented */
5387 static struct hl_state_dump_specs_funcs goya_state_dump_funcs = {
5388 .monitor_valid = goya_monitor_valid,
5389 .print_single_monitor = goya_print_single_monitor,
5390 .gen_sync_to_engine_map = goya_gen_sync_to_engine_map,
5391 .print_fences_single_engine = goya_print_fences_single_engine,
5394 static void goya_state_dump_init(struct hl_device *hdev)
5396 /* Not implemented */
5397 hdev->state_dump_specs.props = goya_state_dump_specs_props;
5398 hdev->state_dump_specs.funcs = goya_state_dump_funcs;
5401 static u32 goya_get_sob_addr(struct hl_device *hdev, u32 sob_id)
5406 static u32 *goya_get_stream_master_qid_arr(void)
5411 static int goya_get_monitor_dump(struct hl_device *hdev, void *data)
5416 static void goya_check_if_razwi_happened(struct hl_device *hdev)
5420 static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
5425 static int goya_set_dram_properties(struct hl_device *hdev)
5430 static int goya_set_binning_masks(struct hl_device *hdev)
5435 static int goya_send_device_activity(struct hl_device *hdev, bool open)
5440 static const struct hl_asic_funcs goya_funcs = {
5441 .early_init = goya_early_init,
5442 .early_fini = goya_early_fini,
5443 .late_init = goya_late_init,
5444 .late_fini = goya_late_fini,
5445 .sw_init = goya_sw_init,
5446 .sw_fini = goya_sw_fini,
5447 .hw_init = goya_hw_init,
5448 .hw_fini = goya_hw_fini,
5449 .halt_engines = goya_halt_engines,
5450 .suspend = goya_suspend,
5451 .resume = goya_resume,
5453 .ring_doorbell = goya_ring_doorbell,
5454 .pqe_write = goya_pqe_write,
5455 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5456 .asic_dma_free_coherent = goya_dma_free_coherent,
5457 .scrub_device_mem = goya_scrub_device_mem,
5458 .scrub_device_dram = goya_scrub_device_dram,
5459 .get_int_queue_base = goya_get_int_queue_base,
5460 .test_queues = goya_test_queues,
5461 .asic_dma_pool_zalloc = goya_dma_pool_zalloc,
5462 .asic_dma_pool_free = goya_dma_pool_free,
5463 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5464 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
5465 .hl_dma_unmap_sgtable = hl_dma_unmap_sgtable,
5466 .cs_parser = goya_cs_parser,
5467 .asic_dma_map_sgtable = hl_dma_map_sgtable,
5468 .add_end_of_cb_packets = goya_add_end_of_cb_packets,
5469 .update_eq_ci = goya_update_eq_ci,
5470 .context_switch = goya_context_switch,
5471 .restore_phase_topology = goya_restore_phase_topology,
5472 .debugfs_read_dma = goya_debugfs_read_dma,
5473 .add_device_attr = goya_add_device_attr,
5474 .handle_eqe = goya_handle_eqe,
5475 .get_events_stat = goya_get_events_stat,
5476 .read_pte = goya_read_pte,
5477 .write_pte = goya_write_pte,
5478 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
5479 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
5480 .mmu_prefetch_cache_range = NULL,
5481 .send_heartbeat = goya_send_heartbeat,
5482 .debug_coresight = goya_debug_coresight,
5483 .is_device_idle = goya_is_device_idle,
5484 .compute_reset_late_init = goya_compute_reset_late_init,
5485 .hw_queues_lock = goya_hw_queues_lock,
5486 .hw_queues_unlock = goya_hw_queues_unlock,
5487 .get_pci_id = goya_get_pci_id,
5488 .get_eeprom_data = goya_get_eeprom_data,
5489 .get_monitor_dump = goya_get_monitor_dump,
5490 .send_cpu_message = goya_send_cpu_message,
5491 .pci_bars_map = goya_pci_bars_map,
5492 .init_iatu = goya_init_iatu,
5495 .halt_coresight = goya_halt_coresight,
5496 .ctx_init = goya_ctx_init,
5497 .ctx_fini = goya_ctx_fini,
5498 .pre_schedule_cs = goya_pre_schedule_cs,
5499 .get_queue_id_for_cq = goya_get_queue_id_for_cq,
5500 .load_firmware_to_device = goya_load_firmware_to_device,
5501 .load_boot_fit_to_device = goya_load_boot_fit_to_device,
5502 .get_signal_cb_size = goya_get_signal_cb_size,
5503 .get_wait_cb_size = goya_get_wait_cb_size,
5504 .gen_signal_cb = goya_gen_signal_cb,
5505 .gen_wait_cb = goya_gen_wait_cb,
5506 .reset_sob = goya_reset_sob,
5507 .reset_sob_group = goya_reset_sob_group,
5508 .get_device_time = goya_get_device_time,
5509 .pb_print_security_errors = NULL,
5510 .collective_wait_init_cs = goya_collective_wait_init_cs,
5511 .collective_wait_create_jobs = goya_collective_wait_create_jobs,
5512 .get_dec_base_addr = NULL,
5513 .scramble_addr = hl_mmu_scramble_addr,
5514 .descramble_addr = hl_mmu_descramble_addr,
5515 .ack_protection_bits_errors = goya_ack_protection_bits_errors,
5516 .get_hw_block_id = goya_get_hw_block_id,
5517 .hw_block_mmap = goya_block_mmap,
5518 .enable_events_from_fw = goya_enable_events_from_fw,
5519 .ack_mmu_errors = goya_ack_mmu_page_fault_or_access_error,
5520 .map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx,
5521 .init_firmware_preload_params = goya_init_firmware_preload_params,
5522 .init_firmware_loader = goya_init_firmware_loader,
5523 .init_cpu_scrambler_dram = goya_cpu_init_scrambler_dram,
5524 .state_dump_init = goya_state_dump_init,
5525 .get_sob_addr = &goya_get_sob_addr,
5526 .set_pci_memory_regions = goya_set_pci_memory_regions,
5527 .get_stream_master_qid_arr = goya_get_stream_master_qid_arr,
5528 .check_if_razwi_happened = goya_check_if_razwi_happened,
5529 .mmu_get_real_page_size = hl_mmu_get_real_page_size,
5530 .access_dev_mem = hl_access_dev_mem,
5531 .set_dram_bar_base = goya_set_ddr_bar_base,
5532 .send_device_activity = goya_send_device_activity,
5533 .set_dram_properties = goya_set_dram_properties,
5534 .set_binning_masks = goya_set_binning_masks,
5538 * goya_set_asic_funcs - set Goya function pointers
5540 * @hdev: pointer to hl_device structure
5543 void goya_set_asic_funcs(struct hl_device *hdev)
5545 hdev->asic_funcs = &goya_funcs;