// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v1_0.h"
#include "../include/goya/asic_reg/goya_masks.h"
#include "../include/goya/goya_reg_map.h"

#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/iommu.h>
#include <linux/seq_file.h>

/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (when MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP, DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC/MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, the driver parses the DMA CB:
 *     - checks DMA pointers
 *     - WREG and MSG_PROT are not allowed.
 *     - MSG_LONG/SHORT are allowed.
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used.
 *
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - PQ entry is in kernel address space and the driver doesn't map it.
 *     - CP writes to MSIX register and to kernel address space (completion
 *       queue).
 *
 * DMA is not secured but because CP is secured, the driver still needs to
 * parse the CB, but doesn't need to check the DMA addresses.
 *
 * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA
 * and the driver doesn't map memory in MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled
 * mode).
 *
 * DMA RR does NOT protect host because DMA is not secured.
 *
 */
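
/*
 * Illustrative sketch (not part of the driver): the CB-parsing rule above
 * boils down to a per-packet allow/deny decision. Assuming the packet id was
 * already extracted by the CS parser, a minimal form of the check could look
 * like the hypothetical helper below; the real parser elsewhere in this file
 * additionally validates DMA pointers and per-packet fields.
 */
static inline bool goya_user_pkt_allowed_sketch(enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:
	case PACKET_MSG_PROT:
		/* privileged packets, rejected in user CBs */
		return false;
	case PACKET_MSG_LONG:
	case PACKET_MSG_SHORT:
		/* plain payload writes, always allowed */
		return true;
	default:
		/* other packets require deeper, per-field validation */
		return true;
	}
}
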
#define GOYA_BOOT_FIT_FILE	"habanalabs/goya/goya-boot-fit.itb"
#define GOYA_LINUX_FW_FILE	"habanalabs/goya/goya-fit.itb"

#define GOYA_MMU_REGS_NUM		63

#define GOYA_DMA_POOL_BLK_SIZE		0x100		/* 256 bytes */

#define GOYA_RESET_TIMEOUT_MSEC		500		/* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC	20000		/* 20s */
#define GOYA_RESET_WAIT_MSEC		1		/* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC	100		/* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GOYA_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC	1000000		/* 1s */
#define GOYA_MSG_TO_CPU_TIMEOUT_USEC	4000000		/* 4s */

#define GOYA_QMAN0_FENCE_VAL		0xD169B243

#define GOYA_MAX_STRING_LEN		20

#define GOYA_CB_POOL_CB_CNT		512
#define GOYA_CB_POOL_CB_SIZE		0x20000		/* 128KB */

#define IS_QM_IDLE(engine, qm_glbl_sts0) \
	(((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
#define IS_DMA_QM_IDLE(qm_glbl_sts0)	IS_QM_IDLE(DMA, qm_glbl_sts0)
#define IS_TPC_QM_IDLE(qm_glbl_sts0)	IS_QM_IDLE(TPC, qm_glbl_sts0)
#define IS_MME_QM_IDLE(qm_glbl_sts0)	IS_QM_IDLE(MME, qm_glbl_sts0)

#define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
	(((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
			engine##_CMDQ_IDLE_MASK)
#define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
	IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
#define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
	IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)

#define IS_DMA_IDLE(dma_core_sts0) \
	!((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)

#define IS_TPC_IDLE(tpc_cfg_sts) \
	(((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)

#define IS_MME_IDLE(mme_arch_sts) \
	(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
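
/*
 * Illustrative usage (a sketch, not driver code): the IS_*_IDLE macros are
 * meant to be combined per engine, e.g. a DMA channel is considered idle
 * only when both its QMAN and its DMA core report idle. The hypothetical
 * helper below takes already-read status values.
 */
static inline bool goya_dma_ch_is_idle_sketch(u32 qm_glbl_sts0,
						u32 dma_core_sts0)
{
	/* both conditions must hold for the engine to be truly idle */
	return IS_DMA_QM_IDLE(qm_glbl_sts0) && IS_DMA_IDLE(dma_core_sts0);
}
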
/*
 * This enum is kept here for compatibility with old FW (in which each ASIC
 * has unique PLL numbering).
 */
enum goya_pll_index {
	GOYA_CPU_PLL = 0,
	GOYA_IC_PLL,
	GOYA_MC_PLL,
	GOYA_MME_PLL,
	GOYA_PCI_PLL,
	GOYA_EMMC_PLL,
	GOYA_TPC_PLL,
};

static enum pll_index goya_pll_map[PLL_MAX] = {
	[CPU_PLL] = GOYA_CPU_PLL,
	[IC_PLL] = GOYA_IC_PLL,
	[MC_PLL] = GOYA_MC_PLL,
	[MME_PLL] = GOYA_MME_PLL,
	[PCI_PLL] = GOYA_PCI_PLL,
	[EMMC_PLL] = GOYA_EMMC_PLL,
	[TPC_PLL] = GOYA_TPC_PLL,
};

static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
	"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
	"goya cq 4", "goya cpu eq"
};

static u16 goya_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32] = sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
	[PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
	[PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
	[PACKET_FENCE] = sizeof(struct packet_fence),
	[PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
	[PACKET_NOP] = sizeof(struct packet_nop),
	[PACKET_STOP] = sizeof(struct packet_stop)
};

static inline bool validate_packet_id(enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:
	case PACKET_WREG_BULK:
	case PACKET_MSG_LONG:
	case PACKET_MSG_SHORT:
	case PACKET_CP_DMA:
	case PACKET_MSG_PROT:
	case PACKET_FENCE:
	case PACKET_LIN_DMA:
	case PACKET_NOP:
	case PACKET_STOP:
		return true;
	default:
		return false;
	}
}

static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
	mmTPC0_QM_GLBL_SECURE_PROPS,
	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC0_CFG_ARUSER,
	mmTPC0_CFG_AWUSER,
	mmTPC1_QM_GLBL_SECURE_PROPS,
	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC1_CFG_ARUSER,
	mmTPC1_CFG_AWUSER,
	mmTPC2_QM_GLBL_SECURE_PROPS,
	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC2_CFG_ARUSER,
	mmTPC2_CFG_AWUSER,
	mmTPC3_QM_GLBL_SECURE_PROPS,
	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC3_CFG_ARUSER,
	mmTPC3_CFG_AWUSER,
	mmTPC4_QM_GLBL_SECURE_PROPS,
	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC4_CFG_ARUSER,
	mmTPC4_CFG_AWUSER,
	mmTPC5_QM_GLBL_SECURE_PROPS,
	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC5_CFG_ARUSER,
	mmTPC5_CFG_AWUSER,
	mmTPC6_QM_GLBL_SECURE_PROPS,
	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC6_CFG_ARUSER,
	mmTPC6_CFG_AWUSER,
	mmTPC7_QM_GLBL_SECURE_PROPS,
	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC7_CFG_ARUSER,
	mmTPC7_CFG_AWUSER,
	mmMME_QM_GLBL_SECURE_PROPS,
	mmMME_QM_GLBL_NON_SECURE_PROPS,
	mmMME_CMDQ_GLBL_SECURE_PROPS,
	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
	mmMME_SBA_CONTROL_DATA,
	mmMME_SBB_CONTROL_DATA,
	mmMME_SBC_CONTROL_DATA,
	mmMME_WBC_CONTROL_DATA,
	mmPCIE_WRAP_PSOC_ARUSER,
	mmPCIE_WRAP_PSOC_AWUSER
};

static u32 goya_all_events[] = {
	GOYA_ASYNC_EVENT_ID_PCIE_IF,
	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
	GOYA_ASYNC_EVENT_ID_MMU_ECC,
	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
	GOYA_ASYNC_EVENT_ID_DMA_ECC,
	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
	GOYA_ASYNC_EVENT_ID_SRAM0,
	GOYA_ASYNC_EVENT_ID_SRAM1,
	GOYA_ASYNC_EVENT_ID_SRAM2,
	GOYA_ASYNC_EVENT_ID_SRAM3,
	GOYA_ASYNC_EVENT_ID_SRAM4,
	GOYA_ASYNC_EVENT_ID_SRAM5,
	GOYA_ASYNC_EVENT_ID_SRAM6,
	GOYA_ASYNC_EVENT_ID_SRAM7,
	GOYA_ASYNC_EVENT_ID_SRAM8,
	GOYA_ASYNC_EVENT_ID_SRAM9,
	GOYA_ASYNC_EVENT_ID_SRAM10,
	GOYA_ASYNC_EVENT_ID_SRAM11,
	GOYA_ASYNC_EVENT_ID_SRAM12,
	GOYA_ASYNC_EVENT_ID_SRAM13,
	GOYA_ASYNC_EVENT_ID_SRAM14,
	GOYA_ASYNC_EVENT_ID_SRAM15,
	GOYA_ASYNC_EVENT_ID_SRAM16,
	GOYA_ASYNC_EVENT_ID_SRAM17,
	GOYA_ASYNC_EVENT_ID_SRAM18,
	GOYA_ASYNC_EVENT_ID_SRAM19,
	GOYA_ASYNC_EVENT_ID_SRAM20,
	GOYA_ASYNC_EVENT_ID_SRAM21,
	GOYA_ASYNC_EVENT_ID_SRAM22,
	GOYA_ASYNC_EVENT_ID_SRAM23,
	GOYA_ASYNC_EVENT_ID_SRAM24,
	GOYA_ASYNC_EVENT_ID_SRAM25,
	GOYA_ASYNC_EVENT_ID_SRAM26,
	GOYA_ASYNC_EVENT_ID_SRAM27,
	GOYA_ASYNC_EVENT_ID_SRAM28,
	GOYA_ASYNC_EVENT_ID_SRAM29,
	GOYA_ASYNC_EVENT_ID_GIC500,
	GOYA_ASYNC_EVENT_ID_PLL0,
	GOYA_ASYNC_EVENT_ID_PLL1,
	GOYA_ASYNC_EVENT_ID_PLL3,
	GOYA_ASYNC_EVENT_ID_PLL4,
	GOYA_ASYNC_EVENT_ID_PLL5,
	GOYA_ASYNC_EVENT_ID_PLL6,
	GOYA_ASYNC_EVENT_ID_AXI_ECC,
	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
	GOYA_ASYNC_EVENT_ID_MME_WACS,
	GOYA_ASYNC_EVENT_ID_MME_WACSD,
	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
	GOYA_ASYNC_EVENT_ID_PSOC,
	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC0_QM,
	GOYA_ASYNC_EVENT_ID_TPC1_QM,
	GOYA_ASYNC_EVENT_ID_TPC2_QM,
	GOYA_ASYNC_EVENT_ID_TPC3_QM,
	GOYA_ASYNC_EVENT_ID_TPC4_QM,
	GOYA_ASYNC_EVENT_ID_TPC5_QM,
	GOYA_ASYNC_EVENT_ID_TPC6_QM,
	GOYA_ASYNC_EVENT_ID_TPC7_QM,
	GOYA_ASYNC_EVENT_ID_MME_QM,
	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
	GOYA_ASYNC_EVENT_ID_DMA0_QM,
	GOYA_ASYNC_EVENT_ID_DMA1_QM,
	GOYA_ASYNC_EVENT_ID_DMA2_QM,
	GOYA_ASYNC_EVENT_ID_DMA3_QM,
	GOYA_ASYNC_EVENT_ID_DMA4_QM,
	GOYA_ASYNC_EVENT_ID_DMA0_CH,
	GOYA_ASYNC_EVENT_ID_DMA1_CH,
	GOYA_ASYNC_EVENT_ID_DMA2_CH,
	GOYA_ASYNC_EVENT_ID_DMA3_CH,
	GOYA_ASYNC_EVENT_ID_DMA4_CH,
	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
	GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
	GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
	GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
	GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
};

static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);

int goya_get_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int i;

	prop->max_queues = GOYA_QUEUE_ID_SIZE;
	prop->hw_queues_props = kcalloc(prop->max_queues,
			sizeof(struct hw_queue_properties),
			GFP_KERNEL);

	if (!prop->hw_queues_props)
		return -ENOMEM;

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
		prop->hw_queues_props[i].driver_only = 0;
		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
		prop->hw_queues_props[i].driver_only = 1;
		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
			NUMBER_OF_INT_HW_QUEUES; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
		prop->hw_queues_props[i].driver_only = 0;
		prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
	}

	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;

	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
						SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;
	prop->dram_supports_virtual_memory = true;

	prop->dmmu.hop0_shift = HOP0_SHIFT;
	prop->dmmu.hop1_shift = HOP1_SHIFT;
	prop->dmmu.hop2_shift = HOP2_SHIFT;
	prop->dmmu.hop3_shift = HOP3_SHIFT;
	prop->dmmu.hop4_shift = HOP4_SHIFT;
	prop->dmmu.hop0_mask = HOP0_MASK;
	prop->dmmu.hop1_mask = HOP1_MASK;
	prop->dmmu.hop2_mask = HOP2_MASK;
	prop->dmmu.hop3_mask = HOP3_MASK;
	prop->dmmu.hop4_mask = HOP4_MASK;
	prop->dmmu.start_addr = VA_DDR_SPACE_START;
	prop->dmmu.end_addr = VA_DDR_SPACE_END;
	prop->dmmu.page_size = PAGE_SIZE_2MB;
	prop->dmmu.num_hops = MMU_ARCH_5_HOPS;

	/* shifts and masks are the same in PMMU and DMMU */
	memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
	prop->pmmu.start_addr = VA_HOST_SPACE_START;
	prop->pmmu.end_addr = VA_HOST_SPACE_END;
	prop->pmmu.page_size = PAGE_SIZE_4KB;
	prop->pmmu.num_hops = MMU_ARCH_5_HOPS;

	/* PMMU and HPMMU are the same except for page size */
	memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
	prop->pmmu_huge.page_size = PAGE_SIZE_2MB;

	prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
	prop->high_pll = PLL_HIGH_DEFAULT;
	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
	prop->max_power_default = MAX_POWER_DEFAULT;
	prop->dc_power_default = DC_POWER_DEFAULT;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;
	prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
	prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;

	strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
		CARD_NAME_MAX_LEN);

	prop->max_pending_cs = GOYA_MAX_PENDING_CS;

	prop->first_available_user_msix_interrupt = USHRT_MAX;

	for (i = 0 ; i < HL_MAX_DCORES ; i++)
		prop->first_available_cq[i] = USHRT_MAX;

	prop->fw_security_status_valid = false;
	prop->hard_reset_done_by_fw = false;

	return 0;
}

/*
 * goya_pci_bars_map - Map PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Request PCI regions and map them to kernel virtual addresses.
 * Returns 0 on success
 *
 */
static int goya_pci_bars_map(struct hl_device *hdev)
{
	static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
	bool is_wc[3] = {false, false, true};
	int rc;

	rc = hl_pci_bars_map(hdev, name, is_wc);
	if (rc)
		return rc;

	hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
			(CFG_BASE - SRAM_BASE_ADDR);

	return 0;
}

static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_inbound_pci_region pci_region;
	u64 old_addr = addr;
	int rc;

	if ((goya) && (goya->ddr_bar_cur_addr == addr))
		return old_addr;

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	pci_region.mode = PCI_BAR_MATCH_MODE;
	pci_region.bar = DDR_BAR_ID;
	pci_region.addr = addr;
	rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
	if (rc)
		return U64_MAX;

	if (goya) {
		old_addr = goya->ddr_bar_cur_addr;
		goya->ddr_bar_cur_addr = addr;
	}

	return old_addr;
}

/*
 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
 *
 * @hdev: pointer to hl_device structure
 *
 * This is needed in case the firmware doesn't initialize the iATU
 *
 */
static int goya_init_iatu(struct hl_device *hdev)
{
	struct hl_inbound_pci_region inbound_region;
	struct hl_outbound_pci_region outbound_region;
	int rc;

	if (hdev->asic_prop.iatu_done_by_fw) {
		hdev->asic_funcs->set_dma_mask_from_fw(hdev);
		return 0;
	}

	/* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = SRAM_CFG_BAR_ID;
	inbound_region.addr = SRAM_BASE_ADDR;
	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
	if (rc)
		goto done;

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	inbound_region.mode = PCI_BAR_MATCH_MODE;
	inbound_region.bar = DDR_BAR_ID;
	inbound_region.addr = DRAM_PHYS_BASE;
	rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
	if (rc)
		goto done;

	hdev->asic_funcs->set_dma_mask_from_fw(hdev);

	/* Outbound Region 0 - Point to Host */
	outbound_region.addr = HOST_PHYS_BASE;
	outbound_region.size = HOST_PHYS_SIZE;
	rc = hl_pci_set_outbound_region(hdev, &outbound_region);

done:
	return rc;
}

static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
	return RREG32(mmHW_STATE);
}

/*
 * goya_early_init - GOYA early initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Verify PCI bars
 * Set DMA masks
 * PCI controller initialization
 * Map PCI bars
 *
 */
static int goya_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 fw_boot_status, val;
	int rc;

	rc = goya_get_fixed_properties(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get fixed properties\n");
		return rc;
	}

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_CFG_BAR_ID),
			CFG_BAR_SIZE);
		rc = -ENODEV;
		goto free_queue_props;
	}

	if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			MSIX_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
								MSIX_BAR_ID),
			MSIX_BAR_SIZE);
		rc = -ENODEV;
		goto free_queue_props;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);

	/* If FW security is enabled at this point it means no access to ELBI */
	if (!hdev->asic_prop.fw_security_disabled) {
		hdev->asic_prop.iatu_done_by_fw = true;
		goto pci_init;
	}

	rc = hl_pci_elbi_read(hdev, CFG_BASE + mmCPU_BOOT_DEV_STS0,
				&fw_boot_status);
	if (rc)
		goto free_queue_props;

	/* Check whether FW is configuring iATU */
	if ((fw_boot_status & CPU_BOOT_DEV_STS0_ENABLED) &&
			(fw_boot_status & CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN))
		hdev->asic_prop.iatu_done_by_fw = true;

pci_init:
	rc = hl_pci_init(hdev);
	if (rc)
		goto free_queue_props;

	/* Before continuing in the initialization, we need to read the preboot
	 * version to determine whether we run with a security-enabled firmware
	 */
	rc = hl_fw_read_preboot_status(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
			mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
			GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
	if (rc) {
		if (hdev->reset_on_preboot_fail)
			hdev->asic_funcs->hw_fini(hdev, true);
		goto pci_fini;
	}

	if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
		dev_info(hdev->dev,
			"H/W state is dirty, must reset before initializing\n");
		hdev->asic_funcs->hw_fini(hdev, true);
	}

	if (!hdev->pldm) {
		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
			dev_warn(hdev->dev,
				"PCI strap is not configured correctly, PCI bus errors may occur\n");
	}

	return 0;

pci_fini:
	hl_pci_fini(hdev);
free_queue_props:
	kfree(hdev->asic_prop.hw_queues_props);
	return rc;
}

/*
 * goya_early_fini - GOYA early finalization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Unmap PCI bars
 *
 */
static int goya_early_fini(struct hl_device *hdev)
{
	kfree(hdev->asic_prop.hw_queues_props);
	hl_pci_fini(hdev);

	return 0;
}

static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
{
	/* mask to zero the MMBP and ASID bits */
	WREG32_AND(reg, ~0x7FF);
	WREG32_OR(reg, asid);
}
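
/*
 * Worked example for the mask above: ~0x7FF clears bits [10:0], i.e. the
 * ASID field plus the MMU-bypass (MMBP) bit. Writing, say, asid = 3 then
 * leaves reg[10:0] = 0x003: bypass disabled and ASID 3 selected. (The exact
 * field split within the 11 masked bits is an assumption inferred from the
 * 0x7FF width and is not spelled out in this file.)
 */
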
static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
{
	struct goya_device *goya = hdev->asic_specific;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	if (secure)
		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);

	RREG32(mmDMA_QM_0_GLBL_PROT);
}

/*
 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
	u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
	int rc;

	if (hdev->asic_prop.fw_security_disabled) {
		div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
		div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
		nr = RREG32(mmPSOC_PCI_PLL_NR);
		nf = RREG32(mmPSOC_PCI_PLL_NF);
		od = RREG32(mmPSOC_PCI_PLL_OD);

		if (div_sel == DIV_SEL_REF_CLK ||
				div_sel == DIV_SEL_DIVIDED_REF) {
			if (div_sel == DIV_SEL_REF_CLK)
				freq = PLL_REF_CLK;
			else
				freq = PLL_REF_CLK / (div_fctr + 1);
		} else if (div_sel == DIV_SEL_PLL_CLK ||
				div_sel == DIV_SEL_DIVIDED_PLL) {
			pll_clk = PLL_REF_CLK * (nf + 1) /
					((nr + 1) * (od + 1));
			if (div_sel == DIV_SEL_PLL_CLK)
				freq = pll_clk;
			else
				freq = pll_clk / (div_fctr + 1);
		} else {
			dev_warn(hdev->dev,
				"Received invalid div select value: %d",
				div_sel);
			freq = 0;
		}
	} else {
		rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);

		if (rc)
			return;

		freq = pll_freq_arr[1];
	}

	prop->psoc_timestamp_frequency = freq;
	prop->psoc_pci_pll_nr = nr;
	prop->psoc_pci_pll_nf = nf;
	prop->psoc_pci_pll_od = od;
	prop->psoc_pci_pll_div_factor = div_fctr;
}
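
/*
 * Worked example for the PLL math above (illustrative numbers only):
 * assuming a 50 MHz reference for PLL_REF_CLK, nr = 0, nf = 31, od = 1 and
 * div_fctr = 0, then pll_clk = 50 MHz * 32 / (1 * 2) = 800 MHz and
 * freq = 800 MHz / 1 = 800 MHz. The actual register values are board- and
 * firmware-dependent.
 */
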
int goya_late_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	goya_fetch_psoc_frequency(hdev);

	rc = goya_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to clear MMU page tables range %d\n", rc);
		return rc;
	}

	rc = goya_mmu_set_dram_default_page(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
		return rc;
	}

	rc = goya_mmu_add_mappings_for_device_cpu(hdev);
	if (rc)
		return rc;

	rc = goya_init_cpu_queues(hdev);
	if (rc)
		return rc;

	rc = goya_test_cpu_queue(hdev);
	if (rc)
		return rc;

	rc = goya_cpucp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get cpucp info %d\n", rc);
		return rc;
	}

	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
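	/*
	 * Worked example: for an 8 GB DRAM, ilog2(2^33) = 33, so 33 is
	 * programmed into the wrap protection. Any size that is not a power
	 * of two is rounded down by ilog2().
	 */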

	rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to enable PCI access from CPU %d\n", rc);
		return rc;
	}

	return 0;
}

/*
 * goya_late_fini - GOYA late tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 * Free sensors allocated structures
 */
void goya_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}

/*
 * goya_sw_init - Goya software initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_init(struct hl_device *hdev)
{
	struct goya_device *goya;
	int rc;

	/* Allocate device structure */
	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	if (!goya)
		return -ENOMEM;

	/* according to goya_init_iatu */
	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;

	goya->mme_clk = GOYA_PLL_FREQ_LOW;
	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
	goya->ic_clk = GOYA_PLL_FREQ_LOW;

	hdev->asic_specific = goya;

	/* store legacy PLL map */
	hdev->legacy_pll_map = goya_pll_map;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_goya_device;
	}

	hdev->cpu_accessible_dma_mem =
			hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
					HL_CPU_ACCESSIBLE_MEM_SIZE,
					&hdev->cpu_accessible_dma_address,
					GFP_KERNEL | __GFP_ZERO);

	if (!hdev->cpu_accessible_dma_mem) {
		rc = -ENOMEM;
		goto free_dma_pool;
	}

	dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
		&hdev->cpu_accessible_dma_address);

	hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_accessible_dma_pool;
	}

	spin_lock_init(&goya->hw_queues_lock);
	hdev->supports_coresight = true;
	hdev->supports_soft_reset = true;

	return 0;

free_cpu_accessible_dma_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_dma_mem:
	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_goya_device:
	kfree(goya);

	return rc;
}

/*
 * goya_sw_fini - Goya software tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_fini(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	hdev->asic_funcs->asic_dma_free_coherent(hdev,
			HL_CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(goya);

	return 0;
}

static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
				dma_addr_t bus_address)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
	u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));

	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);

	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);

	/* PQ has buffer of 2 cache lines, while CQ has 8 lines */
	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
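
	/*
	 * The CFG1 values appear to encode the buffer depth twice (in the low
	 * and high halfwords): 0x00020002 -> 2 cache lines for the PQ,
	 * 0x00080008 -> 8 cache lines for the CQ, consistent with 0x00140014
	 * (20 lines) used for the MME/TPC CMDQs below. The duplicated-halfword
	 * interpretation is an assumption; the H/W spec is not quoted here.
	 */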

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);

	if (hdev->stop_on_err)
		dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;

	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}

static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
	u32 gic_base_lo, gic_base_hi;
	u64 sob_addr;
	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);

	if (dma_id)
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
				(dma_id - 1) * 4;
	else
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;

	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}

/*
 * goya_init_dma_qmans - Initialize QMAN DMA registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the QMAN DMA channels
 *
 */
void goya_init_dma_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_hw_queue *q;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_DMA)
		return;

	q = &hdev->kernel_queues[0];

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
		q->cq_id = q->msi_vec = i;
		goya_init_dma_qman(hdev, i, q->bus_address);
		goya_init_dma_ch(hdev, i);
	}

	goya->hw_cap_initialized |= HW_CAP_DMA;
}

/*
 * goya_disable_external_queues - Disable external queues
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_disable_external_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	if (!(goya->hw_cap_initialized & HW_CAP_DMA))
		return;

	WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
}

static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
				u32 cp_sts_reg, u32 glbl_sts0_reg)
{
	int rc;
	u32 status;

	/* use the values of TPC0 as they are all the same */

	WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);

	status = RREG32(cp_sts_reg);
	if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
		rc = hl_poll_timeout(
			hdev,
			cp_sts_reg,
			status,
			!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
			1000,
			QMAN_FENCE_TIMEOUT_USEC);

		/* if QMAN is stuck in fence no need to check for stop */
		if (rc)
			return 0;
	}

	rc = hl_poll_timeout(
		hdev,
		glbl_sts0_reg,
		status,
		(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
		1000,
		QMAN_STOP_TIMEOUT_USEC);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for QMAN to stop\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * goya_stop_external_queues - Stop external queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_stop_external_queues(struct hl_device *hdev)
{
	int rc, retval = 0;

	struct goya_device *goya = hdev->asic_specific;

	if (!(goya->hw_cap_initialized & HW_CAP_DMA))
		return retval;

	rc = goya_stop_queue(hdev,
			mmDMA_QM_0_GLBL_CFG1,
			mmDMA_QM_0_CP_STS,
			mmDMA_QM_0_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_1_GLBL_CFG1,
			mmDMA_QM_1_CP_STS,
			mmDMA_QM_1_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_2_GLBL_CFG1,
			mmDMA_QM_2_CP_STS,
			mmDMA_QM_2_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_3_GLBL_CFG1,
			mmDMA_QM_3_CP_STS,
			mmDMA_QM_3_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_4_GLBL_CFG1,
			mmDMA_QM_4_CP_STS,
			mmDMA_QM_4_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
		retval = -EIO;
	}

	return retval;
}

/*
 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
int goya_init_cpu_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_eq *eq;
	u32 status;
	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

	WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
	WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));

	WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
	WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));

	WREG32(mmCPU_CQ_BASE_ADDR_LOW,
		lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
	WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
		upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));

	WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmCPU_EQ_CI, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
		GOYA_ASYNC_EVENT_ID_PI_UPDATE);

	err = hl_poll_timeout(
		hdev,
		mmCPU_PQ_INIT_STATUS,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		GOYA_CPU_TIMEOUT_USEC);

	if (err) {
		dev_err(hdev->dev,
			"Failed to setup communication with device CPU\n");
		return -EIO;
	}

	/* update FW application security bits */
	if (prop->fw_security_status_valid)
		prop->fw_app_security_map = RREG32(mmCPU_BOOT_DEV_STS0);

	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}

static void goya_set_pll_refclk(struct hl_device *hdev)
{
	WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);

	WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}

static void goya_disable_clk_rlx(struct hl_device *hdev)
{
	WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
	WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}

static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
	u64 tpc_eml_address;
	u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
	int err, slm_index;

	tpc_offset = tpc_id * 0x40000;
	tpc_eml_offset = tpc_id * 0x200000;
	tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
	tpc_slm_offset = tpc_eml_address + 0x100000;

	/*
	 * Workaround for Bug H2 #2443 :
	 * "TPC SB is not initialized on chip reset"
	 */

	val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
	if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
		dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
				tpc_id);

	WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);

	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);

	WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);

	err = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		val,
		(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
		1000,
		HL_DEVICE_TIMEOUT_USEC);

	if (err)
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);

	WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
		1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);

	msleep(GOYA_RESET_WAIT_MSEC);

	WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
		~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));

	msleep(GOYA_RESET_WAIT_MSEC);

	for (slm_index = 0 ; slm_index < 256 ; slm_index++)
		WREG32(tpc_slm_offset + (slm_index << 2), 0);

	val = RREG32(tpc_slm_offset);
}

static void goya_tpc_mbist_workaround(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (hdev->pldm)
		return;

	if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
		return;

	/* Workaround for H2 #2443 */

	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		_goya_tpc_mbist_workaround(hdev, i);

	goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
}

/*
 * goya_init_golden_registers - Initialize golden registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the device
 *
 */
static void goya_init_golden_registers(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 polynom[10], tpc_intr_mask, offset;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
		return;

	polynom[0] = 0x00020080;
	polynom[1] = 0x00401000;
	polynom[2] = 0x00200800;
	polynom[3] = 0x00002000;
	polynom[4] = 0x00080200;
	polynom[5] = 0x00040100;
	polynom[6] = 0x00100400;
	polynom[7] = 0x00004000;
	polynom[8] = 0x00010000;
	polynom[9] = 0x00008000;

	/* Mask all arithmetic interrupts from TPC */
	tpc_intr_mask = 0x7FFF;

	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
		WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
	}

	WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
	WREG32(mmMME_AGU, 0x0f0f0f10);
	WREG32(mmMME_SEI_MASK, ~0x0);

	WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
	WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
	WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
	WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
	WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
	WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
	WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
	WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
	WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
	WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
	WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
	WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
	WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
	WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
	WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
	WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
	WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
	WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
	WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
	WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
	WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
	WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
	WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
	WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
	WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
	WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
	WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
	WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
	WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
	WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
	WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
	WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
	WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
	WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
	WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
	WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
	WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
	WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
	WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
	WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
	WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
	WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
	WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
	WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
	WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
	WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
	WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
	WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
	WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
	WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
	WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
	WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
	WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
	WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
	WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
	WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
	WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);

	WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
	WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
	WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
	WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
	WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
	WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);

	WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
	WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
	WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
	WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
	WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
	WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
	WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
	WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);

	WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
	WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
	WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
	WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
	WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
	WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
	WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
	WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
	WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);

	WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
	WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
	WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
	WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
	WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
	WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
	WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);

	WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
	WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
	WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
	WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
	WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
	WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
	WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
	WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);

	WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
	WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
	WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
	WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);

	for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
		WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);

		WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);

		WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
	}

	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
		WREG32(mmMME1_RTR_SCRAMB_EN + offset,
				1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
		WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
				1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
	}

	for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
		/*
		 * Workaround for Bug H2 #2441 :
		 * "ST.NOP set trace event illegal opcode"
		 */
		WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);

		WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
				1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
		WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
				1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);

		WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
				ICACHE_FETCH_LINE_NUM, 2);
	}

	WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
	WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
			1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);

	WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
	WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
			1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);

	/*
	 * Workaround for H2 #HW-23 bug
	 * Set DMA max outstanding read requests to 240 on DMA CH 1.
	 * This limitation is still large enough to not affect Gen4 bandwidth.
	 * We need to only limit that DMA channel because the user can only
	 * read from Host using DMA CH 1.
	 */
	WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
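	/*
	 * Worked arithmetic for the value above: the low byte 0xF0 is 240,
	 * matching the "240 outstanding reads" in the comment. Treating the
	 * remaining 0x0fff00 bits as other CFG0 fields left at benign values
	 * is an assumption; the field layout is not spelled out here.
	 */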

	WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);

	goya->hw_cap_initialized |= HW_CAP_GOLDEN;
}

static void goya_init_mme_qman(struct hl_device *hdev)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u64 qman_base_addr;

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	qman_base_addr = hdev->asic_prop.sram_base_address +
			MME_QMAN_BASE_OFFSET;

	WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
	WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
	WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
	WREG32(mmMME_QM_PQ_PI, 0);
	WREG32(mmMME_QM_PQ_CI, 0);
	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
	WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
	WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);

	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);

	/* QMAN CQ has 8 cache lines */
	WREG32(mmMME_QM_CQ_CFG1, 0x00080008);

	WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
	WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);

	WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);

	WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);

	WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);

	WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
}

static void goya_init_mme_cmdq(struct hl_device *hdev)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);

	/* CMDQ CQ has 20 cache lines */
	WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);

	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);

	WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);

	WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);

	WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);

	WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
}

void goya_init_mme_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 so_base_lo, so_base_hi;

	if (goya->hw_cap_initialized & HW_CAP_MME)
		return;

	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
	WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);

	goya_init_mme_qman(hdev);
	goya_init_mme_cmdq(hdev);

	goya->hw_cap_initialized |= HW_CAP_MME;
}

static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u64 qman_base_addr;
	u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	qman_base_addr = hdev->asic_prop.sram_base_address + base_off;

	WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
	WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
	WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
	WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
	WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
	WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
	WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
	WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
	WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);

	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);

	WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);

	WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);

	WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);

	WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);

	WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);

	WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
}

static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);

	WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);

	WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);

	WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);

	WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);

	WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);

	WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
}

1923 void goya_init_tpc_qmans(struct hl_device *hdev)
1925 struct goya_device *goya = hdev->asic_specific;
1926 u32 so_base_lo, so_base_hi;
1927 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1928 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1931 if (goya->hw_cap_initialized & HW_CAP_TPC)
1934 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1935 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1937 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1938 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1940 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1944 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1945 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1946 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1947 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1948 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1949 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1950 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1951 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1953 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1954 goya_init_tpc_cmdq(hdev, i);
1956 goya->hw_cap_initialized |= HW_CAP_TPC;
1960 * goya_disable_internal_queues - Disable internal queues
1962 * @hdev: pointer to hl_device structure
1965 static void goya_disable_internal_queues(struct hl_device *hdev)
1967 struct goya_device *goya = hdev->asic_specific;
1969 if (!(goya->hw_cap_initialized & HW_CAP_MME))
1972 WREG32(mmMME_QM_GLBL_CFG0, 0);
1973 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1976 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
1979 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1980 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1982 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1983 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1985 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1986 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1988 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1989 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1991 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1992 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1994 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1995 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1997 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1998 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
2000 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
2001 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
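/*
 * Illustrative alternative (not part of the driver): because the TPC
 * register blocks are evenly strided - the same assumption
 * goya_init_tpc_qman() relies on for reg_off - the unrolled writes above
 * could be expressed as a single loop. goya_disable_tpc_queues_sketch is
 * a hypothetical name.
 */
#if 0
static void goya_disable_tpc_queues_sketch(struct hl_device *hdev)
{
	u32 qm_stride = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;
	u32 cmdq_stride = mmTPC1_CMDQ_GLBL_CFG0 - mmTPC0_CMDQ_GLBL_CFG0;
	int i;

	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
		/* Same registers as the unrolled writes above */
		WREG32(mmTPC0_QM_GLBL_CFG0 + i * qm_stride, 0);
		WREG32(mmTPC0_CMDQ_GLBL_CFG0 + i * cmdq_stride, 0);
	}
}
#endif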
2005 * goya_stop_internal_queues - Stop internal queues
2007 * @hdev: pointer to hl_device structure
2009 * Returns 0 on success
2012 static int goya_stop_internal_queues(struct hl_device *hdev)
2014 struct goya_device *goya = hdev->asic_specific;
2017 if (!(goya->hw_cap_initialized & HW_CAP_MME))
2021 * Each queue (QMAN) is a separate piece of H/W logic. That means that
2022 * each QMAN can be stopped independently and failure to stop one does
2023 * NOT prevent us from trying to stop the other QMANs
2026 rc = goya_stop_queue(hdev,
2029 mmMME_QM_GLBL_STS0);
2032 dev_err(hdev->dev, "failed to stop MME QMAN\n");
2036 rc = goya_stop_queue(hdev,
2037 mmMME_CMDQ_GLBL_CFG1,
2039 mmMME_CMDQ_GLBL_STS0);
2042 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
2047 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2050 rc = goya_stop_queue(hdev,
2051 mmTPC0_QM_GLBL_CFG1,
2053 mmTPC0_QM_GLBL_STS0);
2056 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
2060 rc = goya_stop_queue(hdev,
2061 mmTPC0_CMDQ_GLBL_CFG1,
2063 mmTPC0_CMDQ_GLBL_STS0);
2066 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
2070 rc = goya_stop_queue(hdev,
2071 mmTPC1_QM_GLBL_CFG1,
2073 mmTPC1_QM_GLBL_STS0);
2076 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
2080 rc = goya_stop_queue(hdev,
2081 mmTPC1_CMDQ_GLBL_CFG1,
2083 mmTPC1_CMDQ_GLBL_STS0);
2086 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
2090 rc = goya_stop_queue(hdev,
2091 mmTPC2_QM_GLBL_CFG1,
2093 mmTPC2_QM_GLBL_STS0);
2096 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2100 rc = goya_stop_queue(hdev,
2101 mmTPC2_CMDQ_GLBL_CFG1,
2103 mmTPC2_CMDQ_GLBL_STS0);
2106 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2110 rc = goya_stop_queue(hdev,
2111 mmTPC3_QM_GLBL_CFG1,
2113 mmTPC3_QM_GLBL_STS0);
2116 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2120 rc = goya_stop_queue(hdev,
2121 mmTPC3_CMDQ_GLBL_CFG1,
2123 mmTPC3_CMDQ_GLBL_STS0);
2126 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2130 rc = goya_stop_queue(hdev,
2131 mmTPC4_QM_GLBL_CFG1,
2133 mmTPC4_QM_GLBL_STS0);
2136 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2140 rc = goya_stop_queue(hdev,
2141 mmTPC4_CMDQ_GLBL_CFG1,
2143 mmTPC4_CMDQ_GLBL_STS0);
2146 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2150 rc = goya_stop_queue(hdev,
2151 mmTPC5_QM_GLBL_CFG1,
2153 mmTPC5_QM_GLBL_STS0);
2156 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2160 rc = goya_stop_queue(hdev,
2161 mmTPC5_CMDQ_GLBL_CFG1,
2163 mmTPC5_CMDQ_GLBL_STS0);
2166 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2170 rc = goya_stop_queue(hdev,
2171 mmTPC6_QM_GLBL_CFG1,
2173 mmTPC6_QM_GLBL_STS0);
2176 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2180 rc = goya_stop_queue(hdev,
2181 mmTPC6_CMDQ_GLBL_CFG1,
2183 mmTPC6_CMDQ_GLBL_STS0);
2186 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2190 rc = goya_stop_queue(hdev,
2191 mmTPC7_QM_GLBL_CFG1,
2193 mmTPC7_QM_GLBL_STS0);
2196 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2200 rc = goya_stop_queue(hdev,
2201 mmTPC7_CMDQ_GLBL_CFG1,
2203 mmTPC7_CMDQ_GLBL_STS0);
2206 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2213 static void goya_dma_stall(struct hl_device *hdev)
2215 struct goya_device *goya = hdev->asic_specific;
2217 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
2220 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2221 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2222 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2223 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2224 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2227 static void goya_tpc_stall(struct hl_device *hdev)
2229 struct goya_device *goya = hdev->asic_specific;
2231 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2234 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2235 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2236 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2237 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2238 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2239 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2240 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2241 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2244 static void goya_mme_stall(struct hl_device *hdev)
2246 struct goya_device *goya = hdev->asic_specific;
2248 if (!(goya->hw_cap_initialized & HW_CAP_MME))
2251 WREG32(mmMME_STALL, 0xFFFFFFFF);
2254 static int goya_enable_msix(struct hl_device *hdev)
2256 struct goya_device *goya = hdev->asic_specific;
2257 int cq_cnt = hdev->asic_prop.completion_queues_count;
2258 int rc, i, irq_cnt_init, irq;
2260 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2263 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2264 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2267 "MSI-X: Failed to enable support -- %d/%d\n",
2268 GOYA_MSIX_ENTRIES, rc);
2272 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2273 irq = pci_irq_vector(hdev->pdev, i);
2274 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2275 &hdev->completion_queue[i]);
2277 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2282 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2284 rc = request_irq(irq, hl_irq_handler_eq, 0,
2285 goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
2286 &hdev->event_queue);
2288 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2292 goya->hw_cap_initialized |= HW_CAP_MSIX;
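/*
 * Error path: free only the IRQs that were successfully requested before
 * the failure, then release all MSI-X vectors.
 */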
2296 for (i = 0 ; i < irq_cnt_init ; i++)
2297 free_irq(pci_irq_vector(hdev->pdev, i),
2298 &hdev->completion_queue[i]);
2300 pci_free_irq_vectors(hdev->pdev);
2304 static void goya_sync_irqs(struct hl_device *hdev)
2306 struct goya_device *goya = hdev->asic_specific;
2309 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2312 /* Wait for all pending IRQ handlers to finish */
2313 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2314 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2316 synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2319 static void goya_disable_msix(struct hl_device *hdev)
2321 struct goya_device *goya = hdev->asic_specific;
2324 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2327 goya_sync_irqs(hdev);
2329 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2330 free_irq(irq, &hdev->event_queue);
2332 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2333 irq = pci_irq_vector(hdev->pdev, i);
2334 free_irq(irq, &hdev->completion_queue[i]);
2337 pci_free_irq_vectors(hdev->pdev);
2339 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2342 static void goya_enable_timestamp(struct hl_device *hdev)
2344 /* Disable the timestamp counter */
2345 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2347 /* Zero the lower/upper parts of the 64-bit counter */
2348 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
2349 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
2351 /* Enable the counter */
2352 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
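/*
 * Illustrative sketch (not in the driver): if the counter were sampled
 * back as two 32-bit halves, a high-low-high retry loop would be needed
 * so a carry between the words isn't missed. This assumes +0x8 holds the
 * lower word and +0xC the upper word, matching the zeroing sequence
 * above; goya_sample_timestamp is a hypothetical name.
 */
#if 0
static u64 goya_sample_timestamp(struct hl_device *hdev)
{
	u32 hi, lo, hi2;

	do {
		hi = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC);
		lo = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8);
		hi2 = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC);
	} while (hi != hi2);

	return ((u64) hi << 32) | lo;
}
#endif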
2355 static void goya_disable_timestamp(struct hl_device *hdev)
2357 /* Disable the timestamp counter */
2358 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2361 static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2363 u32 wait_timeout_ms;
2366 "Halting compute engines and disabling interrupts\n");
2369 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2371 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
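/*
 * Halt order matters: first stop the queues so no new work is fetched,
 * then stall the engines to drain in-flight work, and only then disable
 * the queues, with a grace period after each stage.
 */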
2373 goya_stop_external_queues(hdev);
2374 goya_stop_internal_queues(hdev);
2376 msleep(wait_timeout_ms);
2378 goya_dma_stall(hdev);
2379 goya_tpc_stall(hdev);
2380 goya_mme_stall(hdev);
2382 msleep(wait_timeout_ms);
2384 goya_disable_external_queues(hdev);
2385 goya_disable_internal_queues(hdev);
2387 goya_disable_timestamp(hdev);
2390 goya_disable_msix(hdev);
2391 goya_mmu_remove_device_cpu_mappings(hdev);
2393 goya_sync_irqs(hdev);
2398 * goya_load_firmware_to_device() - Load LINUX FW code to device.
2399 * @hdev: Pointer to hl_device structure.
2401 * Copy LINUX fw code from firmware file to HBM BAR.
2403 * Return: 0 on success, non-zero for failure.
2405 static int goya_load_firmware_to_device(struct hl_device *hdev)
2409 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2411 return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0);
2415 * goya_load_boot_fit_to_device() - Load boot fit to device.
2416 * @hdev: Pointer to hl_device structure.
2418 * Copy boot fit file to SRAM BAR.
2420 * Return: 0 on success, non-zero for failure.
2422 static int goya_load_boot_fit_to_device(struct hl_device *hdev)
2426 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
2428 return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0);
2432 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2433 * The version string is located at that offset.
2435 static int goya_read_device_fw_version(struct hl_device *hdev,
2436 enum hl_fw_component fwc)
2444 ver_off = RREG32(mmUBOOT_VER_OFFSET);
2445 dest = hdev->asic_prop.uboot_ver;
2448 case FW_COMP_PREBOOT:
2449 ver_off = RREG32(mmPREBOOT_VER_OFFSET);
2450 dest = hdev->asic_prop.preboot_ver;
2454 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
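/* Strip the SRAM base bits so ver_off becomes a plain offset into SRAM */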
2458 ver_off &= ~((u32)SRAM_BASE_ADDR);
2460 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2461 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2464 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2466 strcpy(dest, "unavailable");
2474 static int goya_init_cpu(struct hl_device *hdev)
2476 struct goya_device *goya = hdev->asic_specific;
2479 if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
2482 if (goya->hw_cap_initialized & HW_CAP_CPU)
2486 * Before pushing u-boot/linux to device, need to set the ddr bar to
2487 * base address of dram
2489 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2491 "failed to map DDR bar to DRAM base address\n");
2495 rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
2496 mmPSOC_GLOBAL_CONF_UBOOT_MAGIC,
2497 mmCPU_CMD_STATUS_TO_HOST,
2498 mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
2499 false, GOYA_CPU_TIMEOUT_USEC,
2500 GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
2505 goya->hw_cap_initialized |= HW_CAP_CPU;
2510 static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
2513 u32 status, timeout_usec;
2517 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
2519 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
2521 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
2522 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
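/*
 * Writing bit 31 together with the ASID kicks the update; the poll below
 * waits for the H/W to clear the bit, indicating the new hop0 address
 * has been latched.
 */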
2523 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
2525 rc = hl_poll_timeout(
2529 !(status & 0x80000000),
2535 "Timeout during MMU hop0 config of asid %d\n", asid);
2542 int goya_mmu_init(struct hl_device *hdev)
2544 struct asic_fixed_properties *prop = &hdev->asic_prop;
2545 struct goya_device *goya = hdev->asic_specific;
2549 if (!hdev->mmu_enable)
2552 if (goya->hw_cap_initialized & HW_CAP_MMU)
2555 hdev->dram_default_page_mapping = true;
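/* Hop0 page tables are laid out back-to-back in DRAM, one table per ASID */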
2557 for (i = 0 ; i < prop->max_asid ; i++) {
2558 hop0_addr = prop->mmu_pgt_addr +
2559 (i * prop->mmu_hop_table_size);
2561 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2564 "failed to set hop0 addr for asid %d\n", i);
2569 goya->hw_cap_initialized |= HW_CAP_MMU;
2571 /* init MMU cache management page */
2572 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2573 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2574 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2576 /* Remove follower feature due to performance bug */
2577 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2578 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2580 hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
2581 VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
2583 WREG32(mmMMU_MMU_ENABLE, 1);
2584 WREG32(mmMMU_SPI_MASK, 0xF);
2593 * goya_hw_init - Goya hardware initialization code
2595 * @hdev: pointer to hl_device structure
2597 * Returns 0 on success
2600 static int goya_hw_init(struct hl_device *hdev)
2602 struct asic_fixed_properties *prop = &hdev->asic_prop;
2605 /* Perform read from the device to make sure device is up */
2606 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2609 * Let's mark in the H/W that we have reached this point. We check
2610 * this value in the reset_before_init function to understand whether
2611 * we need to reset the chip before doing H/W init. This register is
2612 * cleared by the H/W upon H/W reset
2614 WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
2616 rc = goya_init_cpu(hdev);
2618 dev_err(hdev->dev, "failed to initialize CPU\n");
2622 goya_tpc_mbist_workaround(hdev);
2624 goya_init_golden_registers(hdev);
2627 * After CPU initialization is finished, change DDR bar mapping inside
2628 * iATU to point to the start address of the MMU page tables
2630 if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
2631 ~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
2633 "failed to map DDR bar to MMU page tables\n");
2637 rc = goya_mmu_init(hdev);
2641 goya_init_security(hdev);
2643 goya_init_dma_qmans(hdev);
2645 goya_init_mme_qmans(hdev);
2647 goya_init_tpc_qmans(hdev);
2649 goya_enable_timestamp(hdev);
2651 /* MSI-X must be enabled before CPU queues are initialized */
2652 rc = goya_enable_msix(hdev);
2654 goto disable_queues;
2656 /* Perform read from the device to flush all MSI-X configuration */
2657 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2662 goya_disable_internal_queues(hdev);
2663 goya_disable_external_queues(hdev);
2669 * goya_hw_fini - Goya hardware tear-down code
2671 * @hdev: pointer to hl_device structure
2672 * @hard_reset: should we do hard reset to all engines or just reset the
2673 * compute/dma engines
2675 static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2677 struct goya_device *goya = hdev->asic_specific;
2678 u32 reset_timeout_ms, cpu_timeout_ms, status;
2681 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2682 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2684 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2685 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2689 /* We don't know what state the CPU is in, so make sure it is
2690 * stopped by any means necessary
2692 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2693 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2694 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2696 msleep(cpu_timeout_ms);
2698 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2699 goya_disable_clk_rlx(hdev);
2700 goya_set_pll_refclk(hdev);
2702 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2704 "Issued HARD reset command, going to wait %dms\n",
2707 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2709 "Issued SOFT reset command, going to wait %dms\n",
2714 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2715 * itself is in reset. In either reset we need to wait until the reset
2718 msleep(reset_timeout_ms);
2720 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2721 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2723 "Timeout while waiting for device to reset 0x%x\n",
2726 if (!hard_reset && goya) {
2727 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2728 HW_CAP_GOLDEN | HW_CAP_TPC);
2729 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2730 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2734 /* Chicken bit to re-initiate boot sequencer flow */
2735 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2736 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2737 /* Move boot manager FSM to pre boot sequencer init state */
2738 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2739 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2742 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2743 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2744 HW_CAP_DMA | HW_CAP_MME |
2745 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2746 HW_CAP_GOLDEN | HW_CAP_TPC);
2748 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2752 int goya_suspend(struct hl_device *hdev)
2756 rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
2758 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2763 int goya_resume(struct hl_device *hdev)
2765 return goya_init_iatu(hdev);
2768 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2769 void *cpu_addr, dma_addr_t dma_addr, size_t size)
2773 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2774 VM_DONTCOPY | VM_NORESERVE;
2776 rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
2777 (dma_addr - HOST_PHYS_BASE), size);
2779 dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
2784 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2786 u32 db_reg_offset, db_value;
2788 switch (hw_queue_id) {
2789 case GOYA_QUEUE_ID_DMA_0:
2790 db_reg_offset = mmDMA_QM_0_PQ_PI;
2793 case GOYA_QUEUE_ID_DMA_1:
2794 db_reg_offset = mmDMA_QM_1_PQ_PI;
2797 case GOYA_QUEUE_ID_DMA_2:
2798 db_reg_offset = mmDMA_QM_2_PQ_PI;
2801 case GOYA_QUEUE_ID_DMA_3:
2802 db_reg_offset = mmDMA_QM_3_PQ_PI;
2805 case GOYA_QUEUE_ID_DMA_4:
2806 db_reg_offset = mmDMA_QM_4_PQ_PI;
2809 case GOYA_QUEUE_ID_CPU_PQ:
2810 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2813 case GOYA_QUEUE_ID_MME:
2814 db_reg_offset = mmMME_QM_PQ_PI;
2817 case GOYA_QUEUE_ID_TPC0:
2818 db_reg_offset = mmTPC0_QM_PQ_PI;
2821 case GOYA_QUEUE_ID_TPC1:
2822 db_reg_offset = mmTPC1_QM_PQ_PI;
2825 case GOYA_QUEUE_ID_TPC2:
2826 db_reg_offset = mmTPC2_QM_PQ_PI;
2829 case GOYA_QUEUE_ID_TPC3:
2830 db_reg_offset = mmTPC3_QM_PQ_PI;
2833 case GOYA_QUEUE_ID_TPC4:
2834 db_reg_offset = mmTPC4_QM_PQ_PI;
2837 case GOYA_QUEUE_ID_TPC5:
2838 db_reg_offset = mmTPC5_QM_PQ_PI;
2841 case GOYA_QUEUE_ID_TPC6:
2842 db_reg_offset = mmTPC6_QM_PQ_PI;
2845 case GOYA_QUEUE_ID_TPC7:
2846 db_reg_offset = mmTPC7_QM_PQ_PI;
2850 /* Should never get here */
2851 dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
2858 /* ring the doorbell */
2859 WREG32(db_reg_offset, db_value);
2861 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) {
2862 /* make sure the device CPU reads the latest data from the host */
2864 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2865 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2869 void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
2871 /* The QMANs are on the SRAM, so the copy must go through I/O space */
2872 memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
2875 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2876 dma_addr_t *dma_handle, gfp_t flags)
2878 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2881 /* Shift to the device's base physical address of host memory */
2883 *dma_handle += HOST_PHYS_BASE;
2888 static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2889 void *cpu_addr, dma_addr_t dma_handle)
2891 /* Cancel the device's base physical address of host memory */
2892 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
2894 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
2897 int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
2902 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
2903 dma_addr_t *dma_handle, u16 *queue_len)
2908 *dma_handle = hdev->asic_prop.sram_base_address;
2910 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
2913 case GOYA_QUEUE_ID_MME:
2914 offset = MME_QMAN_BASE_OFFSET;
2915 *queue_len = MME_QMAN_LENGTH;
2917 case GOYA_QUEUE_ID_TPC0:
2918 offset = TPC0_QMAN_BASE_OFFSET;
2919 *queue_len = TPC_QMAN_LENGTH;
2921 case GOYA_QUEUE_ID_TPC1:
2922 offset = TPC1_QMAN_BASE_OFFSET;
2923 *queue_len = TPC_QMAN_LENGTH;
2925 case GOYA_QUEUE_ID_TPC2:
2926 offset = TPC2_QMAN_BASE_OFFSET;
2927 *queue_len = TPC_QMAN_LENGTH;
2929 case GOYA_QUEUE_ID_TPC3:
2930 offset = TPC3_QMAN_BASE_OFFSET;
2931 *queue_len = TPC_QMAN_LENGTH;
2933 case GOYA_QUEUE_ID_TPC4:
2934 offset = TPC4_QMAN_BASE_OFFSET;
2935 *queue_len = TPC_QMAN_LENGTH;
2937 case GOYA_QUEUE_ID_TPC5:
2938 offset = TPC5_QMAN_BASE_OFFSET;
2939 *queue_len = TPC_QMAN_LENGTH;
2941 case GOYA_QUEUE_ID_TPC6:
2942 offset = TPC6_QMAN_BASE_OFFSET;
2943 *queue_len = TPC_QMAN_LENGTH;
2945 case GOYA_QUEUE_ID_TPC7:
2946 offset = TPC7_QMAN_BASE_OFFSET;
2947 *queue_len = TPC_QMAN_LENGTH;
2950 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
2955 *dma_handle += offset;
2960 static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
2962 struct packet_msg_prot *fence_pkt;
2964 dma_addr_t fence_dma_addr;
2970 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
2972 timeout = HL_DEVICE_TIMEOUT_USEC;
2974 if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
2975 dev_err_ratelimited(hdev->dev,
2976 "Can't send driver job on QMAN0 because the device is not idle\n");
2980 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2984 "Failed to allocate fence memory for QMAN0\n");
2988 goya_qman0_set_security(hdev, true);
2990 cb = job->patched_cb;
2992 fence_pkt = cb->kernel_address +
2993 job->job_cb_size - sizeof(struct packet_msg_prot);
2995 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2996 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2997 (1 << GOYA_PKT_CTL_MB_SHIFT);
2998 fence_pkt->ctl = cpu_to_le32(tmp);
2999 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
3000 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
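/*
 * The patched CB ends with a MSG_PROT that writes the fence value to
 * host memory; completion is detected below by polling that memory
 * rather than via a completion queue.
 */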
3002 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
3003 job->job_cb_size, cb->bus_address);
3005 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
3006 goto free_fence_ptr;
3009 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
3010 (tmp == GOYA_QMAN0_FENCE_VAL), 1000,
3013 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
3015 if (rc == -ETIMEDOUT) {
3016 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
3017 goto free_fence_ptr;
3021 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
3024 goya_qman0_set_security(hdev, false);
3029 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
3030 u32 timeout, u64 *result)
3032 struct goya_device *goya = hdev->asic_specific;
3034 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
3041 timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
3043 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
3047 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
3049 struct packet_msg_prot *fence_pkt;
3050 dma_addr_t pkt_dma_addr;
3052 dma_addr_t fence_dma_addr;
3056 fence_val = GOYA_QMAN0_FENCE_VAL;
3058 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
3062 "Failed to allocate memory for H/W queue %d testing\n",
3069 fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
3070 sizeof(struct packet_msg_prot),
3071 GFP_KERNEL, &pkt_dma_addr);
3074 "Failed to allocate packet for H/W queue %d testing\n",
3077 goto free_fence_ptr;
3080 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3081 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3082 (1 << GOYA_PKT_CTL_MB_SHIFT);
3083 fence_pkt->ctl = cpu_to_le32(tmp);
3084 fence_pkt->value = cpu_to_le32(fence_val);
3085 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
3087 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
3088 sizeof(struct packet_msg_prot),
3092 "Failed to send fence packet to H/W queue %d\n",
3097 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
3098 1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
3100 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3102 if (rc == -ETIMEDOUT) {
3104 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3105 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3110 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
3113 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
3118 int goya_test_cpu_queue(struct hl_device *hdev)
3120 struct goya_device *goya = hdev->asic_specific;
3123 * Check the capability here because send_cpu_message() won't update
3124 * the result value if the capability is not set
3126 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
3129 return hl_fw_test_cpu_queue(hdev);
3132 int goya_test_queues(struct hl_device *hdev)
3134 int i, rc, ret_val = 0;
3136 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3137 rc = goya_test_queue(hdev, i);
3145 static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3146 gfp_t mem_flags, dma_addr_t *dma_handle)
3150 if (size > GOYA_DMA_POOL_BLK_SIZE)
3153 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3155 /* Shift to the device's base physical address of host memory */
3157 *dma_handle += HOST_PHYS_BASE;
3162 static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3163 dma_addr_t dma_addr)
3165 /* Cancel the device's base physical address of host memory */
3166 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
3168 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
3171 void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3172 dma_addr_t *dma_handle)
3176 vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
3177 *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
3178 VA_CPU_ACCESSIBLE_MEM_ADDR;
3183 void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3186 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
3189 static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
3190 int nents, enum dma_data_direction dir)
3192 struct scatterlist *sg;
3195 if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
3198 /* Shift to the device's base physical address of host memory */
3199 for_each_sg(sgl, sg, nents, i)
3200 sg->dma_address += HOST_PHYS_BASE;
3205 static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
3206 int nents, enum dma_data_direction dir)
3208 struct scatterlist *sg;
3211 /* Cancel the device's base physical address of host memory */
3212 for_each_sg(sgl, sg, nents, i)
3213 sg->dma_address -= HOST_PHYS_BASE;
3215 dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
3218 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3220 struct scatterlist *sg, *sg_next_iter;
3221 u32 count, dma_desc_cnt;
3223 dma_addr_t addr, addr_next;
3227 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3229 len = sg_dma_len(sg);
3230 addr = sg_dma_address(sg);
3235 while ((count + 1) < sgt->nents) {
3236 sg_next_iter = sg_next(sg);
3237 len_next = sg_dma_len(sg_next_iter);
3238 addr_next = sg_dma_address(sg_next_iter);
3243 if ((addr + len == addr_next) &&
3244 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3256 return dma_desc_cnt * sizeof(struct packet_lin_dma);
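/*
 * Worked example (illustrative): for DMA-mapped SG entries
 * {0x1000, len 0x1000}, {0x2000, len 0x1000}, {0x4000, len 0x1000},
 * the first two are physically contiguous and - assuming the combined
 * length fits in DMA_MAX_TRANSFER_SIZE - merge into one descriptor,
 * while the third starts a new one. dma_desc_cnt is therefore 2 and the
 * function returns 2 * sizeof(struct packet_lin_dma).
 */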
3259 static int goya_pin_memory_before_cs(struct hl_device *hdev,
3260 struct hl_cs_parser *parser,
3261 struct packet_lin_dma *user_dma_pkt,
3262 u64 addr, enum dma_data_direction dir)
3264 struct hl_userptr *userptr;
3267 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3268 parser->job_userptr_list, &userptr))
3269 goto already_pinned;
3271 userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
3275 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3280 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3282 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3283 userptr->sgt->nents, dir);
3285 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3289 userptr->dma_mapped = true;
3293 parser->patched_cb_size +=
3294 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3299 hl_unpin_host_memory(hdev, userptr);
3305 static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3306 struct hl_cs_parser *parser,
3307 struct packet_lin_dma *user_dma_pkt)
3309 u64 device_memory_addr, addr;
3310 enum dma_data_direction dir;
3311 enum goya_dma_direction user_dir;
3312 bool sram_addr = true;
3313 bool skip_host_mem_pin = false;
3318 ctl = le32_to_cpu(user_dma_pkt->ctl);
3320 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3321 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3323 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3324 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3327 case DMA_HOST_TO_DRAM:
3328 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3329 dir = DMA_TO_DEVICE;
3331 addr = le64_to_cpu(user_dma_pkt->src_addr);
3332 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3334 skip_host_mem_pin = true;
3337 case DMA_DRAM_TO_HOST:
3338 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3339 dir = DMA_FROM_DEVICE;
3341 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3342 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3345 case DMA_HOST_TO_SRAM:
3346 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3347 dir = DMA_TO_DEVICE;
3348 addr = le64_to_cpu(user_dma_pkt->src_addr);
3349 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3351 skip_host_mem_pin = true;
3354 case DMA_SRAM_TO_HOST:
3355 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3356 dir = DMA_FROM_DEVICE;
3357 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3358 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3361 dev_err(hdev->dev, "DMA direction is undefined\n");
3366 if (!hl_mem_area_inside_range(device_memory_addr,
3367 le32_to_cpu(user_dma_pkt->tsize),
3368 hdev->asic_prop.sram_user_base_address,
3369 hdev->asic_prop.sram_end_address)) {
3372 "SRAM address 0x%llx + 0x%x is invalid\n",
3374 user_dma_pkt->tsize);
3378 if (!hl_mem_area_inside_range(device_memory_addr,
3379 le32_to_cpu(user_dma_pkt->tsize),
3380 hdev->asic_prop.dram_user_base_address,
3381 hdev->asic_prop.dram_end_address)) {
3384 "DRAM address 0x%llx + 0x%x is invalid\n",
3386 user_dma_pkt->tsize);
3391 if (skip_host_mem_pin)
3392 parser->patched_cb_size += sizeof(*user_dma_pkt);
3394 if ((dir == DMA_TO_DEVICE) &&
3395 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3397 "Can't DMA from host on queue other then 1\n");
3401 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3408 static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3409 struct hl_cs_parser *parser,
3410 struct packet_lin_dma *user_dma_pkt)
3412 u64 sram_memory_addr, dram_memory_addr;
3413 enum goya_dma_direction user_dir;
3416 ctl = le32_to_cpu(user_dma_pkt->ctl);
3417 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3418 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3420 if (user_dir == DMA_DRAM_TO_SRAM) {
3421 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3422 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3423 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3425 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3426 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3427 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3430 if (!hl_mem_area_inside_range(sram_memory_addr,
3431 le32_to_cpu(user_dma_pkt->tsize),
3432 hdev->asic_prop.sram_user_base_address,
3433 hdev->asic_prop.sram_end_address)) {
3434 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3435 sram_memory_addr, user_dma_pkt->tsize);
3439 if (!hl_mem_area_inside_range(dram_memory_addr,
3440 le32_to_cpu(user_dma_pkt->tsize),
3441 hdev->asic_prop.dram_user_base_address,
3442 hdev->asic_prop.dram_end_address)) {
3443 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3444 dram_memory_addr, user_dma_pkt->tsize);
3448 parser->patched_cb_size += sizeof(*user_dma_pkt);
3453 static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3454 struct hl_cs_parser *parser,
3455 struct packet_lin_dma *user_dma_pkt)
3457 enum goya_dma_direction user_dir;
3461 dev_dbg(hdev->dev, "DMA packet details:\n");
3462 dev_dbg(hdev->dev, "source == 0x%llx\n",
3463 le64_to_cpu(user_dma_pkt->src_addr));
3464 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3465 le64_to_cpu(user_dma_pkt->dst_addr));
3466 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3468 ctl = le32_to_cpu(user_dma_pkt->ctl);
3469 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3470 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3473 * Special handling for DMA with size 0. The H/W has a bug where
3474 * this can cause the QMAN DMA to get stuck, so block it here.
3476 if (user_dma_pkt->tsize == 0) {
3478 "Got DMA with size 0, might reset the device\n");
3482 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3483 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3485 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3490 static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3491 struct hl_cs_parser *parser,
3492 struct packet_lin_dma *user_dma_pkt)
3494 dev_dbg(hdev->dev, "DMA packet details:\n");
3495 dev_dbg(hdev->dev, "source == 0x%llx\n",
3496 le64_to_cpu(user_dma_pkt->src_addr));
3497 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3498 le64_to_cpu(user_dma_pkt->dst_addr));
3499 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3503 * We can't allow the user to read from Host using QMANs other than 1.
3504 * PMMU and HPMMU addresses are equal, check only one of them.
3506 if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
3507 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3508 le32_to_cpu(user_dma_pkt->tsize),
3509 hdev->asic_prop.pmmu.start_addr,
3510 hdev->asic_prop.pmmu.end_addr)) {
3512 "Can't DMA from host on queue other then 1\n");
3516 if (user_dma_pkt->tsize == 0) {
3518 "Got DMA with size 0, might reset the device\n");
3522 parser->patched_cb_size += sizeof(*user_dma_pkt);
3527 static int goya_validate_wreg32(struct hl_device *hdev,
3528 struct hl_cs_parser *parser,
3529 struct packet_wreg32 *wreg_pkt)
3531 struct goya_device *goya = hdev->asic_specific;
3532 u32 sob_start_addr, sob_end_addr;
3535 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3536 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3538 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3539 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3540 dev_dbg(hdev->dev, "value == 0x%x\n",
3541 le32_to_cpu(wreg_pkt->value));
3543 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3544 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3550 * With MMU, DMA channels are not secured, so it doesn't matter where
3551 * the WR COMP is written, because it goes out with the
3552 * non-secured property
3554 if (goya->hw_cap_initialized & HW_CAP_MMU)
3557 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3558 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3560 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3561 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
3563 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3571 static int goya_validate_cb(struct hl_device *hdev,
3572 struct hl_cs_parser *parser, bool is_mmu)
3574 u32 cb_parsed_length = 0;
3577 parser->patched_cb_size = 0;
3579 /* cb_user_size is greater than 0, so the loop will always execute */
3580 while (cb_parsed_length < parser->user_cb_size) {
3581 enum packet_id pkt_id;
3583 struct goya_packet *user_pkt;
3585 user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
3587 pkt_id = (enum packet_id) (
3588 (le64_to_cpu(user_pkt->header) &
3589 PACKET_HEADER_PACKET_ID_MASK) >>
3590 PACKET_HEADER_PACKET_ID_SHIFT);
3592 if (!validate_packet_id(pkt_id)) {
3593 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
3598 pkt_size = goya_packet_sizes[pkt_id];
3599 cb_parsed_length += pkt_size;
3600 if (cb_parsed_length > parser->user_cb_size) {
3602 "packet 0x%x is out of CB boundary\n", pkt_id);
3608 case PACKET_WREG_32:
3610 * Although it is validated after being copied in patch_cb(),
3611 * we need to validate here as well because patch_cb() is
3612 * not called in the MMU path while this function is
3614 rc = goya_validate_wreg32(hdev,
3615 parser, (struct packet_wreg32 *) user_pkt);
3616 parser->patched_cb_size += pkt_size;
3619 case PACKET_WREG_BULK:
3621 "User not allowed to use WREG_BULK\n");
3625 case PACKET_MSG_PROT:
3627 "User not allowed to use MSG_PROT\n");
3632 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3637 dev_err(hdev->dev, "User not allowed to use STOP\n");
3641 case PACKET_LIN_DMA:
3643 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3644 (struct packet_lin_dma *) user_pkt);
3646 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3647 (struct packet_lin_dma *) user_pkt);
3650 case PACKET_MSG_LONG:
3651 case PACKET_MSG_SHORT:
3654 parser->patched_cb_size += pkt_size;
3658 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3669 * The new CB should have space at the end for two MSG_PROT packets:
3670 * 1. A packet that will act as a completion packet
3671 * 2. A packet that will generate an MSI-X interrupt
3673 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3678 static int goya_patch_dma_packet(struct hl_device *hdev,
3679 struct hl_cs_parser *parser,
3680 struct packet_lin_dma *user_dma_pkt,
3681 struct packet_lin_dma *new_dma_pkt,
3682 u32 *new_dma_pkt_size)
3684 struct hl_userptr *userptr;
3685 struct scatterlist *sg, *sg_next_iter;
3686 u32 count, dma_desc_cnt;
3688 dma_addr_t dma_addr, dma_addr_next;
3689 enum goya_dma_direction user_dir;
3690 u64 device_memory_addr, addr;
3691 enum dma_data_direction dir;
3692 struct sg_table *sgt;
3693 bool skip_host_mem_pin = false;
3695 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
3697 ctl = le32_to_cpu(user_dma_pkt->ctl);
3699 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3700 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3702 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3703 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3705 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3706 (user_dma_pkt->tsize == 0)) {
3707 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3708 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3712 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3713 addr = le64_to_cpu(user_dma_pkt->src_addr);
3714 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3715 dir = DMA_TO_DEVICE;
3717 skip_host_mem_pin = true;
3719 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3720 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3721 dir = DMA_FROM_DEVICE;
3724 if ((!skip_host_mem_pin) &&
3725 (hl_userptr_is_pinned(hdev, addr,
3726 le32_to_cpu(user_dma_pkt->tsize),
3727 parser->job_userptr_list, &userptr) == false)) {
3728 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3729 addr, user_dma_pkt->tsize);
3733 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3734 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3735 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3739 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
3741 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
3746 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3747 len = sg_dma_len(sg);
3748 dma_addr = sg_dma_address(sg);
3753 while ((count + 1) < sgt->nents) {
3754 sg_next_iter = sg_next(sg);
3755 len_next = sg_dma_len(sg_next_iter);
3756 dma_addr_next = sg_dma_address(sg_next_iter);
3761 if ((dma_addr + len == dma_addr_next) &&
3762 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3771 ctl = le32_to_cpu(user_dma_pkt->ctl);
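/*
 * Only the first generated descriptor keeps the user's engine-barrier
 * bit; the completion (rdcomp/wrcomp) bits are cleared on every
 * descriptor here and restored on the last one after the loop.
 */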
3772 if (likely(dma_desc_cnt))
3773 ctl &= ~GOYA_PKT_CTL_EB_MASK;
3774 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3775 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3776 new_dma_pkt->ctl = cpu_to_le32(ctl);
3777 new_dma_pkt->tsize = cpu_to_le32((u32) len);
3779 if (dir == DMA_TO_DEVICE) {
3780 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3781 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
3783 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3784 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
3788 device_memory_addr += len;
3793 if (!dma_desc_cnt) {
3795 "Error of 0 SG entries when patching DMA packet\n");
3799 /* Fix the last DMA packet - rdcomp/wrcomp must be as the user set them */
3801 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
3803 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
3808 static int goya_patch_cb(struct hl_device *hdev,
3809 struct hl_cs_parser *parser)
3811 u32 cb_parsed_length = 0;
3812 u32 cb_patched_cur_length = 0;
3815 /* cb_user_size is greater than 0, so the loop will always execute */
3816 while (cb_parsed_length < parser->user_cb_size) {
3817 enum packet_id pkt_id;
3819 u32 new_pkt_size = 0;
3820 struct goya_packet *user_pkt, *kernel_pkt;
3822 user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
3823 kernel_pkt = parser->patched_cb->kernel_address +
3824 cb_patched_cur_length;
3826 pkt_id = (enum packet_id) (
3827 (le64_to_cpu(user_pkt->header) &
3828 PACKET_HEADER_PACKET_ID_MASK) >>
3829 PACKET_HEADER_PACKET_ID_SHIFT);
3831 if (!validate_packet_id(pkt_id)) {
3832 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
3837 pkt_size = goya_packet_sizes[pkt_id];
3838 cb_parsed_length += pkt_size;
3839 if (cb_parsed_length > parser->user_cb_size) {
3841 "packet 0x%x is out of CB boundary\n", pkt_id);
3847 case PACKET_LIN_DMA:
3848 rc = goya_patch_dma_packet(hdev, parser,
3849 (struct packet_lin_dma *) user_pkt,
3850 (struct packet_lin_dma *) kernel_pkt,
3852 cb_patched_cur_length += new_pkt_size;
3855 case PACKET_WREG_32:
3856 memcpy(kernel_pkt, user_pkt, pkt_size);
3857 cb_patched_cur_length += pkt_size;
3858 rc = goya_validate_wreg32(hdev, parser,
3859 (struct packet_wreg32 *) kernel_pkt);
3862 case PACKET_WREG_BULK:
3864 "User not allowed to use WREG_BULK\n");
3868 case PACKET_MSG_PROT:
3870 "User not allowed to use MSG_PROT\n");
3875 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3880 dev_err(hdev->dev, "User not allowed to use STOP\n");
3884 case PACKET_MSG_LONG:
3885 case PACKET_MSG_SHORT:
3888 memcpy(kernel_pkt, user_pkt, pkt_size);
3889 cb_patched_cur_length += pkt_size;
3893 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3906 static int goya_parse_cb_mmu(struct hl_device *hdev,
3907 struct hl_cs_parser *parser)
3909 u64 patched_cb_handle;
3910 u32 patched_cb_size;
3911 struct hl_cb *user_cb;
3915 * The new CB should have space at the end for two MSG_PROT packets:
3916 * 1. A packet that will act as a completion packet
3917 * 2. A packet that will generate an MSI-X interrupt
3919 parser->patched_cb_size = parser->user_cb_size +
3920 sizeof(struct packet_msg_prot) * 2;
3922 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
3923 parser->patched_cb_size, false, false,
3924 &patched_cb_handle);
3928 "Failed to allocate patched CB for DMA CS %d\n",
3933 patched_cb_handle >>= PAGE_SHIFT;
3934 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3935 (u32) patched_cb_handle);
3936 /* hl_cb_get should never fail here */
3937 if (!parser->patched_cb) {
3938 dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
3939 (u32) patched_cb_handle);
3945 * The check that parser->user_cb_size <= parser->user_cb->size was done
3946 * in validate_queue_index().
3948 memcpy(parser->patched_cb->kernel_address,
3949 parser->user_cb->kernel_address,
3950 parser->user_cb_size);
3952 patched_cb_size = parser->patched_cb_size;
3954 /* validate patched CB instead of user CB */
3955 user_cb = parser->user_cb;
3956 parser->user_cb = parser->patched_cb;
3957 rc = goya_validate_cb(hdev, parser, true);
3958 parser->user_cb = user_cb;
3961 hl_cb_put(parser->patched_cb);
3965 if (patched_cb_size != parser->patched_cb_size) {
3966 dev_err(hdev->dev, "user CB size mismatch\n");
3967 hl_cb_put(parser->patched_cb);
3974 * Always call cb destroy here because we still have 1 reference
3975 * to it by calling cb_get earlier. After the job completes,
3976 * cb_put will release it, but here we want to remove it from the
3979 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3980 patched_cb_handle << PAGE_SHIFT);
3985 static int goya_parse_cb_no_mmu(struct hl_device *hdev,
3986 struct hl_cs_parser *parser)
3988 u64 patched_cb_handle;
3991 rc = goya_validate_cb(hdev, parser, false);
3996 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
3997 parser->patched_cb_size, false, false,
3998 &patched_cb_handle);
4001 "Failed to allocate patched CB for DMA CS %d\n", rc);
4005 patched_cb_handle >>= PAGE_SHIFT;
4006 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
4007 (u32) patched_cb_handle);
4008 /* hl_cb_get should never fail here */
4009 if (!parser->patched_cb) {
4010 dev_crit(hdev->dev, "DMA CB handle invalid 0x%x\n",
4011 (u32) patched_cb_handle);
4016 rc = goya_patch_cb(hdev, parser);
4019 hl_cb_put(parser->patched_cb);
4023 * Always call cb destroy here because we still have 1 reference
4024 * to it by calling cb_get earlier. After the job completes,
4025 * cb_put will release it, but here we want to remove it from the
4028 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
4029 patched_cb_handle << PAGE_SHIFT);
4033 hl_userptr_delete_list(hdev, parser->job_userptr_list);
4037 static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
4038 struct hl_cs_parser *parser)
4040 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
4041 struct goya_device *goya = hdev->asic_specific;
4043 if (goya->hw_cap_initialized & HW_CAP_MMU)
4046 /* For internal queue jobs, just check if CB address is valid */
4047 if (hl_mem_area_inside_range(
4048 (u64) (uintptr_t) parser->user_cb,
4049 parser->user_cb_size,
4050 asic_prop->sram_user_base_address,
4051 asic_prop->sram_end_address))
4054 if (hl_mem_area_inside_range(
4055 (u64) (uintptr_t) parser->user_cb,
4056 parser->user_cb_size,
4057 asic_prop->dram_user_base_address,
4058 asic_prop->dram_end_address))
4062 "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
4063 parser->user_cb, parser->user_cb_size);
4068 int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
4070 struct goya_device *goya = hdev->asic_specific;
4072 if (parser->queue_type == QUEUE_TYPE_INT)
4073 return goya_parse_cb_no_ext_queue(hdev, parser);
4075 if (goya->hw_cap_initialized & HW_CAP_MMU)
4076 return goya_parse_cb_mmu(hdev, parser);
4078 return goya_parse_cb_no_mmu(hdev, parser);
4081 void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
4082 u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
4085 struct packet_msg_prot *cq_pkt;
4088 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
4090 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4091 (1 << GOYA_PKT_CTL_EB_SHIFT) |
4092 (1 << GOYA_PKT_CTL_MB_SHIFT);
4093 cq_pkt->ctl = cpu_to_le32(tmp);
4094 cq_pkt->value = cpu_to_le32(cq_val);
4095 cq_pkt->addr = cpu_to_le64(cq_addr);
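/*
 * The second MSG_PROT below writes the vector index to the PCIe MSI-X
 * doorbell register, which makes the device fire the matching MSI-X
 * interrupt on the host.
 */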
4099 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
4100 (1 << GOYA_PKT_CTL_MB_SHIFT);
4101 cq_pkt->ctl = cpu_to_le32(tmp);
4102 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
4103 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
4106 void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4108 WREG32(mmCPU_EQ_CI, val);
4111 void goya_restore_phase_topology(struct hl_device *hdev)
4116 static void goya_clear_sm_regs(struct hl_device *hdev)
4118 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4120 num_of_sob_in_longs =
4121 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
4123 num_of_mon_in_longs =
4124 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
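/*
 * The two spans above are byte distances; each 32-bit register occupies
 * 4 bytes, hence the loops below step by 4.
 */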
4126 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
4127 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
4129 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
4130 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
4132 /* Flush all WREG to prevent race */
4133 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4137 * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped
4140 * @hdev: pointer to hl_device structure
4141 * @addr: device or host mapped address
4142 * @val: returned value
4144 * In case of DDR address that is not mapped into the default aperture that
4145 * the DDR bar exposes, the function will configure the iATU so that the DDR
4146 * bar will be positioned at a base address that allows reading from the
4147 * required address. Configuring the iATU during normal operation can
4148 * lead to undefined behavior and therefore, should be done with extreme care
4151 static int goya_debugfs_read32(struct hl_device *hdev, u64 addr,
4152 bool user_address, u32 *val)
4154 struct asic_fixed_properties *prop = &hdev->asic_prop;
4155 u64 ddr_bar_addr, host_phys_end;
4158 host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
4160 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4161 *val = RREG32(addr - CFG_BASE);
4163 } else if ((addr >= SRAM_BASE_ADDR) &&
4164 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4166 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4167 (addr - SRAM_BASE_ADDR));
4169 } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
4171 u64 bar_base_addr = DRAM_PHYS_BASE +
4172 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4174 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4175 if (ddr_bar_addr != U64_MAX) {
4176 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4177 (addr - bar_base_addr));
4179 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4182 if (ddr_bar_addr == U64_MAX)
4185 } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
4186 user_address && !iommu_present(&pci_bus_type)) {
4187 *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
4197 * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped
4200 * @hdev: pointer to hl_device structure
4201 * @addr: device or host mapped address
4202 * @val: value to write
4204 * In case of DDR address that is not mapped into the default aperture that
4205 * the DDR bar exposes, the function will configure the iATU so that the DDR
4206 * bar will be positioned at a base address that allows writing to the
4207 * required address. Configuring the iATU during normal operation can
4208 * lead to undefined behavior and therefore, should be done with extreme care
4211 static int goya_debugfs_write32(struct hl_device *hdev, u64 addr,
4212 bool user_address, u32 val)
4214 struct asic_fixed_properties *prop = &hdev->asic_prop;
4215 u64 ddr_bar_addr, host_phys_end;
4218 host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
4220 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4221 WREG32(addr - CFG_BASE, val);
4223 } else if ((addr >= SRAM_BASE_ADDR) &&
4224 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4226 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4227 (addr - SRAM_BASE_ADDR));
4229 } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
4231 u64 bar_base_addr = DRAM_PHYS_BASE +
4232 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4234 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4235 if (ddr_bar_addr != U64_MAX) {
4236 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4237 (addr - bar_base_addr));
4239 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4242 if (ddr_bar_addr == U64_MAX)
4245 } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
4246 user_address && !iommu_present(&pci_bus_type)) {
4247 *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4256 static int goya_debugfs_read64(struct hl_device *hdev, u64 addr,
4257 bool user_address, u64 *val)
4259 struct asic_fixed_properties *prop = &hdev->asic_prop;
4260 u64 ddr_bar_addr, host_phys_end;
4263 host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
4265 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4266 u32 val_l = RREG32(addr - CFG_BASE);
4267 u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
4269 *val = (((u64) val_h) << 32) | val_l;
4271 } else if ((addr >= SRAM_BASE_ADDR) &&
4272 (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
4274 *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4275 (addr - SRAM_BASE_ADDR));
4278 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
4280 u64 bar_base_addr = DRAM_PHYS_BASE +
4281 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4283 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4284 if (ddr_bar_addr != U64_MAX) {
4285 *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
4286 (addr - bar_base_addr));
4288 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4291 if (ddr_bar_addr == U64_MAX)
4294 } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
4295 user_address && !iommu_present(&pci_bus_type)) {
4296 *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
4305 static int goya_debugfs_write64(struct hl_device *hdev, u64 addr,
4306 bool user_address, u64 val)
4308 struct asic_fixed_properties *prop = &hdev->asic_prop;
4309 u64 ddr_bar_addr, host_phys_end;
4312 host_phys_end = HOST_PHYS_BASE + HOST_PHYS_SIZE;
4314 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4315 WREG32(addr - CFG_BASE, lower_32_bits(val));
4316 WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
4318 } else if ((addr >= SRAM_BASE_ADDR) &&
4319 (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
4321 writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4322 (addr - SRAM_BASE_ADDR));
4325 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
4327 u64 bar_base_addr = DRAM_PHYS_BASE +
4328 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4330 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4331 if (ddr_bar_addr != U64_MAX) {
4332 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4333 (addr - bar_base_addr));
4335 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4338 if (ddr_bar_addr == U64_MAX)
4341 } else if (addr >= HOST_PHYS_BASE && addr < host_phys_end &&
4342 user_address && !iommu_present(&pci_bus_type)) {
4343 *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4352 static int goya_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
4355 dev_err(hdev->dev, "Reading via DMA is not implemented yet\n");
4359 static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4361 struct goya_device *goya = hdev->asic_specific;
4363 if (hdev->hard_reset_pending)
4366 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4367 (addr - goya->ddr_bar_cur_addr));
4370 static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4372 struct goya_device *goya = hdev->asic_specific;
4374 if (hdev->hard_reset_pending)
4377 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4378 (addr - goya->ddr_bar_cur_addr));
4381 static const char *_goya_get_event_desc(u16 event_type)
4383 switch (event_type) {
4384 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4386 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4387 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4388 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4389 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4390 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4391 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4392 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4393 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4395 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4397 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4398 return "MME_ecc_ext";
4399 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4401 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4403 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4405 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4406 return "CPU_if_ecc";
4407 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4409 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4410 return "PSOC_coresight";
4411 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4413 case GOYA_ASYNC_EVENT_ID_GIC500:
4415 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4417 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4419 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4420 return "L2_ram_ecc";
4421 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4422 return "PSOC_gpio_05_sw_reset";
4423 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4424 return "PSOC_gpio_10_vrhot_icrit";
4425 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4427 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4428 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4429 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4430 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4431 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4432 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4433 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4434 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4436 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4438 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4440 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4441 return "CPU_axi_splitter";
4442 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4443 return "PSOC_axi_dec";
4444 case GOYA_ASYNC_EVENT_ID_PSOC:
4446 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4447 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4448 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4449 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4450 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4451 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4452 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4453 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4454 return "TPC%d_krn_err";
4455 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4457 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4459 case GOYA_ASYNC_EVENT_ID_MME_QM:
4461 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4463 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4465 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4467 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4468 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4469 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4470 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4471 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4472 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4473 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4474 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4475 return "TPC%d_bmon_spmu";
4476 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4477 return "DMA_bm_ch%d";
4478 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4479 return "POWER_ENV_S";
4480 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4481 return "POWER_ENV_E";
4482 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4483 return "THERMAL_ENV_S";
4484 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4485 return "THERMAL_ENV_E";
4486 case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
4487 return "QUEUE_OUT_OF_SYNC";
4493 static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
4497 switch (event_type) {
4498 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4499 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4500 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4501 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4502 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4503 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4504 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4505 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4506 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
4507 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4509 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4510 index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
4511 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4513 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4514 index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
4515 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4517 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4518 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4519 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4520 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4521 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4522 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4523 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4524 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4525 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4526 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4528 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4529 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4530 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4531 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4532 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4533 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4534 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4535 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4536 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4537 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4539 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4540 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4541 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4543 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4544 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4545 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4547 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4548 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4549 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4551 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4552 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4553 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4555 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4556 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4557 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4558 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4559 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4560 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4561 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4562 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4563 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
4564 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4566 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4567 index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
4568 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4570 case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
4571 snprintf(desc, size, _goya_get_event_desc(event_type));
4574 snprintf(desc, size, _goya_get_event_desc(event_type));
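/*
 * RAZWI (Read As Zero, Write Ignored) transactions are latched by the DMA
 * macro in per-direction VLD registers. Each register is checked, the
 * violation reported (rate-limited), and the register cleared so the next
 * illegal transaction can be captured.
 */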
4579 static void goya_print_razwi_info(struct hl_device *hdev)
4581 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4582 dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
4583 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4586 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4587 dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
4588 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4591 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4592 dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
4593 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4596 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4597 dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
4598 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
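/*
 * On an MMU page fault the faulting VA is reassembled from two capture
 * registers: bits 49:32 come from MMU_PAGE_ERROR_CAPTURE and bits 31:0
 * from MMU_PAGE_ERROR_CAPTURE_VA. Clearing the capture register afterwards
 * presumably re-arms it for the next fault.
 */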
4602 static void goya_print_mmu_error_info(struct hl_device *hdev)
4604 struct goya_device *goya = hdev->asic_specific;
4608 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4611 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4612 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4613 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4615 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4617 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
4620 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
4624 static void goya_print_out_of_sync_info(struct hl_device *hdev,
4625 struct cpucp_pkt_sync_err *sync_err)
4627 struct hl_hw_queue *q = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
4629 dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%u\n",
4630 sync_err->pi, sync_err->ci, q->pi, atomic_read(&q->ci));
4633 static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
4638 goya_get_event_desc(event_type, desc, sizeof(desc));
4639 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4643 goya_print_razwi_info(hdev);
4644 goya_print_mmu_error_info(hdev);
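/*
 * goya_unmask_irq_arr() re-enables a whole set of event IRQs with a single
 * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY message. The variable-length packet
 * is rounded up to an 8-byte boundary ((x + 7) & ~7 is equivalent to
 * ALIGN(x, 8)) and its total size must fit in a u16, so oversized arrays
 * are rejected up front.
 */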
4648 static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4649 size_t irq_arr_size)
4651 struct cpucp_unmask_irq_arr_packet *pkt;
4652 size_t total_pkt_size;
4655 int irq_num_entries, irq_arr_index;
4656 __le32 *goya_irq_arr;
4658 total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
4661 /* data must be 8-byte aligned so that CPU-CP can copy it */
4662 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4664 /* total_pkt_size is cast to u16 later on */
4665 if (total_pkt_size > USHRT_MAX) {
4666 dev_err(hdev->dev, "too many elements in IRQ array\n");
4670 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4674 irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
4675 pkt->length = cpu_to_le32(irq_num_entries);
4677 /* We must perform any necessary endianness conversion on the irq
4678 * array being passed to the goya hardware
4680 for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
4681 irq_arr_index < irq_num_entries ; irq_arr_index++)
4682 goya_irq_arr[irq_arr_index] =
4683 cpu_to_le32(irq_arr[irq_arr_index]);
4685 pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4686 CPUCP_PKT_CTL_OPCODE_SHIFT);
4688 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4689 total_pkt_size, 0, &result);
4692 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4699 static int goya_soft_reset_late_init(struct hl_device *hdev)
4702 * Unmask all IRQs since some could have been received
4703 * during the soft reset
4705 return goya_unmask_irq_arr(hdev, goya_all_events,
4706 sizeof(goya_all_events));
4709 static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4711 struct cpucp_packet pkt;
4715 memset(&pkt, 0, sizeof(pkt));
4717 pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
4718 CPUCP_PKT_CTL_OPCODE_SHIFT);
4719 pkt.value = cpu_to_le64(event_type);
4721 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4725 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d\n", event_type);
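/*
 * Clock throttling events arrive in start/end ("_S"/"_E") pairs; the
 * handler sets or clears the matching bit in hdev->clk_throttling_reason
 * so the current throttling cause can be queried later.
 */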
4730 static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
4732 switch (event_type) {
4733 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4734 hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
4735 dev_info_ratelimited(hdev->dev,
4736 "Clock throttling due to power consumption\n");
4738 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4739 hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
4740 dev_info_ratelimited(hdev->dev,
4741 "Power envelope is safe, back to optimal clock\n");
4743 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4744 hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
4745 dev_info_ratelimited(hdev->dev,
4746 "Clock throttling due to overheating\n");
4748 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4749 hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
4750 dev_info_ratelimited(hdev->dev,
4751 "Thermal envelope is safe, back to optimal clock\n");
4755 dev_err(hdev->dev, "Received invalid clock change event %d\n",
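/*
 * goya_handle_eqe() is the event-queue dispatcher: the event type is taken
 * from the EQ entry header, per-event statistics are updated, and the event
 * is routed by severity. Fatal errors may trigger a hard reset, recoverable
 * errors are printed and their IRQ unmasked again in the F/W, and clock
 * change events only update the throttling bookkeeping.
 */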
4761 void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4763 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4764 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4765 >> EQ_CTL_EVENT_TYPE_SHIFT);
4766 struct goya_device *goya = hdev->asic_specific;
4768 goya->events_stat[event_type]++;
4769 goya->events_stat_aggregate[event_type]++;
4771 switch (event_type) {
4772 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4773 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4774 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4775 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4776 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4777 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4778 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4779 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4780 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4781 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4782 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4783 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4784 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4785 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4786 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4787 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4788 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4789 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4790 case GOYA_ASYNC_EVENT_ID_GIC500:
4791 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4792 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4793 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4794 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4795 goya_print_irq_info(hdev, event_type, false);
4796 if (hdev->hard_reset_on_fw_events)
4797 hl_device_reset(hdev, HL_RESET_HARD);
4800 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4801 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4802 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4803 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4804 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4805 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4806 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4807 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4808 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4809 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4810 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4811 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4812 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4813 case GOYA_ASYNC_EVENT_ID_PSOC:
4814 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4815 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4816 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4817 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4818 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4819 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4820 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4821 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4822 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4823 case GOYA_ASYNC_EVENT_ID_MME_QM:
4824 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4825 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4826 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4827 goya_print_irq_info(hdev, event_type, true);
4828 goya_unmask_irq(hdev, event_type);
4831 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4832 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4833 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4834 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4835 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4836 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4837 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4838 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4839 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4840 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4841 goya_print_irq_info(hdev, event_type, false);
4842 goya_unmask_irq(hdev, event_type);
4845 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4846 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4847 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4848 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4849 goya_print_clk_change_info(hdev, event_type);
4850 goya_unmask_irq(hdev, event_type);
4853 case GOYA_ASYNC_EVENT_PKT_QUEUE_OUT_SYNC:
4854 goya_print_irq_info(hdev, event_type, false);
4855 goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
4856 if (hdev->hard_reset_on_fw_events)
4857 hl_device_reset(hdev, HL_RESET_HARD);
4859 hl_fw_unmask_irq(hdev, event_type);
4863 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4869 void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
4871 struct goya_device *goya = hdev->asic_specific;
4874 *size = (u32) sizeof(goya->events_stat_aggregate);
4875 return goya->events_stat_aggregate;
4878 *size = (u32) sizeof(goya->events_stat);
4879 return goya->events_stat;
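/*
 * goya_memset_device_memory() fills SRAM/DRAM with a 64-bit pattern by
 * building LIN_DMA packets with the MEMSET bit set, so src_addr carries the
 * value rather than an address. The transfer-size field is 32 bits wide,
 * hence the split into 2GB chunks; room for a trailing MSG_PROT completion
 * packet is reserved and the CB is submitted as a kernel job on QMAN0.
 */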
4882 static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
4883 u64 val, bool is_dram)
4885 struct packet_lin_dma *lin_dma_pkt;
4886 struct hl_cs_job *job;
4889 int rc, lin_dma_pkts_cnt;
4891 lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
4892 cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
4893 sizeof(struct packet_msg_prot);
4894 cb = hl_cb_kernel_create(hdev, cb_size, false);
4898 lin_dma_pkt = cb->kernel_address;
4901 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4903 ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4904 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4905 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4906 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4907 (1 << GOYA_PKT_CTL_MB_SHIFT));
4908 ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4909 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4910 lin_dma_pkt->ctl = cpu_to_le32(ctl);
4912 lin_dma_pkt->src_addr = cpu_to_le64(val);
4913 lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4914 if (lin_dma_pkts_cnt > 1)
4915 lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
4917 lin_dma_pkt->tsize = cpu_to_le32(size);
4922 } while (--lin_dma_pkts_cnt);
4924 job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
4926 dev_err(hdev->dev, "Failed to allocate a new job\n");
4933 atomic_inc(&job->user_cb->cs_cnt);
4934 job->user_cb_size = cb_size;
4935 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4936 job->patched_cb = job->user_cb;
4937 job->job_cb_size = job->user_cb_size;
4939 hl_debugfs_add_job(hdev, job);
4941 rc = goya_send_job_on_qman0(hdev, job);
4943 hl_debugfs_remove_job(hdev, job);
4945 atomic_dec(&cb->cs_cnt);
4949 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
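/*
 * Context switch scrubs the SRAM with a recognizable 0x77.. pattern (a much
 * smaller region when running on PLDM) and restores registers the previous
 * context was allowed to change, such as the per-channel DMA
 * write-completion addresses, back to their default SOB targets.
 */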
4954 int goya_context_switch(struct hl_device *hdev, u32 asid)
4956 struct asic_fixed_properties *prop = &hdev->asic_prop;
4957 u64 addr = prop->sram_base_address, sob_addr;
4958 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4959 u64 val = 0x7777777777777777ull;
4961 u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
4962 mmDMA_CH_0_WR_COMP_ADDR_LO;
4964 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4966 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4970 /* we need to reset registers that the user is allowed to change */
4971 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
4972 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));
4974 for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
4975 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
4977 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
4978 lower_32_bits(sob_addr));
4981 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
4983 goya_clear_sm_regs(hdev);
4988 static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4990 struct asic_fixed_properties *prop = &hdev->asic_prop;
4991 struct goya_device *goya = hdev->asic_specific;
4992 u64 addr = prop->mmu_pgt_addr;
4993 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4996 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4999 return goya_memset_device_memory(hdev, addr, size, 0, true);
5002 static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
5004 struct goya_device *goya = hdev->asic_specific;
5005 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
5006 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
5007 u64 val = 0x9999999999999999ull;
5009 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
5012 return goya_memset_device_memory(hdev, addr, size, val, true);
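/*
 * The device CPU requires MMU mappings for its F/W image (identity-mapped
 * in DRAM with 2MB pages) and for the CPU-accessible host memory region;
 * the latter uses a single 2MB page when the DMA address is 2MB-aligned and
 * falls back to 4KB pages otherwise. On failure, mappings already
 * established are rolled back in reverse order.
 */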
5015 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
5017 struct asic_fixed_properties *prop = &hdev->asic_prop;
5018 struct goya_device *goya = hdev->asic_specific;
5022 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
5025 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
5026 rc = hl_mmu_map_page(hdev->kernel_ctx,
5027 prop->dram_base_address + off,
5028 prop->dram_base_address + off, PAGE_SIZE_2MB,
5029 (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
5031 dev_err(hdev->dev, "Map failed for address 0x%llx\n",
5032 prop->dram_base_address + off);
5037 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
5038 rc = hl_mmu_map_page(hdev->kernel_ctx,
5039 VA_CPU_ACCESSIBLE_MEM_ADDR,
5040 hdev->cpu_accessible_dma_address,
5041 PAGE_SIZE_2MB, true);
5045 "Map failed for CPU accessible memory\n");
5046 off -= PAGE_SIZE_2MB;
5050 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
5051 rc = hl_mmu_map_page(hdev->kernel_ctx,
5052 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
5053 hdev->cpu_accessible_dma_address + cpu_off,
5054 PAGE_SIZE_4KB, true);
5057 "Map failed for CPU accessible memory\n");
5058 cpu_off -= PAGE_SIZE_4KB;
5064 goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
5065 goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
5066 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
5067 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);
5069 /* Make sure configuration is flushed to device */
5070 RREG32(mmCPU_IF_AWUSER_OVR_EN);
5072 goya->device_cpu_mmu_mappings_done = true;
5077 for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
5078 if (hl_mmu_unmap_page(hdev->kernel_ctx,
5079 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
5080 PAGE_SIZE_4KB, true))
5081 dev_warn_ratelimited(hdev->dev,
5082 "failed to unmap address 0x%llx\n",
5083 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
5085 for (; off >= 0 ; off -= PAGE_SIZE_2MB)
5086 if (hl_mmu_unmap_page(hdev->kernel_ctx,
5087 prop->dram_base_address + off, PAGE_SIZE_2MB,
5089 dev_warn_ratelimited(hdev->dev,
5090 "failed to unmap address 0x%llx\n",
5091 prop->dram_base_address + off);
5096 void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
5098 struct asic_fixed_properties *prop = &hdev->asic_prop;
5099 struct goya_device *goya = hdev->asic_specific;
5102 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
5105 if (!goya->device_cpu_mmu_mappings_done)
5108 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
5109 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
5111 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
5112 if (hl_mmu_unmap_page(hdev->kernel_ctx,
5113 VA_CPU_ACCESSIBLE_MEM_ADDR,
5114 PAGE_SIZE_2MB, true))
5116 "Failed to unmap CPU accessible memory\n");
5118 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
5119 if (hl_mmu_unmap_page(hdev->kernel_ctx,
5120 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
5122 (cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
5123 dev_warn_ratelimited(hdev->dev,
5124 "failed to unmap address 0x%llx\n",
5125 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
5128 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
5129 if (hl_mmu_unmap_page(hdev->kernel_ctx,
5130 prop->dram_base_address + off, PAGE_SIZE_2MB,
5131 (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
5132 dev_warn_ratelimited(hdev->dev,
5133 "Failed to unmap address 0x%llx\n",
5134 prop->dram_base_address + off);
5136 goya->device_cpu_mmu_mappings_done = false;
5139 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
5141 struct goya_device *goya = hdev->asic_specific;
5144 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
5147 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
5148 dev_crit(hdev->dev, "asid %u is too big\n", asid);
5152 /* zero the MMBP and ASID bits and then set the ASID */
5153 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
5154 goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
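/*
 * The invalidation below always flushes the full L0 & L1 MMU caches:
 * writing 1 to STLB_INV_ALL_START kicks the invalidation and the register
 * is then polled until the H/W completes. A timeout is treated as fatal
 * and escalates to a hard reset.
 */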
5157 static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
5160 struct goya_device *goya = hdev->asic_specific;
5161 u32 status, timeout_usec;
5164 if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
5165 hdev->hard_reset_pending)
5168 /* no need for L1-only invalidation in Goya */
5173 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5175 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5177 /* L0 & L1 invalidation */
5178 WREG32(mmSTLB_INV_ALL_START, 1);
5180 rc = hl_poll_timeout(
5182 mmSTLB_INV_ALL_START,
5189 dev_err_ratelimited(hdev->dev,
5190 "MMU cache invalidation timeout\n");
5191 hl_device_reset(hdev, HL_RESET_HARD);
5197 static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
5198 bool is_hard, u32 asid, u64 va, u64 size)
5200 struct goya_device *goya = hdev->asic_specific;
5201 u32 status, timeout_usec, inv_data, pi;
5204 if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
5205 hdev->hard_reset_pending)
5208 /* no need for L1-only invalidation in Goya */
5213 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5215 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5218 * TODO: currently we invalidate the entire L0 & L1, as in a regular hard
5219 * invalidation. We need to apply invalidation of specific cache lines,
5220 * using a mask of ASID & VA & size.
5221 * Note that L1 will be flushed entirely in any case.
5224 /* L0 & L1 invalidation */
5225 inv_data = RREG32(mmSTLB_CACHE_INV);
5227 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
5228 WREG32(mmSTLB_CACHE_INV,
5229 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
5231 rc = hl_poll_timeout(
5233 mmSTLB_INV_CONSUMER_INDEX,
5240 dev_err_ratelimited(hdev->dev,
5241 "MMU cache invalidation timeout\n");
5242 hl_device_reset(hdev, HL_RESET_HARD);
5248 int goya_send_heartbeat(struct hl_device *hdev)
5250 struct goya_device *goya = hdev->asic_specific;
5252 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5255 return hl_fw_send_heartbeat(hdev);
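/*
 * After the CPUCP handshake, the F/W-reported DRAM size is sanity-checked:
 * it must be a power of two and no smaller than the default, otherwise the
 * default size is kept. A missing card name likewise falls back to the
 * Goya default.
 */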
5258 int goya_cpucp_info_get(struct hl_device *hdev)
5260 struct goya_device *goya = hdev->asic_specific;
5261 struct asic_fixed_properties *prop = &hdev->asic_prop;
5265 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5268 rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0);
5272 dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
5274 if ((!is_power_of_2(dram_size)) ||
5275 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5277 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
5279 dram_size = DRAM_PHYS_DEFAULT_SIZE;
5282 prop->dram_size = dram_size;
5283 prop->dram_end_address = prop->dram_base_address + dram_size;
5286 if (!strlen(prop->cpucp_info.card_name))
5287 strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
5293 static void goya_set_clock_gating(struct hl_device *hdev)
5295 /* clock gating not supported in Goya */
5298 static void goya_disable_clock_gating(struct hl_device *hdev)
5300 /* clock gating not supported in Goya */
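/*
 * Idleness is evaluated per engine from the QM/CMDQ global status and an
 * engine-specific status register (DMA core, TPC CFG, MME ARCH). Busy
 * engines are flagged in the caller-supplied bitmask, and when a seq_file
 * is provided a per-engine table is emitted for debugfs.
 */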
5303 static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask_arr,
5304 u8 mask_len, struct seq_file *s)
5306 const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
5307 const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
5308 unsigned long *mask = (unsigned long *)mask_arr;
5309 u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
5311 bool is_idle = true, is_eng_idle;
5316 seq_puts(s, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
5317 "--- ------- ------------ -------------\n");
5319 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5321 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5322 qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
5323 dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
5324 is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
5325 IS_DMA_IDLE(dma_core_sts0);
5326 is_idle &= is_eng_idle;
5328 if (mask && !is_eng_idle)
5329 set_bit(GOYA_ENGINE_ID_DMA_0 + i, mask);
5331 seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
5332 qm_glbl_sts0, dma_core_sts0);
5337 "\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
5338 "--- ------- ------------ -------------- ----------\n");
5340 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5342 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5343 qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
5344 cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
5345 tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
5346 is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
5347 IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
5348 IS_TPC_IDLE(tpc_cfg_sts);
5349 is_idle &= is_eng_idle;
5351 if (mask && !is_eng_idle)
5352 set_bit(GOYA_ENGINE_ID_TPC_0 + i, mask);
5354 seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
5355 qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
5360 "\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
5361 "--- ------- ------------ -------------- -----------\n");
5363 qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
5364 cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
5365 mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
5366 is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
5367 IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
5368 IS_MME_IDLE(mme_arch_sts);
5369 is_idle &= is_eng_idle;
5371 if (mask && !is_eng_idle)
5372 set_bit(GOYA_ENGINE_ID_MME_0, mask);
5374 seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
5375 cmdq_glbl_sts0, mme_arch_sts);
5382 static void goya_hw_queues_lock(struct hl_device *hdev)
5383 __acquires(&goya->hw_queues_lock)
5385 struct goya_device *goya = hdev->asic_specific;
5387 spin_lock(&goya->hw_queues_lock);
5390 static void goya_hw_queues_unlock(struct hl_device *hdev)
5391 __releases(&goya->hw_queues_lock)
5393 struct goya_device *goya = hdev->asic_specific;
5395 spin_unlock(&goya->hw_queues_lock);
5398 static u32 goya_get_pci_id(struct hl_device *hdev)
5400 return hdev->pdev->device;
5403 static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5406 struct goya_device *goya = hdev->asic_specific;
5408 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5411 return hl_fw_get_eeprom_data(hdev, data, max_size);
5414 static int goya_ctx_init(struct hl_ctx *ctx)
5416 if (ctx->asid != HL_KERNEL_ASID_ID)
5417 goya_mmu_prepare(ctx->hdev, ctx->asid);
5422 u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
5427 static u32 goya_get_signal_cb_size(struct hl_device *hdev)
5432 static u32 goya_get_wait_cb_size(struct hl_device *hdev)
5437 static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
5443 static u32 goya_gen_wait_cb(struct hl_device *hdev,
5444 struct hl_gen_wait_properties *prop)
5449 static void goya_reset_sob(struct hl_device *hdev, void *data)
5454 static void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
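/*
 * The DMA mask is derived from a magic value the F/W leaves in a sticky
 * (non-reset) scratch register: on POWER9 hosts reporting the matching
 * magic, full 64-bit DMA addressing is enabled; otherwise the default
 * 48-bit mask is used.
 */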
5459 static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
5461 if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
5462 HL_POWER9_HOST_MAGIC) {
5463 dev_dbg(hdev->dev, "Working in 64-bit DMA mode\n");
5464 hdev->power9_64bit_dma_enable = 1;
5465 hdev->dma_mask = 64;
5467 dev_dbg(hdev->dev, "Working in 48-bit DMA mode\n");
5468 hdev->power9_64bit_dma_enable = 0;
5469 hdev->dma_mask = 48;
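/*
 * The device time is a free-running 64-bit counter exposed as two 32-bit
 * registers; the upper half is read first and OR-ed with the lower half.
 * Note that the two reads are not atomic with respect to a carry from the
 * lower into the upper half.
 */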
5473 u64 goya_get_device_time(struct hl_device *hdev)
5475 u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
5477 return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
5480 static void goya_collective_wait_init_cs(struct hl_cs *cs)
5485 static int goya_collective_wait_create_jobs(struct hl_device *hdev,
5486 struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
5487 u32 collective_engine_id)
5492 static void goya_ctx_fini(struct hl_ctx *ctx)
5497 static int goya_get_hw_block_id(struct hl_device *hdev, u64 block_addr,
5498 u32 *block_size, u32 *block_id)
5503 static int goya_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
5504 u32 block_id, u32 block_size)
5509 static void goya_enable_events_from_fw(struct hl_device *hdev)
5511 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
5512 GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
5515 static const struct hl_asic_funcs goya_funcs = {
5516 .early_init = goya_early_init,
5517 .early_fini = goya_early_fini,
5518 .late_init = goya_late_init,
5519 .late_fini = goya_late_fini,
5520 .sw_init = goya_sw_init,
5521 .sw_fini = goya_sw_fini,
5522 .hw_init = goya_hw_init,
5523 .hw_fini = goya_hw_fini,
5524 .halt_engines = goya_halt_engines,
5525 .suspend = goya_suspend,
5526 .resume = goya_resume,
5527 .cb_mmap = goya_cb_mmap,
5528 .ring_doorbell = goya_ring_doorbell,
5529 .pqe_write = goya_pqe_write,
5530 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5531 .asic_dma_free_coherent = goya_dma_free_coherent,
5532 .scrub_device_mem = goya_scrub_device_mem,
5533 .get_int_queue_base = goya_get_int_queue_base,
5534 .test_queues = goya_test_queues,
5535 .asic_dma_pool_zalloc = goya_dma_pool_zalloc,
5536 .asic_dma_pool_free = goya_dma_pool_free,
5537 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5538 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
5539 .hl_dma_unmap_sg = goya_dma_unmap_sg,
5540 .cs_parser = goya_cs_parser,
5541 .asic_dma_map_sg = goya_dma_map_sg,
5542 .get_dma_desc_list_size = goya_get_dma_desc_list_size,
5543 .add_end_of_cb_packets = goya_add_end_of_cb_packets,
5544 .update_eq_ci = goya_update_eq_ci,
5545 .context_switch = goya_context_switch,
5546 .restore_phase_topology = goya_restore_phase_topology,
5547 .debugfs_read32 = goya_debugfs_read32,
5548 .debugfs_write32 = goya_debugfs_write32,
5549 .debugfs_read64 = goya_debugfs_read64,
5550 .debugfs_write64 = goya_debugfs_write64,
5551 .debugfs_read_dma = goya_debugfs_read_dma,
5552 .add_device_attr = goya_add_device_attr,
5553 .handle_eqe = goya_handle_eqe,
5554 .set_pll_profile = goya_set_pll_profile,
5555 .get_events_stat = goya_get_events_stat,
5556 .read_pte = goya_read_pte,
5557 .write_pte = goya_write_pte,
5558 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
5559 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
5560 .send_heartbeat = goya_send_heartbeat,
5561 .set_clock_gating = goya_set_clock_gating,
5562 .disable_clock_gating = goya_disable_clock_gating,
5563 .debug_coresight = goya_debug_coresight,
5564 .is_device_idle = goya_is_device_idle,
5565 .soft_reset_late_init = goya_soft_reset_late_init,
5566 .hw_queues_lock = goya_hw_queues_lock,
5567 .hw_queues_unlock = goya_hw_queues_unlock,
5568 .get_pci_id = goya_get_pci_id,
5569 .get_eeprom_data = goya_get_eeprom_data,
5570 .send_cpu_message = goya_send_cpu_message,
5571 .pci_bars_map = goya_pci_bars_map,
5572 .init_iatu = goya_init_iatu,
5575 .halt_coresight = goya_halt_coresight,
5576 .ctx_init = goya_ctx_init,
5577 .ctx_fini = goya_ctx_fini,
5578 .get_clk_rate = goya_get_clk_rate,
5579 .get_queue_id_for_cq = goya_get_queue_id_for_cq,
5580 .read_device_fw_version = goya_read_device_fw_version,
5581 .load_firmware_to_device = goya_load_firmware_to_device,
5582 .load_boot_fit_to_device = goya_load_boot_fit_to_device,
5583 .get_signal_cb_size = goya_get_signal_cb_size,
5584 .get_wait_cb_size = goya_get_wait_cb_size,
5585 .gen_signal_cb = goya_gen_signal_cb,
5586 .gen_wait_cb = goya_gen_wait_cb,
5587 .reset_sob = goya_reset_sob,
5588 .reset_sob_group = goya_reset_sob_group,
5589 .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
5590 .get_device_time = goya_get_device_time,
5591 .collective_wait_init_cs = goya_collective_wait_init_cs,
5592 .collective_wait_create_jobs = goya_collective_wait_create_jobs,
5593 .scramble_addr = hl_mmu_scramble_addr,
5594 .descramble_addr = hl_mmu_descramble_addr,
5595 .ack_protection_bits_errors = goya_ack_protection_bits_errors,
5596 .get_hw_block_id = goya_get_hw_block_id,
5597 .hw_block_mmap = goya_block_mmap,
5598 .enable_events_from_fw = goya_enable_events_from_fw
5602 * goya_set_asic_funcs - set Goya function pointers
5604 * @hdev: pointer to hl_device structure
5607 void goya_set_asic_funcs(struct hl_device *hdev)
5609 hdev->asic_funcs = &goya_funcs;