1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright 2016-2019 HabanaLabs, Ltd.
9 #include "../include/hw_ip/mmu/mmu_general.h"
10 #include "../include/hw_ip/mmu/mmu_v1_0.h"
11 #include "../include/goya/asic_reg/goya_masks.h"
12 #include "../include/goya/goya_reg_map.h"
14 #include <linux/pci.h>
15 #include <linux/hwmon.h>
16 #include <linux/iommu.h>
17 #include <linux/seq_file.h>
20 * GOYA security scheme:
22 * 1. Host is protected by:
23 * - Range registers (When MMU is enabled, DMA RR does NOT protect host)
26 * 2. DRAM is protected by:
27 * - Range registers (protect the first 512MB)
28 * - MMU (isolation between users)
30 * 3. Configuration is protected by:
34 * When MMU is disabled:
36 * QMAN DMA: PQ, CQ, CP, DMA are secured.
37 * PQ, CB and the data are on the host.
40 * PQ, CQ and CP are not secured.
41 * PQ, CB and the data are on the SRAM/DRAM.
43 * Since QMAN DMA is secured, the driver is parsing the DMA CB:
44 * - checks DMA pointer
45 * - WREG, MSG_PROT are not allowed.
46 * - MSG_LONG/SHORT are allowed.
48 * A read/write transaction by the QMAN to a protected area will succeed if
49 * and only if the QMAN's CP is secured and MSG_PROT is used
52 * When MMU is enabled:
54 * QMAN DMA: PQ, CQ and CP are secured.
55 * MMU is set to bypass on the Secure props register of the QMAN.
56 * The reasons we don't enable MMU for PQ, CQ and CP are:
57 * - PQ entry is in kernel address space and the driver doesn't map it.
58 * - CP writes to MSIX register and to kernel address space (completion
61 * DMA is not secured but because CP is secured, the driver still needs to parse
62 * the CB, but doesn't need to check the DMA addresses.
64 * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and
65 * the driver doesn't map memory in MMU.
67 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
69 * DMA RR does NOT protect host because DMA is not secured
73 #define GOYA_BOOT_FIT_FILE "habanalabs/goya/goya-boot-fit.itb"
74 #define GOYA_LINUX_FW_FILE "habanalabs/goya/goya-fit.itb"
76 #define GOYA_MMU_REGS_NUM 63
78 #define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
80 #define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
81 #define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
82 #define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
83 #define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
84 #define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
85 #define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
86 #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
87 #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
88 #define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
89 #define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
91 #define GOYA_QMAN0_FENCE_VAL 0xD169B243
93 #define GOYA_MAX_STRING_LEN 20
95 #define GOYA_CB_POOL_CB_CNT 512
96 #define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
98 #define IS_QM_IDLE(engine, qm_glbl_sts0) \
99 (((qm_glbl_sts0) & engine##_QM_IDLE_MASK) == engine##_QM_IDLE_MASK)
100 #define IS_DMA_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(DMA, qm_glbl_sts0)
101 #define IS_TPC_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(TPC, qm_glbl_sts0)
102 #define IS_MME_QM_IDLE(qm_glbl_sts0) IS_QM_IDLE(MME, qm_glbl_sts0)
104 #define IS_CMDQ_IDLE(engine, cmdq_glbl_sts0) \
105 (((cmdq_glbl_sts0) & engine##_CMDQ_IDLE_MASK) == \
106 engine##_CMDQ_IDLE_MASK)
107 #define IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) \
108 IS_CMDQ_IDLE(TPC, cmdq_glbl_sts0)
109 #define IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) \
110 IS_CMDQ_IDLE(MME, cmdq_glbl_sts0)
112 #define IS_DMA_IDLE(dma_core_sts0) \
113 !((dma_core_sts0) & DMA_CH_0_STS0_DMA_BUSY_MASK)
115 #define IS_TPC_IDLE(tpc_cfg_sts) \
116 (((tpc_cfg_sts) & TPC_CFG_IDLE_MASK) == TPC_CFG_IDLE_MASK)
118 #define IS_MME_IDLE(mme_arch_sts) \
119 (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
122 static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
123 "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
124 "goya cq 4", "goya cpu eq"
127 static u16 goya_packet_sizes[MAX_PACKET_ID] = {
128 [PACKET_WREG_32] = sizeof(struct packet_wreg32),
129 [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
130 [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
131 [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
132 [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
133 [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
134 [PACKET_FENCE] = sizeof(struct packet_fence),
135 [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
136 [PACKET_NOP] = sizeof(struct packet_nop),
137 [PACKET_STOP] = sizeof(struct packet_stop)
140 static inline bool validate_packet_id(enum packet_id id)
144 case PACKET_WREG_BULK:
145 case PACKET_MSG_LONG:
146 case PACKET_MSG_SHORT:
148 case PACKET_MSG_PROT:
159 static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
160 mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
161 mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
162 mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
163 mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
164 mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
165 mmTPC0_QM_GLBL_SECURE_PROPS,
166 mmTPC0_QM_GLBL_NON_SECURE_PROPS,
167 mmTPC0_CMDQ_GLBL_SECURE_PROPS,
168 mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
171 mmTPC1_QM_GLBL_SECURE_PROPS,
172 mmTPC1_QM_GLBL_NON_SECURE_PROPS,
173 mmTPC1_CMDQ_GLBL_SECURE_PROPS,
174 mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
177 mmTPC2_QM_GLBL_SECURE_PROPS,
178 mmTPC2_QM_GLBL_NON_SECURE_PROPS,
179 mmTPC2_CMDQ_GLBL_SECURE_PROPS,
180 mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
183 mmTPC3_QM_GLBL_SECURE_PROPS,
184 mmTPC3_QM_GLBL_NON_SECURE_PROPS,
185 mmTPC3_CMDQ_GLBL_SECURE_PROPS,
186 mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
189 mmTPC4_QM_GLBL_SECURE_PROPS,
190 mmTPC4_QM_GLBL_NON_SECURE_PROPS,
191 mmTPC4_CMDQ_GLBL_SECURE_PROPS,
192 mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
195 mmTPC5_QM_GLBL_SECURE_PROPS,
196 mmTPC5_QM_GLBL_NON_SECURE_PROPS,
197 mmTPC5_CMDQ_GLBL_SECURE_PROPS,
198 mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
201 mmTPC6_QM_GLBL_SECURE_PROPS,
202 mmTPC6_QM_GLBL_NON_SECURE_PROPS,
203 mmTPC6_CMDQ_GLBL_SECURE_PROPS,
204 mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
207 mmTPC7_QM_GLBL_SECURE_PROPS,
208 mmTPC7_QM_GLBL_NON_SECURE_PROPS,
209 mmTPC7_CMDQ_GLBL_SECURE_PROPS,
210 mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
213 mmMME_QM_GLBL_SECURE_PROPS,
214 mmMME_QM_GLBL_NON_SECURE_PROPS,
215 mmMME_CMDQ_GLBL_SECURE_PROPS,
216 mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
217 mmMME_SBA_CONTROL_DATA,
218 mmMME_SBB_CONTROL_DATA,
219 mmMME_SBC_CONTROL_DATA,
220 mmMME_WBC_CONTROL_DATA,
221 mmPCIE_WRAP_PSOC_ARUSER,
222 mmPCIE_WRAP_PSOC_AWUSER
225 static u32 goya_all_events[] = {
226 GOYA_ASYNC_EVENT_ID_PCIE_IF,
227 GOYA_ASYNC_EVENT_ID_TPC0_ECC,
228 GOYA_ASYNC_EVENT_ID_TPC1_ECC,
229 GOYA_ASYNC_EVENT_ID_TPC2_ECC,
230 GOYA_ASYNC_EVENT_ID_TPC3_ECC,
231 GOYA_ASYNC_EVENT_ID_TPC4_ECC,
232 GOYA_ASYNC_EVENT_ID_TPC5_ECC,
233 GOYA_ASYNC_EVENT_ID_TPC6_ECC,
234 GOYA_ASYNC_EVENT_ID_TPC7_ECC,
235 GOYA_ASYNC_EVENT_ID_MME_ECC,
236 GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
237 GOYA_ASYNC_EVENT_ID_MMU_ECC,
238 GOYA_ASYNC_EVENT_ID_DMA_MACRO,
239 GOYA_ASYNC_EVENT_ID_DMA_ECC,
240 GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
241 GOYA_ASYNC_EVENT_ID_PSOC_MEM,
242 GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
243 GOYA_ASYNC_EVENT_ID_SRAM0,
244 GOYA_ASYNC_EVENT_ID_SRAM1,
245 GOYA_ASYNC_EVENT_ID_SRAM2,
246 GOYA_ASYNC_EVENT_ID_SRAM3,
247 GOYA_ASYNC_EVENT_ID_SRAM4,
248 GOYA_ASYNC_EVENT_ID_SRAM5,
249 GOYA_ASYNC_EVENT_ID_SRAM6,
250 GOYA_ASYNC_EVENT_ID_SRAM7,
251 GOYA_ASYNC_EVENT_ID_SRAM8,
252 GOYA_ASYNC_EVENT_ID_SRAM9,
253 GOYA_ASYNC_EVENT_ID_SRAM10,
254 GOYA_ASYNC_EVENT_ID_SRAM11,
255 GOYA_ASYNC_EVENT_ID_SRAM12,
256 GOYA_ASYNC_EVENT_ID_SRAM13,
257 GOYA_ASYNC_EVENT_ID_SRAM14,
258 GOYA_ASYNC_EVENT_ID_SRAM15,
259 GOYA_ASYNC_EVENT_ID_SRAM16,
260 GOYA_ASYNC_EVENT_ID_SRAM17,
261 GOYA_ASYNC_EVENT_ID_SRAM18,
262 GOYA_ASYNC_EVENT_ID_SRAM19,
263 GOYA_ASYNC_EVENT_ID_SRAM20,
264 GOYA_ASYNC_EVENT_ID_SRAM21,
265 GOYA_ASYNC_EVENT_ID_SRAM22,
266 GOYA_ASYNC_EVENT_ID_SRAM23,
267 GOYA_ASYNC_EVENT_ID_SRAM24,
268 GOYA_ASYNC_EVENT_ID_SRAM25,
269 GOYA_ASYNC_EVENT_ID_SRAM26,
270 GOYA_ASYNC_EVENT_ID_SRAM27,
271 GOYA_ASYNC_EVENT_ID_SRAM28,
272 GOYA_ASYNC_EVENT_ID_SRAM29,
273 GOYA_ASYNC_EVENT_ID_GIC500,
274 GOYA_ASYNC_EVENT_ID_PLL0,
275 GOYA_ASYNC_EVENT_ID_PLL1,
276 GOYA_ASYNC_EVENT_ID_PLL3,
277 GOYA_ASYNC_EVENT_ID_PLL4,
278 GOYA_ASYNC_EVENT_ID_PLL5,
279 GOYA_ASYNC_EVENT_ID_PLL6,
280 GOYA_ASYNC_EVENT_ID_AXI_ECC,
281 GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
282 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
283 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
284 GOYA_ASYNC_EVENT_ID_PCIE_DEC,
285 GOYA_ASYNC_EVENT_ID_TPC0_DEC,
286 GOYA_ASYNC_EVENT_ID_TPC1_DEC,
287 GOYA_ASYNC_EVENT_ID_TPC2_DEC,
288 GOYA_ASYNC_EVENT_ID_TPC3_DEC,
289 GOYA_ASYNC_EVENT_ID_TPC4_DEC,
290 GOYA_ASYNC_EVENT_ID_TPC5_DEC,
291 GOYA_ASYNC_EVENT_ID_TPC6_DEC,
292 GOYA_ASYNC_EVENT_ID_TPC7_DEC,
293 GOYA_ASYNC_EVENT_ID_MME_WACS,
294 GOYA_ASYNC_EVENT_ID_MME_WACSD,
295 GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
296 GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
297 GOYA_ASYNC_EVENT_ID_PSOC,
298 GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
299 GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
300 GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
301 GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
302 GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
303 GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
304 GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
305 GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
306 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
307 GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
308 GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
309 GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
310 GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
311 GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
312 GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
313 GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
314 GOYA_ASYNC_EVENT_ID_TPC0_QM,
315 GOYA_ASYNC_EVENT_ID_TPC1_QM,
316 GOYA_ASYNC_EVENT_ID_TPC2_QM,
317 GOYA_ASYNC_EVENT_ID_TPC3_QM,
318 GOYA_ASYNC_EVENT_ID_TPC4_QM,
319 GOYA_ASYNC_EVENT_ID_TPC5_QM,
320 GOYA_ASYNC_EVENT_ID_TPC6_QM,
321 GOYA_ASYNC_EVENT_ID_TPC7_QM,
322 GOYA_ASYNC_EVENT_ID_MME_QM,
323 GOYA_ASYNC_EVENT_ID_MME_CMDQ,
324 GOYA_ASYNC_EVENT_ID_DMA0_QM,
325 GOYA_ASYNC_EVENT_ID_DMA1_QM,
326 GOYA_ASYNC_EVENT_ID_DMA2_QM,
327 GOYA_ASYNC_EVENT_ID_DMA3_QM,
328 GOYA_ASYNC_EVENT_ID_DMA4_QM,
329 GOYA_ASYNC_EVENT_ID_DMA0_CH,
330 GOYA_ASYNC_EVENT_ID_DMA1_CH,
331 GOYA_ASYNC_EVENT_ID_DMA2_CH,
332 GOYA_ASYNC_EVENT_ID_DMA3_CH,
333 GOYA_ASYNC_EVENT_ID_DMA4_CH,
334 GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
335 GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
336 GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
337 GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
338 GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
339 GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
340 GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
341 GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
342 GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
343 GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
344 GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
345 GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
346 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4,
347 GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S,
348 GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E,
349 GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S,
350 GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E
353 static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
354 static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
355 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
356 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
358 int goya_get_fixed_properties(struct hl_device *hdev)
360 struct asic_fixed_properties *prop = &hdev->asic_prop;
363 prop->max_queues = GOYA_QUEUE_ID_SIZE;
364 prop->hw_queues_props = kcalloc(prop->max_queues,
365 sizeof(struct hw_queue_properties),
368 if (!prop->hw_queues_props)
371 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
372 prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
373 prop->hw_queues_props[i].driver_only = 0;
374 prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
377 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
378 prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
379 prop->hw_queues_props[i].driver_only = 1;
380 prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_KERNEL;
383 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
384 NUMBER_OF_INT_HW_QUEUES; i++) {
385 prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
386 prop->hw_queues_props[i].driver_only = 0;
387 prop->hw_queues_props[i].cb_alloc_flags = CB_ALLOC_USER;
390 prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
392 prop->dram_base_address = DRAM_PHYS_BASE;
393 prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
394 prop->dram_end_address = prop->dram_base_address + prop->dram_size;
395 prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
397 prop->sram_base_address = SRAM_BASE_ADDR;
398 prop->sram_size = SRAM_SIZE;
399 prop->sram_end_address = prop->sram_base_address + prop->sram_size;
400 prop->sram_user_base_address = prop->sram_base_address +
401 SRAM_USER_BASE_OFFSET;
403 prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
404 prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
406 prop->mmu_pgt_size = 0x800000; /* 8MB */
408 prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
409 prop->mmu_pte_size = HL_PTE_SIZE;
410 prop->mmu_hop_table_size = HOP_TABLE_SIZE;
411 prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
412 prop->dram_page_size = PAGE_SIZE_2MB;
414 prop->dmmu.hop0_shift = HOP0_SHIFT;
415 prop->dmmu.hop1_shift = HOP1_SHIFT;
416 prop->dmmu.hop2_shift = HOP2_SHIFT;
417 prop->dmmu.hop3_shift = HOP3_SHIFT;
418 prop->dmmu.hop4_shift = HOP4_SHIFT;
419 prop->dmmu.hop0_mask = HOP0_MASK;
420 prop->dmmu.hop1_mask = HOP1_MASK;
421 prop->dmmu.hop2_mask = HOP2_MASK;
422 prop->dmmu.hop3_mask = HOP3_MASK;
423 prop->dmmu.hop4_mask = HOP4_MASK;
424 prop->dmmu.start_addr = VA_DDR_SPACE_START;
425 prop->dmmu.end_addr = VA_DDR_SPACE_END;
426 prop->dmmu.page_size = PAGE_SIZE_2MB;
427 prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
429 /* shifts and masks are the same in PMMU and DMMU */
430 memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
431 prop->pmmu.start_addr = VA_HOST_SPACE_START;
432 prop->pmmu.end_addr = VA_HOST_SPACE_END;
433 prop->pmmu.page_size = PAGE_SIZE_4KB;
434 prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
436 /* PMMU and HPMMU are the same except of page size */
437 memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
438 prop->pmmu_huge.page_size = PAGE_SIZE_2MB;
440 prop->dram_size_for_default_page_mapping = VA_DDR_SPACE_END;
441 prop->cfg_size = CFG_SIZE;
442 prop->max_asid = MAX_ASID;
443 prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
444 prop->high_pll = PLL_HIGH_DEFAULT;
445 prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
446 prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
447 prop->max_power_default = MAX_POWER_DEFAULT;
448 prop->tpc_enabled_mask = TPC_ENABLED_MASK;
449 prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
450 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
452 strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
455 prop->max_pending_cs = GOYA_MAX_PENDING_CS;
457 /* disable fw security for now, set it in a later stage */
458 prop->fw_security_disabled = true;
459 prop->fw_security_status_valid = false;
465 * goya_pci_bars_map - Map PCI BARS of Goya device
467 * @hdev: pointer to hl_device structure
469 * Request PCI regions and map them to kernel virtual addresses.
470 * Returns 0 on success
473 static int goya_pci_bars_map(struct hl_device *hdev)
475 static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
476 bool is_wc[3] = {false, false, true};
479 rc = hl_pci_bars_map(hdev, name, is_wc);
483 hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
484 (CFG_BASE - SRAM_BASE_ADDR);
489 static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
491 struct goya_device *goya = hdev->asic_specific;
492 struct hl_inbound_pci_region pci_region;
496 if ((goya) && (goya->ddr_bar_cur_addr == addr))
499 /* Inbound Region 1 - Bar 4 - Point to DDR */
500 pci_region.mode = PCI_BAR_MATCH_MODE;
501 pci_region.bar = DDR_BAR_ID;
502 pci_region.addr = addr;
503 rc = hl_pci_set_inbound_region(hdev, 1, &pci_region);
508 old_addr = goya->ddr_bar_cur_addr;
509 goya->ddr_bar_cur_addr = addr;
516 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
518 * @hdev: pointer to hl_device structure
520 * This is needed in case the firmware doesn't initialize the iATU
523 static int goya_init_iatu(struct hl_device *hdev)
525 struct hl_inbound_pci_region inbound_region;
526 struct hl_outbound_pci_region outbound_region;
529 /* Inbound Region 0 - Bar 0 - Point to SRAM and CFG */
530 inbound_region.mode = PCI_BAR_MATCH_MODE;
531 inbound_region.bar = SRAM_CFG_BAR_ID;
532 inbound_region.addr = SRAM_BASE_ADDR;
533 rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
537 /* Inbound Region 1 - Bar 4 - Point to DDR */
538 inbound_region.mode = PCI_BAR_MATCH_MODE;
539 inbound_region.bar = DDR_BAR_ID;
540 inbound_region.addr = DRAM_PHYS_BASE;
541 rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
545 hdev->asic_funcs->set_dma_mask_from_fw(hdev);
547 /* Outbound Region 0 - Point to Host */
548 outbound_region.addr = HOST_PHYS_BASE;
549 outbound_region.size = HOST_PHYS_SIZE;
550 rc = hl_pci_set_outbound_region(hdev, &outbound_region);
557 * goya_early_init - GOYA early initialization code
559 * @hdev: pointer to hl_device structure
563 * PCI controller initialization
567 static int goya_early_init(struct hl_device *hdev)
569 struct asic_fixed_properties *prop = &hdev->asic_prop;
570 struct pci_dev *pdev = hdev->pdev;
574 rc = goya_get_fixed_properties(hdev);
576 dev_err(hdev->dev, "Failed to get fixed properties\n");
580 /* Check BAR sizes */
581 if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
583 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
585 (unsigned long long) pci_resource_len(pdev,
589 goto free_queue_props;
592 if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
594 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
596 (unsigned long long) pci_resource_len(pdev,
600 goto free_queue_props;
603 prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
605 rc = hl_pci_init(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
606 mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
607 GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
609 goto free_queue_props;
612 val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
613 if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
615 "PCI strap is not configured correctly, PCI bus errors may occur\n");
618 dev_info(hdev->dev, "firmware-level security is %s\n",
619 hdev->asic_prop.fw_security_disabled ? "disabled" : "enabled");
624 kfree(hdev->asic_prop.hw_queues_props);
629 * goya_early_fini - GOYA early finalization code
631 * @hdev: pointer to hl_device structure
636 static int goya_early_fini(struct hl_device *hdev)
638 kfree(hdev->asic_prop.hw_queues_props);
644 static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
646 /* mask to zero the MMBP and ASID bits */
647 WREG32_AND(reg, ~0x7FF);
648 WREG32_OR(reg, asid);
651 static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
653 struct goya_device *goya = hdev->asic_specific;
655 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
659 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
661 WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
663 RREG32(mmDMA_QM_0_GLBL_PROT);
667 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
669 * @hdev: pointer to hl_device structure
672 static void goya_fetch_psoc_frequency(struct hl_device *hdev)
674 struct asic_fixed_properties *prop = &hdev->asic_prop;
677 u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
678 u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
679 u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
680 u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
681 u32 od = RREG32(mmPSOC_PCI_PLL_OD);
683 if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
684 if (div_sel == DIV_SEL_REF_CLK)
685 trace_freq = PLL_REF_CLK;
687 trace_freq = PLL_REF_CLK / (div_fctr + 1);
688 } else if (div_sel == DIV_SEL_PLL_CLK ||
689 div_sel == DIV_SEL_DIVIDED_PLL) {
690 pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
691 if (div_sel == DIV_SEL_PLL_CLK)
692 trace_freq = pll_clk;
694 trace_freq = pll_clk / (div_fctr + 1);
697 "Received invalid div select value: %d", div_sel);
700 prop->psoc_timestamp_frequency = trace_freq;
701 prop->psoc_pci_pll_nr = nr;
702 prop->psoc_pci_pll_nf = nf;
703 prop->psoc_pci_pll_od = od;
704 prop->psoc_pci_pll_div_factor = div_fctr;
707 int goya_late_init(struct hl_device *hdev)
709 struct asic_fixed_properties *prop = &hdev->asic_prop;
712 goya_fetch_psoc_frequency(hdev);
714 rc = goya_mmu_clear_pgt_range(hdev);
717 "Failed to clear MMU page tables range %d\n", rc);
721 rc = goya_mmu_set_dram_default_page(hdev);
723 dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
727 rc = goya_mmu_add_mappings_for_device_cpu(hdev);
731 rc = goya_init_cpu_queues(hdev);
735 rc = goya_test_cpu_queue(hdev);
739 rc = goya_cpucp_info_get(hdev);
741 dev_err(hdev->dev, "Failed to get cpucp info %d\n", rc);
745 /* Now that we have the DRAM size in ASIC prop, we need to check
746 * its size and configure the DMA_IF DDR wrap protection (which is in
747 * the MMU block) accordingly. The value is the log2 of the DRAM size
749 WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
751 rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS);
754 "Failed to enable PCI access from CPU %d\n", rc);
758 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
759 GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
765 * goya_late_fini - GOYA late tear-down code
767 * @hdev: pointer to hl_device structure
769 * Free sensors allocated structures
771 void goya_late_fini(struct hl_device *hdev)
773 const struct hwmon_channel_info **channel_info_arr;
776 if (!hdev->hl_chip_info->info)
779 channel_info_arr = hdev->hl_chip_info->info;
781 while (channel_info_arr[i]) {
782 kfree(channel_info_arr[i]->config);
783 kfree(channel_info_arr[i]);
787 kfree(channel_info_arr);
789 hdev->hl_chip_info->info = NULL;
793 * goya_sw_init - Goya software initialization code
795 * @hdev: pointer to hl_device structure
798 static int goya_sw_init(struct hl_device *hdev)
800 struct goya_device *goya;
803 /* Allocate device structure */
804 goya = kzalloc(sizeof(*goya), GFP_KERNEL);
808 /* according to goya_init_iatu */
809 goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
811 goya->mme_clk = GOYA_PLL_FREQ_LOW;
812 goya->tpc_clk = GOYA_PLL_FREQ_LOW;
813 goya->ic_clk = GOYA_PLL_FREQ_LOW;
815 hdev->asic_specific = goya;
817 /* Create DMA pool for small allocations */
818 hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
819 &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
820 if (!hdev->dma_pool) {
821 dev_err(hdev->dev, "failed to create DMA pool\n");
823 goto free_goya_device;
826 hdev->cpu_accessible_dma_mem =
827 hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
828 HL_CPU_ACCESSIBLE_MEM_SIZE,
829 &hdev->cpu_accessible_dma_address,
830 GFP_KERNEL | __GFP_ZERO);
832 if (!hdev->cpu_accessible_dma_mem) {
837 dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
838 &hdev->cpu_accessible_dma_address);
840 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
841 if (!hdev->cpu_accessible_dma_pool) {
843 "Failed to create CPU accessible DMA pool\n");
845 goto free_cpu_dma_mem;
848 rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
849 (uintptr_t) hdev->cpu_accessible_dma_mem,
850 HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
853 "Failed to add memory to CPU accessible DMA pool\n");
855 goto free_cpu_accessible_dma_pool;
858 spin_lock_init(&goya->hw_queues_lock);
859 hdev->supports_coresight = true;
860 hdev->supports_soft_reset = true;
864 free_cpu_accessible_dma_pool:
865 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
867 hdev->asic_funcs->asic_dma_free_coherent(hdev,
868 HL_CPU_ACCESSIBLE_MEM_SIZE,
869 hdev->cpu_accessible_dma_mem,
870 hdev->cpu_accessible_dma_address);
872 dma_pool_destroy(hdev->dma_pool);
880 * goya_sw_fini - Goya software tear-down code
882 * @hdev: pointer to hl_device structure
885 static int goya_sw_fini(struct hl_device *hdev)
887 struct goya_device *goya = hdev->asic_specific;
889 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
891 hdev->asic_funcs->asic_dma_free_coherent(hdev,
892 HL_CPU_ACCESSIBLE_MEM_SIZE,
893 hdev->cpu_accessible_dma_mem,
894 hdev->cpu_accessible_dma_address);
896 dma_pool_destroy(hdev->dma_pool);
903 static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
904 dma_addr_t bus_address)
906 struct goya_device *goya = hdev->asic_specific;
907 u32 mtr_base_lo, mtr_base_hi;
908 u32 so_base_lo, so_base_hi;
909 u32 gic_base_lo, gic_base_hi;
910 u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
911 u32 dma_err_cfg = QMAN_DMA_ERR_MSG_EN;
913 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
914 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
915 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
916 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
919 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
921 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
923 WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
924 WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
926 WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
927 WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
928 WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
930 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
931 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
932 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
933 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
934 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
935 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
936 WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
937 GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
939 /* PQ has buffer of 2 cache lines, while CQ has 8 lines */
940 WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
941 WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
943 if (goya->hw_cap_initialized & HW_CAP_MMU)
944 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
946 WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
948 if (hdev->stop_on_err)
949 dma_err_cfg |= 1 << DMA_QM_0_GLBL_ERR_CFG_DMA_STOP_ON_ERR_SHIFT;
951 WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, dma_err_cfg);
952 WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
955 static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
957 u32 gic_base_lo, gic_base_hi;
959 u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
962 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
964 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
966 WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
967 WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
968 WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
969 GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
972 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
975 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
977 WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
978 WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
982 * goya_init_dma_qmans - Initialize QMAN DMA registers
984 * @hdev: pointer to hl_device structure
986 * Initialize the H/W registers of the QMAN DMA channels
989 void goya_init_dma_qmans(struct hl_device *hdev)
991 struct goya_device *goya = hdev->asic_specific;
992 struct hl_hw_queue *q;
995 if (goya->hw_cap_initialized & HW_CAP_DMA)
998 q = &hdev->kernel_queues[0];
1000 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
1001 q->cq_id = q->msi_vec = i;
1002 goya_init_dma_qman(hdev, i, q->bus_address);
1003 goya_init_dma_ch(hdev, i);
1006 goya->hw_cap_initialized |= HW_CAP_DMA;
1010 * goya_disable_external_queues - Disable external queues
1012 * @hdev: pointer to hl_device structure
1015 static void goya_disable_external_queues(struct hl_device *hdev)
1017 struct goya_device *goya = hdev->asic_specific;
1019 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
1022 WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
1023 WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
1024 WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
1025 WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
1026 WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
1029 static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
1030 u32 cp_sts_reg, u32 glbl_sts0_reg)
1035 /* use the values of TPC0 as they are all the same*/
1037 WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
1039 status = RREG32(cp_sts_reg);
1040 if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
1041 rc = hl_poll_timeout(
1045 !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
1047 QMAN_FENCE_TIMEOUT_USEC);
1049 /* if QMAN is stuck in fence no need to check for stop */
1054 rc = hl_poll_timeout(
1058 (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
1060 QMAN_STOP_TIMEOUT_USEC);
1064 "Timeout while waiting for QMAN to stop\n");
1072 * goya_stop_external_queues - Stop external queues
1074 * @hdev: pointer to hl_device structure
1076 * Returns 0 on success
1079 static int goya_stop_external_queues(struct hl_device *hdev)
1083 struct goya_device *goya = hdev->asic_specific;
1085 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
1088 rc = goya_stop_queue(hdev,
1089 mmDMA_QM_0_GLBL_CFG1,
1091 mmDMA_QM_0_GLBL_STS0);
1094 dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
1098 rc = goya_stop_queue(hdev,
1099 mmDMA_QM_1_GLBL_CFG1,
1101 mmDMA_QM_1_GLBL_STS0);
1104 dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
1108 rc = goya_stop_queue(hdev,
1109 mmDMA_QM_2_GLBL_CFG1,
1111 mmDMA_QM_2_GLBL_STS0);
1114 dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
1118 rc = goya_stop_queue(hdev,
1119 mmDMA_QM_3_GLBL_CFG1,
1121 mmDMA_QM_3_GLBL_STS0);
1124 dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
1128 rc = goya_stop_queue(hdev,
1129 mmDMA_QM_4_GLBL_CFG1,
1131 mmDMA_QM_4_GLBL_STS0);
1134 dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
1142 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1144 * @hdev: pointer to hl_device structure
1146 * Returns 0 on success
1149 int goya_init_cpu_queues(struct hl_device *hdev)
1151 struct goya_device *goya = hdev->asic_specific;
1154 struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
1157 if (!hdev->cpu_queues_enable)
1160 if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
1163 eq = &hdev->event_queue;
1165 WREG32(mmCPU_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
1166 WREG32(mmCPU_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));
1168 WREG32(mmCPU_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
1169 WREG32(mmCPU_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));
1171 WREG32(mmCPU_CQ_BASE_ADDR_LOW,
1172 lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1173 WREG32(mmCPU_CQ_BASE_ADDR_HIGH,
1174 upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
1176 WREG32(mmCPU_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES);
1177 WREG32(mmCPU_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES);
1178 WREG32(mmCPU_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE);
1180 /* Used for EQ CI */
1181 WREG32(mmCPU_EQ_CI, 0);
1183 WREG32(mmCPU_IF_PF_PQ_PI, 0);
1185 WREG32(mmCPU_PQ_INIT_STATUS, PQ_INIT_STATUS_READY_FOR_CP);
1187 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
1188 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
1190 err = hl_poll_timeout(
1192 mmCPU_PQ_INIT_STATUS,
1194 (status == PQ_INIT_STATUS_READY_FOR_HOST),
1196 GOYA_CPU_TIMEOUT_USEC);
1200 "Failed to setup communication with device CPU\n");
1204 goya->hw_cap_initialized |= HW_CAP_CPU_Q;
1208 static void goya_set_pll_refclk(struct hl_device *hdev)
1210 WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
1211 WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
1212 WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
1213 WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
1215 WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
1216 WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
1217 WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
1218 WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
1220 WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
1221 WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
1222 WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
1223 WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
1225 WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
1226 WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
1227 WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
1228 WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
1230 WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
1231 WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
1232 WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
1233 WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
1235 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
1236 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
1237 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
1238 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
1240 WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
1241 WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
1242 WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
1243 WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
1246 static void goya_disable_clk_rlx(struct hl_device *hdev)
1248 WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
1249 WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
1252 static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
1254 u64 tpc_eml_address;
1255 u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
1258 tpc_offset = tpc_id * 0x40000;
1259 tpc_eml_offset = tpc_id * 0x200000;
1260 tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
1261 tpc_slm_offset = tpc_eml_address + 0x100000;
1264 * Workaround for Bug H2 #2443 :
1265 * "TPC SB is not initialized on chip reset"
1268 val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
1269 if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
1270 dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
1273 WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
1275 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
1276 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
1277 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
1278 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
1279 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
1280 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
1281 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
1282 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
1283 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
1284 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
1286 WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1287 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
1289 err = hl_poll_timeout(
1291 mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1293 (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
1295 HL_DEVICE_TIMEOUT_USEC);
1299 "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
1301 WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1302 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
1304 msleep(GOYA_RESET_WAIT_MSEC);
1306 WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1307 ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
1309 msleep(GOYA_RESET_WAIT_MSEC);
1311 for (slm_index = 0 ; slm_index < 256 ; slm_index++)
1312 WREG32(tpc_slm_offset + (slm_index << 2), 0);
1314 val = RREG32(tpc_slm_offset);
1317 static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1319 struct goya_device *goya = hdev->asic_specific;
1325 if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1328 /* Workaround for H2 #2443 */
1330 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1331 _goya_tpc_mbist_workaround(hdev, i);
1333 goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1337 * goya_init_golden_registers - Initialize golden registers
1339 * @hdev: pointer to hl_device structure
1341 * Initialize the H/W registers of the device
1344 static void goya_init_golden_registers(struct hl_device *hdev)
1346 struct goya_device *goya = hdev->asic_specific;
1347 u32 polynom[10], tpc_intr_mask, offset;
1350 if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1353 polynom[0] = 0x00020080;
1354 polynom[1] = 0x00401000;
1355 polynom[2] = 0x00200800;
1356 polynom[3] = 0x00002000;
1357 polynom[4] = 0x00080200;
1358 polynom[5] = 0x00040100;
1359 polynom[6] = 0x00100400;
1360 polynom[7] = 0x00004000;
1361 polynom[8] = 0x00010000;
1362 polynom[9] = 0x00008000;
1364 /* Mask all arithmetic interrupts from TPC */
1365 tpc_intr_mask = 0x7FFF;
1367 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1368 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1369 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1370 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1371 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1372 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1374 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1375 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1376 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1377 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1378 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1381 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1382 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1383 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1384 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1385 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1387 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1388 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1389 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1390 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1391 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1393 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1394 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1395 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1396 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1397 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1399 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1400 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1401 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1402 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1403 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1406 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1407 WREG32(mmMME_AGU, 0x0f0f0f10);
1408 WREG32(mmMME_SEI_MASK, ~0x0);
1410 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1411 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1412 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1413 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1414 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1415 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1416 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1417 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1418 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1419 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1420 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1421 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1422 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1423 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1424 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1425 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1426 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1427 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1428 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1429 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1430 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1431 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1432 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1433 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1434 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1435 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1436 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1437 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1438 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1439 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1440 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1441 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1442 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1443 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1444 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1445 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1446 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1447 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1448 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1449 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1450 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1451 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1452 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1453 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1454 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1455 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1456 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1457 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1458 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1459 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1460 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1461 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1462 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1463 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1464 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1465 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1466 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1467 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1468 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1469 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1470 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1471 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1472 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1473 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1474 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1475 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1476 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1477 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1478 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1479 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1480 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1481 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1482 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1483 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1484 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1485 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1486 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1487 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1488 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1489 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1490 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1491 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1492 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1493 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1495 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1496 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1497 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1498 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1499 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1500 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1501 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1502 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1503 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1504 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1505 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1506 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1508 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1509 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1510 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1511 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1512 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1513 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1514 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1515 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1516 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1517 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1518 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1519 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1521 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1522 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1523 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1524 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1525 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1526 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1527 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1528 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1529 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1530 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1531 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1532 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1534 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1535 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1536 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1537 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1538 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1539 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1540 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1541 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1542 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1543 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1544 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1545 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1547 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1548 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1549 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1550 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1551 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1552 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1553 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1554 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1555 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1556 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1557 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1558 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1560 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1561 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1562 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1563 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1564 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1565 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1566 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1567 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1568 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1569 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1570 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1571 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1573 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1574 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1575 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1576 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1577 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1578 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1579 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1581 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1582 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1583 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1584 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1585 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1586 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1587 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1588 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1590 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1591 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1594 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1595 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1596 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1597 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1598 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1601 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1603 * Workaround for Bug H2 #2441 :
1604 * "ST.NOP set trace event illegal opcode"
1606 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1608 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1609 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1610 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1611 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1613 WREG32_FIELD(TPC0_CFG_MSS_CONFIG, offset,
1614 ICACHE_FETCH_LINE_NUM, 2);
1617 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1618 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1619 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1621 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1622 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1623 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1626 * Workaround for H2 #HW-23 bug
1627 * Set DMA max outstanding read requests to 240 on DMA CH 1.
1628 * This limitation is still large enough to not affect Gen4 bandwidth.
1629 * We need to only limit that DMA channel because the user can only read
1630 * from Host using DMA CH 1
1632 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
1634 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
1636 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1639 static void goya_init_mme_qman(struct hl_device *hdev)
1641 u32 mtr_base_lo, mtr_base_hi;
1642 u32 so_base_lo, so_base_hi;
1643 u32 gic_base_lo, gic_base_hi;
1646 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1647 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1648 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1649 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1652 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1654 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1656 qman_base_addr = hdev->asic_prop.sram_base_address +
1657 MME_QMAN_BASE_OFFSET;
1659 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1660 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1661 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1662 WREG32(mmMME_QM_PQ_PI, 0);
1663 WREG32(mmMME_QM_PQ_CI, 0);
1664 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1665 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1666 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1667 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1669 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1670 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1671 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1672 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1674 /* QMAN CQ has 8 cache lines */
1675 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1677 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1678 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1680 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1682 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1684 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1686 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1689 static void goya_init_mme_cmdq(struct hl_device *hdev)
1691 u32 mtr_base_lo, mtr_base_hi;
1692 u32 so_base_lo, so_base_hi;
1693 u32 gic_base_lo, gic_base_hi;
1695 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1696 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1697 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1698 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1701 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1703 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1705 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1706 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1707 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1708 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1710 /* CMDQ CQ has 20 cache lines */
1711 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1713 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1714 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1716 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1718 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1720 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1722 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1725 void goya_init_mme_qmans(struct hl_device *hdev)
1727 struct goya_device *goya = hdev->asic_specific;
1728 u32 so_base_lo, so_base_hi;
1730 if (goya->hw_cap_initialized & HW_CAP_MME)
1733 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1734 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1736 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1737 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1739 goya_init_mme_qman(hdev);
1740 goya_init_mme_cmdq(hdev);
1742 goya->hw_cap_initialized |= HW_CAP_MME;
1745 static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1747 u32 mtr_base_lo, mtr_base_hi;
1748 u32 so_base_lo, so_base_hi;
1749 u32 gic_base_lo, gic_base_hi;
1751 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1753 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1754 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1755 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1756 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1759 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1761 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1763 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1765 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1766 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1767 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1768 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1769 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1770 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1771 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1772 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1773 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1775 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1776 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1777 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1778 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1780 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1782 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1783 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1785 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1786 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1788 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1790 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1792 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1795 static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1797 u32 mtr_base_lo, mtr_base_hi;
1798 u32 so_base_lo, so_base_hi;
1799 u32 gic_base_lo, gic_base_hi;
1800 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1802 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1803 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1804 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1805 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1808 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1810 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1812 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1813 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1814 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1815 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1817 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1819 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1820 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1822 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1823 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1825 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1827 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1829 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1832 void goya_init_tpc_qmans(struct hl_device *hdev)
1834 struct goya_device *goya = hdev->asic_specific;
1835 u32 so_base_lo, so_base_hi;
1836 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1837 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1840 if (goya->hw_cap_initialized & HW_CAP_TPC)
1843 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1844 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1846 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
1847 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
1849 WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
1853 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1854 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1855 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1856 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1857 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1858 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1859 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1860 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1862 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1863 goya_init_tpc_cmdq(hdev, i);
1865 goya->hw_cap_initialized |= HW_CAP_TPC;
1869 * goya_disable_internal_queues - Disable internal queues
1871 * @hdev: pointer to hl_device structure
1874 static void goya_disable_internal_queues(struct hl_device *hdev)
1876 struct goya_device *goya = hdev->asic_specific;
1878 if (!(goya->hw_cap_initialized & HW_CAP_MME))
1881 WREG32(mmMME_QM_GLBL_CFG0, 0);
1882 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1885 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
1888 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1889 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1891 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1892 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1894 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1895 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1897 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1898 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1900 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1901 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1903 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1904 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1906 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1907 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1909 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1910 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1914 * goya_stop_internal_queues - Stop internal queues
1916 * @hdev: pointer to hl_device structure
1918 * Returns 0 on success
1921 static int goya_stop_internal_queues(struct hl_device *hdev)
1923 struct goya_device *goya = hdev->asic_specific;
1926 if (!(goya->hw_cap_initialized & HW_CAP_MME))
1930 * Each queue (QMAN) is a separate H/W logic. That means that each
1931 * QMAN can be stopped independently and failure to stop one does NOT
1932 * mandate we should not try to stop other QMANs
1935 rc = goya_stop_queue(hdev,
1938 mmMME_QM_GLBL_STS0);
1941 dev_err(hdev->dev, "failed to stop MME QMAN\n");
1945 rc = goya_stop_queue(hdev,
1946 mmMME_CMDQ_GLBL_CFG1,
1948 mmMME_CMDQ_GLBL_STS0);
1951 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
1956 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
1959 rc = goya_stop_queue(hdev,
1960 mmTPC0_QM_GLBL_CFG1,
1962 mmTPC0_QM_GLBL_STS0);
1965 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
1969 rc = goya_stop_queue(hdev,
1970 mmTPC0_CMDQ_GLBL_CFG1,
1972 mmTPC0_CMDQ_GLBL_STS0);
1975 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
1979 rc = goya_stop_queue(hdev,
1980 mmTPC1_QM_GLBL_CFG1,
1982 mmTPC1_QM_GLBL_STS0);
1985 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
1989 rc = goya_stop_queue(hdev,
1990 mmTPC1_CMDQ_GLBL_CFG1,
1992 mmTPC1_CMDQ_GLBL_STS0);
1995 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
1999 rc = goya_stop_queue(hdev,
2000 mmTPC2_QM_GLBL_CFG1,
2002 mmTPC2_QM_GLBL_STS0);
2005 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
2009 rc = goya_stop_queue(hdev,
2010 mmTPC2_CMDQ_GLBL_CFG1,
2012 mmTPC2_CMDQ_GLBL_STS0);
2015 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
2019 rc = goya_stop_queue(hdev,
2020 mmTPC3_QM_GLBL_CFG1,
2022 mmTPC3_QM_GLBL_STS0);
2025 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
2029 rc = goya_stop_queue(hdev,
2030 mmTPC3_CMDQ_GLBL_CFG1,
2032 mmTPC3_CMDQ_GLBL_STS0);
2035 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
2039 rc = goya_stop_queue(hdev,
2040 mmTPC4_QM_GLBL_CFG1,
2042 mmTPC4_QM_GLBL_STS0);
2045 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
2049 rc = goya_stop_queue(hdev,
2050 mmTPC4_CMDQ_GLBL_CFG1,
2052 mmTPC4_CMDQ_GLBL_STS0);
2055 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
2059 rc = goya_stop_queue(hdev,
2060 mmTPC5_QM_GLBL_CFG1,
2062 mmTPC5_QM_GLBL_STS0);
2065 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
2069 rc = goya_stop_queue(hdev,
2070 mmTPC5_CMDQ_GLBL_CFG1,
2072 mmTPC5_CMDQ_GLBL_STS0);
2075 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
2079 rc = goya_stop_queue(hdev,
2080 mmTPC6_QM_GLBL_CFG1,
2082 mmTPC6_QM_GLBL_STS0);
2085 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
2089 rc = goya_stop_queue(hdev,
2090 mmTPC6_CMDQ_GLBL_CFG1,
2092 mmTPC6_CMDQ_GLBL_STS0);
2095 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
2099 rc = goya_stop_queue(hdev,
2100 mmTPC7_QM_GLBL_CFG1,
2102 mmTPC7_QM_GLBL_STS0);
2105 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
2109 rc = goya_stop_queue(hdev,
2110 mmTPC7_CMDQ_GLBL_CFG1,
2112 mmTPC7_CMDQ_GLBL_STS0);
2115 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
2122 static void goya_dma_stall(struct hl_device *hdev)
2124 struct goya_device *goya = hdev->asic_specific;
2126 if (!(goya->hw_cap_initialized & HW_CAP_DMA))
2129 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
2130 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
2131 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
2132 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
2133 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
2136 static void goya_tpc_stall(struct hl_device *hdev)
2138 struct goya_device *goya = hdev->asic_specific;
2140 if (!(goya->hw_cap_initialized & HW_CAP_TPC))
2143 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
2144 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
2145 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
2146 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
2147 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
2148 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
2149 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
2150 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
2153 static void goya_mme_stall(struct hl_device *hdev)
2155 struct goya_device *goya = hdev->asic_specific;
2157 if (!(goya->hw_cap_initialized & HW_CAP_MME))
2160 WREG32(mmMME_STALL, 0xFFFFFFFF);
2163 static int goya_enable_msix(struct hl_device *hdev)
2165 struct goya_device *goya = hdev->asic_specific;
2166 int cq_cnt = hdev->asic_prop.completion_queues_count;
2167 int rc, i, irq_cnt_init, irq;
2169 if (goya->hw_cap_initialized & HW_CAP_MSIX)
2172 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
2173 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
2176 "MSI-X: Failed to enable support -- %d/%d\n",
2177 GOYA_MSIX_ENTRIES, rc);
2181 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
2182 irq = pci_irq_vector(hdev->pdev, i);
2183 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
2184 &hdev->completion_queue[i]);
2186 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2191 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2193 rc = request_irq(irq, hl_irq_handler_eq, 0,
2194 goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
2195 &hdev->event_queue);
2197 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
2201 goya->hw_cap_initialized |= HW_CAP_MSIX;
2205 for (i = 0 ; i < irq_cnt_init ; i++)
2206 free_irq(pci_irq_vector(hdev->pdev, i),
2207 &hdev->completion_queue[i]);
2209 pci_free_irq_vectors(hdev->pdev);
2213 static void goya_sync_irqs(struct hl_device *hdev)
2215 struct goya_device *goya = hdev->asic_specific;
2218 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2221 /* Wait for all pending IRQs to be finished */
2222 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2223 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2225 synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2228 static void goya_disable_msix(struct hl_device *hdev)
2230 struct goya_device *goya = hdev->asic_specific;
2233 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2236 goya_sync_irqs(hdev);
2238 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2239 free_irq(irq, &hdev->event_queue);
2241 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2242 irq = pci_irq_vector(hdev->pdev, i);
2243 free_irq(irq, &hdev->completion_queue[i]);
2246 pci_free_irq_vectors(hdev->pdev);
2248 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2251 static void goya_enable_timestamp(struct hl_device *hdev)
2253 /* Disable the timestamp counter */
2254 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2256 /* Zero the lower/upper parts of the 64-bit counter */
2257 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
2258 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);
2260 /* Enable the counter */
2261 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
2264 static void goya_disable_timestamp(struct hl_device *hdev)
2266 /* Disable the timestamp counter */
2267 WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);
2270 static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2272 u32 wait_timeout_ms;
2275 "Halting compute engines and disabling interrupts\n");
2278 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2280 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2282 goya_stop_external_queues(hdev);
2283 goya_stop_internal_queues(hdev);
2285 msleep(wait_timeout_ms);
2287 goya_dma_stall(hdev);
2288 goya_tpc_stall(hdev);
2289 goya_mme_stall(hdev);
2291 msleep(wait_timeout_ms);
2293 goya_disable_external_queues(hdev);
2294 goya_disable_internal_queues(hdev);
2296 goya_disable_timestamp(hdev);
2299 goya_disable_msix(hdev);
2300 goya_mmu_remove_device_cpu_mappings(hdev);
2302 goya_sync_irqs(hdev);
2307 * goya_load_firmware_to_device() - Load LINUX FW code to device.
2308 * @hdev: Pointer to hl_device structure.
2310 * Copy LINUX fw code from firmware file to HBM BAR.
2312 * Return: 0 on success, non-zero for failure.
2314 static int goya_load_firmware_to_device(struct hl_device *hdev)
2318 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2320 return hl_fw_load_fw_to_device(hdev, GOYA_LINUX_FW_FILE, dst, 0, 0);
2324 * goya_load_boot_fit_to_device() - Load boot fit to device.
2325 * @hdev: Pointer to hl_device structure.
2327 * Copy boot fit file to SRAM BAR.
2329 * Return: 0 on success, non-zero for failure.
2331 static int goya_load_boot_fit_to_device(struct hl_device *hdev)
2335 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + BOOT_FIT_SRAM_OFFSET;
2337 return hl_fw_load_fw_to_device(hdev, GOYA_BOOT_FIT_FILE, dst, 0, 0);
2341 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2342 * The version string should be located by that offset.
2344 static void goya_read_device_fw_version(struct hl_device *hdev,
2345 enum hl_fw_component fwc)
2353 ver_off = RREG32(mmUBOOT_VER_OFFSET);
2354 dest = hdev->asic_prop.uboot_ver;
2357 case FW_COMP_PREBOOT:
2358 ver_off = RREG32(mmPREBOOT_VER_OFFSET);
2359 dest = hdev->asic_prop.preboot_ver;
2363 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2367 ver_off &= ~((u32)SRAM_BASE_ADDR);
2369 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2370 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2373 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2375 strcpy(dest, "unavailable");
2379 static int goya_init_cpu(struct hl_device *hdev)
2381 struct goya_device *goya = hdev->asic_specific;
2384 if (!hdev->cpu_enable)
2387 if (goya->hw_cap_initialized & HW_CAP_CPU)
2391 * Before pushing u-boot/linux to device, need to set the ddr bar to
2392 * base address of dram
2394 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2396 "failed to map DDR bar to DRAM base address\n");
2400 rc = hl_fw_init_cpu(hdev, mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS,
2401 mmPSOC_GLOBAL_CONF_UBOOT_MAGIC,
2402 mmCPU_CMD_STATUS_TO_HOST,
2403 mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_ERR0,
2404 false, GOYA_CPU_TIMEOUT_USEC,
2405 GOYA_BOOT_FIT_REQ_TIMEOUT_USEC);
2410 goya->hw_cap_initialized |= HW_CAP_CPU;
2415 static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
2418 u32 status, timeout_usec;
2422 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
2424 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
2426 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
2427 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
2428 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
2430 rc = hl_poll_timeout(
2434 !(status & 0x80000000),
2440 "Timeout during MMU hop0 config of asid %d\n", asid);
2447 int goya_mmu_init(struct hl_device *hdev)
2449 struct asic_fixed_properties *prop = &hdev->asic_prop;
2450 struct goya_device *goya = hdev->asic_specific;
2454 if (!hdev->mmu_enable)
2457 if (goya->hw_cap_initialized & HW_CAP_MMU)
2460 hdev->dram_supports_virtual_memory = true;
2461 hdev->dram_default_page_mapping = true;
2463 for (i = 0 ; i < prop->max_asid ; i++) {
2464 hop0_addr = prop->mmu_pgt_addr +
2465 (i * prop->mmu_hop_table_size);
2467 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2470 "failed to set hop0 addr for asid %d\n", i);
2475 goya->hw_cap_initialized |= HW_CAP_MMU;
2477 /* init MMU cache manage page */
2478 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2479 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2480 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
2482 /* Remove follower feature due to performance bug */
2483 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2484 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2486 hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
2487 VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
2489 WREG32(mmMMU_MMU_ENABLE, 1);
2490 WREG32(mmMMU_SPI_MASK, 0xF);
2499 * goya_hw_init - Goya hardware initialization code
2501 * @hdev: pointer to hl_device structure
2503 * Returns 0 on success
2506 static int goya_hw_init(struct hl_device *hdev)
2508 struct asic_fixed_properties *prop = &hdev->asic_prop;
2511 /* Perform read from the device to make sure device is up */
2512 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2515 * Let's mark in the H/W that we have reached this point. We check
2516 * this value in the reset_before_init function to understand whether
2517 * we need to reset the chip before doing H/W init. This register is
2518 * cleared by the H/W upon H/W reset
2520 WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY);
2522 rc = goya_init_cpu(hdev);
2524 dev_err(hdev->dev, "failed to initialize CPU\n");
2528 goya_tpc_mbist_workaround(hdev);
2530 goya_init_golden_registers(hdev);
2533 * After CPU initialization is finished, change DDR bar mapping inside
2534 * iATU to point to the start address of the MMU page tables
2536 if (goya_set_ddr_bar_base(hdev, (MMU_PAGE_TABLES_ADDR &
2537 ~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
2539 "failed to map DDR bar to MMU page tables\n");
2543 rc = goya_mmu_init(hdev);
2547 goya_init_security(hdev);
2549 goya_init_dma_qmans(hdev);
2551 goya_init_mme_qmans(hdev);
2553 goya_init_tpc_qmans(hdev);
2555 goya_enable_timestamp(hdev);
2557 /* MSI-X must be enabled before CPU queues are initialized */
2558 rc = goya_enable_msix(hdev);
2560 goto disable_queues;
2562 /* Perform read from the device to flush all MSI-X configuration */
2563 RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2568 goya_disable_internal_queues(hdev);
2569 goya_disable_external_queues(hdev);
2575 * goya_hw_fini - Goya hardware tear-down code
2577 * @hdev: pointer to hl_device structure
2578 * @hard_reset: should we do hard reset to all engines or just reset the
2579 * compute/dma engines
2581 static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2583 struct goya_device *goya = hdev->asic_specific;
2584 u32 reset_timeout_ms, cpu_timeout_ms, status;
2587 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2588 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2590 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2591 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2595 /* I don't know what is the state of the CPU so make sure it is
2596 * stopped in any means necessary
2598 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2599 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2600 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2602 msleep(cpu_timeout_ms);
2604 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2605 goya_disable_clk_rlx(hdev);
2606 goya_set_pll_refclk(hdev);
2608 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2610 "Issued HARD reset command, going to wait %dms\n",
2613 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2615 "Issued SOFT reset command, going to wait %dms\n",
2620 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2621 * itself is in reset. In either reset we need to wait until the reset
2624 msleep(reset_timeout_ms);
2626 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2627 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2629 "Timeout while waiting for device to reset 0x%x\n",
2633 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2634 HW_CAP_GOLDEN | HW_CAP_TPC);
2635 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2636 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2640 /* Chicken bit to re-initiate boot sequencer flow */
2641 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2642 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2643 /* Move boot manager FSM to pre boot sequencer init state */
2644 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2645 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2647 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2648 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2649 HW_CAP_DMA | HW_CAP_MME |
2650 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2651 HW_CAP_GOLDEN | HW_CAP_TPC);
2652 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2655 int goya_suspend(struct hl_device *hdev)
2659 rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
2661 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2666 int goya_resume(struct hl_device *hdev)
2668 return goya_init_iatu(hdev);
2671 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2672 void *cpu_addr, dma_addr_t dma_addr, size_t size)
2676 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2677 VM_DONTCOPY | VM_NORESERVE;
2679 rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
2681 dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
2686 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2688 u32 db_reg_offset, db_value;
2690 switch (hw_queue_id) {
2691 case GOYA_QUEUE_ID_DMA_0:
2692 db_reg_offset = mmDMA_QM_0_PQ_PI;
2695 case GOYA_QUEUE_ID_DMA_1:
2696 db_reg_offset = mmDMA_QM_1_PQ_PI;
2699 case GOYA_QUEUE_ID_DMA_2:
2700 db_reg_offset = mmDMA_QM_2_PQ_PI;
2703 case GOYA_QUEUE_ID_DMA_3:
2704 db_reg_offset = mmDMA_QM_3_PQ_PI;
2707 case GOYA_QUEUE_ID_DMA_4:
2708 db_reg_offset = mmDMA_QM_4_PQ_PI;
2711 case GOYA_QUEUE_ID_CPU_PQ:
2712 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2715 case GOYA_QUEUE_ID_MME:
2716 db_reg_offset = mmMME_QM_PQ_PI;
2719 case GOYA_QUEUE_ID_TPC0:
2720 db_reg_offset = mmTPC0_QM_PQ_PI;
2723 case GOYA_QUEUE_ID_TPC1:
2724 db_reg_offset = mmTPC1_QM_PQ_PI;
2727 case GOYA_QUEUE_ID_TPC2:
2728 db_reg_offset = mmTPC2_QM_PQ_PI;
2731 case GOYA_QUEUE_ID_TPC3:
2732 db_reg_offset = mmTPC3_QM_PQ_PI;
2735 case GOYA_QUEUE_ID_TPC4:
2736 db_reg_offset = mmTPC4_QM_PQ_PI;
2739 case GOYA_QUEUE_ID_TPC5:
2740 db_reg_offset = mmTPC5_QM_PQ_PI;
2743 case GOYA_QUEUE_ID_TPC6:
2744 db_reg_offset = mmTPC6_QM_PQ_PI;
2747 case GOYA_QUEUE_ID_TPC7:
2748 db_reg_offset = mmTPC7_QM_PQ_PI;
2752 /* Should never get here */
2753 dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
2760 /* ring the doorbell */
2761 WREG32(db_reg_offset, db_value);
2763 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
2764 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2765 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2768 void goya_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd)
2770 /* The QMANs are on the SRAM so need to copy to IO space */
2771 memcpy_toio((void __iomem *) pqe, bd, sizeof(struct hl_bd));
2774 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2775 dma_addr_t *dma_handle, gfp_t flags)
2777 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2780 /* Shift to the device's base physical address of host memory */
2782 *dma_handle += HOST_PHYS_BASE;
2787 static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2788 void *cpu_addr, dma_addr_t dma_handle)
2790 /* Cancel the device's base physical address of host memory */
2791 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
2793 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
2796 int goya_scrub_device_mem(struct hl_device *hdev, u64 addr, u64 size)
2801 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
2802 dma_addr_t *dma_handle, u16 *queue_len)
2807 *dma_handle = hdev->asic_prop.sram_base_address;
2809 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
2812 case GOYA_QUEUE_ID_MME:
2813 offset = MME_QMAN_BASE_OFFSET;
2814 *queue_len = MME_QMAN_LENGTH;
2816 case GOYA_QUEUE_ID_TPC0:
2817 offset = TPC0_QMAN_BASE_OFFSET;
2818 *queue_len = TPC_QMAN_LENGTH;
2820 case GOYA_QUEUE_ID_TPC1:
2821 offset = TPC1_QMAN_BASE_OFFSET;
2822 *queue_len = TPC_QMAN_LENGTH;
2824 case GOYA_QUEUE_ID_TPC2:
2825 offset = TPC2_QMAN_BASE_OFFSET;
2826 *queue_len = TPC_QMAN_LENGTH;
2828 case GOYA_QUEUE_ID_TPC3:
2829 offset = TPC3_QMAN_BASE_OFFSET;
2830 *queue_len = TPC_QMAN_LENGTH;
2832 case GOYA_QUEUE_ID_TPC4:
2833 offset = TPC4_QMAN_BASE_OFFSET;
2834 *queue_len = TPC_QMAN_LENGTH;
2836 case GOYA_QUEUE_ID_TPC5:
2837 offset = TPC5_QMAN_BASE_OFFSET;
2838 *queue_len = TPC_QMAN_LENGTH;
2840 case GOYA_QUEUE_ID_TPC6:
2841 offset = TPC6_QMAN_BASE_OFFSET;
2842 *queue_len = TPC_QMAN_LENGTH;
2844 case GOYA_QUEUE_ID_TPC7:
2845 offset = TPC7_QMAN_BASE_OFFSET;
2846 *queue_len = TPC_QMAN_LENGTH;
2849 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
2854 *dma_handle += offset;
2859 static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
2861 struct packet_msg_prot *fence_pkt;
2863 dma_addr_t fence_dma_addr;
2869 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
2871 timeout = HL_DEVICE_TIMEOUT_USEC;
2873 if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) {
2874 dev_err_ratelimited(hdev->dev,
2875 "Can't send driver job on QMAN0 because the device is not idle\n");
2879 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2883 "Failed to allocate fence memory for QMAN0\n");
2887 goya_qman0_set_security(hdev, true);
2889 cb = job->patched_cb;
2891 fence_pkt = cb->kernel_address +
2892 job->job_cb_size - sizeof(struct packet_msg_prot);
2894 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2895 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2896 (1 << GOYA_PKT_CTL_MB_SHIFT);
2897 fence_pkt->ctl = cpu_to_le32(tmp);
2898 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
2899 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
2901 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
2902 job->job_cb_size, cb->bus_address);
2904 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
2905 goto free_fence_ptr;
2908 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
2909 (tmp == GOYA_QMAN0_FENCE_VAL), 1000,
2912 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
2914 if (rc == -ETIMEDOUT) {
2915 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
2916 goto free_fence_ptr;
2920 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
2923 goya_qman0_set_security(hdev, false);
2928 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
2929 u32 timeout, long *result)
2931 struct goya_device *goya = hdev->asic_specific;
2933 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
2940 timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
2942 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
2946 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
2948 struct packet_msg_prot *fence_pkt;
2949 dma_addr_t pkt_dma_addr;
2951 dma_addr_t fence_dma_addr;
2955 fence_val = GOYA_QMAN0_FENCE_VAL;
2957 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2961 "Failed to allocate memory for H/W queue %d testing\n",
2968 fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
2969 sizeof(struct packet_msg_prot),
2970 GFP_KERNEL, &pkt_dma_addr);
2973 "Failed to allocate packet for H/W queue %d testing\n",
2976 goto free_fence_ptr;
2979 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2980 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2981 (1 << GOYA_PKT_CTL_MB_SHIFT);
2982 fence_pkt->ctl = cpu_to_le32(tmp);
2983 fence_pkt->value = cpu_to_le32(fence_val);
2984 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
2986 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
2987 sizeof(struct packet_msg_prot),
2991 "Failed to send fence packet to H/W queue %d\n",
2996 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
2997 1000, GOYA_TEST_QUEUE_WAIT_USEC, true);
2999 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
3001 if (rc == -ETIMEDOUT) {
3003 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
3004 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
3009 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
3012 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
3017 int goya_test_cpu_queue(struct hl_device *hdev)
3019 struct goya_device *goya = hdev->asic_specific;
3022 * check capability here as send_cpu_message() won't update the result
3023 * value if no capability
3025 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
3028 return hl_fw_test_cpu_queue(hdev);
3031 int goya_test_queues(struct hl_device *hdev)
3033 int i, rc, ret_val = 0;
3035 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
3036 rc = goya_test_queue(hdev, i);
3044 static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
3045 gfp_t mem_flags, dma_addr_t *dma_handle)
3049 if (size > GOYA_DMA_POOL_BLK_SIZE)
3052 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
3054 /* Shift to the device's base physical address of host memory */
3056 *dma_handle += HOST_PHYS_BASE;
3061 static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
3062 dma_addr_t dma_addr)
3064 /* Cancel the device's base physical address of host memory */
3065 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
3067 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
3070 void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3071 dma_addr_t *dma_handle)
3075 vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
3076 *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
3077 VA_CPU_ACCESSIBLE_MEM_ADDR;
3082 void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3085 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
3088 static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
3089 int nents, enum dma_data_direction dir)
3091 struct scatterlist *sg;
3094 if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
3097 /* Shift to the device's base physical address of host memory */
3098 for_each_sg(sgl, sg, nents, i)
3099 sg->dma_address += HOST_PHYS_BASE;
3104 static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
3105 int nents, enum dma_data_direction dir)
3107 struct scatterlist *sg;
3110 /* Cancel the device's base physical address of host memory */
3111 for_each_sg(sgl, sg, nents, i)
3112 sg->dma_address -= HOST_PHYS_BASE;
3114 dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
3117 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3119 struct scatterlist *sg, *sg_next_iter;
3120 u32 count, dma_desc_cnt;
3122 dma_addr_t addr, addr_next;
3126 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3128 len = sg_dma_len(sg);
3129 addr = sg_dma_address(sg);
3134 while ((count + 1) < sgt->nents) {
3135 sg_next_iter = sg_next(sg);
3136 len_next = sg_dma_len(sg_next_iter);
3137 addr_next = sg_dma_address(sg_next_iter);
3142 if ((addr + len == addr_next) &&
3143 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3155 return dma_desc_cnt * sizeof(struct packet_lin_dma);
3158 static int goya_pin_memory_before_cs(struct hl_device *hdev,
3159 struct hl_cs_parser *parser,
3160 struct packet_lin_dma *user_dma_pkt,
3161 u64 addr, enum dma_data_direction dir)
3163 struct hl_userptr *userptr;
3166 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3167 parser->job_userptr_list, &userptr))
3168 goto already_pinned;
3170 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3174 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3179 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3181 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3182 userptr->sgt->nents, dir);
3184 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3188 userptr->dma_mapped = true;
3192 parser->patched_cb_size +=
3193 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3198 hl_unpin_host_memory(hdev, userptr);
3204 static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3205 struct hl_cs_parser *parser,
3206 struct packet_lin_dma *user_dma_pkt)
3208 u64 device_memory_addr, addr;
3209 enum dma_data_direction dir;
3210 enum goya_dma_direction user_dir;
3211 bool sram_addr = true;
3212 bool skip_host_mem_pin = false;
3217 ctl = le32_to_cpu(user_dma_pkt->ctl);
3219 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3220 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3222 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3223 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3226 case DMA_HOST_TO_DRAM:
3227 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3228 dir = DMA_TO_DEVICE;
3230 addr = le64_to_cpu(user_dma_pkt->src_addr);
3231 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3233 skip_host_mem_pin = true;
3236 case DMA_DRAM_TO_HOST:
3237 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3238 dir = DMA_FROM_DEVICE;
3240 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3241 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3244 case DMA_HOST_TO_SRAM:
3245 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3246 dir = DMA_TO_DEVICE;
3247 addr = le64_to_cpu(user_dma_pkt->src_addr);
3248 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3250 skip_host_mem_pin = true;
3253 case DMA_SRAM_TO_HOST:
3254 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3255 dir = DMA_FROM_DEVICE;
3256 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3257 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3260 dev_err(hdev->dev, "DMA direction is undefined\n");
3265 if (!hl_mem_area_inside_range(device_memory_addr,
3266 le32_to_cpu(user_dma_pkt->tsize),
3267 hdev->asic_prop.sram_user_base_address,
3268 hdev->asic_prop.sram_end_address)) {
3271 "SRAM address 0x%llx + 0x%x is invalid\n",
3273 user_dma_pkt->tsize);
3277 if (!hl_mem_area_inside_range(device_memory_addr,
3278 le32_to_cpu(user_dma_pkt->tsize),
3279 hdev->asic_prop.dram_user_base_address,
3280 hdev->asic_prop.dram_end_address)) {
3283 "DRAM address 0x%llx + 0x%x is invalid\n",
3285 user_dma_pkt->tsize);
3290 if (skip_host_mem_pin)
3291 parser->patched_cb_size += sizeof(*user_dma_pkt);
3293 if ((dir == DMA_TO_DEVICE) &&
3294 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3296 "Can't DMA from host on queue other then 1\n");
3300 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3307 static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3308 struct hl_cs_parser *parser,
3309 struct packet_lin_dma *user_dma_pkt)
3311 u64 sram_memory_addr, dram_memory_addr;
3312 enum goya_dma_direction user_dir;
3315 ctl = le32_to_cpu(user_dma_pkt->ctl);
3316 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3317 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3319 if (user_dir == DMA_DRAM_TO_SRAM) {
3320 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3321 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3322 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3324 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3325 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3326 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3329 if (!hl_mem_area_inside_range(sram_memory_addr,
3330 le32_to_cpu(user_dma_pkt->tsize),
3331 hdev->asic_prop.sram_user_base_address,
3332 hdev->asic_prop.sram_end_address)) {
3333 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3334 sram_memory_addr, user_dma_pkt->tsize);
3338 if (!hl_mem_area_inside_range(dram_memory_addr,
3339 le32_to_cpu(user_dma_pkt->tsize),
3340 hdev->asic_prop.dram_user_base_address,
3341 hdev->asic_prop.dram_end_address)) {
3342 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3343 dram_memory_addr, user_dma_pkt->tsize);
3347 parser->patched_cb_size += sizeof(*user_dma_pkt);
3352 static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3353 struct hl_cs_parser *parser,
3354 struct packet_lin_dma *user_dma_pkt)
3356 enum goya_dma_direction user_dir;
3360 dev_dbg(hdev->dev, "DMA packet details:\n");
3361 dev_dbg(hdev->dev, "source == 0x%llx\n",
3362 le64_to_cpu(user_dma_pkt->src_addr));
3363 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3364 le64_to_cpu(user_dma_pkt->dst_addr));
3365 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3367 ctl = le32_to_cpu(user_dma_pkt->ctl);
3368 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3369 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3372 * Special handling for DMA with size 0. The H/W has a bug where
3373 * this can cause the QMAN DMA to get stuck, so block it here.
3375 if (user_dma_pkt->tsize == 0) {
3377 "Got DMA with size 0, might reset the device\n");
3381 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3382 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3384 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3389 static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3390 struct hl_cs_parser *parser,
3391 struct packet_lin_dma *user_dma_pkt)
3393 dev_dbg(hdev->dev, "DMA packet details:\n");
3394 dev_dbg(hdev->dev, "source == 0x%llx\n",
3395 le64_to_cpu(user_dma_pkt->src_addr));
3396 dev_dbg(hdev->dev, "destination == 0x%llx\n",
3397 le64_to_cpu(user_dma_pkt->dst_addr));
3398 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize));
3402 * We can't allow user to read from Host using QMANs other than 1.
3403 * PMMU and HPMMU addresses are equal, check only one of them.
3405 if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
3406 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3407 le32_to_cpu(user_dma_pkt->tsize),
3408 hdev->asic_prop.pmmu.start_addr,
3409 hdev->asic_prop.pmmu.end_addr)) {
3411 "Can't DMA from host on queue other then 1\n");
3415 if (user_dma_pkt->tsize == 0) {
3417 "Got DMA with size 0, might reset the device\n");
3421 parser->patched_cb_size += sizeof(*user_dma_pkt);
3426 static int goya_validate_wreg32(struct hl_device *hdev,
3427 struct hl_cs_parser *parser,
3428 struct packet_wreg32 *wreg_pkt)
3430 struct goya_device *goya = hdev->asic_specific;
3431 u32 sob_start_addr, sob_end_addr;
3434 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3435 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3437 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3438 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3439 dev_dbg(hdev->dev, "value == 0x%x\n",
3440 le32_to_cpu(wreg_pkt->value));
3442 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3443 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3449 * With MMU, DMA channels are not secured, so it doesn't matter where
3450 * the WR COMP will be written to because it will go out with
3451 * non-secured property
3453 if (goya->hw_cap_initialized & HW_CAP_MMU)
3456 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3457 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3459 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3460 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
3462 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3470 static int goya_validate_cb(struct hl_device *hdev,
3471 struct hl_cs_parser *parser, bool is_mmu)
3473 u32 cb_parsed_length = 0;
3476 parser->patched_cb_size = 0;
3478 /* cb_user_size is more than 0 so loop will always be executed */
3479 while (cb_parsed_length < parser->user_cb_size) {
3480 enum packet_id pkt_id;
3482 struct goya_packet *user_pkt;
3484 user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
3486 pkt_id = (enum packet_id) (
3487 (le64_to_cpu(user_pkt->header) &
3488 PACKET_HEADER_PACKET_ID_MASK) >>
3489 PACKET_HEADER_PACKET_ID_SHIFT);
3491 if (!validate_packet_id(pkt_id)) {
3492 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
3497 pkt_size = goya_packet_sizes[pkt_id];
3498 cb_parsed_length += pkt_size;
3499 if (cb_parsed_length > parser->user_cb_size) {
3501 "packet 0x%x is out of CB boundary\n", pkt_id);
3507 case PACKET_WREG_32:
3509 * Although it is validated after copy in patch_cb(),
3510 * need to validate here as well because patch_cb() is
3511 * not called in MMU path while this function is called
3513 rc = goya_validate_wreg32(hdev,
3514 parser, (struct packet_wreg32 *) user_pkt);
3515 parser->patched_cb_size += pkt_size;
3518 case PACKET_WREG_BULK:
3520 "User not allowed to use WREG_BULK\n");
3524 case PACKET_MSG_PROT:
3526 "User not allowed to use MSG_PROT\n");
3531 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3536 dev_err(hdev->dev, "User not allowed to use STOP\n");
3540 case PACKET_LIN_DMA:
3542 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3543 (struct packet_lin_dma *) user_pkt);
3545 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3546 (struct packet_lin_dma *) user_pkt);
3549 case PACKET_MSG_LONG:
3550 case PACKET_MSG_SHORT:
3553 parser->patched_cb_size += pkt_size;
3557 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3568 * The new CB should have space at the end for two MSG_PROT packets:
3569 * 1. A packet that will act as a completion packet
3570 * 2. A packet that will generate MSI-X interrupt
3572 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3577 static int goya_patch_dma_packet(struct hl_device *hdev,
3578 struct hl_cs_parser *parser,
3579 struct packet_lin_dma *user_dma_pkt,
3580 struct packet_lin_dma *new_dma_pkt,
3581 u32 *new_dma_pkt_size)
3583 struct hl_userptr *userptr;
3584 struct scatterlist *sg, *sg_next_iter;
3585 u32 count, dma_desc_cnt;
3587 dma_addr_t dma_addr, dma_addr_next;
3588 enum goya_dma_direction user_dir;
3589 u64 device_memory_addr, addr;
3590 enum dma_data_direction dir;
3591 struct sg_table *sgt;
3592 bool skip_host_mem_pin = false;
3594 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
3596 ctl = le32_to_cpu(user_dma_pkt->ctl);
3598 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3599 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3601 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3602 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3604 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3605 (user_dma_pkt->tsize == 0)) {
3606 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3607 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3611 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3612 addr = le64_to_cpu(user_dma_pkt->src_addr);
3613 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3614 dir = DMA_TO_DEVICE;
3616 skip_host_mem_pin = true;
3618 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3619 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3620 dir = DMA_FROM_DEVICE;
3623 if ((!skip_host_mem_pin) &&
3624 (hl_userptr_is_pinned(hdev, addr,
3625 le32_to_cpu(user_dma_pkt->tsize),
3626 parser->job_userptr_list, &userptr) == false)) {
3627 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3628 addr, user_dma_pkt->tsize);
3632 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3633 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3634 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3638 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
3640 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
3645 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3646 len = sg_dma_len(sg);
3647 dma_addr = sg_dma_address(sg);
3652 while ((count + 1) < sgt->nents) {
3653 sg_next_iter = sg_next(sg);
3654 len_next = sg_dma_len(sg_next_iter);
3655 dma_addr_next = sg_dma_address(sg_next_iter);
3660 if ((dma_addr + len == dma_addr_next) &&
3661 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3670 ctl = le32_to_cpu(user_dma_pkt->ctl);
3671 if (likely(dma_desc_cnt))
3672 ctl &= ~GOYA_PKT_CTL_EB_MASK;
3673 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3674 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3675 new_dma_pkt->ctl = cpu_to_le32(ctl);
3676 new_dma_pkt->tsize = cpu_to_le32((u32) len);
3678 if (dir == DMA_TO_DEVICE) {
3679 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3680 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
3682 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3683 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
3687 device_memory_addr += len;
3692 if (!dma_desc_cnt) {
3694 "Error of 0 SG entries when patching DMA packet\n");
3698 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
3700 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
3702 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
3707 static int goya_patch_cb(struct hl_device *hdev,
3708 struct hl_cs_parser *parser)
3710 u32 cb_parsed_length = 0;
3711 u32 cb_patched_cur_length = 0;
3714 /* cb_user_size is more than 0 so loop will always be executed */
3715 while (cb_parsed_length < parser->user_cb_size) {
3716 enum packet_id pkt_id;
3718 u32 new_pkt_size = 0;
3719 struct goya_packet *user_pkt, *kernel_pkt;
3721 user_pkt = parser->user_cb->kernel_address + cb_parsed_length;
3722 kernel_pkt = parser->patched_cb->kernel_address +
3723 cb_patched_cur_length;
3725 pkt_id = (enum packet_id) (
3726 (le64_to_cpu(user_pkt->header) &
3727 PACKET_HEADER_PACKET_ID_MASK) >>
3728 PACKET_HEADER_PACKET_ID_SHIFT);
3730 if (!validate_packet_id(pkt_id)) {
3731 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id);
3736 pkt_size = goya_packet_sizes[pkt_id];
3737 cb_parsed_length += pkt_size;
3738 if (cb_parsed_length > parser->user_cb_size) {
3740 "packet 0x%x is out of CB boundary\n", pkt_id);
3746 case PACKET_LIN_DMA:
3747 rc = goya_patch_dma_packet(hdev, parser,
3748 (struct packet_lin_dma *) user_pkt,
3749 (struct packet_lin_dma *) kernel_pkt,
3751 cb_patched_cur_length += new_pkt_size;
3754 case PACKET_WREG_32:
3755 memcpy(kernel_pkt, user_pkt, pkt_size);
3756 cb_patched_cur_length += pkt_size;
3757 rc = goya_validate_wreg32(hdev, parser,
3758 (struct packet_wreg32 *) kernel_pkt);
3761 case PACKET_WREG_BULK:
3763 "User not allowed to use WREG_BULK\n");
3767 case PACKET_MSG_PROT:
3769 "User not allowed to use MSG_PROT\n");
3774 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3779 dev_err(hdev->dev, "User not allowed to use STOP\n");
3783 case PACKET_MSG_LONG:
3784 case PACKET_MSG_SHORT:
3787 memcpy(kernel_pkt, user_pkt, pkt_size);
3788 cb_patched_cur_length += pkt_size;
3792 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3805 static int goya_parse_cb_mmu(struct hl_device *hdev,
3806 struct hl_cs_parser *parser)
3808 u64 patched_cb_handle;
3809 u32 patched_cb_size;
3810 struct hl_cb *user_cb;
3814 * The new CB should have space at the end for two MSG_PROT pkt:
3815 * 1. A packet that will act as a completion packet
3816 * 2. A packet that will generate MSI-X interrupt
3818 parser->patched_cb_size = parser->user_cb_size +
3819 sizeof(struct packet_msg_prot) * 2;
3821 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
3822 parser->patched_cb_size, false, false,
3823 &patched_cb_handle);
3827 "Failed to allocate patched CB for DMA CS %d\n",
3832 patched_cb_handle >>= PAGE_SHIFT;
3833 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3834 (u32) patched_cb_handle);
3835 /* hl_cb_get should never fail here so use kernel WARN */
3836 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3837 (u32) patched_cb_handle);
3838 if (!parser->patched_cb) {
3844 * The check that parser->user_cb_size <= parser->user_cb->size was done
3845 * in validate_queue_index().
3847 memcpy(parser->patched_cb->kernel_address,
3848 parser->user_cb->kernel_address,
3849 parser->user_cb_size);
3851 patched_cb_size = parser->patched_cb_size;
3853 /* validate patched CB instead of user CB */
3854 user_cb = parser->user_cb;
3855 parser->user_cb = parser->patched_cb;
3856 rc = goya_validate_cb(hdev, parser, true);
3857 parser->user_cb = user_cb;
3860 hl_cb_put(parser->patched_cb);
3864 if (patched_cb_size != parser->patched_cb_size) {
3865 dev_err(hdev->dev, "user CB size mismatch\n");
3866 hl_cb_put(parser->patched_cb);
3873 * Always call cb destroy here because we still have 1 reference
3874 * to it by calling cb_get earlier. After the job will be completed,
3875 * cb_put will release it, but here we want to remove it from the
3878 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3879 patched_cb_handle << PAGE_SHIFT);
3884 static int goya_parse_cb_no_mmu(struct hl_device *hdev,
3885 struct hl_cs_parser *parser)
3887 u64 patched_cb_handle;
3890 rc = goya_validate_cb(hdev, parser, false);
3895 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx,
3896 parser->patched_cb_size, false, false,
3897 &patched_cb_handle);
3900 "Failed to allocate patched CB for DMA CS %d\n", rc);
3904 patched_cb_handle >>= PAGE_SHIFT;
3905 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3906 (u32) patched_cb_handle);
3907 /* hl_cb_get should never fail here so use kernel WARN */
3908 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3909 (u32) patched_cb_handle);
3910 if (!parser->patched_cb) {
3915 rc = goya_patch_cb(hdev, parser);
3918 hl_cb_put(parser->patched_cb);
3922 * Always call cb destroy here because we still have 1 reference
3923 * to it by calling cb_get earlier. After the job will be completed,
3924 * cb_put will release it, but here we want to remove it from the
3927 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3928 patched_cb_handle << PAGE_SHIFT);
3932 hl_userptr_delete_list(hdev, parser->job_userptr_list);
3936 static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
3937 struct hl_cs_parser *parser)
3939 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
3940 struct goya_device *goya = hdev->asic_specific;
3942 if (goya->hw_cap_initialized & HW_CAP_MMU)
3945 /* For internal queue jobs, just check if CB address is valid */
3946 if (hl_mem_area_inside_range(
3947 (u64) (uintptr_t) parser->user_cb,
3948 parser->user_cb_size,
3949 asic_prop->sram_user_base_address,
3950 asic_prop->sram_end_address))
3953 if (hl_mem_area_inside_range(
3954 (u64) (uintptr_t) parser->user_cb,
3955 parser->user_cb_size,
3956 asic_prop->dram_user_base_address,
3957 asic_prop->dram_end_address))
3961 "Internal CB address 0x%px + 0x%x is not in SRAM nor in DRAM\n",
3962 parser->user_cb, parser->user_cb_size);
3967 int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
3969 struct goya_device *goya = hdev->asic_specific;
3971 if (parser->queue_type == QUEUE_TYPE_INT)
3972 return goya_parse_cb_no_ext_queue(hdev, parser);
3974 if (goya->hw_cap_initialized & HW_CAP_MMU)
3975 return goya_parse_cb_mmu(hdev, parser);
3977 return goya_parse_cb_no_mmu(hdev, parser);
3980 void goya_add_end_of_cb_packets(struct hl_device *hdev, void *kernel_address,
3981 u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec,
3984 struct packet_msg_prot *cq_pkt;
3987 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
3989 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3990 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3991 (1 << GOYA_PKT_CTL_MB_SHIFT);
3992 cq_pkt->ctl = cpu_to_le32(tmp);
3993 cq_pkt->value = cpu_to_le32(cq_val);
3994 cq_pkt->addr = cpu_to_le64(cq_addr);
3998 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3999 (1 << GOYA_PKT_CTL_MB_SHIFT);
4000 cq_pkt->ctl = cpu_to_le32(tmp);
4001 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
4002 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
4005 void goya_update_eq_ci(struct hl_device *hdev, u32 val)
4007 WREG32(mmCPU_EQ_CI, val);
4010 void goya_restore_phase_topology(struct hl_device *hdev)
4015 static void goya_clear_sm_regs(struct hl_device *hdev)
4017 int i, num_of_sob_in_longs, num_of_mon_in_longs;
4019 num_of_sob_in_longs =
4020 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
4022 num_of_mon_in_longs =
4023 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
4025 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
4026 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
4028 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
4029 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
4031 /* Flush all WREG to prevent race */
4032 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
4036 * goya_debugfs_read32 - read a 32bit value from a given device or a host mapped
4039 * @hdev: pointer to hl_device structure
4040 * @addr: device or host mapped address
4041 * @val: returned value
4043 * In case of DDR address that is not mapped into the default aperture that
4044 * the DDR bar exposes, the function will configure the iATU so that the DDR
4045 * bar will be positioned at a base address that allows reading from the
4046 * required address. Configuring the iATU during normal operation can
4047 * lead to undefined behavior and therefore, should be done with extreme care
4050 static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
4052 struct asic_fixed_properties *prop = &hdev->asic_prop;
4056 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4057 *val = RREG32(addr - CFG_BASE);
4059 } else if ((addr >= SRAM_BASE_ADDR) &&
4060 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4062 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4063 (addr - SRAM_BASE_ADDR));
4065 } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
4067 u64 bar_base_addr = DRAM_PHYS_BASE +
4068 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4070 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4071 if (ddr_bar_addr != U64_MAX) {
4072 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
4073 (addr - bar_base_addr));
4075 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4078 if (ddr_bar_addr == U64_MAX)
4081 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4082 *val = *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE);
4092 * goya_debugfs_write32 - write a 32bit value to a given device or a host mapped
4095 * @hdev: pointer to hl_device structure
4096 * @addr: device or host mapped address
4097 * @val: returned value
4099 * In case of DDR address that is not mapped into the default aperture that
4100 * the DDR bar exposes, the function will configure the iATU so that the DDR
4101 * bar will be positioned at a base address that allows writing to the
4102 * required address. Configuring the iATU during normal operation can
4103 * lead to undefined behavior and therefore, should be done with extreme care
4106 static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
4108 struct asic_fixed_properties *prop = &hdev->asic_prop;
4112 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4113 WREG32(addr - CFG_BASE, val);
4115 } else if ((addr >= SRAM_BASE_ADDR) &&
4116 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4118 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4119 (addr - SRAM_BASE_ADDR));
4121 } else if (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size) {
4123 u64 bar_base_addr = DRAM_PHYS_BASE +
4124 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4126 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4127 if (ddr_bar_addr != U64_MAX) {
4128 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4129 (addr - bar_base_addr));
4131 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4134 if (ddr_bar_addr == U64_MAX)
4137 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4138 *(u32 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4147 static int goya_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
4149 struct asic_fixed_properties *prop = &hdev->asic_prop;
4153 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4154 u32 val_l = RREG32(addr - CFG_BASE);
4155 u32 val_h = RREG32(addr + sizeof(u32) - CFG_BASE);
4157 *val = (((u64) val_h) << 32) | val_l;
4159 } else if ((addr >= SRAM_BASE_ADDR) &&
4160 (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
4162 *val = readq(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4163 (addr - SRAM_BASE_ADDR));
4166 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
4168 u64 bar_base_addr = DRAM_PHYS_BASE +
4169 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4171 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4172 if (ddr_bar_addr != U64_MAX) {
4173 *val = readq(hdev->pcie_bar[DDR_BAR_ID] +
4174 (addr - bar_base_addr));
4176 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4179 if (ddr_bar_addr == U64_MAX)
4182 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4183 *val = *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE);
4192 static int goya_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
4194 struct asic_fixed_properties *prop = &hdev->asic_prop;
4198 if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
4199 WREG32(addr - CFG_BASE, lower_32_bits(val));
4200 WREG32(addr + sizeof(u32) - CFG_BASE, upper_32_bits(val));
4202 } else if ((addr >= SRAM_BASE_ADDR) &&
4203 (addr <= SRAM_BASE_ADDR + SRAM_SIZE - sizeof(u64))) {
4205 writeq(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4206 (addr - SRAM_BASE_ADDR));
4209 DRAM_PHYS_BASE + hdev->asic_prop.dram_size - sizeof(u64)) {
4211 u64 bar_base_addr = DRAM_PHYS_BASE +
4212 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4214 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4215 if (ddr_bar_addr != U64_MAX) {
4216 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4217 (addr - bar_base_addr));
4219 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4222 if (ddr_bar_addr == U64_MAX)
4225 } else if (addr >= HOST_PHYS_BASE && !iommu_present(&pci_bus_type)) {
4226 *(u64 *) phys_to_virt(addr - HOST_PHYS_BASE) = val;
4235 static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4237 struct goya_device *goya = hdev->asic_specific;
4239 if (hdev->hard_reset_pending)
4242 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4243 (addr - goya->ddr_bar_cur_addr));
4246 static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4248 struct goya_device *goya = hdev->asic_specific;
4250 if (hdev->hard_reset_pending)
4253 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4254 (addr - goya->ddr_bar_cur_addr));
4257 static const char *_goya_get_event_desc(u16 event_type)
4259 switch (event_type) {
4260 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4262 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4263 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4264 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4265 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4266 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4267 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4268 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4269 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4271 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4273 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4274 return "MME_ecc_ext";
4275 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4277 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4279 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4281 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4282 return "CPU_if_ecc";
4283 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4285 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4286 return "PSOC_coresight";
4287 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4289 case GOYA_ASYNC_EVENT_ID_GIC500:
4291 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4293 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4295 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4296 return "L2_ram_ecc";
4297 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4298 return "PSOC_gpio_05_sw_reset";
4299 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4300 return "PSOC_gpio_10_vrhot_icrit";
4301 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4303 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4304 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4305 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4306 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4307 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4308 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4309 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4310 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4312 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4314 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4316 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4317 return "CPU_axi_splitter";
4318 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4319 return "PSOC_axi_dec";
4320 case GOYA_ASYNC_EVENT_ID_PSOC:
4322 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4323 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4324 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4325 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4326 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4327 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4328 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4329 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4330 return "TPC%d_krn_err";
4331 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4333 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4335 case GOYA_ASYNC_EVENT_ID_MME_QM:
4337 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4339 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4341 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4343 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4344 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4345 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4346 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4347 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4348 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4349 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4350 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4351 return "TPC%d_bmon_spmu";
4352 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4353 return "DMA_bm_ch%d";
4354 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4355 return "POWER_ENV_S";
4356 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4357 return "POWER_ENV_E";
4358 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4359 return "THERMAL_ENV_S";
4360 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4361 return "THERMAL_ENV_E";
4367 static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
4371 switch (event_type) {
4372 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4373 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4374 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4375 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4376 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4377 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4378 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4379 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4380 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
4381 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4383 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4384 index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
4385 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4387 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4388 index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
4389 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4391 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4392 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4393 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4394 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4395 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4396 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4397 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4398 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4399 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4400 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4402 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4403 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4404 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4405 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4406 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4407 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4408 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4409 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4410 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4411 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4413 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4414 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4415 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4417 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4418 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4419 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4421 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4422 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4423 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4425 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4426 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4427 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4429 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4430 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4431 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4432 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4433 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4434 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4435 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4436 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4437 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
4438 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4440 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4441 index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
4442 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4445 snprintf(desc, size, _goya_get_event_desc(event_type));
4450 static void goya_print_razwi_info(struct hl_device *hdev)
4452 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4453 dev_err_ratelimited(hdev->dev, "Illegal write to LBW\n");
4454 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4457 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4458 dev_err_ratelimited(hdev->dev, "Illegal read from LBW\n");
4459 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4462 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4463 dev_err_ratelimited(hdev->dev, "Illegal write to HBW\n");
4464 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4467 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4468 dev_err_ratelimited(hdev->dev, "Illegal read from HBW\n");
4469 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4473 static void goya_print_mmu_error_info(struct hl_device *hdev)
4475 struct goya_device *goya = hdev->asic_specific;
4479 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4482 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4483 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4484 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4486 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4488 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
4491 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
4495 static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
4500 goya_get_event_desc(event_type, desc, sizeof(desc));
4501 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4505 goya_print_razwi_info(hdev);
4506 goya_print_mmu_error_info(hdev);
4510 static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4511 size_t irq_arr_size)
4513 struct cpucp_unmask_irq_arr_packet *pkt;
4514 size_t total_pkt_size;
4517 int irq_num_entries, irq_arr_index;
4518 __le32 *goya_irq_arr;
4520 total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
4523 /* data should be aligned to 8 bytes in order to CPU-CP to copy it */
4524 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
4526 /* total_pkt_size is casted to u16 later on */
4527 if (total_pkt_size > USHRT_MAX) {
4528 dev_err(hdev->dev, "too many elements in IRQ array\n");
4532 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4536 irq_num_entries = irq_arr_size / sizeof(irq_arr[0]);
4537 pkt->length = cpu_to_le32(irq_num_entries);
4539 /* We must perform any necessary endianness conversation on the irq
4540 * array being passed to the goya hardware
4542 for (irq_arr_index = 0, goya_irq_arr = (__le32 *) &pkt->irqs;
4543 irq_arr_index < irq_num_entries ; irq_arr_index++)
4544 goya_irq_arr[irq_arr_index] =
4545 cpu_to_le32(irq_arr[irq_arr_index]);
4547 pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4548 CPUCP_PKT_CTL_OPCODE_SHIFT);
4550 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
4551 total_pkt_size, 0, &result);
4554 dev_err(hdev->dev, "failed to unmask IRQ array\n");
4561 static int goya_soft_reset_late_init(struct hl_device *hdev)
4564 * Unmask all IRQs since some could have been received
4565 * during the soft reset
4567 return goya_unmask_irq_arr(hdev, goya_all_events,
4568 sizeof(goya_all_events));
4571 static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
4573 struct cpucp_packet pkt;
4577 memset(&pkt, 0, sizeof(pkt));
4579 pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
4580 CPUCP_PKT_CTL_OPCODE_SHIFT);
4581 pkt.value = cpu_to_le64(event_type);
4583 rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
4587 dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
4592 static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
4594 switch (event_type) {
4595 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4596 hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
4597 dev_info_ratelimited(hdev->dev,
4598 "Clock throttling due to power consumption\n");
4600 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4601 hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
4602 dev_info_ratelimited(hdev->dev,
4603 "Power envelop is safe, back to optimal clock\n");
4605 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4606 hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
4607 dev_info_ratelimited(hdev->dev,
4608 "Clock throttling due to overheating\n");
4610 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4611 hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
4612 dev_info_ratelimited(hdev->dev,
4613 "Thermal envelop is safe, back to optimal clock\n");
4617 dev_err(hdev->dev, "Received invalid clock change event %d\n",
4623 void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
4625 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
4626 u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
4627 >> EQ_CTL_EVENT_TYPE_SHIFT);
4628 struct goya_device *goya = hdev->asic_specific;
4630 goya->events_stat[event_type]++;
4631 goya->events_stat_aggregate[event_type]++;
4633 switch (event_type) {
4634 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4635 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4636 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4637 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4638 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4639 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4640 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4641 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4642 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4643 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4644 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4645 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4646 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4647 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4648 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4649 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4650 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4651 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4652 case GOYA_ASYNC_EVENT_ID_GIC500:
4653 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4654 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4655 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4656 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4657 goya_print_irq_info(hdev, event_type, false);
4658 if (hdev->hard_reset_on_fw_events)
4659 hl_device_reset(hdev, true, false);
4662 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4663 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4664 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4665 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4666 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4667 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4668 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4669 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4670 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4671 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4672 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4673 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4674 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4675 case GOYA_ASYNC_EVENT_ID_PSOC:
4676 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4677 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4678 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4679 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4680 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4681 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4682 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4683 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4684 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4685 case GOYA_ASYNC_EVENT_ID_MME_QM:
4686 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4687 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4688 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4689 goya_print_irq_info(hdev, event_type, true);
4690 goya_unmask_irq(hdev, event_type);
4693 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4694 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4695 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4696 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4697 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4698 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4699 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4700 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4701 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4702 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4703 goya_print_irq_info(hdev, event_type, false);
4704 goya_unmask_irq(hdev, event_type);
4707 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
4708 case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
4709 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
4710 case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
4711 goya_print_clk_change_info(hdev, event_type);
4712 goya_unmask_irq(hdev, event_type);
4716 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
4722 void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size)
4724 struct goya_device *goya = hdev->asic_specific;
4727 *size = (u32) sizeof(goya->events_stat_aggregate);
4728 return goya->events_stat_aggregate;
4731 *size = (u32) sizeof(goya->events_stat);
4732 return goya->events_stat;
4735 static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
4736 u64 val, bool is_dram)
4738 struct packet_lin_dma *lin_dma_pkt;
4739 struct hl_cs_job *job;
4742 int rc, lin_dma_pkts_cnt;
4744 lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
4745 cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
4746 sizeof(struct packet_msg_prot);
4747 cb = hl_cb_kernel_create(hdev, cb_size, false);
4751 lin_dma_pkt = cb->kernel_address;
4754 memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
4756 ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
4757 (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
4758 (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
4759 (1 << GOYA_PKT_CTL_RB_SHIFT) |
4760 (1 << GOYA_PKT_CTL_MB_SHIFT));
4761 ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
4762 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
4763 lin_dma_pkt->ctl = cpu_to_le32(ctl);
4765 lin_dma_pkt->src_addr = cpu_to_le64(val);
4766 lin_dma_pkt->dst_addr = cpu_to_le64(addr);
4767 if (lin_dma_pkts_cnt > 1)
4768 lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
4770 lin_dma_pkt->tsize = cpu_to_le32(size);
4775 } while (--lin_dma_pkts_cnt);
4777 job = hl_cs_allocate_job(hdev, QUEUE_TYPE_EXT, true);
4779 dev_err(hdev->dev, "Failed to allocate a new job\n");
4786 job->user_cb->cs_cnt++;
4787 job->user_cb_size = cb_size;
4788 job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
4789 job->patched_cb = job->user_cb;
4790 job->job_cb_size = job->user_cb_size;
4792 hl_debugfs_add_job(hdev, job);
4794 rc = goya_send_job_on_qman0(hdev, job);
4796 hl_debugfs_remove_job(hdev, job);
4802 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
4807 int goya_context_switch(struct hl_device *hdev, u32 asid)
4809 struct asic_fixed_properties *prop = &hdev->asic_prop;
4810 u64 addr = prop->sram_base_address, sob_addr;
4811 u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
4812 u64 val = 0x7777777777777777ull;
4814 u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
4815 mmDMA_CH_0_WR_COMP_ADDR_LO;
4817 rc = goya_memset_device_memory(hdev, addr, size, val, false);
4819 dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
4823 /* we need to reset registers that the user is allowed to change */
4824 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
4825 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));
4827 for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
4828 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
4830 WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
4831 lower_32_bits(sob_addr));
4834 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
4836 goya_mmu_prepare(hdev, asid);
4838 goya_clear_sm_regs(hdev);
4843 static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
4845 struct asic_fixed_properties *prop = &hdev->asic_prop;
4846 struct goya_device *goya = hdev->asic_specific;
4847 u64 addr = prop->mmu_pgt_addr;
4848 u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
4851 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4854 return goya_memset_device_memory(hdev, addr, size, 0, true);
4857 static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
4859 struct goya_device *goya = hdev->asic_specific;
4860 u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
4861 u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
4862 u64 val = 0x9999999999999999ull;
4864 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4867 return goya_memset_device_memory(hdev, addr, size, val, true);
4870 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
4872 struct asic_fixed_properties *prop = &hdev->asic_prop;
4873 struct goya_device *goya = hdev->asic_specific;
4877 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4880 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
4881 rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
4882 prop->dram_base_address + off, PAGE_SIZE_2MB,
4883 (off + PAGE_SIZE_2MB) == CPU_FW_IMAGE_SIZE);
4885 dev_err(hdev->dev, "Map failed for address 0x%llx\n",
4886 prop->dram_base_address + off);
4891 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4892 rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
4893 hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB, true);
4897 "Map failed for CPU accessible memory\n");
4898 off -= PAGE_SIZE_2MB;
4902 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
4903 rc = hl_mmu_map(hdev->kernel_ctx,
4904 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4905 hdev->cpu_accessible_dma_address + cpu_off,
4906 PAGE_SIZE_4KB, true);
4909 "Map failed for CPU accessible memory\n");
4910 cpu_off -= PAGE_SIZE_4KB;
4916 goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
4917 goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
4918 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
4919 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);
4921 /* Make sure configuration is flushed to device */
4922 RREG32(mmCPU_IF_AWUSER_OVR_EN);
4924 goya->device_cpu_mmu_mappings_done = true;
4929 for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
4930 if (hl_mmu_unmap(hdev->kernel_ctx,
4931 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4932 PAGE_SIZE_4KB, true))
4933 dev_warn_ratelimited(hdev->dev,
4934 "failed to unmap address 0x%llx\n",
4935 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
4937 for (; off >= 0 ; off -= PAGE_SIZE_2MB)
4938 if (hl_mmu_unmap(hdev->kernel_ctx,
4939 prop->dram_base_address + off, PAGE_SIZE_2MB,
4941 dev_warn_ratelimited(hdev->dev,
4942 "failed to unmap address 0x%llx\n",
4943 prop->dram_base_address + off);
4948 void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
4950 struct asic_fixed_properties *prop = &hdev->asic_prop;
4951 struct goya_device *goya = hdev->asic_specific;
4954 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4957 if (!goya->device_cpu_mmu_mappings_done)
4960 WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
4961 WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);
4963 if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
4964 if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
4965 PAGE_SIZE_2MB, true))
4967 "Failed to unmap CPU accessible memory\n");
4969 for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
4970 if (hl_mmu_unmap(hdev->kernel_ctx,
4971 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
4973 (cpu_off + PAGE_SIZE_4KB) >= SZ_2M))
4974 dev_warn_ratelimited(hdev->dev,
4975 "failed to unmap address 0x%llx\n",
4976 VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
4979 for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
4980 if (hl_mmu_unmap(hdev->kernel_ctx,
4981 prop->dram_base_address + off, PAGE_SIZE_2MB,
4982 (off + PAGE_SIZE_2MB) >= CPU_FW_IMAGE_SIZE))
4983 dev_warn_ratelimited(hdev->dev,
4984 "Failed to unmap address 0x%llx\n",
4985 prop->dram_base_address + off);
4987 goya->device_cpu_mmu_mappings_done = false;
4990 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
4992 struct goya_device *goya = hdev->asic_specific;
4995 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4998 if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
4999 WARN(1, "asid %u is too big\n", asid);
5003 /* zero the MMBP and ASID bits and then set the ASID */
5004 for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
5005 goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
5008 static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
5011 struct goya_device *goya = hdev->asic_specific;
5012 u32 status, timeout_usec;
5015 if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
5016 hdev->hard_reset_pending)
5019 /* no need in L1 only invalidation in Goya */
5024 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5026 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5028 mutex_lock(&hdev->mmu_cache_lock);
5030 /* L0 & L1 invalidation */
5031 WREG32(mmSTLB_INV_ALL_START, 1);
5033 rc = hl_poll_timeout(
5035 mmSTLB_INV_ALL_START,
5041 mutex_unlock(&hdev->mmu_cache_lock);
5044 dev_err_ratelimited(hdev->dev,
5045 "MMU cache invalidation timeout\n");
5046 hl_device_reset(hdev, true, false);
5052 static int goya_mmu_invalidate_cache_range(struct hl_device *hdev,
5053 bool is_hard, u32 asid, u64 va, u64 size)
5055 struct goya_device *goya = hdev->asic_specific;
5056 u32 status, timeout_usec, inv_data, pi;
5059 if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
5060 hdev->hard_reset_pending)
5063 /* no need in L1 only invalidation in Goya */
5068 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
5070 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
5072 mutex_lock(&hdev->mmu_cache_lock);
5075 * TODO: currently invalidate entire L0 & L1 as in regular hard
5076 * invalidation. Need to apply invalidation of specific cache lines with
5077 * mask of ASID & VA & size.
5078 * Note that L1 with be flushed entirely in any case.
5081 /* L0 & L1 invalidation */
5082 inv_data = RREG32(mmSTLB_CACHE_INV);
5084 pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
5085 WREG32(mmSTLB_CACHE_INV,
5086 (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);
5088 rc = hl_poll_timeout(
5090 mmSTLB_INV_CONSUMER_INDEX,
5096 mutex_unlock(&hdev->mmu_cache_lock);
5099 dev_err_ratelimited(hdev->dev,
5100 "MMU cache invalidation timeout\n");
5101 hl_device_reset(hdev, true, false);
5107 int goya_send_heartbeat(struct hl_device *hdev)
5109 struct goya_device *goya = hdev->asic_specific;
5111 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5114 return hl_fw_send_heartbeat(hdev);
5117 int goya_cpucp_info_get(struct hl_device *hdev)
5119 struct goya_device *goya = hdev->asic_specific;
5120 struct asic_fixed_properties *prop = &hdev->asic_prop;
5124 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5127 rc = hl_fw_cpucp_info_get(hdev);
5131 dram_size = le64_to_cpu(prop->cpucp_info.dram_size);
5133 if ((!is_power_of_2(dram_size)) ||
5134 (dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
5136 "F/W reported invalid DRAM size %llu. Trying to use default size\n",
5138 dram_size = DRAM_PHYS_DEFAULT_SIZE;
5141 prop->dram_size = dram_size;
5142 prop->dram_end_address = prop->dram_base_address + dram_size;
5145 if (!strlen(prop->cpucp_info.card_name))
5146 strncpy(prop->cpucp_info.card_name, GOYA_DEFAULT_CARD_NAME,
5152 static void goya_set_clock_gating(struct hl_device *hdev)
5154 /* clock gating not supported in Goya */
5157 static void goya_disable_clock_gating(struct hl_device *hdev)
5159 /* clock gating not supported in Goya */
5162 static bool goya_is_device_idle(struct hl_device *hdev, u64 *mask,
5165 const char *fmt = "%-5d%-9s%#-14x%#-16x%#x\n";
5166 const char *dma_fmt = "%-5d%-9s%#-14x%#x\n";
5167 u32 qm_glbl_sts0, cmdq_glbl_sts0, dma_core_sts0, tpc_cfg_sts,
5169 bool is_idle = true, is_eng_idle;
5174 seq_puts(s, "\nDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0\n"
5175 "--- ------- ------------ -------------\n");
5177 offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;
5179 for (i = 0 ; i < DMA_MAX_NUM ; i++) {
5180 qm_glbl_sts0 = RREG32(mmDMA_QM_0_GLBL_STS0 + i * offset);
5181 dma_core_sts0 = RREG32(mmDMA_CH_0_STS0 + i * offset);
5182 is_eng_idle = IS_DMA_QM_IDLE(qm_glbl_sts0) &&
5183 IS_DMA_IDLE(dma_core_sts0);
5184 is_idle &= is_eng_idle;
5187 *mask |= ((u64) !is_eng_idle) <<
5188 (GOYA_ENGINE_ID_DMA_0 + i);
5190 seq_printf(s, dma_fmt, i, is_eng_idle ? "Y" : "N",
5191 qm_glbl_sts0, dma_core_sts0);
5196 "\nTPC is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 CFG_STATUS\n"
5197 "--- ------- ------------ -------------- ----------\n");
5199 offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;
5201 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
5202 qm_glbl_sts0 = RREG32(mmTPC0_QM_GLBL_STS0 + i * offset);
5203 cmdq_glbl_sts0 = RREG32(mmTPC0_CMDQ_GLBL_STS0 + i * offset);
5204 tpc_cfg_sts = RREG32(mmTPC0_CFG_STATUS + i * offset);
5205 is_eng_idle = IS_TPC_QM_IDLE(qm_glbl_sts0) &&
5206 IS_TPC_CMDQ_IDLE(cmdq_glbl_sts0) &&
5207 IS_TPC_IDLE(tpc_cfg_sts);
5208 is_idle &= is_eng_idle;
5211 *mask |= ((u64) !is_eng_idle) <<
5212 (GOYA_ENGINE_ID_TPC_0 + i);
5214 seq_printf(s, fmt, i, is_eng_idle ? "Y" : "N",
5215 qm_glbl_sts0, cmdq_glbl_sts0, tpc_cfg_sts);
5220 "\nMME is_idle QM_GLBL_STS0 CMDQ_GLBL_STS0 ARCH_STATUS\n"
5221 "--- ------- ------------ -------------- -----------\n");
5223 qm_glbl_sts0 = RREG32(mmMME_QM_GLBL_STS0);
5224 cmdq_glbl_sts0 = RREG32(mmMME_CMDQ_GLBL_STS0);
5225 mme_arch_sts = RREG32(mmMME_ARCH_STATUS);
5226 is_eng_idle = IS_MME_QM_IDLE(qm_glbl_sts0) &&
5227 IS_MME_CMDQ_IDLE(cmdq_glbl_sts0) &&
5228 IS_MME_IDLE(mme_arch_sts);
5229 is_idle &= is_eng_idle;
5232 *mask |= ((u64) !is_eng_idle) << GOYA_ENGINE_ID_MME_0;
5234 seq_printf(s, fmt, 0, is_eng_idle ? "Y" : "N", qm_glbl_sts0,
5235 cmdq_glbl_sts0, mme_arch_sts);
5242 static void goya_hw_queues_lock(struct hl_device *hdev)
5243 __acquires(&goya->hw_queues_lock)
5245 struct goya_device *goya = hdev->asic_specific;
5247 spin_lock(&goya->hw_queues_lock);
5250 static void goya_hw_queues_unlock(struct hl_device *hdev)
5251 __releases(&goya->hw_queues_lock)
5253 struct goya_device *goya = hdev->asic_specific;
5255 spin_unlock(&goya->hw_queues_lock);
5258 static u32 goya_get_pci_id(struct hl_device *hdev)
5260 return hdev->pdev->device;
5263 static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
5266 struct goya_device *goya = hdev->asic_specific;
5268 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
5271 return hl_fw_get_eeprom_data(hdev, data, max_size);
5274 static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
5276 return RREG32(mmHW_STATE);
5279 static int goya_ctx_init(struct hl_ctx *ctx)
5284 u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx)
5289 static u32 goya_get_signal_cb_size(struct hl_device *hdev)
5294 static u32 goya_get_wait_cb_size(struct hl_device *hdev)
5299 static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
5305 static u32 goya_gen_wait_cb(struct hl_device *hdev,
5306 struct hl_gen_wait_properties *prop)
5311 static void goya_reset_sob(struct hl_device *hdev, void *data)
5316 void goya_reset_sob_group(struct hl_device *hdev, u16 sob_group)
5321 static void goya_set_dma_mask_from_fw(struct hl_device *hdev)
5323 if (RREG32(mmPSOC_GLOBAL_CONF_NON_RST_FLOPS_0) ==
5324 HL_POWER9_HOST_MAGIC) {
5325 dev_dbg(hdev->dev, "Working in 64-bit DMA mode\n");
5326 hdev->power9_64bit_dma_enable = 1;
5327 hdev->dma_mask = 64;
5329 dev_dbg(hdev->dev, "Working in 48-bit DMA mode\n");
5330 hdev->power9_64bit_dma_enable = 0;
5331 hdev->dma_mask = 48;
5335 u64 goya_get_device_time(struct hl_device *hdev)
5337 u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32;
5339 return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL);
5342 void goya_collective_wait_init_cs(struct hl_cs *cs)
5347 int goya_collective_wait_create_jobs(struct hl_device *hdev,
5348 struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
5349 u32 collective_engine_id)
5354 static void goya_ctx_fini(struct hl_ctx *ctx)
5359 static const struct hl_asic_funcs goya_funcs = {
5360 .early_init = goya_early_init,
5361 .early_fini = goya_early_fini,
5362 .late_init = goya_late_init,
5363 .late_fini = goya_late_fini,
5364 .sw_init = goya_sw_init,
5365 .sw_fini = goya_sw_fini,
5366 .hw_init = goya_hw_init,
5367 .hw_fini = goya_hw_fini,
5368 .halt_engines = goya_halt_engines,
5369 .suspend = goya_suspend,
5370 .resume = goya_resume,
5371 .cb_mmap = goya_cb_mmap,
5372 .ring_doorbell = goya_ring_doorbell,
5373 .pqe_write = goya_pqe_write,
5374 .asic_dma_alloc_coherent = goya_dma_alloc_coherent,
5375 .asic_dma_free_coherent = goya_dma_free_coherent,
5376 .scrub_device_mem = goya_scrub_device_mem,
5377 .get_int_queue_base = goya_get_int_queue_base,
5378 .test_queues = goya_test_queues,
5379 .asic_dma_pool_zalloc = goya_dma_pool_zalloc,
5380 .asic_dma_pool_free = goya_dma_pool_free,
5381 .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
5382 .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
5383 .hl_dma_unmap_sg = goya_dma_unmap_sg,
5384 .cs_parser = goya_cs_parser,
5385 .asic_dma_map_sg = goya_dma_map_sg,
5386 .get_dma_desc_list_size = goya_get_dma_desc_list_size,
5387 .add_end_of_cb_packets = goya_add_end_of_cb_packets,
5388 .update_eq_ci = goya_update_eq_ci,
5389 .context_switch = goya_context_switch,
5390 .restore_phase_topology = goya_restore_phase_topology,
5391 .debugfs_read32 = goya_debugfs_read32,
5392 .debugfs_write32 = goya_debugfs_write32,
5393 .debugfs_read64 = goya_debugfs_read64,
5394 .debugfs_write64 = goya_debugfs_write64,
5395 .add_device_attr = goya_add_device_attr,
5396 .handle_eqe = goya_handle_eqe,
5397 .set_pll_profile = goya_set_pll_profile,
5398 .get_events_stat = goya_get_events_stat,
5399 .read_pte = goya_read_pte,
5400 .write_pte = goya_write_pte,
5401 .mmu_invalidate_cache = goya_mmu_invalidate_cache,
5402 .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
5403 .send_heartbeat = goya_send_heartbeat,
5404 .set_clock_gating = goya_set_clock_gating,
5405 .disable_clock_gating = goya_disable_clock_gating,
5406 .debug_coresight = goya_debug_coresight,
5407 .is_device_idle = goya_is_device_idle,
5408 .soft_reset_late_init = goya_soft_reset_late_init,
5409 .hw_queues_lock = goya_hw_queues_lock,
5410 .hw_queues_unlock = goya_hw_queues_unlock,
5411 .get_pci_id = goya_get_pci_id,
5412 .get_eeprom_data = goya_get_eeprom_data,
5413 .send_cpu_message = goya_send_cpu_message,
5414 .get_hw_state = goya_get_hw_state,
5415 .pci_bars_map = goya_pci_bars_map,
5416 .init_iatu = goya_init_iatu,
5419 .halt_coresight = goya_halt_coresight,
5420 .ctx_init = goya_ctx_init,
5421 .ctx_fini = goya_ctx_fini,
5422 .get_clk_rate = goya_get_clk_rate,
5423 .get_queue_id_for_cq = goya_get_queue_id_for_cq,
5424 .read_device_fw_version = goya_read_device_fw_version,
5425 .load_firmware_to_device = goya_load_firmware_to_device,
5426 .load_boot_fit_to_device = goya_load_boot_fit_to_device,
5427 .get_signal_cb_size = goya_get_signal_cb_size,
5428 .get_wait_cb_size = goya_get_wait_cb_size,
5429 .gen_signal_cb = goya_gen_signal_cb,
5430 .gen_wait_cb = goya_gen_wait_cb,
5431 .reset_sob = goya_reset_sob,
5432 .reset_sob_group = goya_reset_sob_group,
5433 .set_dma_mask_from_fw = goya_set_dma_mask_from_fw,
5434 .get_device_time = goya_get_device_time,
5435 .collective_wait_init_cs = goya_collective_wait_init_cs,
5436 .collective_wait_create_jobs = goya_collective_wait_create_jobs
5440 * goya_set_asic_funcs - set Goya function pointers
5442 * @*hdev: pointer to hl_device structure
5445 void goya_set_asic_funcs(struct hl_device *hdev)
5447 hdev->asic_funcs = &goya_funcs;