// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (When MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP, DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC/MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, KMD is parsing the DMA CB:
 *     - KMD checks DMA pointer
 *     - WREG, MSG_PROT are not allowed.
 *     - MSG_LONG/SHORT are allowed.
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used
 *
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - PQ entry is in kernel address space and KMD doesn't map it.
 *     - CP writes to MSIX register and to kernel address space (completion
 *       queue).
 *
 * DMA is not secured but because CP is secured, KMD still needs to parse the
 * CB, but doesn't need to check the DMA addresses.
 *
 * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
 * doesn't map memory in MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode)
 *
 * DMA RR does NOT protect host because DMA is not secured
 *
 */
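/*
 * Illustrative sketch of the parsing rule described above (hypothetical
 * helper, not part of the driver flow): when the QMAN DMA is secured, KMD
 * rejects packets that can touch protected resources and lets the rest
 * through. The packet IDs come from the ASIC's packet header definitions.
 */
static bool __maybe_unused goya_example_pkt_allowed_in_user_cb(enum packet_id id)
{
	switch (id) {
	case PACKET_WREG_32:
	case PACKET_WREG_BULK:
	case PACKET_MSG_PROT:
		/* privileged - may target protected (secured) areas */
		return false;
	default:
		/* e.g. MSG_LONG/SHORT, LIN_DMA, NOP, FENCE are allowed */
		return true;
	}
}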
#define GOYA_MMU_REGS_NUM		61

#define GOYA_DMA_POOL_BLK_SIZE		0x100		/* 256 bytes */

#define GOYA_RESET_TIMEOUT_MSEC		500		/* 500ms */
#define GOYA_PLDM_RESET_TIMEOUT_MSEC	20000		/* 20s */
#define GOYA_RESET_WAIT_MSEC		1		/* 1ms */
#define GOYA_CPU_RESET_WAIT_MSEC	100		/* 100ms */
#define GOYA_PLDM_RESET_WAIT_MSEC	1000		/* 1s */
#define GOYA_CPU_TIMEOUT_USEC		10000000	/* 10s */
#define GOYA_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
#define GOYA_PLDM_MMU_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC	(HL_DEVICE_TIMEOUT_USEC * 30)

#define GOYA_QMAN0_FENCE_VAL		0xD169B243

#define GOYA_MAX_INITIATORS		20

#define GOYA_MAX_STRING_LEN		20

#define GOYA_CB_POOL_CB_CNT		512
#define GOYA_CB_POOL_CB_SIZE		0x20000		/* 128KB */
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
		"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
		"goya cq 4", "goya cpu eq"
};
static u16 goya_packet_sizes[MAX_PACKET_ID] = {
	[PACKET_WREG_32]	= sizeof(struct packet_wreg32),
	[PACKET_WREG_BULK]	= sizeof(struct packet_wreg_bulk),
	[PACKET_MSG_LONG]	= sizeof(struct packet_msg_long),
	[PACKET_MSG_SHORT]	= sizeof(struct packet_msg_short),
	[PACKET_CP_DMA]		= sizeof(struct packet_cp_dma),
	[PACKET_MSG_PROT]	= sizeof(struct packet_msg_prot),
	[PACKET_FENCE]		= sizeof(struct packet_fence),
	[PACKET_LIN_DMA]	= sizeof(struct packet_lin_dma),
	[PACKET_NOP]		= sizeof(struct packet_nop),
	[PACKET_STOP]		= sizeof(struct packet_stop)
};
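/*
 * Illustrative sketch (hypothetical helper): measuring one packet in a CB
 * using the size table above, so a parser can advance packet by packet. It
 * assumes the packet-ID mask/shift from the packet header definitions; a
 * real parser also validates each packet (see the security scheme comment
 * at the top of this file) and checks the offset never passes the CB size.
 */
static u32 __maybe_unused goya_example_pkt_size(u64 cb_kernel_addr,
						u32 pkt_offset)
{
	u64 header = *(u64 *) (uintptr_t) (cb_kernel_addr + pkt_offset);
	enum packet_id pkt_id = (enum packet_id)
			((header & PACKET_HEADER_PACKET_ID_MASK) >>
				PACKET_HEADER_PACKET_ID_SHIFT);

	/* Unknown opcodes have a zero entry in the table */
	if (pkt_id >= MAX_PACKET_ID)
		return 0;

	return goya_packet_sizes[pkt_id];
}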
static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
	mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
	mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
	mmTPC0_QM_GLBL_SECURE_PROPS,
	mmTPC0_QM_GLBL_NON_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_SECURE_PROPS,
	mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC0_CFG_ARUSER,
	mmTPC0_CFG_AWUSER,
	mmTPC1_QM_GLBL_SECURE_PROPS,
	mmTPC1_QM_GLBL_NON_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_SECURE_PROPS,
	mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC1_CFG_ARUSER,
	mmTPC1_CFG_AWUSER,
	mmTPC2_QM_GLBL_SECURE_PROPS,
	mmTPC2_QM_GLBL_NON_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_SECURE_PROPS,
	mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC2_CFG_ARUSER,
	mmTPC2_CFG_AWUSER,
	mmTPC3_QM_GLBL_SECURE_PROPS,
	mmTPC3_QM_GLBL_NON_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_SECURE_PROPS,
	mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC3_CFG_ARUSER,
	mmTPC3_CFG_AWUSER,
	mmTPC4_QM_GLBL_SECURE_PROPS,
	mmTPC4_QM_GLBL_NON_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_SECURE_PROPS,
	mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC4_CFG_ARUSER,
	mmTPC4_CFG_AWUSER,
	mmTPC5_QM_GLBL_SECURE_PROPS,
	mmTPC5_QM_GLBL_NON_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_SECURE_PROPS,
	mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC5_CFG_ARUSER,
	mmTPC5_CFG_AWUSER,
	mmTPC6_QM_GLBL_SECURE_PROPS,
	mmTPC6_QM_GLBL_NON_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_SECURE_PROPS,
	mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC6_CFG_ARUSER,
	mmTPC6_CFG_AWUSER,
	mmTPC7_QM_GLBL_SECURE_PROPS,
	mmTPC7_QM_GLBL_NON_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_SECURE_PROPS,
	mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
	mmTPC7_CFG_ARUSER,
	mmTPC7_CFG_AWUSER,
	mmMME_QM_GLBL_SECURE_PROPS,
	mmMME_QM_GLBL_NON_SECURE_PROPS,
	mmMME_CMDQ_GLBL_SECURE_PROPS,
	mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
	mmMME_SBA_CONTROL_DATA,
	mmMME_SBB_CONTROL_DATA,
	mmMME_SBC_CONTROL_DATA,
	mmMME_WBC_CONTROL_DATA
};
#define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121

static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = {
	GOYA_ASYNC_EVENT_ID_PCIE_IF,
	GOYA_ASYNC_EVENT_ID_TPC0_ECC,
	GOYA_ASYNC_EVENT_ID_TPC1_ECC,
	GOYA_ASYNC_EVENT_ID_TPC2_ECC,
	GOYA_ASYNC_EVENT_ID_TPC3_ECC,
	GOYA_ASYNC_EVENT_ID_TPC4_ECC,
	GOYA_ASYNC_EVENT_ID_TPC5_ECC,
	GOYA_ASYNC_EVENT_ID_TPC6_ECC,
	GOYA_ASYNC_EVENT_ID_TPC7_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC,
	GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
	GOYA_ASYNC_EVENT_ID_MMU_ECC,
	GOYA_ASYNC_EVENT_ID_DMA_MACRO,
	GOYA_ASYNC_EVENT_ID_DMA_ECC,
	GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_MEM,
	GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
	GOYA_ASYNC_EVENT_ID_SRAM0,
	GOYA_ASYNC_EVENT_ID_SRAM1,
	GOYA_ASYNC_EVENT_ID_SRAM2,
	GOYA_ASYNC_EVENT_ID_SRAM3,
	GOYA_ASYNC_EVENT_ID_SRAM4,
	GOYA_ASYNC_EVENT_ID_SRAM5,
	GOYA_ASYNC_EVENT_ID_SRAM6,
	GOYA_ASYNC_EVENT_ID_SRAM7,
	GOYA_ASYNC_EVENT_ID_SRAM8,
	GOYA_ASYNC_EVENT_ID_SRAM9,
	GOYA_ASYNC_EVENT_ID_SRAM10,
	GOYA_ASYNC_EVENT_ID_SRAM11,
	GOYA_ASYNC_EVENT_ID_SRAM12,
	GOYA_ASYNC_EVENT_ID_SRAM13,
	GOYA_ASYNC_EVENT_ID_SRAM14,
	GOYA_ASYNC_EVENT_ID_SRAM15,
	GOYA_ASYNC_EVENT_ID_SRAM16,
	GOYA_ASYNC_EVENT_ID_SRAM17,
	GOYA_ASYNC_EVENT_ID_SRAM18,
	GOYA_ASYNC_EVENT_ID_SRAM19,
	GOYA_ASYNC_EVENT_ID_SRAM20,
	GOYA_ASYNC_EVENT_ID_SRAM21,
	GOYA_ASYNC_EVENT_ID_SRAM22,
	GOYA_ASYNC_EVENT_ID_SRAM23,
	GOYA_ASYNC_EVENT_ID_SRAM24,
	GOYA_ASYNC_EVENT_ID_SRAM25,
	GOYA_ASYNC_EVENT_ID_SRAM26,
	GOYA_ASYNC_EVENT_ID_SRAM27,
	GOYA_ASYNC_EVENT_ID_SRAM28,
	GOYA_ASYNC_EVENT_ID_SRAM29,
	GOYA_ASYNC_EVENT_ID_GIC500,
	GOYA_ASYNC_EVENT_ID_PLL0,
	GOYA_ASYNC_EVENT_ID_PLL1,
	GOYA_ASYNC_EVENT_ID_PLL3,
	GOYA_ASYNC_EVENT_ID_PLL4,
	GOYA_ASYNC_EVENT_ID_PLL5,
	GOYA_ASYNC_EVENT_ID_PLL6,
	GOYA_ASYNC_EVENT_ID_AXI_ECC,
	GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
	GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
	GOYA_ASYNC_EVENT_ID_PCIE_DEC,
	GOYA_ASYNC_EVENT_ID_TPC0_DEC,
	GOYA_ASYNC_EVENT_ID_TPC1_DEC,
	GOYA_ASYNC_EVENT_ID_TPC2_DEC,
	GOYA_ASYNC_EVENT_ID_TPC3_DEC,
	GOYA_ASYNC_EVENT_ID_TPC4_DEC,
	GOYA_ASYNC_EVENT_ID_TPC5_DEC,
	GOYA_ASYNC_EVENT_ID_TPC6_DEC,
	GOYA_ASYNC_EVENT_ID_TPC7_DEC,
	GOYA_ASYNC_EVENT_ID_MME_WACS,
	GOYA_ASYNC_EVENT_ID_MME_WACSD,
	GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
	GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
	GOYA_ASYNC_EVENT_ID_PSOC,
	GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
	GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
	GOYA_ASYNC_EVENT_ID_TPC0_QM,
	GOYA_ASYNC_EVENT_ID_TPC1_QM,
	GOYA_ASYNC_EVENT_ID_TPC2_QM,
	GOYA_ASYNC_EVENT_ID_TPC3_QM,
	GOYA_ASYNC_EVENT_ID_TPC4_QM,
	GOYA_ASYNC_EVENT_ID_TPC5_QM,
	GOYA_ASYNC_EVENT_ID_TPC6_QM,
	GOYA_ASYNC_EVENT_ID_TPC7_QM,
	GOYA_ASYNC_EVENT_ID_MME_QM,
	GOYA_ASYNC_EVENT_ID_MME_CMDQ,
	GOYA_ASYNC_EVENT_ID_DMA0_QM,
	GOYA_ASYNC_EVENT_ID_DMA1_QM,
	GOYA_ASYNC_EVENT_ID_DMA2_QM,
	GOYA_ASYNC_EVENT_ID_DMA3_QM,
	GOYA_ASYNC_EVENT_ID_DMA4_QM,
	GOYA_ASYNC_EVENT_ID_DMA0_CH,
	GOYA_ASYNC_EVENT_ID_DMA1_CH,
	GOYA_ASYNC_EVENT_ID_DMA2_CH,
	GOYA_ASYNC_EVENT_ID_DMA3_CH,
	GOYA_ASYNC_EVENT_ID_DMA4_CH,
	GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
	GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
};
static int goya_armcp_info_get(struct hl_device *hdev);
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
					u64 phys_addr);
static void goya_get_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int i;

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
		prop->hw_queues_props[i].kmd_only = 1;
	}

	for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
			NUMBER_OF_INT_HW_QUEUES; i++) {
		prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
		prop->hw_queues_props[i].kmd_only = 0;
	}

	for (; i < HL_MAX_QUEUES; i++)
		prop->hw_queues_props[i].type = QUEUE_TYPE_NA;

	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;

	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address = prop->sram_base_address +
						SRAM_USER_BASE_OFFSET;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE;
	prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;

	prop->host_phys_base_address = HOST_PHYS_BASE;
	prop->va_space_host_start_address = VA_HOST_SPACE_START;
	prop->va_space_host_end_address = VA_HOST_SPACE_END;
	prop->va_space_dram_start_address = VA_DDR_SPACE_START;
	prop->va_space_dram_end_address = VA_DDR_SPACE_END;
	prop->dram_size_for_default_page_mapping =
			prop->va_space_dram_end_address;
	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
	prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
	prop->max_power_default = MAX_POWER_DEFAULT;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;

	prop->high_pll = PLL_HIGH_DEFAULT;
}
int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
{
	struct armcp_packet pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);

	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
			sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
}
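/*
 * Typical usage of the helper above (taken from goya_late_init() below):
 *
 *	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
 *
 * and, on the teardown path:
 *
 *	goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
 */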
/*
 * goya_pci_bars_map - Map PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Request PCI regions and map them to kernel virtual addresses.
 * Returns 0 on success
 *
 */
static int goya_pci_bars_map(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	hdev->pcie_bar[SRAM_CFG_BAR_ID] =
			pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID);
	if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n");
		rc = -ENODEV;
		goto err_release_regions;
	}

	hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID);
	if (!hdev->pcie_bar[MSIX_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n");
		rc = -ENODEV;
		goto err_unmap_sram_cfg;
	}

	hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID);
	if (!hdev->pcie_bar[DDR_BAR_ID]) {
		dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n");
		rc = -ENODEV;
		goto err_unmap_msix;
	}

	hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
			(CFG_BASE - SRAM_BASE_ADDR);

	return 0;

err_unmap_msix:
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
err_unmap_sram_cfg:
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
err_release_regions:
	pci_release_regions(pdev);

	return rc;
}
/*
 * goya_pci_bars_unmap - Unmap PCI BARS of Goya device
 *
 * @hdev: pointer to hl_device structure
 *
 * Release all PCI BARS and unmap their virtual addresses
 *
 */
static void goya_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;

	iounmap(hdev->pcie_bar[DDR_BAR_ID]);
	iounmap(hdev->pcie_bar[MSIX_BAR_ID]);
	iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]);
	pci_release_regions(pdev);
}
/*
 * goya_elbi_write - Write through the ELBI interface
 *
 * @hdev: pointer to hl_device structure
 *
 * return 0 on success, -EIO on failure
 *
 */
static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u32 val;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), 10);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}
		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR) {
		dev_err(hdev->dev, "Error writing to ELBI\n");
		return -EIO;
	}

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}
/*
 * goya_iatu_write - iatu write routine
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

	rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
	rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data);

	if (rc)
		return -EIO;

	return 0;
}
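/*
 * Worked example: programming an iATU register such as inbound region 1
 * control (offset 0x304, see goya_set_ddr_bar_base() below) expands to two
 * ELBI writes - one that points the DBI window at the iATU page, one that
 * carries the data itself:
 *
 *	goya_iatu_write(hdev, 0x304, data);
 *	  == goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000);
 *	     goya_elbi_write(hdev, mmPCIE_DBI_BASE + 0x304, data);
 */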
static void goya_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

	parent_port = pdev->bus->self;
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}
/*
 * goya_set_ddr_bar_base - set DDR bar to map specific device address
 *
 * @hdev: pointer to hl_device structure
 * @addr: address in DDR. Must be aligned to DDR bar size
 *
 * This function configures the iATU so that the DDR bar will start at the
 * specified addr.
 *
 */
static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	if ((goya) && (goya->ddr_bar_cur_addr == addr))
		return 0;

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr));
	rc |= goya_iatu_write(hdev, 0x300, 0);
	/* Enable + Bar match + match enable + Bar 4 */
	rc |= goya_iatu_write(hdev, 0x304, 0xC0080400);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc) {
		dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr);
		return -EIO;
	}

	if (goya)
		goya->ddr_bar_cur_addr = addr;

	return 0;
}
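/*
 * Illustrative usage sketch (hypothetical helper, mirroring what a debugfs
 * read path would do): to read a u32 from an arbitrary DRAM address, move
 * the DDR BAR so it covers that address and use the remainder as an offset
 * into the mapped BAR.
 */
static int __maybe_unused goya_example_read_dram_u32(struct hl_device *hdev,
						u64 addr, u32 *val)
{
	u64 bar_base = addr & ~(hdev->asic_prop.dram_pci_bar_size - 0x1ull);
	int rc;

	rc = goya_set_ddr_bar_base(hdev, bar_base);
	if (rc)
		return rc;

	*val = readl(hdev->pcie_bar[DDR_BAR_ID] + (addr - bar_base));

	return 0;
}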
/*
 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
 *
 * @hdev: pointer to hl_device structure
 *
 * This is needed in case the firmware doesn't initialize the iATU
 *
 */
static int goya_init_iatu(struct hl_device *hdev)
{
	int rc;

	/* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */
	rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR));
	rc |= goya_iatu_write(hdev, 0x100, 0);
	/* Enable + Bar match + match enable */
	rc |= goya_iatu_write(hdev, 0x104, 0xC0080000);

	/* Inbound Region 1 - Bar 4 - Point to DDR */
	rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);

	/* Outbound Region 0 - Point to Host */
	rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE));
	rc |= goya_iatu_write(hdev, 0x010,
			lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	rc |= goya_iatu_write(hdev, 0x014, 0);
	rc |= goya_iatu_write(hdev, 0x018, 0);
	rc |= goya_iatu_write(hdev, 0x020,
			upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1));
	/* Increase region size */
	rc |= goya_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= goya_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location */
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0);
	rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0);

	if (rc)
		return -EIO;

	return 0;
}
/*
 * goya_early_init - GOYA early initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Verify PCI bars
 * Set DMA masks
 * PCI controller initialization
 * Map PCI bars
 *
 */
static int goya_early_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;
	u32 val;
	int rc;

	goya_get_fixed_properties(hdev);

	/* Check BAR sizes */
	if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			SRAM_CFG_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							SRAM_CFG_BAR_ID),
			CFG_BAR_SIZE);
		return -ENODEV;
	}

	if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
		dev_err(hdev->dev,
			"Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
			MSIX_BAR_ID,
			(unsigned long long) pci_resource_len(pdev,
							MSIX_BAR_ID),
			MSIX_BAR_SIZE);
		return -ENODEV;
	}

	prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);

	/* set DMA mask for GOYA */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n");
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci dma mask to 32 bits\n");
			return rc;
		}
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (rc) {
		dev_warn(hdev->dev,
			"Unable to set pci consistent dma mask to 39 bits\n");
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci consistent dma mask to 32 bits\n");
			return rc;
		}
	}
	if (hdev->reset_pcilink)
		goya_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	rc = goya_init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize iATU\n");
		goto disable_device;
	}

	rc = goya_pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize PCI BARS\n");
		goto disable_device;
	}

	if (!hdev->pldm) {
		val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
		if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
			dev_warn(hdev->dev,
				"PCI strap is not configured correctly, PCI bus errors may occur\n");
	}

	return 0;

disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}
/*
 * goya_early_fini - GOYA early finalization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Unmap PCI bars
 *
 */
static int goya_early_fini(struct hl_device *hdev)
{
	goya_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);

	return 0;
}
/*
 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
	prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
	prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
	prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
}
/*
 * goya_late_init - GOYA late initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Get ArmCP info and send message to CPU to enable PCI access
 */
static int goya_late_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	int rc;

	rc = goya->armcp_info_get(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to get armcp info\n");
		return rc;
	}

	/* Now that we have the DRAM size in ASIC prop, we need to check
	 * its size and configure the DMA_IF DDR wrap protection (which is in
	 * the MMU block) accordingly. The value is the log2 of the DRAM size
	 */
	WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));

	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc) {
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
		return rc;
	}

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_INTS_REGISTER);

	goya_fetch_psoc_frequency(hdev);

	rc = goya_mmu_clear_pgt_range(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");
		goto disable_pci_access;
	}

	rc = goya_mmu_set_dram_default_page(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to set DRAM default page\n");
		goto disable_pci_access;
	}

	return 0;

disable_pci_access:
	goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);

	return rc;
}
/*
 * goya_late_fini - GOYA late tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 * Free sensors allocated structures
 */
void goya_late_fini(struct hl_device *hdev)
{
	const struct hwmon_channel_info **channel_info_arr;
	int i = 0;

	if (!hdev->hl_chip_info->info)
		return;

	channel_info_arr = hdev->hl_chip_info->info;

	while (channel_info_arr[i]) {
		kfree(channel_info_arr[i]->config);
		kfree(channel_info_arr[i]);
		i++;
	}

	kfree(channel_info_arr);

	hdev->hl_chip_info->info = NULL;
}
/*
 * goya_sw_init - Goya software initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_init(struct hl_device *hdev)
{
	struct goya_device *goya;
	int rc;

	/* Allocate device structure */
	goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	if (!goya)
		return -ENOMEM;

	goya->test_cpu_queue = goya_test_cpu_queue;
	goya->armcp_info_get = goya_armcp_info_get;

	/* according to goya_init_iatu */
	goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;

	goya->mme_clk = GOYA_PLL_FREQ_LOW;
	goya->tpc_clk = GOYA_PLL_FREQ_LOW;
	goya->ic_clk = GOYA_PLL_FREQ_LOW;

	hdev->asic_specific = goya;

	/* Create DMA pool for small allocations */
	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;
		goto free_goya_device;
	}

	hdev->cpu_accessible_dma_mem =
			hdev->asic_funcs->dma_alloc_coherent(hdev,
					CPU_ACCESSIBLE_MEM_SIZE,
					&hdev->cpu_accessible_dma_address,
					GFP_KERNEL | __GFP_ZERO);

	if (!hdev->cpu_accessible_dma_mem) {
		dev_err(hdev->dev,
			"failed to allocate %d of dma memory for CPU accessible memory space\n",
			CPU_ACCESSIBLE_MEM_SIZE);
		rc = -ENOMEM;
		goto free_dma_pool;
	}

	hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
			"Failed to create CPU accessible DMA pool\n");
		rc = -ENOMEM;
		goto free_cpu_pq_dma_mem;
	}

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
				(uintptr_t) hdev->cpu_accessible_dma_mem,
				CPU_ACCESSIBLE_MEM_SIZE, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to CPU accessible DMA pool\n");
		rc = -EFAULT;
		goto free_cpu_pq_pool;
	}

	spin_lock_init(&goya->hw_queues_lock);

	return 0;

free_cpu_pq_pool:
	gen_pool_destroy(hdev->cpu_accessible_dma_pool);
free_cpu_pq_dma_mem:
	hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);
free_dma_pool:
	dma_pool_destroy(hdev->dma_pool);
free_goya_device:
	kfree(goya);

	return rc;
}
/*
 * goya_sw_fini - Goya software tear-down code
 *
 * @hdev: pointer to hl_device structure
 *
 */
static int goya_sw_fini(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE,
			hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

	kfree(goya);

	return 0;
}
static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
				dma_addr_t bus_address)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
	WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));

	WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
	WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
	WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);

	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);

	/* PQ has buffer of 2 cache lines, while CQ has 8 lines */
	WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
	WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);

	WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
	WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
}
static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
{
	u32 gic_base_lo, gic_base_hi;
	u64 sob_addr;
	u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
	WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);

	if (dma_id)
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
				(dma_id - 1) * 4;
	else
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;

	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, lower_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
	WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
}
/*
 * goya_init_dma_qmans - Initialize QMAN DMA registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the QMAN DMA channels
 *
 */
static void goya_init_dma_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_hw_queue *q;
	dma_addr_t bus_address;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_DMA)
		return;

	q = &hdev->kernel_queues[0];

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
		bus_address = q->bus_address +
				hdev->asic_prop.host_phys_base_address;

		goya_init_dma_qman(hdev, i, bus_address);
		goya_init_dma_ch(hdev, i);
	}

	goya->hw_cap_initialized |= HW_CAP_DMA;
}
/*
 * goya_disable_external_queues - Disable external queues
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_disable_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
}
static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
				u32 cp_sts_reg, u32 glbl_sts0_reg)
{
	int rc;
	u32 status;

	/* use the values of TPC0 as they are all the same*/

	WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);

	status = RREG32(cp_sts_reg);
	if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
		rc = hl_poll_timeout(
			hdev,
			cp_sts_reg,
			status,
			!(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
			1000,
			QMAN_FENCE_TIMEOUT_USEC);

		/* if QMAN is stuck in fence no need to check for stop */
		if (rc)
			return 0;
	}

	rc = hl_poll_timeout(
		hdev,
		glbl_sts0_reg,
		status,
		(status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
		1000,
		QMAN_STOP_TIMEOUT_USEC);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout while waiting for QMAN to stop\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * goya_stop_external_queues - Stop external queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_stop_external_queues(struct hl_device *hdev)
{
	int rc, retval = 0;

	rc = goya_stop_queue(hdev,
			mmDMA_QM_0_GLBL_CFG1,
			mmDMA_QM_0_CP_STS,
			mmDMA_QM_0_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_1_GLBL_CFG1,
			mmDMA_QM_1_CP_STS,
			mmDMA_QM_1_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_2_GLBL_CFG1,
			mmDMA_QM_2_CP_STS,
			mmDMA_QM_2_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_3_GLBL_CFG1,
			mmDMA_QM_3_CP_STS,
			mmDMA_QM_3_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev,
			mmDMA_QM_4_GLBL_CFG1,
			mmDMA_QM_4_CP_STS,
			mmDMA_QM_4_GLBL_STS0);

	if (rc) {
		dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
		retval = -EIO;
	}

	return retval;
}
static void goya_resume_external_queues(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
	WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
}
/*
 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_init_cpu_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct hl_eq *eq;
	dma_addr_t bus_address;
	u32 status;
	struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
	int err;

	if (!hdev->cpu_queues_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;

	eq = &hdev->event_queue;

	bus_address = cpu_pq->bus_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address));

	bus_address = eq->bus_address + hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(bus_address));

	bus_address = hdev->cpu_accessible_dma_address +
			hdev->asic_prop.host_phys_base_address;
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address));
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address));

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE);

	/* Used for EQ CI */
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);

	WREG32(mmCPU_IF_PF_PQ_PI, 0);

	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);

	WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_PI_UPDATE);

	err = hl_poll_timeout(
		hdev,
		mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
		status,
		(status == PQ_INIT_STATUS_READY_FOR_HOST),
		1000,
		GOYA_CPU_TIMEOUT_USEC);

	if (err) {
		dev_err(hdev->dev,
			"Failed to communicate with ARM CPU (ArmCP timeout)\n");
		return -EIO;
	}

	goya->hw_cap_initialized |= HW_CAP_CPU_Q;
	return 0;
}
static void goya_set_pll_refclk(struct hl_device *hdev)
{
	WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
	WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);

	WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmIC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);

	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);

	WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
	WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
}
static void goya_disable_clk_rlx(struct hl_device *hdev)
{
	WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
	WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
}
static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
{
	u64 tpc_eml_address;
	u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
	int err, slm_index;

	tpc_offset = tpc_id * 0x40000;
	tpc_eml_offset = tpc_id * 0x200000;
	tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
	tpc_slm_offset = tpc_eml_address + 0x100000;

	/*
	 * Workaround for Bug H2 #2443 :
	 * "TPC SB is not initialized on chip reset"
	 */

	val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
	if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
		dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
				tpc_id);

	WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);

	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
	WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);

	WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
			1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);

	err = hl_poll_timeout(
		hdev,
		mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
		val,
		(val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
		1000,
		HL_DEVICE_TIMEOUT_USEC);

	if (err)
		dev_err(hdev->dev,
			"Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);

	WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);

	msleep(GOYA_RESET_WAIT_MSEC);

	WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
			~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));

	msleep(GOYA_RESET_WAIT_MSEC);

	for (slm_index = 0 ; slm_index < 256 ; slm_index++)
		WREG32(tpc_slm_offset + (slm_index << 2), 0);

	val = RREG32(tpc_slm_offset);
}
static void goya_tpc_mbist_workaround(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (hdev->pldm)
		return;

	if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
		return;

	/* Workaround for H2 #2443 */

	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		_goya_tpc_mbist_workaround(hdev, i);

	goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
}
/*
 * goya_init_golden_registers - Initialize golden registers
 *
 * @hdev: pointer to hl_device structure
 *
 * Initialize the H/W registers of the device
 *
 */
static void goya_init_golden_registers(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 polynom[10], tpc_intr_mask, offset;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
		return;

	polynom[0] = 0x00020080;
	polynom[1] = 0x00401000;
	polynom[2] = 0x00200800;
	polynom[3] = 0x00002000;
	polynom[4] = 0x00080200;
	polynom[5] = 0x00040100;
	polynom[6] = 0x00100400;
	polynom[7] = 0x00004000;
	polynom[8] = 0x00010000;
	polynom[9] = 0x00008000;

	/* Mask all arithmetic interrupts from TPC */
	tpc_intr_mask = 0x7FFF;
	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
		WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);

		WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
		WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
		WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
		WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
		WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
	}
	WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
	WREG32(mmMME_AGU, 0x0f0f0f10);
	WREG32(mmMME_SEI_MASK, ~0x0);

	WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
	WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
	WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
	WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
	WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
	WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
	WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
	WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
	WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
	WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
	WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
	WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
	WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
	WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
	WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
	WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
	WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
	WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
	WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
	WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
	WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
	WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
	WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
	WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
	WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
	WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
	WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
	WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
	WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
	WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
	WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
	WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
	WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
	WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
	WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
	WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
	WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
	WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
	WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
	WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
	WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
	WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
	WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
	WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
	WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
	WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
	WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
	WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
	WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
	WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
	WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
	WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
	WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
	WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
	WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
	WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
	WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
	WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
	WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
	WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
	WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
	WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
	WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
	WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
	WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
	WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);

	WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
	WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
	WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
	WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
	WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
	WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
	WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
	WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
	WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
	WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);

	WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
	WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
	WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
	WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
	WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
	WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
	WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
	WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
	WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
	WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);

	WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
	WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
	WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
	WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
	WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
	WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
	WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
	WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);

	WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
	WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
	WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
	WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
	WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
	WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
	WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
	WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
	WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);

	WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
	WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
	WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
	WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
	WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
	for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
		WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);

		WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);

		WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
		WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
	}
	for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
		WREG32(mmMME1_RTR_SCRAMB_EN + offset,
				1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
		WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
				1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
	}

	for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
		/*
		 * Workaround for Bug H2 #2441 :
		 * "ST.NOP set trace event illegal opcode"
		 */
		WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);

		WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
				1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
		WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
				1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
	}

	WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
	WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
			1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);

	WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
	WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
			1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
	/*
	 * Workaround for H2 #HW-23 bug
	 * Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
	 * to 16 on KMD DMA
	 * We need to limit only these DMAs because the user can only read
	 * from Host using DMA CH 1
	 */
	WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
	WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);

	goya->hw_cap_initialized |= HW_CAP_GOLDEN;
}
static void goya_init_mme_qman(struct hl_device *hdev)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u64 qman_base_addr;

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	qman_base_addr = hdev->asic_prop.sram_base_address +
			MME_QMAN_BASE_OFFSET;

	WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
	WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
	WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
	WREG32(mmMME_QM_PQ_PI, 0);
	WREG32(mmMME_QM_PQ_CI, 0);
	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
	WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
	WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
	WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);

	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
	WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
	WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);

	/* QMAN CQ has 8 cache lines */
	WREG32(mmMME_QM_CQ_CFG1, 0x00080008);

	WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
	WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);

	WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);

	WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);

	WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);

	WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
}
static void goya_init_mme_cmdq(struct hl_device *hdev)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u64 qman_base_addr;

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	qman_base_addr = hdev->asic_prop.sram_base_address +
			MME_QMAN_BASE_OFFSET;

	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
	WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
	WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);

	/* CMDQ CQ has 20 cache lines */
	WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);

	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
	WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);

	WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);

	WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);

	WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);

	WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
}
static void goya_init_mme_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 so_base_lo, so_base_hi;

	if (goya->hw_cap_initialized & HW_CAP_MME)
		return;

	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
	WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);

	goya_init_mme_qman(hdev);
	goya_init_mme_cmdq(hdev);

	goya->hw_cap_initialized |= HW_CAP_MME;
}
static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u64 qman_base_addr;
	u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	qman_base_addr = hdev->asic_prop.sram_base_address + base_off;

	WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
	WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
	WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
	WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
	WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
	WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
	WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
	WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
	WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);

	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);

	WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);

	WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);

	WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);

	WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);

	WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);

	WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
}
static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
{
	u32 mtr_base_lo, mtr_base_hi;
	u32 so_base_lo, so_base_hi;
	u32 gic_base_lo, gic_base_hi;
	u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);

	mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	gic_base_lo =
		lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
	gic_base_hi =
		upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);

	WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
	WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
	WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
	WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);

	WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);

	WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
	WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);

	WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
			GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);

	WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);

	WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);

	WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
}
static void goya_init_tpc_qmans(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 so_base_lo, so_base_hi;
	u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
			mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
	int i;

	if (goya->hw_cap_initialized & HW_CAP_TPC)
		return;

	so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);

	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
				so_base_lo);
		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
				so_base_hi);
	}

	goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
	goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
	goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
	goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
	goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
	goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
	goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
	goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);

	for (i = 0 ; i < TPC_MAX_NUM ; i++)
		goya_init_tpc_cmdq(hdev, i);

	goya->hw_cap_initialized |= HW_CAP_TPC;
}
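
/*
 * A possible table-driven alternative to the eight unrolled calls above
 * (a sketch, not part of the driver; the TPCn_QMAN_BASE_OFFSET values are
 * independent SRAM offsets, hence the lookup table rather than arithmetic):
 *
 *	static const u32 tpc_qman_base_off[TPC_MAX_NUM] = {
 *		TPC0_QMAN_BASE_OFFSET, TPC1_QMAN_BASE_OFFSET,
 *		TPC2_QMAN_BASE_OFFSET, TPC3_QMAN_BASE_OFFSET,
 *		TPC4_QMAN_BASE_OFFSET, TPC5_QMAN_BASE_OFFSET,
 *		TPC6_QMAN_BASE_OFFSET, TPC7_QMAN_BASE_OFFSET
 *	};
 *
 *	for (i = 0 ; i < TPC_MAX_NUM ; i++)
 *		goya_init_tpc_qman(hdev, tpc_qman_base_off[i], i);
 */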
/*
 * goya_disable_internal_queues - Disable internal queues
 *
 * @hdev: pointer to hl_device structure
 *
 */
static void goya_disable_internal_queues(struct hl_device *hdev)
{
	WREG32(mmMME_QM_GLBL_CFG0, 0);
	WREG32(mmMME_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC0_QM_GLBL_CFG0, 0);
	WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC1_QM_GLBL_CFG0, 0);
	WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC2_QM_GLBL_CFG0, 0);
	WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC3_QM_GLBL_CFG0, 0);
	WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC4_QM_GLBL_CFG0, 0);
	WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC5_QM_GLBL_CFG0, 0);
	WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC6_QM_GLBL_CFG0, 0);
	WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);

	WREG32(mmTPC7_QM_GLBL_CFG0, 0);
	WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
}
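
/*
 * Note the difference between "disable" and "stop": disabling writes 0 to
 * GLBL_CFG0, turning the QMAN/CMDQ off entirely, while stopping (below)
 * writes the stop bits to GLBL_CFG1 and waits for the engine to drain.
 * The teardown path stops the queues first and only then disables them.
 */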
/*
 * goya_stop_internal_queues - Stop internal queues
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_stop_internal_queues(struct hl_device *hdev)
{
	int rc, retval = 0;

	/*
	 * Each queue (QMAN) is a separate H/W logic. That means that each
	 * QMAN can be stopped independently and failure to stop one does NOT
	 * prevent us from trying to stop the other QMANs.
	 */

	rc = goya_stop_queue(hdev, mmMME_QM_GLBL_CFG1, mmMME_QM_CP_STS,
			mmMME_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop MME QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmMME_CMDQ_GLBL_CFG1, mmMME_CMDQ_CP_STS,
			mmMME_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop MME CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC0_QM_GLBL_CFG1, mmTPC0_QM_CP_STS,
			mmTPC0_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC0_CMDQ_GLBL_CFG1, mmTPC0_CMDQ_CP_STS,
			mmTPC0_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC1_QM_GLBL_CFG1, mmTPC1_QM_CP_STS,
			mmTPC1_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC1_CMDQ_GLBL_CFG1, mmTPC1_CMDQ_CP_STS,
			mmTPC1_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC2_QM_GLBL_CFG1, mmTPC2_QM_CP_STS,
			mmTPC2_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC2_CMDQ_GLBL_CFG1, mmTPC2_CMDQ_CP_STS,
			mmTPC2_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC3_QM_GLBL_CFG1, mmTPC3_QM_CP_STS,
			mmTPC3_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC3_CMDQ_GLBL_CFG1, mmTPC3_CMDQ_CP_STS,
			mmTPC3_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC4_QM_GLBL_CFG1, mmTPC4_QM_CP_STS,
			mmTPC4_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC4_CMDQ_GLBL_CFG1, mmTPC4_CMDQ_CP_STS,
			mmTPC4_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC5_QM_GLBL_CFG1, mmTPC5_QM_CP_STS,
			mmTPC5_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC5_CMDQ_GLBL_CFG1, mmTPC5_CMDQ_CP_STS,
			mmTPC5_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC6_QM_GLBL_CFG1, mmTPC6_QM_CP_STS,
			mmTPC6_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC6_CMDQ_GLBL_CFG1, mmTPC6_CMDQ_CP_STS,
			mmTPC6_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC7_QM_GLBL_CFG1, mmTPC7_QM_CP_STS,
			mmTPC7_QM_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
		retval = -EIO;
	}

	rc = goya_stop_queue(hdev, mmTPC7_CMDQ_GLBL_CFG1, mmTPC7_CMDQ_CP_STS,
			mmTPC7_CMDQ_GLBL_STS0);
	if (rc) {
		dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
		retval = -EIO;
	}

	return retval;
}
static void goya_resume_internal_queues(struct hl_device *hdev)
{
	WREG32(mmMME_QM_GLBL_CFG1, 0);
	WREG32(mmMME_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC0_QM_GLBL_CFG1, 0);
	WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC1_QM_GLBL_CFG1, 0);
	WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC2_QM_GLBL_CFG1, 0);
	WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC3_QM_GLBL_CFG1, 0);
	WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC4_QM_GLBL_CFG1, 0);
	WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC5_QM_GLBL_CFG1, 0);
	WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC6_QM_GLBL_CFG1, 0);
	WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);

	WREG32(mmTPC7_QM_GLBL_CFG1, 0);
	WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
}
static void goya_dma_stall(struct hl_device *hdev)
{
	WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
	WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
}

static void goya_tpc_stall(struct hl_device *hdev)
{
	WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
	WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
}

static void goya_mme_stall(struct hl_device *hdev)
{
	WREG32(mmMME_STALL, 0xFFFFFFFF);
}
static int goya_enable_msix(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int cq_cnt = hdev->asic_prop.completion_queues_count;
	int rc, i, irq_cnt_init, irq;

	if (goya->hw_cap_initialized & HW_CAP_MSIX)
		return 0;

	rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
				GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
	if (rc < 0) {
		dev_err(hdev->dev,
			"MSI-X: Failed to enable support -- %d/%d\n",
			GOYA_MSIX_ENTRIES, rc);
		return rc;
	}

	for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
		irq = pci_irq_vector(hdev->pdev, i);
		rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
				&hdev->completion_queue[i]);
		if (rc) {
			dev_err(hdev->dev, "Failed to request IRQ %d", irq);
			goto free_irqs;
		}
	}

	irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);

	rc = request_irq(irq, hl_irq_handler_eq, 0,
			goya_irq_name[EVENT_QUEUE_MSIX_IDX],
			&hdev->event_queue);
	if (rc) {
		dev_err(hdev->dev, "Failed to request IRQ %d", irq);
		goto free_irqs;
	}

	goya->hw_cap_initialized |= HW_CAP_MSIX;

	return 0;

free_irqs:
	for (i = 0 ; i < irq_cnt_init ; i++)
		free_irq(pci_irq_vector(hdev->pdev, i),
			&hdev->completion_queue[i]);

	pci_free_irq_vectors(hdev->pdev);

	return rc;
}
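
/*
 * Both min_vecs and max_vecs above are GOYA_MSIX_ENTRIES, so the allocation
 * either grants all vectors (the completion queues plus the event queue) or
 * fails outright; there is no partial MSI-X mode to handle.
 */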
static void goya_sync_irqs(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
		return;

	/* Wait for all pending IRQs to be finished */
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		synchronize_irq(pci_irq_vector(hdev->pdev, i));

	synchronize_irq(pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX));
}

static void goya_disable_msix(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i, irq;

	if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
		return;

	goya_sync_irqs(hdev);

	irq = pci_irq_vector(hdev->pdev, EVENT_QUEUE_MSIX_IDX);
	free_irq(irq, &hdev->event_queue);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
		irq = pci_irq_vector(hdev->pdev, i);
		free_irq(irq, &hdev->completion_queue[i]);
	}

	pci_free_irq_vectors(hdev->pdev);

	goya->hw_cap_initialized &= ~HW_CAP_MSIX;
}
static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
{
	u32 wait_timeout_ms, cpu_timeout_ms;

	dev_info(hdev->dev,
		"Halting compute engines and disabling interrupts\n");

	if (hdev->pldm) {
		wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
		cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
	} else {
		wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
		cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
	}

	if (hard_reset) {
		/*
		 * The CPU's state is unknown at this point, so make sure it
		 * is stopped by any means necessary
		 */
		WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
			GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
		msleep(cpu_timeout_ms);
	}

	goya_stop_external_queues(hdev);
	goya_stop_internal_queues(hdev);

	msleep(wait_timeout_ms);

	goya_dma_stall(hdev);
	goya_tpc_stall(hdev);
	goya_mme_stall(hdev);

	msleep(wait_timeout_ms);

	goya_disable_external_queues(hdev);
	goya_disable_internal_queues(hdev);

	if (hard_reset)
		goya_disable_msix(hdev);
	else
		goya_sync_irqs(hdev);
}
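
/*
 * The halt sequence above is deliberately ordered: first stop the queues so
 * no new work is fetched, then stall the DMA/TPC/MME engines themselves,
 * and only then disable the queues, with a wait between each phase so that
 * in-flight transactions can drain.
 */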
/*
 * goya_push_fw_to_device - Push FW code to device
 *
 * @hdev: pointer to hl_device structure
 *
 * Copy fw code from firmware file to device memory.
 * Returns 0 on success
 *
 */
static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
					void __iomem *dst)
{
	const struct firmware *fw;
	const u64 *fw_data;
	size_t fw_size, i;
	int rc;

	rc = request_firmware(&fw, fw_name, hdev->dev);

	if (rc) {
		dev_err(hdev->dev, "Failed to request %s\n", fw_name);
		goto out;
	}

	fw_size = fw->size;
	if ((fw_size % 4) != 0) {
		dev_err(hdev->dev, "illegal %s firmware size %zu\n",
			fw_name, fw_size);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);

	fw_data = (const u64 *) fw->data;

	if ((fw->size % 8) != 0)
		fw_size -= 8;

	for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) {
		if (!(i & (0x80000 - 1))) {
			dev_dbg(hdev->dev,
				"copied so far %zu out of %zu for %s firmware",
				i, fw_size, fw_name);
			usleep_range(20, 100);
		}

		writeq(*fw_data, dst);
	}

	if ((fw->size % 8) != 0)
		writel(*(const u32 *) fw_data, dst);

out:
	release_firmware(fw);
	return rc;
}
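
/*
 * Worked example of the tail handling above: firmware images must be 4-byte
 * aligned but not necessarily 8-byte aligned. A 0x1804C-byte image copies
 * 0x18048 bytes with 64-bit writeq() accesses (fw_size is trimmed by 8
 * before the loop) and the final 4 bytes with a single 32-bit writel().
 */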
static int goya_pldm_init_cpu(struct hl_device *hdev)
{
	char fw_name[200];
	void __iomem *dst;
	u32 val, unit_rst_val;
	int rc;

	/* Must initialize SRAM scrambler before pushing u-boot to SRAM */
	goya_init_golden_registers(hdev);

	/* Put ARM cores into reset */
	WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
	val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);

	/* Reset the CA53 MACRO */
	unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
	WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
	val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
	WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
	val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);

	snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
	dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
	rc = goya_push_fw_to_device(hdev, fw_name, dst);
	if (rc)
		return rc;

	snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
	dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
	rc = goya_push_fw_to_device(hdev, fw_name, dst);
	if (rc)
		return rc;

	WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
	WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);

	WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
		lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
	WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
		upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));

	/* Release ARM core 0 from reset */
	WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
		CPU_RESET_CORE0_DEASSERT);
	val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);

	return 0;
}
/*
 * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
 * The version string should be located by that offset.
 */
static void goya_read_device_fw_version(struct hl_device *hdev,
					enum goya_fw_component fwc)
{
	const char *name;
	u32 ver_off;
	char *dest;

	switch (fwc) {
	case FW_COMP_UBOOT:
		ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
		dest = hdev->asic_prop.uboot_ver;
		name = "U-Boot";
		break;
	case FW_COMP_PREBOOT:
		ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
		dest = hdev->asic_prop.preboot_ver;
		name = "Preboot";
		break;
	default:
		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
		return;
	}

	ver_off &= ~((u32)SRAM_BASE_ADDR);

	if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
		memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
							VERSION_MAX_LEN);
	} else {
		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
								name, ver_off);
		strcpy(dest, "unavailable");
	}
}
static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
{
	struct goya_device *goya = hdev->asic_specific;
	char fw_name[200];
	void __iomem *dst;
	u32 status;
	int rc;

	if (!hdev->cpu_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_CPU)
		return 0;

	/*
	 * Before pushing u-boot/linux to device, need to set the ddr bar to
	 * base address of dram
	 */
	rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
	if (rc) {
		dev_err(hdev->dev,
			"failed to map DDR bar to DRAM base address\n");
		return rc;
	}

	if (hdev->pldm) {
		rc = goya_pldm_init_cpu(hdev);
		if (rc)
			return rc;
	}

	/* Make sure CPU boot-loader is running */
	rc = hl_poll_timeout(
		hdev,
		mmPSOC_GLOBAL_CONF_WARM_REBOOT,
		status,
		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		10000,
		cpu_timeout);

	if (rc) {
		dev_err(hdev->dev, "Error in ARM u-boot!");
		switch (status) {
		case CPU_BOOT_STATUS_NA:
			dev_err(hdev->dev,
				"ARM status %d - BTL did NOT run\n", status);
			break;
		case CPU_BOOT_STATUS_IN_WFE:
			dev_err(hdev->dev,
				"ARM status %d - Inside WFE loop\n", status);
			break;
		case CPU_BOOT_STATUS_IN_BTL:
			dev_err(hdev->dev,
				"ARM status %d - Stuck in BTL\n", status);
			break;
		case CPU_BOOT_STATUS_IN_PREBOOT:
			dev_err(hdev->dev,
				"ARM status %d - Stuck in Preboot\n", status);
			break;
		case CPU_BOOT_STATUS_IN_SPL:
			dev_err(hdev->dev,
				"ARM status %d - Stuck in SPL\n", status);
			break;
		case CPU_BOOT_STATUS_IN_UBOOT:
			dev_err(hdev->dev,
				"ARM status %d - Stuck in u-boot\n", status);
			break;
		case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
			dev_err(hdev->dev,
				"ARM status %d - DDR initialization failed\n",
				status);
			break;
		default:
			dev_err(hdev->dev,
				"ARM status %d - Invalid status code\n",
				status);
			break;
		}
		return -EIO;
	}

	/* Read U-Boot version now in case we will later fail */
	goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
	goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);

	if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
		goto out;

	if (!hdev->fw_loading) {
		dev_info(hdev->dev, "Skip loading FW\n");
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
	dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
	rc = goya_push_fw_to_device(hdev, fw_name, dst);
	if (rc)
		return rc;

	WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);

	rc = hl_poll_timeout(
		hdev,
		mmPSOC_GLOBAL_CONF_WARM_REBOOT,
		status,
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		10000,
		cpu_timeout);

	if (rc) {
		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
			dev_err(hdev->dev,
				"ARM u-boot reports FIT image is corrupted\n");
		else
			dev_err(hdev->dev,
				"ARM Linux failed to load, %d\n", status);

		WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
		return -EIO;
	}

	dev_info(hdev->dev, "Successfully loaded firmware to device\n");

out:
	goya->hw_cap_initialized |= HW_CAP_CPU;

	return 0;
}
static int goya_mmu_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	u64 hop0_addr;
	int rc, i;

	if (!hdev->mmu_enable)
		return 0;

	if (goya->hw_cap_initialized & HW_CAP_MMU)
		return 0;

	hdev->dram_supports_virtual_memory = true;
	hdev->dram_default_page_mapping = true;

	for (i = 0 ; i < prop->max_asid ; i++) {
		hop0_addr = prop->mmu_pgt_addr +
				(i * prop->mmu_hop_table_size);

		rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
		if (rc) {
			dev_err(hdev->dev,
				"failed to set hop0 addr for asid %d\n", i);
			goto err;
		}
	}

	goya->hw_cap_initialized |= HW_CAP_MMU;

	/* init MMU cache manage page */
	WREG32(mmSTLB_CACHE_INV_BASE_39_8,
			lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
	WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);

	/* Remove follower feature due to performance bug */
	WREG32_AND(mmSTLB_STLB_FEATURE_EN,
			(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true);

	WREG32(mmMMU_MMU_ENABLE, 1);
	WREG32(mmMMU_SPI_MASK, 0xF);

	return 0;

err:
	return rc;
}
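
/*
 * Worked example of the hop0 math above: with mmu_pgt_addr == P and an
 * illustrative mmu_hop_table_size of 4KB (the real value comes from the
 * ASIC properties), asid 0 gets its top-level (hop0) page table at P,
 * asid 1 at P + 0x1000, and so on - each address space ID owns one hop0
 * table at a fixed, easily computed DRAM address.
 */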
/*
 * goya_hw_init - Goya hardware initialization code
 *
 * @hdev: pointer to hl_device structure
 *
 * Returns 0 on success
 *
 */
static int goya_hw_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 val;
	int rc;

	dev_info(hdev->dev, "Starting initialization of H/W\n");

	/* Perform read from the device to make sure device is up */
	val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);

	/*
	 * Let's mark in the H/W that we have reached this point. We check
	 * this value in the reset_before_init function to understand whether
	 * we need to reset the chip before doing H/W init. This register is
	 * cleared by the H/W upon H/W reset
	 */
	WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);

	rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CPU\n");
		return rc;
	}

	goya_tpc_mbist_workaround(hdev);

	goya_init_golden_registers(hdev);

	/*
	 * After CPU initialization is finished, change DDR bar mapping inside
	 * iATU to point to the start address of the MMU page tables
	 */
	rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
		(MMU_PAGE_TABLES_ADDR & ~(prop->dram_pci_bar_size - 0x1ull)));
	if (rc) {
		dev_err(hdev->dev,
			"failed to map DDR bar to MMU page tables\n");
		return rc;
	}

	rc = goya_mmu_init(hdev);
	if (rc)
		return rc;

	goya_init_security(hdev);

	goya_init_dma_qmans(hdev);

	goya_init_mme_qmans(hdev);

	goya_init_tpc_qmans(hdev);

	/* MSI-X must be enabled before CPU queues are initialized */
	rc = goya_enable_msix(hdev);
	if (rc)
		goto disable_queues;

	rc = goya_init_cpu_queues(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
			rc);
		goto disable_msix;
	}

	/* CPU initialization is finished, we can now move to 48 bit DMA mask */
	rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
	if (rc) {
		dev_warn(hdev->dev, "Unable to set pci dma mask to 48 bits\n");
		rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci dma mask to 32 bits\n");
			goto disable_pci_access;
		}
	}

	rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(48));
	if (rc) {
		dev_warn(hdev->dev,
			"Unable to set pci consistent dma mask to 48 bits\n");
		rc = pci_set_consistent_dma_mask(hdev->pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(hdev->dev,
				"Unable to set pci consistent dma mask to 32 bits\n");
			goto disable_pci_access;
		}
	}

	/* Perform read from the device to flush all MSI-X configuration */
	val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);

	return 0;

disable_pci_access:
	goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
disable_msix:
	goya_disable_msix(hdev);
disable_queues:
	goya_disable_internal_queues(hdev);
	goya_disable_external_queues(hdev);

	return rc;
}
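
/*
 * On kernels where the pci_set_dma_mask()/pci_set_consistent_dma_mask()
 * wrappers are deprecated, the same 48-bit-with-32-bit-fallback logic could
 * be written with the generic DMA API (a sketch, not what this file does):
 *
 *	rc = dma_set_mask_and_coherent(&hdev->pdev->dev, DMA_BIT_MASK(48));
 *	if (rc)
 *		rc = dma_set_mask_and_coherent(&hdev->pdev->dev,
 *						DMA_BIT_MASK(32));
 */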
/*
 * goya_hw_fini - Goya hardware tear-down code
 *
 * @hdev: pointer to hl_device structure
 * @hard_reset: should we do hard reset to all engines or just reset the
 *              compute/dma engines
 */
static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 reset_timeout_ms, status;

	if (hdev->pldm)
		reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
	else
		reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;

	if (hard_reset) {
		goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
		goya_disable_clk_rlx(hdev);
		goya_set_pll_refclk(hdev);

		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
		dev_info(hdev->dev,
			"Issued HARD reset command, going to wait %dms\n",
			reset_timeout_ms);
	} else {
		WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
		dev_info(hdev->dev,
			"Issued SOFT reset command, going to wait %dms\n",
			reset_timeout_ms);
	}

	/*
	 * After hard reset, we can't poll the BTM_FSM register because the PSOC
	 * itself is in reset. In either reset we need to wait until the reset
	 * is deasserted
	 */
	msleep(reset_timeout_ms);

	status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
	if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
		dev_err(hdev->dev,
			"Timeout while waiting for device to reset 0x%x\n",
			status);

	if (!hard_reset) {
		goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
						HW_CAP_GOLDEN | HW_CAP_TPC);
		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
				GOYA_ASYNC_EVENT_ID_SOFT_RESET);
		return;
	}

	/* Chicken bit to re-initiate boot sequencer flow */
	WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
		1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
	/* Move boot manager FSM to pre boot sequencer init state */
	WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
			0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);

	goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
					HW_CAP_DDR_0 | HW_CAP_DDR_1 |
					HW_CAP_DMA | HW_CAP_MME |
					HW_CAP_MMU | HW_CAP_TPC_MBIST |
					HW_CAP_GOLDEN | HW_CAP_TPC);
	memset(goya->events_stat, 0, sizeof(goya->events_stat));

	if (!hdev->pldm) {
		int rc;
		/* In case we are running inside VM and the VM is
		 * shutting down, we need to make sure CPU boot-loader
		 * is running before we can continue the VM shutdown.
		 * That is because the VM will send an FLR signal that
		 * we must answer
		 */
		dev_info(hdev->dev,
			"Going to wait up to %ds for CPU boot loader\n",
			GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);

		rc = hl_poll_timeout(
			hdev,
			mmPSOC_GLOBAL_CONF_WARM_REBOOT,
			status,
			(status == CPU_BOOT_STATUS_DRAM_RDY),
			10000,
			GOYA_CPU_TIMEOUT_USEC);
		if (rc)
			dev_err(hdev->dev,
				"failed to wait for CPU boot loader\n");
	}
}
int goya_suspend(struct hl_device *hdev)
{
	int rc;

	rc = goya_stop_internal_queues(hdev);

	if (rc) {
		dev_err(hdev->dev, "failed to stop internal queues\n");
		return rc;
	}

	rc = goya_stop_external_queues(hdev);

	if (rc) {
		dev_err(hdev->dev, "failed to stop external queues\n");
		return rc;
	}

	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
	if (rc)
		dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");

	return rc;
}

int goya_resume(struct hl_device *hdev)
{
	int rc;

	goya_resume_external_queues(hdev);
	goya_resume_internal_queues(hdev);

	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
	if (rc)
		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");

	return rc;
}
static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
		u64 kaddress, phys_addr_t paddress, u32 size)
{
	int rc;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
			VM_DONTCOPY | VM_NORESERVE;

	rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
				size, vma->vm_page_prot);
	if (rc)
		dev_err(hdev->dev, "remap_pfn_range error %d", rc);

	return rc;
}
static void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
{
	u32 db_reg_offset, db_value;
	bool invalid_queue = false;

	switch (hw_queue_id) {
	case GOYA_QUEUE_ID_DMA_0:
		db_reg_offset = mmDMA_QM_0_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_1:
		db_reg_offset = mmDMA_QM_1_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_2:
		db_reg_offset = mmDMA_QM_2_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_3:
		db_reg_offset = mmDMA_QM_3_PQ_PI;
		break;

	case GOYA_QUEUE_ID_DMA_4:
		db_reg_offset = mmDMA_QM_4_PQ_PI;
		break;

	case GOYA_QUEUE_ID_CPU_PQ:
		if (hdev->cpu_queues_enable)
			db_reg_offset = mmCPU_IF_PF_PQ_PI;
		else
			invalid_queue = true;
		break;

	case GOYA_QUEUE_ID_MME:
		db_reg_offset = mmMME_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC0:
		db_reg_offset = mmTPC0_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC1:
		db_reg_offset = mmTPC1_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC2:
		db_reg_offset = mmTPC2_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC3:
		db_reg_offset = mmTPC3_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC4:
		db_reg_offset = mmTPC4_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC5:
		db_reg_offset = mmTPC5_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC6:
		db_reg_offset = mmTPC6_QM_PQ_PI;
		break;

	case GOYA_QUEUE_ID_TPC7:
		db_reg_offset = mmTPC7_QM_PQ_PI;
		break;

	default:
		invalid_queue = true;
	}

	if (invalid_queue) {
		/* Should never get here */
		dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n",
			hw_queue_id);
		return;
	}

	db_value = pi;

	/* ring the doorbell */
	WREG32(db_reg_offset, db_value);

	if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
		WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
				GOYA_ASYNC_EVENT_ID_PI_UPDATE);
}
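
/*
 * The CPU PQ is a special case: its PI register lives in the CPU interface
 * block rather than in a QMAN, so after updating the PI the driver also
 * fires the PI_UPDATE event through the GIC to notify the embedded CPU that
 * new work is available.
 */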
void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
{
	/* Not needed in Goya */
}

static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flags)
{
	return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
}

static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
}
void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
				dma_addr_t *dma_handle, u16 *queue_len)
{
	void *base;
	u32 offset;

	*dma_handle = hdev->asic_prop.sram_base_address;

	base = hdev->pcie_bar[SRAM_CFG_BAR_ID];

	switch (queue_id) {
	case GOYA_QUEUE_ID_MME:
		offset = MME_QMAN_BASE_OFFSET;
		*queue_len = MME_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC0:
		offset = TPC0_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC1:
		offset = TPC1_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC2:
		offset = TPC2_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC3:
		offset = TPC3_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC4:
		offset = TPC4_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC5:
		offset = TPC5_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC6:
		offset = TPC6_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	case GOYA_QUEUE_ID_TPC7:
		offset = TPC7_QMAN_BASE_OFFSET;
		*queue_len = TPC_QMAN_LENGTH;
		break;
	default:
		dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
		return NULL;
	}

	base += offset;
	*dma_handle += offset;

	return base;
}
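
/*
 * Internal queues live in SRAM, so the same queue is described by two
 * addresses: the returned pointer is the host-CPU view through the PCI BAR,
 * while *dma_handle is the device-side SRAM address that the engines use.
 * Both are derived from the same per-queue offset.
 */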
static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct goya_device *goya = hdev->asic_specific;
	struct packet_msg_prot *fence_pkt;
	u32 *fence_ptr;
	dma_addr_t fence_dma_addr;
	struct hl_cb *cb;
	u32 tmp, timeout;
	int rc;

	if (hdev->pldm)
		timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
	else
		timeout = HL_DEVICE_TIMEOUT_USEC;

	if (!hdev->asic_funcs->is_device_idle(hdev)) {
		dev_err_ratelimited(hdev->dev,
			"Can't send KMD job on QMAN0 if device is not idle\n");
		return -EBUSY;
	}

	fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
							&fence_dma_addr);
	if (!fence_ptr) {
		dev_err(hdev->dev,
			"Failed to allocate fence memory for QMAN0\n");
		return -ENOMEM;
	}

	if (goya->hw_cap_initialized & HW_CAP_MMU) {
		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
		RREG32(mmDMA_QM_0_GLBL_PROT);
	}

	/*
	 * goya cs parser saves space for 2xpacket_msg_prot at end of CB. For
	 * synchronized kernel jobs we only need space for 1 packet_msg_prot
	 */
	job->job_cb_size -= sizeof(struct packet_msg_prot);

	cb = job->patched_cb;

	fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
			job->job_cb_size - sizeof(struct packet_msg_prot));

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_EB_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	fence_pkt->ctl = cpu_to_le32(tmp);
	fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
	fence_pkt->addr = cpu_to_le64(fence_dma_addr +
			hdev->asic_prop.host_phys_base_address);

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
					job->job_cb_size, cb->bus_address);
	if (rc) {
		dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
		goto free_fence_ptr;
	}

	rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout,
					&tmp);

	hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);

	if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) {
		dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n");
		rc = -ETIMEDOUT;
	}

free_fence_ptr:
	hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
					fence_dma_addr);

	if (goya->hw_cap_initialized & HW_CAP_MMU) {
		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
		RREG32(mmDMA_QM_0_GLBL_PROT);
	}

	return rc;
}
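
/*
 * When the MMU is enabled, QMAN0 normally runs partly trusted. The window
 * above temporarily raises it to fully trusted for the duration of the KMD
 * job and restores it afterwards; the RREG32 read-backs flush the posted
 * protection-register writes so the change is in effect before the job runs
 * and fully reverted after it.
 */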
int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
				u32 timeout, long *result)
{
	struct goya_device *goya = hdev->asic_specific;
	struct armcp_packet *pkt;
	dma_addr_t pkt_dma_addr;
	u32 tmp;
	int rc = 0;

	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
		if (result)
			*result = 0;
		return 0;
	}

	if (len > CPU_CB_SIZE) {
		dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n",
			len);
		return -ENOMEM;
	}

	pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
								&pkt_dma_addr);
	if (!pkt) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for packet to CPU\n");
		return -ENOMEM;
	}

	memcpy(pkt, msg, len);

	mutex_lock(&hdev->send_cpu_message_lock);

	if (hdev->device_cpu_disabled) {
		rc = -EIO;
		goto out;
	}

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len,
			pkt_dma_addr);
	if (rc) {
		dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
		goto out;
	}

	rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence,
					timeout, &tmp);

	hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev, "Timeout while waiting for device CPU\n");
		hdev->device_cpu_disabled = true;
		goto out;
	}

	if (tmp == ARMCP_PACKET_FENCE_VAL) {
		u32 ctl = le32_to_cpu(pkt->ctl);

		rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
		if (rc) {
			dev_err(hdev->dev,
				"F/W ERROR %d for CPU packet %d\n",
				rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK)
						>> ARMCP_PKT_CTL_OPCODE_SHIFT);
			rc = -EINVAL;
		} else if (result) {
			*result = (long) le64_to_cpu(pkt->result);
		}
	} else {
		dev_err(hdev->dev, "CPU packet wrong fence value\n");
		rc = -EINVAL;
	}

out:
	mutex_unlock(&hdev->send_cpu_message_lock);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);

	return rc;
}
int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
{
	struct packet_msg_prot *fence_pkt;
	dma_addr_t pkt_dma_addr;
	u32 fence_val, tmp;
	dma_addr_t fence_dma_addr;
	u32 *fence_ptr;
	int rc;

	fence_val = GOYA_QMAN0_FENCE_VAL;

	fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL,
							&fence_dma_addr);
	if (!fence_ptr) {
		dev_err(hdev->dev,
			"Failed to allocate memory for queue testing\n");
		return -ENOMEM;
	}

	*fence_ptr = 0;

	fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev,
					sizeof(struct packet_msg_prot),
					GFP_KERNEL, &pkt_dma_addr);
	if (!fence_pkt) {
		dev_err(hdev->dev,
			"Failed to allocate packet for queue testing\n");
		rc = -ENOMEM;
		goto free_fence_ptr;
	}

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_EB_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	fence_pkt->ctl = cpu_to_le32(tmp);
	fence_pkt->value = cpu_to_le32(fence_val);
	fence_pkt->addr = cpu_to_le64(fence_dma_addr +
					hdev->asic_prop.host_phys_base_address);

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
					sizeof(struct packet_msg_prot),
					pkt_dma_addr);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to send fence packet\n");
		goto free_pkt;
	}

	rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr,
					GOYA_TEST_QUEUE_WAIT_USEC, &tmp);

	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

	if ((!rc) && (tmp == fence_val)) {
		dev_info(hdev->dev,
			"queue test on H/W queue %d succeeded\n",
			hw_queue_id);
	} else {
		dev_err(hdev->dev,
			"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
			hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
		rc = -EINVAL;
	}

free_pkt:
	hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt,
					pkt_dma_addr);
free_fence_ptr:
	hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr,
					fence_dma_addr);

	return rc;
}
int goya_test_cpu_queue(struct hl_device *hdev)
{
	struct armcp_packet test_pkt;
	long result;
	int rc;

	/* cpu_queues_enable flag is always checked in send cpu message */

	memset(&test_pkt, 0, sizeof(test_pkt));

	test_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
					ARMCP_PKT_CTL_OPCODE_SHIFT);
	test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
			sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);

	if (!rc) {
		if (result == ARMCP_PACKET_FENCE_VAL)
			dev_info(hdev->dev,
				"queue test on CPU queue succeeded\n");
		else
			dev_err(hdev->dev,
				"CPU queue test failed (0x%08lX)\n", result);
	} else {
		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
	}

	return rc;
}
static int goya_test_queues(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	int i, rc, ret_val = 0;

	for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
		rc = goya_test_queue(hdev, i);
		if (rc)
			ret_val = -EINVAL;
	}

	if (hdev->cpu_queues_enable) {
		rc = goya->test_cpu_queue(hdev);
		if (rc)
			ret_val = -EINVAL;
	}

	return ret_val;
}
static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
				gfp_t mem_flags, dma_addr_t *dma_handle)
{
	if (size > GOYA_DMA_POOL_BLK_SIZE)
		return NULL;

	return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
}

static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
				dma_addr_t dma_addr)
{
	dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
}

static void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev,
					size_t size, dma_addr_t *dma_handle)
{
	u64 kernel_addr;

	/* roundup to CPU_PKT_SIZE */
	size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;

	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);

	*dma_handle = hdev->cpu_accessible_dma_address +
		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);

	return (void *) (uintptr_t) kernel_addr;
}

static void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev,
						size_t size, void *vaddr)
{
	/* roundup to CPU_PKT_SIZE */
	size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK;

	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
			size);
}
static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir))
		return -ENOMEM;

	return 0;
}

static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir);
}
u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
{
	struct scatterlist *sg, *sg_next_iter;
	u32 count, dma_desc_cnt;
	u64 len, len_next;
	dma_addr_t addr, addr_next;

	dma_desc_cnt = 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, count) {

		len = sg_dma_len(sg);
		addr = sg_dma_address(sg);

		if (len == 0)
			break;

		while ((count + 1) < sgt->nents) {
			sg_next_iter = sg_next(sg);
			len_next = sg_dma_len(sg_next_iter);
			addr_next = sg_dma_address(sg_next_iter);

			if (len_next == 0)
				break;

			if ((addr + len == addr_next) &&
				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
				len += len_next;
				count++;
				sg = sg_next_iter;
			} else {
				break;
			}
		}

		dma_desc_cnt++;
	}

	return dma_desc_cnt * sizeof(struct packet_lin_dma);
}
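
/*
 * Example of the coalescing above: three DMA-mapped chunks
 * [0x1000, 4KB], [0x2000, 4KB] and [0x9000, 4KB] yield two descriptors -
 * the first two are contiguous (0x1000 + 0x1000 == 0x2000) and merge into
 * one 8KB transfer (assuming 8KB is below DMA_MAX_TRANSFER_SIZE), while the
 * third starts a new one, so the function returns
 * 2 * sizeof(struct packet_lin_dma).
 */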
static int goya_pin_memory_before_cs(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				u64 addr, enum dma_data_direction dir)
{
	struct hl_userptr *userptr;
	int rc;

	if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
			parser->job_userptr_list, &userptr))
		goto already_pinned;

	userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
	if (!userptr)
		return -ENOMEM;

	rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
				userptr);
	if (rc)
		goto free_userptr;

	list_add_tail(&userptr->job_node, parser->job_userptr_list);

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
				userptr->sgt->nents, dir);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto unpin_memory;
	}

	userptr->dma_mapped = true;
	userptr->dir = dir;

already_pinned:
	parser->patched_cb_size +=
			goya_get_dma_desc_list_size(hdev, userptr->sgt);

	return 0;

unpin_memory:
	hl_unpin_host_memory(hdev, userptr);
free_userptr:
	kfree(userptr);
	return rc;
}
static int goya_validate_dma_pkt_host(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	u64 device_memory_addr, addr;
	enum dma_data_direction dir;
	enum goya_dma_direction user_dir;
	bool sram_addr = true;
	bool skip_host_mem_pin = false;
	bool user_memset;
	u32 ctl;
	int rc = 0;

	ctl = le32_to_cpu(user_dma_pkt->ctl);

	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;

	switch (user_dir) {
	case DMA_HOST_TO_DRAM:
		dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
		dir = DMA_TO_DEVICE;
		sram_addr = false;
		addr = le64_to_cpu(user_dma_pkt->src_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
		if (user_memset)
			skip_host_mem_pin = true;
		break;

	case DMA_DRAM_TO_HOST:
		dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
		dir = DMA_FROM_DEVICE;
		sram_addr = false;
		addr = le64_to_cpu(user_dma_pkt->dst_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		break;

	case DMA_HOST_TO_SRAM:
		dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
		dir = DMA_TO_DEVICE;
		addr = le64_to_cpu(user_dma_pkt->src_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
		if (user_memset)
			skip_host_mem_pin = true;
		break;

	case DMA_SRAM_TO_HOST:
		dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
		dir = DMA_FROM_DEVICE;
		addr = le64_to_cpu(user_dma_pkt->dst_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		break;
	default:
		dev_err(hdev->dev, "DMA direction is undefined\n");
		return -EFAULT;
	}

	if (parser->ctx_id != HL_KERNEL_ASID_ID) {
		if (sram_addr) {
			if (!hl_mem_area_inside_range(device_memory_addr,
					le32_to_cpu(user_dma_pkt->tsize),
					hdev->asic_prop.sram_user_base_address,
					hdev->asic_prop.sram_end_address)) {

				dev_err(hdev->dev,
					"SRAM address 0x%llx + 0x%x is invalid\n",
					device_memory_addr,
					user_dma_pkt->tsize);
				return -EFAULT;
			}
		} else {
			if (!hl_mem_area_inside_range(device_memory_addr,
					le32_to_cpu(user_dma_pkt->tsize),
					hdev->asic_prop.dram_user_base_address,
					hdev->asic_prop.dram_end_address)) {

				dev_err(hdev->dev,
					"DRAM address 0x%llx + 0x%x is invalid\n",
					device_memory_addr,
					user_dma_pkt->tsize);
				return -EFAULT;
			}
		}
	}

	if (skip_host_mem_pin)
		parser->patched_cb_size += sizeof(*user_dma_pkt);
	else {
		if ((dir == DMA_TO_DEVICE) &&
				(parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
			dev_err(hdev->dev,
				"Can't DMA from host on queue other than 1\n");
			return -EFAULT;
		}

		rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
						addr, dir);
	}

	return rc;
}
static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	u64 sram_memory_addr, dram_memory_addr;
	enum goya_dma_direction user_dir;
	u32 ctl;

	ctl = le32_to_cpu(user_dma_pkt->ctl);
	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	if (user_dir == DMA_DRAM_TO_SRAM) {
		dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
		dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
	} else {
		dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
		sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
	}

	if (!hl_mem_area_inside_range(sram_memory_addr,
			le32_to_cpu(user_dma_pkt->tsize),
			hdev->asic_prop.sram_user_base_address,
			hdev->asic_prop.sram_end_address)) {
		dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
			sram_memory_addr, user_dma_pkt->tsize);
		return -EFAULT;
	}

	if (!hl_mem_area_inside_range(dram_memory_addr,
			le32_to_cpu(user_dma_pkt->tsize),
			hdev->asic_prop.dram_user_base_address,
			hdev->asic_prop.dram_end_address)) {
		dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
			dram_memory_addr, user_dma_pkt->tsize);
		return -EFAULT;
	}

	parser->patched_cb_size += sizeof(*user_dma_pkt);

	return 0;
}
static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	enum goya_dma_direction user_dir;
	u32 ctl;
	int rc;

	dev_dbg(hdev->dev, "DMA packet details:\n");
	dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
	dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
	dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);

	ctl = le32_to_cpu(user_dma_pkt->ctl);
	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	/*
	 * Special handling for DMA with size 0. The H/W has a bug where
	 * this can cause the QMAN DMA to get stuck, so block it here.
	 */
	if (user_dma_pkt->tsize == 0) {
		dev_err(hdev->dev,
			"Got DMA with size 0, might reset the device\n");
		return -EINVAL;
	}

	if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
		rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
	else
		rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);

	return rc;
}
static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt)
{
	dev_dbg(hdev->dev, "DMA packet details:\n");
	dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
	dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
	dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);

	/*
	 * WA for HW-23.
	 * We can't allow user to read from Host using QMANs other than 1.
	 */
	if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
		hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
				le32_to_cpu(user_dma_pkt->tsize),
				hdev->asic_prop.va_space_host_start_address,
				hdev->asic_prop.va_space_host_end_address)) {
		dev_err(hdev->dev,
			"Can't DMA from host on queue other than 1\n");
		return -EFAULT;
	}

	if (user_dma_pkt->tsize == 0) {
		dev_err(hdev->dev,
			"Got DMA with size 0, might reset the device\n");
		return -EINVAL;
	}

	parser->patched_cb_size += sizeof(*user_dma_pkt);

	return 0;
}
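
/*
 * In the MMU case there is nothing to patch: the source/destination are
 * device virtual addresses that the MMU will translate, so the packet is
 * accounted for at its original size and later copied as-is; only the two
 * safety checks above are enforced.
 */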
static int goya_validate_wreg32(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_wreg32 *wreg_pkt)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 sob_start_addr, sob_end_addr;
	u16 reg_offset;

	reg_offset = le32_to_cpu(wreg_pkt->ctl) &
			GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;

	dev_dbg(hdev->dev, "WREG32 packet details:\n");
	dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
	dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);

	if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
		dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
			reg_offset);
		return -EPERM;
	}

	/*
	 * With MMU, DMA channels are not secured, so it doesn't matter where
	 * the WR COMP will be written to because it will go out with
	 * non-secured property
	 */
	if (goya->hw_cap_initialized & HW_CAP_MMU)
		return 0;

	sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
	sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);

	if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
			(le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {

		dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
			wreg_pkt->value);
		return -EPERM;
	}

	return 0;
}
static int goya_validate_cb(struct hl_device *hdev,
			struct hl_cs_parser *parser, bool is_mmu)
{
	u32 cb_parsed_length = 0;
	int rc = 0;

	parser->patched_cb_size = 0;

	/* cb_user_size is greater than 0 so the loop will always execute */
	while (cb_parsed_length < parser->user_cb_size) {
		enum packet_id pkt_id;
		u16 pkt_size;
		void *user_pkt;

		user_pkt = (void *) (uintptr_t)
			(parser->user_cb->kernel_address + cb_parsed_length);

		pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
				PACKET_HEADER_PACKET_ID_MASK) >>
					PACKET_HEADER_PACKET_ID_SHIFT);

		pkt_size = goya_packet_sizes[pkt_id];
		cb_parsed_length += pkt_size;
		if (cb_parsed_length > parser->user_cb_size) {
			dev_err(hdev->dev,
				"packet 0x%x is out of CB boundary\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		switch (pkt_id) {
		case PACKET_WREG_32:
			/*
			 * Although it is validated after copy in patch_cb(),
			 * need to validate here as well because patch_cb() is
			 * not called in MMU path while this function is called
			 */
			rc = goya_validate_wreg32(hdev, parser, user_pkt);
			break;

		case PACKET_WREG_BULK:
			dev_err(hdev->dev,
				"User not allowed to use WREG_BULK\n");
			rc = -EPERM;
			break;

		case PACKET_MSG_PROT:
			dev_err(hdev->dev,
				"User not allowed to use MSG_PROT\n");
			rc = -EPERM;
			break;

		case PACKET_CP_DMA:
			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
			rc = -EPERM;
			break;

		case PACKET_STOP:
			dev_err(hdev->dev, "User not allowed to use STOP\n");
			rc = -EPERM;
			break;

		case PACKET_LIN_DMA:
			if (is_mmu)
				rc = goya_validate_dma_pkt_mmu(hdev, parser,
						user_pkt);
			else
				rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
						user_pkt);
			break;

		case PACKET_MSG_LONG:
		case PACKET_MSG_SHORT:
		case PACKET_FENCE:
		case PACKET_NOP:
			parser->patched_cb_size += pkt_size;
			break;

		default:
			dev_err(hdev->dev, "Invalid packet header 0x%x\n",
				pkt_id);
			rc = -EINVAL;
			break;
		}

		if (rc)
			break;
	}

	/*
	 * The new CB should have space at the end for two MSG_PROT packets:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI-X interrupt
	 */
	parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;

	return rc;
}
static int goya_patch_dma_packet(struct hl_device *hdev,
				struct hl_cs_parser *parser,
				struct packet_lin_dma *user_dma_pkt,
				struct packet_lin_dma *new_dma_pkt,
				u32 *new_dma_pkt_size)
{
	struct hl_userptr *userptr;
	struct scatterlist *sg, *sg_next_iter;
	u32 count, dma_desc_cnt;
	u64 len, len_next;
	dma_addr_t dma_addr, dma_addr_next;
	enum goya_dma_direction user_dir;
	u64 device_memory_addr, addr;
	enum dma_data_direction dir;
	struct sg_table *sgt;
	bool skip_host_mem_pin = false;
	bool user_memset;
	u32 user_rdcomp_mask, user_wrcomp_mask, ctl;

	ctl = le32_to_cpu(user_dma_pkt->ctl);

	user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;

	user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
			GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;

	if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
			(user_dma_pkt->tsize == 0)) {
		memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
		*new_dma_pkt_size = sizeof(*new_dma_pkt);
		return 0;
	}

	if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
		addr = le64_to_cpu(user_dma_pkt->src_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
		dir = DMA_TO_DEVICE;
		if (user_memset)
			skip_host_mem_pin = true;
	} else {
		addr = le64_to_cpu(user_dma_pkt->dst_addr);
		device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
		dir = DMA_FROM_DEVICE;
	}

	if ((!skip_host_mem_pin) &&
		(hl_userptr_is_pinned(hdev, addr,
			le32_to_cpu(user_dma_pkt->tsize),
			parser->job_userptr_list, &userptr) == false)) {
		dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
				addr, user_dma_pkt->tsize);
		return -EFAULT;
	}

	if ((user_memset) && (dir == DMA_TO_DEVICE)) {
		memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
		*new_dma_pkt_size = sizeof(*user_dma_pkt);
		return 0;
	}

	user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;

	user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;

	sgt = userptr->sgt;
	dma_desc_cnt = 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg_dma_len(sg);
		dma_addr = sg_dma_address(sg);

		if (len == 0)
			break;

		while ((count + 1) < sgt->nents) {
			sg_next_iter = sg_next(sg);
			len_next = sg_dma_len(sg_next_iter);
			dma_addr_next = sg_dma_address(sg_next_iter);

			if (len_next == 0)
				break;

			if ((dma_addr + len == dma_addr_next) &&
				(len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
				len += len_next;
				count++;
				sg = sg_next_iter;
			} else {
				break;
			}
		}

		ctl = le32_to_cpu(user_dma_pkt->ctl);
		if (likely(dma_desc_cnt))
			ctl &= ~GOYA_PKT_CTL_EB_MASK;
		ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
				GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
		new_dma_pkt->ctl = cpu_to_le32(ctl);
		new_dma_pkt->tsize = cpu_to_le32((u32) len);

		dma_addr += hdev->asic_prop.host_phys_base_address;

		if (dir == DMA_TO_DEVICE) {
			new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
			new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
		} else {
			new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
			new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
		}

		if (!user_memset)
			device_memory_addr += len;
		dma_desc_cnt++;
		new_dma_pkt++;
	}

	if (!dma_desc_cnt) {
		dev_err(hdev->dev,
			"Error of 0 SG entries when patching DMA packet\n");
		return -EFAULT;
	}

	/* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
	new_dma_pkt--;
	new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);

	*new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);

	return 0;
}
static int goya_patch_cb(struct hl_device *hdev,
			struct hl_cs_parser *parser)
{
	u32 cb_parsed_length = 0;
	u32 cb_patched_cur_length = 0;
	int rc = 0;

	/* cb_user_size is greater than 0 so the loop will always execute */
	while (cb_parsed_length < parser->user_cb_size) {
		enum packet_id pkt_id;
		u16 pkt_size;
		u32 new_pkt_size = 0;
		void *user_pkt, *kernel_pkt;

		user_pkt = (void *) (uintptr_t)
			(parser->user_cb->kernel_address + cb_parsed_length);
		kernel_pkt = (void *) (uintptr_t)
			(parser->patched_cb->kernel_address +
					cb_patched_cur_length);

		pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
				PACKET_HEADER_PACKET_ID_MASK) >>
					PACKET_HEADER_PACKET_ID_SHIFT);

		pkt_size = goya_packet_sizes[pkt_id];
		cb_parsed_length += pkt_size;
		if (cb_parsed_length > parser->user_cb_size) {
			dev_err(hdev->dev,
				"packet 0x%x is out of CB boundary\n", pkt_id);
			rc = -EINVAL;
			break;
		}

		switch (pkt_id) {
		case PACKET_LIN_DMA:
			rc = goya_patch_dma_packet(hdev, parser, user_pkt,
						kernel_pkt, &new_pkt_size);
			cb_patched_cur_length += new_pkt_size;
			break;

		case PACKET_WREG_32:
			memcpy(kernel_pkt, user_pkt, pkt_size);
			cb_patched_cur_length += pkt_size;
			rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
			break;

		case PACKET_WREG_BULK:
			dev_err(hdev->dev,
				"User not allowed to use WREG_BULK\n");
			rc = -EPERM;
			break;

		case PACKET_MSG_PROT:
			dev_err(hdev->dev,
				"User not allowed to use MSG_PROT\n");
			rc = -EPERM;
			break;

		case PACKET_CP_DMA:
			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
			rc = -EPERM;
			break;

		case PACKET_STOP:
			dev_err(hdev->dev, "User not allowed to use STOP\n");
			rc = -EPERM;
			break;

		case PACKET_MSG_LONG:
		case PACKET_MSG_SHORT:
		case PACKET_FENCE:
		case PACKET_NOP:
			memcpy(kernel_pkt, user_pkt, pkt_size);
			cb_patched_cur_length += pkt_size;
			break;

		default:
			dev_err(hdev->dev, "Invalid packet header 0x%x\n",
				pkt_id);
			rc = -EINVAL;
			break;
		}

		if (rc)
			break;
	}

	return rc;
}
static int goya_parse_cb_mmu(struct hl_device *hdev,
		struct hl_cs_parser *parser)
{
	u64 patched_cb_handle;
	u32 patched_cb_size;
	struct hl_cb *user_cb;
	int rc;

	/*
	 * The new CB should have space at the end for two MSG_PROT pkt:
	 * 1. A packet that will act as a completion packet
	 * 2. A packet that will generate MSI-X interrupt
	 */
	parser->patched_cb_size = parser->user_cb_size +
			sizeof(struct packet_msg_prot) * 2;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
				parser->patched_cb_size,
				&patched_cb_handle, HL_KERNEL_ASID_ID);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n",
			rc);
		return rc;
	}

	patched_cb_handle >>= PAGE_SHIFT;
	parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
				(u32) patched_cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
			(u32) patched_cb_handle);
	if (!parser->patched_cb) {
		rc = -EFAULT;
		goto out;
	}

	/*
	 * The check that parser->user_cb_size <= parser->user_cb->size was
	 * done in validate_queue_index().
	 */
	memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
		(void *) (uintptr_t) parser->user_cb->kernel_address,
		parser->user_cb_size);

	patched_cb_size = parser->patched_cb_size;

	/* validate patched CB instead of user CB */
	user_cb = parser->user_cb;
	parser->user_cb = parser->patched_cb;
	rc = goya_validate_cb(hdev, parser, true);
	parser->user_cb = user_cb;

	if (rc) {
		hl_cb_put(parser->patched_cb);
		goto out;
	}

	if (patched_cb_size != parser->patched_cb_size) {
		dev_err(hdev->dev, "user CB size mismatch\n");
		hl_cb_put(parser->patched_cb);
		rc = -EINVAL;
		goto out;
	}

out:
	/*
	 * Always call cb destroy here because we still have 1 reference
	 * to it by calling cb_get earlier. After the job will be completed,
	 * cb_put will release it, but here we want to remove it from the
	 * idr
	 */
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
			patched_cb_handle << PAGE_SHIFT);

	return rc;
}
static int goya_parse_cb_no_mmu(struct hl_device *hdev,
		struct hl_cs_parser *parser)
{
	u64 patched_cb_handle;
	int rc;

	rc = goya_validate_cb(hdev, parser, false);

	if (rc)
		goto free_userptr;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
				parser->patched_cb_size,
				&patched_cb_handle, HL_KERNEL_ASID_ID);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate patched CB for DMA CS %d\n", rc);
		goto free_userptr;
	}

	patched_cb_handle >>= PAGE_SHIFT;
	parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
				(u32) patched_cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
			(u32) patched_cb_handle);
	if (!parser->patched_cb) {
		rc = -EFAULT;
		goto out;
	}

	rc = goya_patch_cb(hdev, parser);

	if (rc)
		hl_cb_put(parser->patched_cb);

out:
	/*
	 * Always call cb destroy here because we still have 1 reference
	 * to it by calling cb_get earlier. After the job will be completed,
	 * cb_put will release it, but here we want to remove it from the
	 * idr
	 */
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
			patched_cb_handle << PAGE_SHIFT);

free_userptr:
	if (rc)
		hl_userptr_delete_list(hdev, parser->job_userptr_list);
	return rc;
}
static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
		struct hl_cs_parser *parser)
{
	struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
		/* For internal queue jobs, just check if CB address is valid */
		if (hl_mem_area_inside_range(
				(u64) (uintptr_t) parser->user_cb,
				parser->user_cb_size,
				asic_prop->sram_user_base_address,
				asic_prop->sram_end_address))
			return 0;

		if (hl_mem_area_inside_range(
				(u64) (uintptr_t) parser->user_cb,
				parser->user_cb_size,
				asic_prop->dram_user_base_address,
				asic_prop->dram_end_address))
			return 0;

		dev_err(hdev->dev,
			"Internal CB address %px + 0x%x is not in SRAM nor in DRAM\n",
			parser->user_cb, parser->user_cb_size);

		return -EFAULT;
	}

	return 0;
}
int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
{
	struct goya_device *goya = hdev->asic_specific;

	if (!parser->ext_queue)
		return goya_parse_cb_no_ext_queue(hdev, parser);

	if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
		return goya_parse_cb_mmu(hdev, parser);
	else
		return goya_parse_cb_no_mmu(hdev, parser);
}
void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
				u32 cq_val, u32 msix_vec)
{
	struct packet_msg_prot *cq_pkt;
	u32 tmp;

	cq_pkt = (struct packet_msg_prot *) (uintptr_t)
		(kernel_address + len - (sizeof(struct packet_msg_prot) * 2));

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_EB_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	cq_pkt->ctl = cpu_to_le32(tmp);
	cq_pkt->value = cpu_to_le32(cq_val);
	cq_pkt->addr = cpu_to_le64(cq_addr);

	cq_pkt++;

	tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT);
	cq_pkt->ctl = cpu_to_le32(tmp);
	cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
	cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
}
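
/*
 * Of the two tail packets, only the first (the completion write) carries
 * the engine-barrier (EB) bit, making it wait until all previous engine
 * transactions are done. The second packet just writes the vector number to
 * the MSI-X doorbell and sets only the message-barrier (MB) bit, presumably
 * because ordering behind the first packet is already guaranteed.
 */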
static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
{
	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
}

static void goya_restore_phase_topology(struct hl_device *hdev)
{
	int i, num_of_sob_in_longs, num_of_mon_in_longs;

	num_of_sob_in_longs =
		((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);

	num_of_mon_in_longs =
		((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);

	for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
		WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);

	for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
		WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);

	/* Flush all WREG to prevent race */
	i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
}
/*
 * goya_debugfs_read32 - read a 32bit value from a given device address
 *
 * @hdev:	pointer to hl_device structure
 * @addr:	address in device
 * @val:	returned value
 *
 * In case of DDR address that is not mapped into the default aperture that
 * the DDR bar exposes, the function will configure the iATU so that the DDR
 * bar will be positioned at a base address that allows reading from the
 * required address. Configuring the iATU during normal operation can
 * lead to undefined behavior and therefore, should be done with extreme care
 *
 */
static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc = 0;

	if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
		*val = RREG32(addr - CFG_BASE);

	} else if ((addr >= SRAM_BASE_ADDR) &&
			(addr < SRAM_BASE_ADDR + SRAM_SIZE)) {

		*val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
				(addr - SRAM_BASE_ADDR));

	} else if ((addr >= DRAM_PHYS_BASE) &&
			(addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {

		u64 bar_base_addr = DRAM_PHYS_BASE +
				(addr & ~(prop->dram_pci_bar_size - 0x1ull));

		rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
		if (!rc) {
			*val = readl(hdev->pcie_bar[DDR_BAR_ID] +
					(addr - bar_base_addr));

			rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
				(MMU_PAGE_TABLES_ADDR &
					~(prop->dram_pci_bar_size - 0x1ull)));
		}
	} else {
		rc = -EFAULT;
	}

	return rc;
}
/*
 * goya_debugfs_write32 - write a 32bit value to a given device address
 *
 * @hdev:	pointer to hl_device structure
 * @addr:	address in device
 * @val:	value to write
 *
 * In case of DDR address that is not mapped into the default aperture that
 * the DDR bar exposes, the function will configure the iATU so that the DDR
 * bar will be positioned at a base address that allows writing to the
 * required address. Configuring the iATU during normal operation can
 * lead to undefined behavior and therefore, should be done with extreme care
 *
 */
static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc = 0;

	if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
		WREG32(addr - CFG_BASE, val);

	} else if ((addr >= SRAM_BASE_ADDR) &&
			(addr < SRAM_BASE_ADDR + SRAM_SIZE)) {

		writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
					(addr - SRAM_BASE_ADDR));

	} else if ((addr >= DRAM_PHYS_BASE) &&
			(addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {

		u64 bar_base_addr = DRAM_PHYS_BASE +
				(addr & ~(prop->dram_pci_bar_size - 0x1ull));

		rc = goya_set_ddr_bar_base(hdev, bar_base_addr);
		if (!rc) {
			writel(val, hdev->pcie_bar[DDR_BAR_ID] +
					(addr - bar_base_addr));

			rc = goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
				(MMU_PAGE_TABLES_ADDR &
					~(prop->dram_pci_bar_size - 0x1ull)));
		}
	} else {
		rc = -EFAULT;
	}

	return rc;
}
static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
	struct goya_device *goya = hdev->asic_specific;

	return readq(hdev->pcie_bar[DDR_BAR_ID] +
			(addr - goya->ddr_bar_cur_addr));
}
static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
	struct goya_device *goya = hdev->asic_specific;

	writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
			(addr - goya->ddr_bar_cur_addr));
}
static const char *_goya_get_event_desc(u16 event_type)
{
	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
		return "PCI_dec";
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
		return "TPC%d_dec";
	case GOYA_ASYNC_EVENT_ID_MME_WACS:
		return "MME_wacs";
	case GOYA_ASYNC_EVENT_ID_MME_WACSD:
		return "MME_wacsd";
	case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
		return "CPU_axi_splitter";
	case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
		return "PSOC_axi_dec";
	case GOYA_ASYNC_EVENT_ID_PSOC:
		return "PSOC";
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
		return "TPC%d_krn_err";
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
		return "TPC%d_cq";
	case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
		return "TPC%d_qm";
	case GOYA_ASYNC_EVENT_ID_MME_QM:
		return "MME_qm";
	case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
		return "MME_cq";
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
		return "DMA%d_qm";
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		return "DMA%d_ch";
	default:
		return "N/A";
	}
}
static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
{
	u8 index;

	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
		index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
		index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
		index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
		index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
		index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
		snprintf(desc, size, _goya_get_event_desc(event_type), index);
		break;
	default:
		snprintf(desc, size, _goya_get_event_desc(event_type));
		break;
	}
}
static void goya_print_razwi_info(struct hl_device *hdev)
{
	if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
		dev_err(hdev->dev, "Illegal write to LBW\n");
		WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
	}

	if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
		dev_err(hdev->dev, "Illegal read from LBW\n");
		WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
	}

	if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
		dev_err(hdev->dev, "Illegal write to HBW\n");
		WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
	}

	if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
		dev_err(hdev->dev, "Illegal read from HBW\n");
		WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
	}
}
static void goya_print_mmu_error_info(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u64 addr;
	u32 val;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
	if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
		addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
		addr <<= 32;
		addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);

		dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);

		WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
	}
}
static void goya_print_irq_info(struct hl_device *hdev, u16 event_type)
{
	char desc[20] = "";

	goya_get_event_desc(event_type, desc, sizeof(desc));
	dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
		event_type, desc);

	goya_print_razwi_info(hdev);
	goya_print_mmu_error_info(hdev);
}
static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
		size_t irq_arr_size)
{
	struct armcp_unmask_irq_arr_packet *pkt;
	size_t total_pkt_size;
	long result;
	int rc;

	total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
			irq_arr_size;

	/* data must be aligned to 8 bytes so that ArmCP can copy it */
	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;

	/* total_pkt_size is cast to u16 later on */
	if (total_pkt_size > USHRT_MAX) {
		dev_err(hdev->dev, "too many elements in IRQ array\n");
		return -EINVAL;
	}

	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
	memcpy(&pkt->irqs, irq_arr, irq_arr_size);

	pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
						ARMCP_PKT_CTL_OPCODE_SHIFT);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
			total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask IRQ array\n");

	kfree(pkt);

	return rc;
}
static int goya_soft_reset_late_init(struct hl_device *hdev)
{
	/*
	 * Unmask all IRQs since some could have been received
	 * during the soft reset
	 */
	return goya_unmask_irq_arr(hdev, goya_non_fatal_events,
			sizeof(goya_non_fatal_events));
}
static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
{
	struct armcp_packet pkt;
	long result;
	int rc;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
				ARMCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(event_type);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			HL_DEVICE_TIMEOUT_USEC, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);

	return rc;
}
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
	u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
	u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
				>> EQ_CTL_EVENT_TYPE_SHIFT);
	struct goya_device *goya = hdev->asic_specific;

	goya->events_stat[event_type]++;

	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_PCIE_IF:
	case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
	case GOYA_ASYNC_EVENT_ID_MME_ECC:
	case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
	case GOYA_ASYNC_EVENT_ID_MMU_ECC:
	case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
	case GOYA_ASYNC_EVENT_ID_DMA_ECC:
	case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
	case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
	case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
	case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
	case GOYA_ASYNC_EVENT_ID_GIC500:
	case GOYA_ASYNC_EVENT_ID_PLL0:
	case GOYA_ASYNC_EVENT_ID_PLL1:
	case GOYA_ASYNC_EVENT_ID_PLL3:
	case GOYA_ASYNC_EVENT_ID_PLL4:
	case GOYA_ASYNC_EVENT_ID_PLL5:
	case GOYA_ASYNC_EVENT_ID_PLL6:
	case GOYA_ASYNC_EVENT_ID_AXI_ECC:
	case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
		dev_err(hdev->dev,
			"Received H/W interrupt %d, reset the chip\n",
			event_type);
		hl_device_reset(hdev, true, false);
		break;

	case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
	case GOYA_ASYNC_EVENT_ID_MME_WACS:
	case GOYA_ASYNC_EVENT_ID_MME_WACSD:
	case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
	case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
	case GOYA_ASYNC_EVENT_ID_PSOC:
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
	case GOYA_ASYNC_EVENT_ID_MME_QM:
	case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		goya_print_irq_info(hdev, event_type);
		goya_unmask_irq(hdev, event_type);
		break;

	case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0:
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH1:
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH2:
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH3:
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
		dev_info(hdev->dev, "Received H/W interrupt %d\n", event_type);
		break;

	default:
		dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
				event_type);
		break;
	}
}
void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
{
	struct goya_device *goya = hdev->asic_specific;

	*size = (u32) sizeof(goya->events_stat);

	return goya->events_stat;
}
static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u32 size,
				u64 val, bool is_dram)
{
	struct packet_lin_dma *lin_dma_pkt;
	struct hl_cs_parser parser;
	struct hl_cs_job *job;
	u32 cb_size, ctl;
	struct hl_cb *cb;
	int rc;

	cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
	if (!cb)
		return -EFAULT;

	lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;

	memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));
	cb_size = sizeof(*lin_dma_pkt);

	ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
			(1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
			(1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
			(1 << GOYA_PKT_CTL_RB_SHIFT) |
			(1 << GOYA_PKT_CTL_MB_SHIFT));
	ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
	lin_dma_pkt->ctl = cpu_to_le32(ctl);

	lin_dma_pkt->src_addr = cpu_to_le64(val);
	lin_dma_pkt->dst_addr = cpu_to_le64(addr);
	lin_dma_pkt->tsize = cpu_to_le32(size);

	job = hl_cs_allocate_job(hdev, true);
	if (!job) {
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto release_cb;
	}

	job->id = 0;
	job->user_cb = cb;
	job->user_cb->cs_cnt++;
	job->user_cb_size = cb_size;
	job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;

	hl_debugfs_add_job(hdev, job);

	parser.ctx_id = HL_KERNEL_ASID_ID;
	parser.cs_sequence = 0;
	parser.job_id = job->id;
	parser.hw_queue_id = job->hw_queue_id;
	parser.job_userptr_list = &job->userptr_list;
	parser.user_cb = job->user_cb;
	parser.user_cb_size = job->user_cb_size;
	parser.ext_queue = job->ext_queue;
	parser.use_virt_addr = hdev->mmu_enable;

	rc = hdev->asic_funcs->cs_parser(hdev, &parser);
	if (rc) {
		dev_err(hdev->dev, "Failed to parse kernel CB\n");
		goto free_job;
	}

	job->patched_cb = parser.patched_cb;
	job->job_cb_size = parser.patched_cb_size;
	job->patched_cb->cs_cnt++;

	rc = goya_send_job_on_qman0(hdev, job);

	job->patched_cb->cs_cnt--;
	hl_cb_put(job->patched_cb);

free_job:
	hl_userptr_delete_list(hdev, &job->userptr_list);
	hl_debugfs_remove_job(hdev, job);
	kfree(job);
	cb->cs_cnt--;

release_cb:
	hl_cb_put(cb);
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	return rc;
}
static int goya_context_switch(struct hl_device *hdev, u32 asid)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 addr = prop->sram_base_address;
	u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
	u64 val = 0x7777777777777777ull;
	int rc;

	rc = goya_memset_device_memory(hdev, addr, size, val, false);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
		return rc;
	}

	goya_mmu_prepare(hdev, asid);

	return 0;
}
static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	u64 addr = prop->mmu_pgt_addr;
	u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
			MMU_CACHE_MNG_SIZE;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	return goya_memset_device_memory(hdev, addr, size, 0, true);
}
static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
	u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
	u64 val = 0x9999999999999999ull;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	return goya_memset_device_memory(hdev, addr, size, val, true);
}
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
		WARN(1, "asid %u is too big\n", asid);
		return;
	}

	/* zero the MMBP and ASID bits and then set the ASID */
	for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++) {
		WREG32_AND(goya_mmu_regs[i], ~0x7FF);
		WREG32_OR(goya_mmu_regs[i], asid);
	}
}
static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 status, timeout_usec;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	/* no need for L1-only invalidation in Goya */
	if (!is_hard)
		return;

	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	mutex_lock(&hdev->mmu_cache_lock);

	/* L0 & L1 invalidation */
	WREG32(mmSTLB_INV_ALL_START, 1);

	rc = hl_poll_timeout(
		hdev,
		mmSTLB_INV_ALL_START,
		status,
		!status,
		1000,
		timeout_usec);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc)
		dev_notice_ratelimited(hdev->dev,
			"Timeout when waiting for MMU cache invalidation\n");
}
static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
		bool is_hard, u32 asid, u64 va, u64 size)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 status, timeout_usec, inv_data, pi;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	/* no need for L1-only invalidation in Goya */
	if (!is_hard)
		return;

	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	mutex_lock(&hdev->mmu_cache_lock);

	/*
	 * TODO: currently invalidate entire L0 & L1 as in regular hard
	 * invalidation. Need to apply invalidation of specific cache lines
	 * with mask of ASID & VA & size.
	 * Note that L1 will be flushed entirely in any case.
	 */

	/* L0 & L1 invalidation */
	inv_data = RREG32(mmSTLB_CACHE_INV);
	/* PI is 8 bit */
	pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
	WREG32(mmSTLB_CACHE_INV,
			(inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);

	rc = hl_poll_timeout(
		hdev,
		mmSTLB_INV_CONSUMER_INDEX,
		status,
		status == pi,
		1000,
		timeout_usec);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc)
		dev_notice_ratelimited(hdev->dev,
			"Timeout when waiting for MMU cache invalidation\n");
}
static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
						u64 phys_addr)
{
	u32 status, timeout_usec;
	int rc;

	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
	WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
	WREG32(MMU_ASID_BUSY, 0x80000000 | asid);

	rc = hl_poll_timeout(
		hdev,
		MMU_ASID_BUSY,
		status,
		!(status & 0x80000000),
		1000,
		timeout_usec);

	if (rc) {
		dev_err(hdev->dev,
			"Timeout during MMU hop0 config of asid %d\n", asid);
		return rc;
	}

	return 0;
}
int goya_send_heartbeat(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct armcp_packet hb_pkt;
	long result;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	memset(&hb_pkt, 0, sizeof(hb_pkt));

	hb_pkt.ctl = cpu_to_le32(ARMCP_PACKET_TEST <<
					ARMCP_PKT_CTL_OPCODE_SHIFT);
	hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
			sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);

	if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
		rc = -EIO;

	return rc;
}
static int goya_armcp_info_get(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct armcp_packet pkt;
	void *armcp_info_cpu_addr;
	dma_addr_t armcp_info_dma_addr;
	u64 dram_size;
	long result;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	armcp_info_cpu_addr =
			hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
			sizeof(struct armcp_info), &armcp_info_dma_addr);
	if (!armcp_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for ArmCP info packet\n");
		return -ENOMEM;
	}

	memset(armcp_info_cpu_addr, 0, sizeof(struct armcp_info));

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(ARMCP_PACKET_INFO_GET <<
				ARMCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(armcp_info_dma_addr +
				prop->host_phys_base_address);
	pkt.data_max_size = cpu_to_le32(sizeof(struct armcp_info));

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					GOYA_ARMCP_INFO_TIMEOUT, &result);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to send armcp info pkt, error %d\n", rc);
		goto out;
	}

	memcpy(&prop->armcp_info, armcp_info_cpu_addr,
			sizeof(prop->armcp_info));

	dram_size = le64_to_cpu(prop->armcp_info.dram_size);

	if ((!is_power_of_2(dram_size)) ||
			(dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
		dev_err(hdev->dev,
			"F/W reported invalid DRAM size %llu. Trying to use default size\n",
			dram_size);
		dram_size = DRAM_PHYS_DEFAULT_SIZE;
	}

	prop->dram_size = dram_size;
	prop->dram_end_address = prop->dram_base_address + dram_size;

	rc = hl_build_hwmon_channel_info(hdev, prop->armcp_info.sensors);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to build hwmon channel info, error %d\n", rc);
		rc = -EFAULT;
		goto out;
	}

out:
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
			sizeof(struct armcp_info), armcp_info_cpu_addr);

	return rc;
}
static void goya_init_clock_gating(struct hl_device *hdev)
{

}

static void goya_disable_clock_gating(struct hl_device *hdev)
{

}
static bool goya_is_device_idle(struct hl_device *hdev)
{
	u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
	int i;

	offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;

	for (i = 0 ; i < DMA_MAX_NUM ; i++) {
		dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;

		if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
				DMA_QM_IDLE_MASK)
			return false;
	}

	offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;

	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
		tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
		tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
		tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;

		if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
				TPC_QM_IDLE_MASK)
			return false;

		if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
				TPC_CMDQ_IDLE_MASK)
			return false;

		if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
				TPC_CFG_IDLE_MASK)
			return false;
	}

	if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
			MME_QM_IDLE_MASK)
		return false;

	if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
			MME_CMDQ_IDLE_MASK)
		return false;

	if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
			MME_ARCH_IDLE_MASK)
		return false;

	if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
		return false;

	return true;
}
static void goya_hw_queues_lock(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	spin_lock(&goya->hw_queues_lock);
}
static void goya_hw_queues_unlock(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	spin_unlock(&goya->hw_queues_lock);
}
static u32 goya_get_pci_id(struct hl_device *hdev)
{
	return hdev->pdev->device;
}
static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
				size_t max_size)
{
	struct goya_device *goya = hdev->asic_specific;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct armcp_packet pkt;
	void *eeprom_info_cpu_addr;
	dma_addr_t eeprom_info_dma_addr;
	long result;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	eeprom_info_cpu_addr =
			hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
					max_size, &eeprom_info_dma_addr);
	if (!eeprom_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for EEPROM info packet\n");
		return -ENOMEM;
	}

	memset(eeprom_info_cpu_addr, 0, max_size);

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(ARMCP_PACKET_EEPROM_DATA_GET <<
				ARMCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(eeprom_info_dma_addr +
				prop->host_phys_base_address);
	pkt.data_max_size = cpu_to_le32(max_size);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			GOYA_ARMCP_EEPROM_TIMEOUT, &result);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to send armcp EEPROM pkt, error %d\n", rc);
		goto out;
	}

	/* result contains the actual size */
	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));

out:
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
			eeprom_info_cpu_addr);

	return rc;
}
static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
	return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
}
static const struct hl_asic_funcs goya_funcs = {
	.early_init = goya_early_init,
	.early_fini = goya_early_fini,
	.late_init = goya_late_init,
	.late_fini = goya_late_fini,
	.sw_init = goya_sw_init,
	.sw_fini = goya_sw_fini,
	.hw_init = goya_hw_init,
	.hw_fini = goya_hw_fini,
	.halt_engines = goya_halt_engines,
	.suspend = goya_suspend,
	.resume = goya_resume,
	.cb_mmap = goya_cb_mmap,
	.ring_doorbell = goya_ring_doorbell,
	.flush_pq_write = goya_flush_pq_write,
	.dma_alloc_coherent = goya_dma_alloc_coherent,
	.dma_free_coherent = goya_dma_free_coherent,
	.get_int_queue_base = goya_get_int_queue_base,
	.test_queues = goya_test_queues,
	.dma_pool_zalloc = goya_dma_pool_zalloc,
	.dma_pool_free = goya_dma_pool_free,
	.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
	.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
	.hl_dma_unmap_sg = goya_dma_unmap_sg,
	.cs_parser = goya_cs_parser,
	.asic_dma_map_sg = goya_dma_map_sg,
	.get_dma_desc_list_size = goya_get_dma_desc_list_size,
	.add_end_of_cb_packets = goya_add_end_of_cb_packets,
	.update_eq_ci = goya_update_eq_ci,
	.context_switch = goya_context_switch,
	.restore_phase_topology = goya_restore_phase_topology,
	.debugfs_read32 = goya_debugfs_read32,
	.debugfs_write32 = goya_debugfs_write32,
	.add_device_attr = goya_add_device_attr,
	.handle_eqe = goya_handle_eqe,
	.set_pll_profile = goya_set_pll_profile,
	.get_events_stat = goya_get_events_stat,
	.read_pte = goya_read_pte,
	.write_pte = goya_write_pte,
	.mmu_invalidate_cache = goya_mmu_invalidate_cache,
	.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
	.send_heartbeat = goya_send_heartbeat,
	.enable_clock_gating = goya_init_clock_gating,
	.disable_clock_gating = goya_disable_clock_gating,
	.is_device_idle = goya_is_device_idle,
	.soft_reset_late_init = goya_soft_reset_late_init,
	.hw_queues_lock = goya_hw_queues_lock,
	.hw_queues_unlock = goya_hw_queues_unlock,
	.get_pci_id = goya_get_pci_id,
	.get_eeprom_data = goya_get_eeprom_data,
	.send_cpu_message = goya_send_cpu_message,
	.get_hw_state = goya_get_hw_state
};
/*
 * goya_set_asic_funcs - set Goya function pointers
 *
 * @hdev: pointer to hl_device structure
 *
 */
void goya_set_asic_funcs(struct hl_device *hdev)
{
	hdev->asic_funcs = &goya_funcs;
}