// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "goyaP.h"
#include "include/hw_ip/mmu/mmu_general.h"
#include "include/hw_ip/mmu/mmu_v1_0.h"
#include "include/goya/asic_reg/goya_masks.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/hwmon.h>
#include <linux/io-64-nonatomic-lo-hi.h>
/*
 * GOYA security scheme:
 *
 * 1. Host is protected by:
 *        - Range registers (when MMU is enabled, DMA RR does NOT protect host)
 *        - MMU
 *
 * 2. DRAM is protected by:
 *        - Range registers (protect the first 512MB)
 *        - MMU (isolation between users)
 *
 * 3. Configuration is protected by:
 *        - Range registers
 *        - Protection bits
 *
 * When MMU is disabled:
 *
 * QMAN DMA: PQ, CQ, CP and DMA are secured.
 * PQ, CB and the data are on the host.
 *
 * QMAN TPC, QMAN MME:
 * PQ, CQ and CP are not secured.
 * PQ, CB and the data are on the SRAM/DRAM.
 *
 * Since QMAN DMA is secured, KMD parses the DMA CB:
 *     - KMD checks the DMA pointer
 *     - WREG and MSG_PROT are not allowed.
 *     - MSG_LONG/SHORT are allowed.
 *
 * A read/write transaction by the QMAN to a protected area will succeed if
 * and only if the QMAN's CP is secured and MSG_PROT is used.
 *
 * When MMU is enabled:
 *
 * QMAN DMA: PQ, CQ and CP are secured.
 * MMU is set to bypass on the Secure props register of the QMAN.
 * The reasons we don't enable MMU for PQ, CQ and CP are:
 *     - PQ entry is in kernel address space and KMD doesn't map it.
 *     - CP writes to MSI-X register and to kernel address space (completion
 *       queue).
 *
 * DMA is not secured, but because CP is secured, KMD still needs to parse the
 * CB; it just doesn't need to check the DMA addresses.
 *
 * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD
 * doesn't map memory in the MMU.
 *
 * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU-disabled
 * mode).
 *
 * DMA RR does NOT protect the host because DMA is not secured.
 */
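/*
 * A concrete illustration of the scheme above (assumption, not taken from
 * this file): a user CB submitted on a DMA QMAN cannot carry MSG_PROT, so
 * any completion write to a protected area has to come from packets that the
 * driver itself validates or appends while parsing the CB, exactly as the
 * rules listed above require.
 */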
72 #define GOYA_MMU_REGS_NUM 63
74 #define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */
76 #define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */
77 #define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */
78 #define GOYA_RESET_WAIT_MSEC 1 /* 1ms */
79 #define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */
80 #define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */
81 #define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */
82 #define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
83 #define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
85 #define GOYA_QMAN0_FENCE_VAL 0xD169B243
87 #define GOYA_MAX_STRING_LEN 20
89 #define GOYA_CB_POOL_CB_CNT 512
90 #define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */
92 static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
93 "goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
94 "goya cq 4", "goya cpu eq"
97 static u16 goya_packet_sizes[MAX_PACKET_ID] = {
98 [PACKET_WREG_32] = sizeof(struct packet_wreg32),
99 [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk),
100 [PACKET_MSG_LONG] = sizeof(struct packet_msg_long),
101 [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short),
102 [PACKET_CP_DMA] = sizeof(struct packet_cp_dma),
103 [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot),
104 [PACKET_FENCE] = sizeof(struct packet_fence),
105 [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma),
106 [PACKET_NOP] = sizeof(struct packet_nop),
107 [PACKET_STOP] = sizeof(struct packet_stop)
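/*
 * The table above maps each packet opcode to its size in bytes. It is
 * presumably what allows the CB parser to walk a command buffer packet by
 * packet and validate opcodes before submission, per the security scheme
 * described at the top of this file.
 */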
110 static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = {
111 mmDMA_QM_0_GLBL_NON_SECURE_PROPS,
112 mmDMA_QM_1_GLBL_NON_SECURE_PROPS,
113 mmDMA_QM_2_GLBL_NON_SECURE_PROPS,
114 mmDMA_QM_3_GLBL_NON_SECURE_PROPS,
115 mmDMA_QM_4_GLBL_NON_SECURE_PROPS,
116 mmTPC0_QM_GLBL_SECURE_PROPS,
117 mmTPC0_QM_GLBL_NON_SECURE_PROPS,
118 mmTPC0_CMDQ_GLBL_SECURE_PROPS,
119 mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS,
122 mmTPC1_QM_GLBL_SECURE_PROPS,
123 mmTPC1_QM_GLBL_NON_SECURE_PROPS,
124 mmTPC1_CMDQ_GLBL_SECURE_PROPS,
125 mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS,
128 mmTPC2_QM_GLBL_SECURE_PROPS,
129 mmTPC2_QM_GLBL_NON_SECURE_PROPS,
130 mmTPC2_CMDQ_GLBL_SECURE_PROPS,
131 mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS,
134 mmTPC3_QM_GLBL_SECURE_PROPS,
135 mmTPC3_QM_GLBL_NON_SECURE_PROPS,
136 mmTPC3_CMDQ_GLBL_SECURE_PROPS,
137 mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS,
140 mmTPC4_QM_GLBL_SECURE_PROPS,
141 mmTPC4_QM_GLBL_NON_SECURE_PROPS,
142 mmTPC4_CMDQ_GLBL_SECURE_PROPS,
143 mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS,
146 mmTPC5_QM_GLBL_SECURE_PROPS,
147 mmTPC5_QM_GLBL_NON_SECURE_PROPS,
148 mmTPC5_CMDQ_GLBL_SECURE_PROPS,
149 mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS,
152 mmTPC6_QM_GLBL_SECURE_PROPS,
153 mmTPC6_QM_GLBL_NON_SECURE_PROPS,
154 mmTPC6_CMDQ_GLBL_SECURE_PROPS,
155 mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS,
158 mmTPC7_QM_GLBL_SECURE_PROPS,
159 mmTPC7_QM_GLBL_NON_SECURE_PROPS,
160 mmTPC7_CMDQ_GLBL_SECURE_PROPS,
161 mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS,
164 mmMME_QM_GLBL_SECURE_PROPS,
165 mmMME_QM_GLBL_NON_SECURE_PROPS,
166 mmMME_CMDQ_GLBL_SECURE_PROPS,
167 mmMME_CMDQ_GLBL_NON_SECURE_PROPS,
168 mmMME_SBA_CONTROL_DATA,
169 mmMME_SBB_CONTROL_DATA,
170 mmMME_SBC_CONTROL_DATA,
171 mmMME_WBC_CONTROL_DATA,
172 mmPCIE_WRAP_PSOC_ARUSER,
173 mmPCIE_WRAP_PSOC_AWUSER
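/*
 * The list above collects the SECURE/NON_SECURE_PROPS (plus a few control
 * and AXI-user) registers of every engine that issues transactions through
 * the MMU. It is presumably walked by goya_mmu_prepare() (declared below) so
 * that each register is programmed with the current context's ASID and with
 * MMU bypass cleared.
 */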
176 static u32 goya_all_events[] = {
177 GOYA_ASYNC_EVENT_ID_PCIE_IF,
178 GOYA_ASYNC_EVENT_ID_TPC0_ECC,
179 GOYA_ASYNC_EVENT_ID_TPC1_ECC,
180 GOYA_ASYNC_EVENT_ID_TPC2_ECC,
181 GOYA_ASYNC_EVENT_ID_TPC3_ECC,
182 GOYA_ASYNC_EVENT_ID_TPC4_ECC,
183 GOYA_ASYNC_EVENT_ID_TPC5_ECC,
184 GOYA_ASYNC_EVENT_ID_TPC6_ECC,
185 GOYA_ASYNC_EVENT_ID_TPC7_ECC,
186 GOYA_ASYNC_EVENT_ID_MME_ECC,
187 GOYA_ASYNC_EVENT_ID_MME_ECC_EXT,
188 GOYA_ASYNC_EVENT_ID_MMU_ECC,
189 GOYA_ASYNC_EVENT_ID_DMA_MACRO,
190 GOYA_ASYNC_EVENT_ID_DMA_ECC,
191 GOYA_ASYNC_EVENT_ID_CPU_IF_ECC,
192 GOYA_ASYNC_EVENT_ID_PSOC_MEM,
193 GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT,
194 GOYA_ASYNC_EVENT_ID_SRAM0,
195 GOYA_ASYNC_EVENT_ID_SRAM1,
196 GOYA_ASYNC_EVENT_ID_SRAM2,
197 GOYA_ASYNC_EVENT_ID_SRAM3,
198 GOYA_ASYNC_EVENT_ID_SRAM4,
199 GOYA_ASYNC_EVENT_ID_SRAM5,
200 GOYA_ASYNC_EVENT_ID_SRAM6,
201 GOYA_ASYNC_EVENT_ID_SRAM7,
202 GOYA_ASYNC_EVENT_ID_SRAM8,
203 GOYA_ASYNC_EVENT_ID_SRAM9,
204 GOYA_ASYNC_EVENT_ID_SRAM10,
205 GOYA_ASYNC_EVENT_ID_SRAM11,
206 GOYA_ASYNC_EVENT_ID_SRAM12,
207 GOYA_ASYNC_EVENT_ID_SRAM13,
208 GOYA_ASYNC_EVENT_ID_SRAM14,
209 GOYA_ASYNC_EVENT_ID_SRAM15,
210 GOYA_ASYNC_EVENT_ID_SRAM16,
211 GOYA_ASYNC_EVENT_ID_SRAM17,
212 GOYA_ASYNC_EVENT_ID_SRAM18,
213 GOYA_ASYNC_EVENT_ID_SRAM19,
214 GOYA_ASYNC_EVENT_ID_SRAM20,
215 GOYA_ASYNC_EVENT_ID_SRAM21,
216 GOYA_ASYNC_EVENT_ID_SRAM22,
217 GOYA_ASYNC_EVENT_ID_SRAM23,
218 GOYA_ASYNC_EVENT_ID_SRAM24,
219 GOYA_ASYNC_EVENT_ID_SRAM25,
220 GOYA_ASYNC_EVENT_ID_SRAM26,
221 GOYA_ASYNC_EVENT_ID_SRAM27,
222 GOYA_ASYNC_EVENT_ID_SRAM28,
223 GOYA_ASYNC_EVENT_ID_SRAM29,
224 GOYA_ASYNC_EVENT_ID_GIC500,
225 GOYA_ASYNC_EVENT_ID_PLL0,
226 GOYA_ASYNC_EVENT_ID_PLL1,
227 GOYA_ASYNC_EVENT_ID_PLL3,
228 GOYA_ASYNC_EVENT_ID_PLL4,
229 GOYA_ASYNC_EVENT_ID_PLL5,
230 GOYA_ASYNC_EVENT_ID_PLL6,
231 GOYA_ASYNC_EVENT_ID_AXI_ECC,
232 GOYA_ASYNC_EVENT_ID_L2_RAM_ECC,
233 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET,
234 GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT,
235 GOYA_ASYNC_EVENT_ID_PCIE_DEC,
236 GOYA_ASYNC_EVENT_ID_TPC0_DEC,
237 GOYA_ASYNC_EVENT_ID_TPC1_DEC,
238 GOYA_ASYNC_EVENT_ID_TPC2_DEC,
239 GOYA_ASYNC_EVENT_ID_TPC3_DEC,
240 GOYA_ASYNC_EVENT_ID_TPC4_DEC,
241 GOYA_ASYNC_EVENT_ID_TPC5_DEC,
242 GOYA_ASYNC_EVENT_ID_TPC6_DEC,
243 GOYA_ASYNC_EVENT_ID_TPC7_DEC,
244 GOYA_ASYNC_EVENT_ID_MME_WACS,
245 GOYA_ASYNC_EVENT_ID_MME_WACSD,
246 GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER,
247 GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC,
248 GOYA_ASYNC_EVENT_ID_PSOC,
249 GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR,
250 GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR,
251 GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR,
252 GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR,
253 GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR,
254 GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR,
255 GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR,
256 GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR,
257 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ,
258 GOYA_ASYNC_EVENT_ID_TPC1_CMDQ,
259 GOYA_ASYNC_EVENT_ID_TPC2_CMDQ,
260 GOYA_ASYNC_EVENT_ID_TPC3_CMDQ,
261 GOYA_ASYNC_EVENT_ID_TPC4_CMDQ,
262 GOYA_ASYNC_EVENT_ID_TPC5_CMDQ,
263 GOYA_ASYNC_EVENT_ID_TPC6_CMDQ,
264 GOYA_ASYNC_EVENT_ID_TPC7_CMDQ,
265 GOYA_ASYNC_EVENT_ID_TPC0_QM,
266 GOYA_ASYNC_EVENT_ID_TPC1_QM,
267 GOYA_ASYNC_EVENT_ID_TPC2_QM,
268 GOYA_ASYNC_EVENT_ID_TPC3_QM,
269 GOYA_ASYNC_EVENT_ID_TPC4_QM,
270 GOYA_ASYNC_EVENT_ID_TPC5_QM,
271 GOYA_ASYNC_EVENT_ID_TPC6_QM,
272 GOYA_ASYNC_EVENT_ID_TPC7_QM,
273 GOYA_ASYNC_EVENT_ID_MME_QM,
274 GOYA_ASYNC_EVENT_ID_MME_CMDQ,
275 GOYA_ASYNC_EVENT_ID_DMA0_QM,
276 GOYA_ASYNC_EVENT_ID_DMA1_QM,
277 GOYA_ASYNC_EVENT_ID_DMA2_QM,
278 GOYA_ASYNC_EVENT_ID_DMA3_QM,
279 GOYA_ASYNC_EVENT_ID_DMA4_QM,
280 GOYA_ASYNC_EVENT_ID_DMA0_CH,
281 GOYA_ASYNC_EVENT_ID_DMA1_CH,
282 GOYA_ASYNC_EVENT_ID_DMA2_CH,
283 GOYA_ASYNC_EVENT_ID_DMA3_CH,
284 GOYA_ASYNC_EVENT_ID_DMA4_CH,
285 GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU,
286 GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU,
287 GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU,
288 GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU,
289 GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU,
290 GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU,
291 GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU,
292 GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU,
293 GOYA_ASYNC_EVENT_ID_DMA_BM_CH0,
294 GOYA_ASYNC_EVENT_ID_DMA_BM_CH1,
295 GOYA_ASYNC_EVENT_ID_DMA_BM_CH2,
296 GOYA_ASYNC_EVENT_ID_DMA_BM_CH3,
297 GOYA_ASYNC_EVENT_ID_DMA_BM_CH4
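/*
 * goya_all_events lists every async event ID this driver handles. It is
 * presumably used after a reset to ask the device CPU to unmask the entire
 * set of events again in a single request.
 */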
300 static int goya_mmu_clear_pgt_range(struct hl_device *hdev);
301 static int goya_mmu_set_dram_default_page(struct hl_device *hdev);
302 static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev);
303 static void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
305 void goya_get_fixed_properties(struct hl_device *hdev)
307 struct asic_fixed_properties *prop = &hdev->asic_prop;
310 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
311 prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
312 prop->hw_queues_props[i].kmd_only = 0;
315 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) {
316 prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
317 prop->hw_queues_props[i].kmd_only = 1;
320 for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES +
321 NUMBER_OF_INT_HW_QUEUES; i++) {
322 prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
323 prop->hw_queues_props[i].kmd_only = 0;
326 for (; i < HL_MAX_QUEUES; i++)
327 prop->hw_queues_props[i].type = QUEUE_TYPE_NA;
329 prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
331 prop->dram_base_address = DRAM_PHYS_BASE;
332 prop->dram_size = DRAM_PHYS_DEFAULT_SIZE;
333 prop->dram_end_address = prop->dram_base_address + prop->dram_size;
334 prop->dram_user_base_address = DRAM_BASE_ADDR_USER;
336 prop->sram_base_address = SRAM_BASE_ADDR;
337 prop->sram_size = SRAM_SIZE;
338 prop->sram_end_address = prop->sram_base_address + prop->sram_size;
339 prop->sram_user_base_address = prop->sram_base_address +
340 SRAM_USER_BASE_OFFSET;
342 prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
343 prop->mmu_dram_default_page_addr = MMU_DRAM_DEFAULT_PAGE_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000;	/* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
348 prop->mmu_pte_size = HL_PTE_SIZE;
349 prop->mmu_hop_table_size = HOP_TABLE_SIZE;
350 prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
351 prop->dram_page_size = PAGE_SIZE_2MB;
353 prop->va_space_host_start_address = VA_HOST_SPACE_START;
354 prop->va_space_host_end_address = VA_HOST_SPACE_END;
355 prop->va_space_dram_start_address = VA_DDR_SPACE_START;
356 prop->va_space_dram_end_address = VA_DDR_SPACE_END;
357 prop->dram_size_for_default_page_mapping =
358 prop->va_space_dram_end_address;
359 prop->cfg_size = CFG_SIZE;
360 prop->max_asid = MAX_ASID;
361 prop->num_of_events = GOYA_ASYNC_EVENT_ID_SIZE;
362 prop->high_pll = PLL_HIGH_DEFAULT;
363 prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT;
364 prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE;
365 prop->max_power_default = MAX_POWER_DEFAULT;
366 prop->tpc_enabled_mask = TPC_ENABLED_MASK;
367 prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
368 prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;
372 * goya_pci_bars_map - Map PCI BARS of Goya device
374 * @hdev: pointer to hl_device structure
376 * Request PCI regions and map them to kernel virtual addresses.
377 * Returns 0 on success
380 static int goya_pci_bars_map(struct hl_device *hdev)
382 static const char * const name[] = {"SRAM_CFG", "MSIX", "DDR"};
383 bool is_wc[3] = {false, false, true};
386 rc = hl_pci_bars_map(hdev, name, is_wc);
390 hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] +
391 (CFG_BASE - SRAM_BASE_ADDR);
396 static u64 goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr)
398 struct goya_device *goya = hdev->asic_specific;
	if ((goya) && (goya->ddr_bar_cur_addr == addr))
		return old_addr;
405 /* Inbound Region 1 - Bar 4 - Point to DDR */
406 rc = hl_pci_set_dram_bar_base(hdev, 1, 4, addr);
411 old_addr = goya->ddr_bar_cur_addr;
412 goya->ddr_bar_cur_addr = addr;
419 * goya_init_iatu - Initialize the iATU unit inside the PCI controller
421 * @hdev: pointer to hl_device structure
423 * This is needed in case the firmware doesn't initialize the iATU
426 static int goya_init_iatu(struct hl_device *hdev)
428 return hl_pci_init_iatu(hdev, SRAM_BASE_ADDR, DRAM_PHYS_BASE,
429 HOST_PHYS_BASE, HOST_PHYS_SIZE);
433 * goya_early_init - GOYA early initialization code
435 * @hdev: pointer to hl_device structure
439 * PCI controller initialization
443 static int goya_early_init(struct hl_device *hdev)
445 struct asic_fixed_properties *prop = &hdev->asic_prop;
446 struct pci_dev *pdev = hdev->pdev;
450 goya_get_fixed_properties(hdev);
452 /* Check BAR sizes */
453 if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) {
455 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
457 (unsigned long long) pci_resource_len(pdev,
463 if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) {
465 "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n",
467 (unsigned long long) pci_resource_len(pdev,
473 prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
475 rc = hl_pci_init(hdev, 48);
480 val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
	if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
		dev_warn(hdev->dev,
			"PCI strap is not configured correctly, PCI bus errors may occur\n");
490 * goya_early_fini - GOYA early finalization code
492 * @hdev: pointer to hl_device structure
497 static int goya_early_fini(struct hl_device *hdev)
504 static void goya_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid)
506 /* mask to zero the MMBP and ASID bits */
507 WREG32_AND(reg, ~0x7FF);
508 WREG32_OR(reg, asid);
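/*
 * Assumed register layout for the masking above: the low 11 bits of each
 * *_PROPS register hold the ASID together with the MMU-bypass (MMBP) bit,
 * so clearing them with ~0x7FF and OR-ing in the ASID makes the engine issue
 * its transactions through the MMU under that ASID instead of bypassing it.
 */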
511 static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
513 struct goya_device *goya = hdev->asic_specific;
	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;
	if (secure)
		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED);
523 RREG32(mmDMA_QM_0_GLBL_PROT);
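/*
 * The trailing RREG32() above flushes the posted write, so the new
 * protection level is in effect before the caller continues. QMAN0 is
 * presumably switched to fully trusted only while the driver itself pushes
 * jobs through it, and switched back to partly trusted afterwards.
 */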
527 * goya_fetch_psoc_frequency - Fetch PSOC frequency values
529 * @hdev: pointer to hl_device structure
532 static void goya_fetch_psoc_frequency(struct hl_device *hdev)
534 struct asic_fixed_properties *prop = &hdev->asic_prop;
536 prop->psoc_pci_pll_nr = RREG32(mmPSOC_PCI_PLL_NR);
537 prop->psoc_pci_pll_nf = RREG32(mmPSOC_PCI_PLL_NF);
538 prop->psoc_pci_pll_od = RREG32(mmPSOC_PCI_PLL_OD);
539 prop->psoc_pci_pll_div_factor = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
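/*
 * Illustration only (the exact conversion is done elsewhere in the driver
 * and may differ): a typical integer PLL with these inputs produces
 *
 *     f_out = f_ref * NF / (NR * OD * DIV_FACTOR)
 *
 * i.e. the reference clock times the feedback divider, divided by the
 * input, output and post dividers.
 */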
542 int goya_late_init(struct hl_device *hdev)
544 struct asic_fixed_properties *prop = &hdev->asic_prop;
547 goya_fetch_psoc_frequency(hdev);
549 rc = goya_mmu_clear_pgt_range(hdev);
552 "Failed to clear MMU page tables range %d\n", rc);
556 rc = goya_mmu_set_dram_default_page(hdev);
558 dev_err(hdev->dev, "Failed to set DRAM default page %d\n", rc);
562 rc = goya_mmu_add_mappings_for_device_cpu(hdev);
566 rc = goya_init_cpu_queues(hdev);
570 rc = goya_test_cpu_queue(hdev);
574 rc = goya_armcp_info_get(hdev);
576 dev_err(hdev->dev, "Failed to get armcp info %d\n", rc);
	/* Now that we have the DRAM size in the ASIC properties, configure
	 * the DMA_IF DDR wrap protection (which lives in the MMU block)
	 * accordingly. The value written is the log2 of the DRAM size.
	 */
584 WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
586 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
589 "Failed to enable PCI access from CPU %d\n", rc);
593 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
594 GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
600 * goya_late_fini - GOYA late tear-down code
602 * @hdev: pointer to hl_device structure
604 * Free sensors allocated structures
606 void goya_late_fini(struct hl_device *hdev)
608 const struct hwmon_channel_info **channel_info_arr;
611 if (!hdev->hl_chip_info->info)
614 channel_info_arr = hdev->hl_chip_info->info;
616 while (channel_info_arr[i]) {
617 kfree(channel_info_arr[i]->config);
618 kfree(channel_info_arr[i]);
622 kfree(channel_info_arr);
624 hdev->hl_chip_info->info = NULL;
628 * goya_sw_init - Goya software initialization code
630 * @hdev: pointer to hl_device structure
633 static int goya_sw_init(struct hl_device *hdev)
635 struct goya_device *goya;
638 /* Allocate device structure */
639 goya = kzalloc(sizeof(*goya), GFP_KERNEL);
	/* The DDR BAR points to DRAM_PHYS_BASE, matching what goya_init_iatu() configures */
644 goya->ddr_bar_cur_addr = DRAM_PHYS_BASE;
646 goya->mme_clk = GOYA_PLL_FREQ_LOW;
647 goya->tpc_clk = GOYA_PLL_FREQ_LOW;
648 goya->ic_clk = GOYA_PLL_FREQ_LOW;
650 hdev->asic_specific = goya;
652 /* Create DMA pool for small allocations */
653 hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
654 &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
655 if (!hdev->dma_pool) {
656 dev_err(hdev->dev, "failed to create DMA pool\n");
658 goto free_goya_device;
661 hdev->cpu_accessible_dma_mem =
662 hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
663 HL_CPU_ACCESSIBLE_MEM_SIZE,
664 &hdev->cpu_accessible_dma_address,
665 GFP_KERNEL | __GFP_ZERO);
667 if (!hdev->cpu_accessible_dma_mem) {
672 dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n",
673 hdev->cpu_accessible_dma_address);
675 hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
676 if (!hdev->cpu_accessible_dma_pool) {
678 "Failed to create CPU accessible DMA pool\n");
680 goto free_cpu_dma_mem;
683 rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
684 (uintptr_t) hdev->cpu_accessible_dma_mem,
685 HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
688 "Failed to add memory to CPU accessible DMA pool\n");
690 goto free_cpu_accessible_dma_pool;
693 spin_lock_init(&goya->hw_queues_lock);
697 free_cpu_accessible_dma_pool:
698 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
700 hdev->asic_funcs->asic_dma_free_coherent(hdev,
701 HL_CPU_ACCESSIBLE_MEM_SIZE,
702 hdev->cpu_accessible_dma_mem,
703 hdev->cpu_accessible_dma_address);
705 dma_pool_destroy(hdev->dma_pool);
713 * goya_sw_fini - Goya software tear-down code
715 * @hdev: pointer to hl_device structure
718 static int goya_sw_fini(struct hl_device *hdev)
720 struct goya_device *goya = hdev->asic_specific;
722 gen_pool_destroy(hdev->cpu_accessible_dma_pool);
724 hdev->asic_funcs->asic_dma_free_coherent(hdev,
725 HL_CPU_ACCESSIBLE_MEM_SIZE,
726 hdev->cpu_accessible_dma_mem,
727 hdev->cpu_accessible_dma_address);
729 dma_pool_destroy(hdev->dma_pool);
736 static void goya_init_dma_qman(struct hl_device *hdev, int dma_id,
737 dma_addr_t bus_address)
739 struct goya_device *goya = hdev->asic_specific;
740 u32 mtr_base_lo, mtr_base_hi;
741 u32 so_base_lo, so_base_hi;
742 u32 gic_base_lo, gic_base_hi;
743 u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI);
745 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
746 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
747 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
748 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
751 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
753 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
755 WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address));
756 WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address));
758 WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH));
759 WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0);
760 WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0);
762 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
763 WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
764 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
765 WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
766 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
767 WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
768 WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off,
769 GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id);
	/* PQ has a buffer of 2 cache lines, while CQ has 8 cache lines */
772 WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002);
773 WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008);
	if (goya->hw_cap_initialized & HW_CAP_MMU)
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_PARTLY_TRUSTED);
	else
		WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED);
780 WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN);
781 WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE);
784 static void goya_init_dma_ch(struct hl_device *hdev, int dma_id)
786 u32 gic_base_lo, gic_base_hi;
788 u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1);
791 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
793 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
795 WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo);
796 WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi);
797 WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off,
798 GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id);
801 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
804 sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
806 WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, upper_32_bits(sob_addr));
807 WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001);
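/*
 * Note on the write-completion setup above: when a transfer finishes, the
 * DMA channel writes WR_COMP_WDATA to the sync object selected by
 * WR_COMP_ADDR. The 0x80000001 value is assumed to be an "atomic add 1"
 * command for the sync manager (MSB selects the add operation), so every
 * completed transfer increments the corresponding SOB by one.
 */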
811 * goya_init_dma_qmans - Initialize QMAN DMA registers
813 * @hdev: pointer to hl_device structure
815 * Initialize the H/W registers of the QMAN DMA channels
818 void goya_init_dma_qmans(struct hl_device *hdev)
820 struct goya_device *goya = hdev->asic_specific;
821 struct hl_hw_queue *q;
824 if (goya->hw_cap_initialized & HW_CAP_DMA)
827 q = &hdev->kernel_queues[0];
829 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
830 goya_init_dma_qman(hdev, i, q->bus_address);
831 goya_init_dma_ch(hdev, i);
834 goya->hw_cap_initialized |= HW_CAP_DMA;
838 * goya_disable_external_queues - Disable external queues
840 * @hdev: pointer to hl_device structure
843 static void goya_disable_external_queues(struct hl_device *hdev)
845 WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
846 WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
847 WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
848 WREG32(mmDMA_QM_3_GLBL_CFG0, 0);
849 WREG32(mmDMA_QM_4_GLBL_CFG0, 0);
852 static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg,
853 u32 cp_sts_reg, u32 glbl_sts0_reg)
	/* Use the values of TPC0, as they are all the same */
860 WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT);
862 status = RREG32(cp_sts_reg);
863 if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) {
864 rc = hl_poll_timeout(
868 !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK),
870 QMAN_FENCE_TIMEOUT_USEC);
		/* if the QMAN is stuck on a fence, there is no need to check for stop */
877 rc = hl_poll_timeout(
881 (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK),
883 QMAN_STOP_TIMEOUT_USEC);
887 "Timeout while waiting for QMAN to stop\n");
895 * goya_stop_external_queues - Stop external queues
897 * @hdev: pointer to hl_device structure
899 * Returns 0 on success
902 static int goya_stop_external_queues(struct hl_device *hdev)
906 rc = goya_stop_queue(hdev,
907 mmDMA_QM_0_GLBL_CFG1,
909 mmDMA_QM_0_GLBL_STS0);
912 dev_err(hdev->dev, "failed to stop DMA QMAN 0\n");
916 rc = goya_stop_queue(hdev,
917 mmDMA_QM_1_GLBL_CFG1,
919 mmDMA_QM_1_GLBL_STS0);
922 dev_err(hdev->dev, "failed to stop DMA QMAN 1\n");
926 rc = goya_stop_queue(hdev,
927 mmDMA_QM_2_GLBL_CFG1,
929 mmDMA_QM_2_GLBL_STS0);
932 dev_err(hdev->dev, "failed to stop DMA QMAN 2\n");
936 rc = goya_stop_queue(hdev,
937 mmDMA_QM_3_GLBL_CFG1,
939 mmDMA_QM_3_GLBL_STS0);
942 dev_err(hdev->dev, "failed to stop DMA QMAN 3\n");
946 rc = goya_stop_queue(hdev,
947 mmDMA_QM_4_GLBL_CFG1,
949 mmDMA_QM_4_GLBL_STS0);
952 dev_err(hdev->dev, "failed to stop DMA QMAN 4\n");
960 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
962 * @hdev: pointer to hl_device structure
964 * Returns 0 on success
967 int goya_init_cpu_queues(struct hl_device *hdev)
969 struct goya_device *goya = hdev->asic_specific;
972 struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ];
	if (!hdev->cpu_queues_enable)
		return 0;
	if (goya->hw_cap_initialized & HW_CAP_CPU_Q)
		return 0;
981 eq = &hdev->event_queue;
983 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0,
984 lower_32_bits(cpu_pq->bus_address));
985 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1,
986 upper_32_bits(cpu_pq->bus_address));
988 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_2, lower_32_bits(eq->bus_address));
989 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_3, upper_32_bits(eq->bus_address));
991 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8,
992 lower_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
993 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9,
994 upper_32_bits(VA_CPU_ACCESSIBLE_MEM_ADDR));
996 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES);
997 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_4, HL_EQ_SIZE_IN_BYTES);
998 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, HL_CPU_ACCESSIBLE_MEM_SIZE);
1000 /* Used for EQ CI */
1001 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0);
1003 WREG32(mmCPU_IF_PF_PQ_PI, 0);
1005 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP);
1007 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
1008 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
1010 err = hl_poll_timeout(
1012 mmPSOC_GLOBAL_CONF_SCRATCHPAD_7,
1014 (status == PQ_INIT_STATUS_READY_FOR_HOST),
1016 GOYA_CPU_TIMEOUT_USEC);
1020 "Failed to setup communication with device CPU\n");
1024 goya->hw_cap_initialized |= HW_CAP_CPU_Q;
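/*
 * The CPU queue bring-up above is a scratchpad handshake: the driver
 * publishes the PQ/EQ bus addresses, their sizes and the CPU-accessible
 * memory region through the PSOC_GLOBAL_CONF_SCRATCHPAD_* registers, marks
 * PQ_INIT_STATUS_READY_FOR_CP, kicks the device CPU with a GIC SPI and then
 * polls the same scratchpad until the CPU answers READY_FOR_HOST.
 */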
1028 static void goya_set_pll_refclk(struct hl_device *hdev)
1030 WREG32(mmCPU_PLL_DIV_SEL_0, 0x0);
1031 WREG32(mmCPU_PLL_DIV_SEL_1, 0x0);
1032 WREG32(mmCPU_PLL_DIV_SEL_2, 0x0);
1033 WREG32(mmCPU_PLL_DIV_SEL_3, 0x0);
1035 WREG32(mmIC_PLL_DIV_SEL_0, 0x0);
1036 WREG32(mmIC_PLL_DIV_SEL_1, 0x0);
1037 WREG32(mmIC_PLL_DIV_SEL_2, 0x0);
1038 WREG32(mmIC_PLL_DIV_SEL_3, 0x0);
1040 WREG32(mmMC_PLL_DIV_SEL_0, 0x0);
1041 WREG32(mmMC_PLL_DIV_SEL_1, 0x0);
1042 WREG32(mmMC_PLL_DIV_SEL_2, 0x0);
1043 WREG32(mmMC_PLL_DIV_SEL_3, 0x0);
1045 WREG32(mmPSOC_MME_PLL_DIV_SEL_0, 0x0);
1046 WREG32(mmPSOC_MME_PLL_DIV_SEL_1, 0x0);
1047 WREG32(mmPSOC_MME_PLL_DIV_SEL_2, 0x0);
1048 WREG32(mmPSOC_MME_PLL_DIV_SEL_3, 0x0);
1050 WREG32(mmPSOC_PCI_PLL_DIV_SEL_0, 0x0);
1051 WREG32(mmPSOC_PCI_PLL_DIV_SEL_1, 0x0);
1052 WREG32(mmPSOC_PCI_PLL_DIV_SEL_2, 0x0);
1053 WREG32(mmPSOC_PCI_PLL_DIV_SEL_3, 0x0);
1055 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_0, 0x0);
1056 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_1, 0x0);
1057 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_2, 0x0);
1058 WREG32(mmPSOC_EMMC_PLL_DIV_SEL_3, 0x0);
1060 WREG32(mmTPC_PLL_DIV_SEL_0, 0x0);
1061 WREG32(mmTPC_PLL_DIV_SEL_1, 0x0);
1062 WREG32(mmTPC_PLL_DIV_SEL_2, 0x0);
1063 WREG32(mmTPC_PLL_DIV_SEL_3, 0x0);
1066 static void goya_disable_clk_rlx(struct hl_device *hdev)
1068 WREG32(mmPSOC_MME_PLL_CLK_RLX_0, 0x100010);
1069 WREG32(mmIC_PLL_CLK_RLX_0, 0x100010);
1072 static void _goya_tpc_mbist_workaround(struct hl_device *hdev, u8 tpc_id)
1074 u64 tpc_eml_address;
1075 u32 val, tpc_offset, tpc_eml_offset, tpc_slm_offset;
1078 tpc_offset = tpc_id * 0x40000;
1079 tpc_eml_offset = tpc_id * 0x200000;
1080 tpc_eml_address = (mmTPC0_EML_CFG_BASE + tpc_eml_offset - CFG_BASE);
1081 tpc_slm_offset = tpc_eml_address + 0x100000;
1084 * Workaround for Bug H2 #2443 :
1085 * "TPC SB is not initialized on chip reset"
1088 val = RREG32(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset);
1089 if (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_ACTIVE_MASK)
1090 dev_warn(hdev->dev, "TPC%d MBIST ACTIVE is not cleared\n",
1093 WREG32(mmTPC0_CFG_FUNC_MBIST_PAT + tpc_offset, val & 0xFFFFF000);
1095 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_0 + tpc_offset, 0x37FF);
1096 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_1 + tpc_offset, 0x303F);
1097 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_2 + tpc_offset, 0x71FF);
1098 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_3 + tpc_offset, 0x71FF);
1099 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_4 + tpc_offset, 0x70FF);
1100 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_5 + tpc_offset, 0x70FF);
1101 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_6 + tpc_offset, 0x70FF);
1102 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_7 + tpc_offset, 0x70FF);
1103 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_8 + tpc_offset, 0x70FF);
1104 WREG32(mmTPC0_CFG_FUNC_MBIST_MEM_9 + tpc_offset, 0x70FF);
1106 WREG32_OR(mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1107 1 << TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_START_SHIFT);
1109 err = hl_poll_timeout(
1111 mmTPC0_CFG_FUNC_MBIST_CNTRL + tpc_offset,
1113 (val & TPC0_CFG_FUNC_MBIST_CNTRL_MBIST_DONE_MASK),
1115 HL_DEVICE_TIMEOUT_USEC);
1119 "Timeout while waiting for TPC%d MBIST DONE\n", tpc_id);
1121 WREG32_OR(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1122 1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT);
1124 msleep(GOYA_RESET_WAIT_MSEC);
1126 WREG32_AND(mmTPC0_EML_CFG_DBG_CNT + tpc_eml_offset,
1127 ~(1 << TPC0_EML_CFG_DBG_CNT_CORE_RST_SHIFT));
1129 msleep(GOYA_RESET_WAIT_MSEC);
1131 for (slm_index = 0 ; slm_index < 256 ; slm_index++)
1132 WREG32(tpc_slm_offset + (slm_index << 2), 0);
1134 val = RREG32(tpc_slm_offset);
1137 static void goya_tpc_mbist_workaround(struct hl_device *hdev)
1139 struct goya_device *goya = hdev->asic_specific;
1145 if (goya->hw_cap_initialized & HW_CAP_TPC_MBIST)
1148 /* Workaround for H2 #2443 */
1150 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1151 _goya_tpc_mbist_workaround(hdev, i);
1153 goya->hw_cap_initialized |= HW_CAP_TPC_MBIST;
1157 * goya_init_golden_registers - Initialize golden registers
1159 * @hdev: pointer to hl_device structure
1161 * Initialize the H/W registers of the device
1164 static void goya_init_golden_registers(struct hl_device *hdev)
1166 struct goya_device *goya = hdev->asic_specific;
1167 u32 polynom[10], tpc_intr_mask, offset;
1170 if (goya->hw_cap_initialized & HW_CAP_GOLDEN)
1173 polynom[0] = 0x00020080;
1174 polynom[1] = 0x00401000;
1175 polynom[2] = 0x00200800;
1176 polynom[3] = 0x00002000;
1177 polynom[4] = 0x00080200;
1178 polynom[5] = 0x00040100;
1179 polynom[6] = 0x00100400;
1180 polynom[7] = 0x00004000;
1181 polynom[8] = 0x00010000;
1182 polynom[9] = 0x00008000;
1184 /* Mask all arithmetic interrupts from TPC */
1185 tpc_intr_mask = 0x7FFF;
1187 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x20000) {
1188 WREG32(mmSRAM_Y0_X0_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1189 WREG32(mmSRAM_Y0_X1_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1190 WREG32(mmSRAM_Y0_X2_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1191 WREG32(mmSRAM_Y0_X3_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1192 WREG32(mmSRAM_Y0_X4_RTR_HBW_RD_RQ_L_ARB + offset, 0x302);
1194 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_L_ARB + offset, 0x204);
1195 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_L_ARB + offset, 0x204);
1196 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_L_ARB + offset, 0x204);
1197 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_L_ARB + offset, 0x204);
1198 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_L_ARB + offset, 0x204);
1201 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_E_ARB + offset, 0x206);
1202 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_E_ARB + offset, 0x206);
1203 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_E_ARB + offset, 0x206);
1204 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_E_ARB + offset, 0x207);
1205 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_E_ARB + offset, 0x207);
1207 WREG32(mmSRAM_Y0_X0_RTR_HBW_DATA_W_ARB + offset, 0x207);
1208 WREG32(mmSRAM_Y0_X1_RTR_HBW_DATA_W_ARB + offset, 0x207);
1209 WREG32(mmSRAM_Y0_X2_RTR_HBW_DATA_W_ARB + offset, 0x206);
1210 WREG32(mmSRAM_Y0_X3_RTR_HBW_DATA_W_ARB + offset, 0x206);
1211 WREG32(mmSRAM_Y0_X4_RTR_HBW_DATA_W_ARB + offset, 0x206);
1213 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_E_ARB + offset, 0x101);
1214 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_E_ARB + offset, 0x102);
1215 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_E_ARB + offset, 0x103);
1216 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_E_ARB + offset, 0x104);
1217 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_E_ARB + offset, 0x105);
1219 WREG32(mmSRAM_Y0_X0_RTR_HBW_WR_RS_W_ARB + offset, 0x105);
1220 WREG32(mmSRAM_Y0_X1_RTR_HBW_WR_RS_W_ARB + offset, 0x104);
1221 WREG32(mmSRAM_Y0_X2_RTR_HBW_WR_RS_W_ARB + offset, 0x103);
1222 WREG32(mmSRAM_Y0_X3_RTR_HBW_WR_RS_W_ARB + offset, 0x102);
1223 WREG32(mmSRAM_Y0_X4_RTR_HBW_WR_RS_W_ARB + offset, 0x101);
1226 WREG32(mmMME_STORE_MAX_CREDIT, 0x21);
1227 WREG32(mmMME_AGU, 0x0f0f0f10);
1228 WREG32(mmMME_SEI_MASK, ~0x0);
1230 WREG32(mmMME6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1231 WREG32(mmMME5_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1232 WREG32(mmMME4_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1233 WREG32(mmMME3_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1234 WREG32(mmMME2_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1235 WREG32(mmMME1_RTR_HBW_RD_RQ_N_ARB, 0x07010701);
1236 WREG32(mmMME6_RTR_HBW_RD_RQ_S_ARB, 0x04010401);
1237 WREG32(mmMME5_RTR_HBW_RD_RQ_S_ARB, 0x04050401);
1238 WREG32(mmMME4_RTR_HBW_RD_RQ_S_ARB, 0x03070301);
1239 WREG32(mmMME3_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1240 WREG32(mmMME2_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1241 WREG32(mmMME1_RTR_HBW_RD_RQ_S_ARB, 0x01050105);
1242 WREG32(mmMME6_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1243 WREG32(mmMME5_RTR_HBW_RD_RQ_W_ARB, 0x01010501);
1244 WREG32(mmMME4_RTR_HBW_RD_RQ_W_ARB, 0x01040301);
1245 WREG32(mmMME3_RTR_HBW_RD_RQ_W_ARB, 0x01030401);
1246 WREG32(mmMME2_RTR_HBW_RD_RQ_W_ARB, 0x01040101);
1247 WREG32(mmMME1_RTR_HBW_RD_RQ_W_ARB, 0x01050101);
1248 WREG32(mmMME6_RTR_HBW_WR_RQ_N_ARB, 0x02020202);
1249 WREG32(mmMME5_RTR_HBW_WR_RQ_N_ARB, 0x01070101);
1250 WREG32(mmMME4_RTR_HBW_WR_RQ_N_ARB, 0x02020201);
1251 WREG32(mmMME3_RTR_HBW_WR_RQ_N_ARB, 0x07020701);
1252 WREG32(mmMME2_RTR_HBW_WR_RQ_N_ARB, 0x01020101);
1253 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1254 WREG32(mmMME6_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1255 WREG32(mmMME5_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1256 WREG32(mmMME4_RTR_HBW_WR_RQ_S_ARB, 0x07020701);
1257 WREG32(mmMME3_RTR_HBW_WR_RQ_S_ARB, 0x02020201);
1258 WREG32(mmMME2_RTR_HBW_WR_RQ_S_ARB, 0x01070101);
1259 WREG32(mmMME1_RTR_HBW_WR_RQ_S_ARB, 0x01020102);
1260 WREG32(mmMME6_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1261 WREG32(mmMME5_RTR_HBW_WR_RQ_W_ARB, 0x01020701);
1262 WREG32(mmMME4_RTR_HBW_WR_RQ_W_ARB, 0x07020707);
1263 WREG32(mmMME3_RTR_HBW_WR_RQ_W_ARB, 0x01020201);
1264 WREG32(mmMME2_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1265 WREG32(mmMME1_RTR_HBW_WR_RQ_W_ARB, 0x01070201);
1266 WREG32(mmMME6_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1267 WREG32(mmMME5_RTR_HBW_RD_RS_N_ARB, 0x01070102);
1268 WREG32(mmMME4_RTR_HBW_RD_RS_N_ARB, 0x01060102);
1269 WREG32(mmMME3_RTR_HBW_RD_RS_N_ARB, 0x01040102);
1270 WREG32(mmMME2_RTR_HBW_RD_RS_N_ARB, 0x01020102);
1271 WREG32(mmMME1_RTR_HBW_RD_RS_N_ARB, 0x01020107);
1272 WREG32(mmMME6_RTR_HBW_RD_RS_S_ARB, 0x01020106);
1273 WREG32(mmMME5_RTR_HBW_RD_RS_S_ARB, 0x01020102);
1274 WREG32(mmMME4_RTR_HBW_RD_RS_S_ARB, 0x01040102);
1275 WREG32(mmMME3_RTR_HBW_RD_RS_S_ARB, 0x01060102);
1276 WREG32(mmMME2_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1277 WREG32(mmMME1_RTR_HBW_RD_RS_S_ARB, 0x01070102);
1278 WREG32(mmMME6_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1279 WREG32(mmMME5_RTR_HBW_RD_RS_E_ARB, 0x01020702);
1280 WREG32(mmMME4_RTR_HBW_RD_RS_E_ARB, 0x01040602);
1281 WREG32(mmMME3_RTR_HBW_RD_RS_E_ARB, 0x01060402);
1282 WREG32(mmMME2_RTR_HBW_RD_RS_E_ARB, 0x01070202);
1283 WREG32(mmMME1_RTR_HBW_RD_RS_E_ARB, 0x01070102);
1284 WREG32(mmMME6_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1285 WREG32(mmMME5_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1286 WREG32(mmMME4_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1287 WREG32(mmMME3_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1288 WREG32(mmMME2_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1289 WREG32(mmMME1_RTR_HBW_RD_RS_W_ARB, 0x01060401);
1290 WREG32(mmMME6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1291 WREG32(mmMME5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1292 WREG32(mmMME4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1293 WREG32(mmMME3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1294 WREG32(mmMME2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1295 WREG32(mmMME1_RTR_HBW_WR_RS_N_ARB, 0x01010107);
1296 WREG32(mmMME6_RTR_HBW_WR_RS_S_ARB, 0x01010107);
1297 WREG32(mmMME5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1298 WREG32(mmMME4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1299 WREG32(mmMME3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1300 WREG32(mmMME2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1301 WREG32(mmMME1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1302 WREG32(mmMME6_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1303 WREG32(mmMME5_RTR_HBW_WR_RS_E_ARB, 0x01010501);
1304 WREG32(mmMME4_RTR_HBW_WR_RS_E_ARB, 0x01040301);
1305 WREG32(mmMME3_RTR_HBW_WR_RS_E_ARB, 0x01030401);
1306 WREG32(mmMME2_RTR_HBW_WR_RS_E_ARB, 0x01040101);
1307 WREG32(mmMME1_RTR_HBW_WR_RS_E_ARB, 0x01050101);
1308 WREG32(mmMME6_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1309 WREG32(mmMME5_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1310 WREG32(mmMME4_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1311 WREG32(mmMME3_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1312 WREG32(mmMME2_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1313 WREG32(mmMME1_RTR_HBW_WR_RS_W_ARB, 0x01010101);
1315 WREG32(mmTPC1_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1316 WREG32(mmTPC1_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1317 WREG32(mmTPC1_RTR_HBW_RD_RQ_E_ARB, 0x01060101);
1318 WREG32(mmTPC1_RTR_HBW_WR_RQ_N_ARB, 0x02020102);
1319 WREG32(mmTPC1_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1320 WREG32(mmTPC1_RTR_HBW_WR_RQ_E_ARB, 0x02070202);
1321 WREG32(mmTPC1_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1322 WREG32(mmTPC1_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1323 WREG32(mmTPC1_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1324 WREG32(mmTPC1_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1325 WREG32(mmTPC1_RTR_HBW_WR_RS_S_ARB, 0x01050101);
1326 WREG32(mmTPC1_RTR_HBW_WR_RS_W_ARB, 0x01050101);
1328 WREG32(mmTPC2_RTR_HBW_RD_RQ_N_ARB, 0x01020101);
1329 WREG32(mmTPC2_RTR_HBW_RD_RQ_S_ARB, 0x01050101);
1330 WREG32(mmTPC2_RTR_HBW_RD_RQ_E_ARB, 0x01010201);
1331 WREG32(mmTPC2_RTR_HBW_WR_RQ_N_ARB, 0x02040102);
1332 WREG32(mmTPC2_RTR_HBW_WR_RQ_S_ARB, 0x01050101);
1333 WREG32(mmTPC2_RTR_HBW_WR_RQ_E_ARB, 0x02060202);
1334 WREG32(mmTPC2_RTR_HBW_RD_RS_N_ARB, 0x01020201);
1335 WREG32(mmTPC2_RTR_HBW_RD_RS_S_ARB, 0x01070201);
1336 WREG32(mmTPC2_RTR_HBW_RD_RS_W_ARB, 0x01070202);
1337 WREG32(mmTPC2_RTR_HBW_WR_RS_N_ARB, 0x01010101);
1338 WREG32(mmTPC2_RTR_HBW_WR_RS_S_ARB, 0x01040101);
1339 WREG32(mmTPC2_RTR_HBW_WR_RS_W_ARB, 0x01040101);
1341 WREG32(mmTPC3_RTR_HBW_RD_RQ_N_ARB, 0x01030101);
1342 WREG32(mmTPC3_RTR_HBW_RD_RQ_S_ARB, 0x01040101);
1343 WREG32(mmTPC3_RTR_HBW_RD_RQ_E_ARB, 0x01040301);
1344 WREG32(mmTPC3_RTR_HBW_WR_RQ_N_ARB, 0x02060102);
1345 WREG32(mmTPC3_RTR_HBW_WR_RQ_S_ARB, 0x01040101);
1346 WREG32(mmTPC3_RTR_HBW_WR_RQ_E_ARB, 0x01040301);
1347 WREG32(mmTPC3_RTR_HBW_RD_RS_N_ARB, 0x01040201);
1348 WREG32(mmTPC3_RTR_HBW_RD_RS_S_ARB, 0x01060201);
1349 WREG32(mmTPC3_RTR_HBW_RD_RS_W_ARB, 0x01060402);
1350 WREG32(mmTPC3_RTR_HBW_WR_RS_N_ARB, 0x01020101);
1351 WREG32(mmTPC3_RTR_HBW_WR_RS_S_ARB, 0x01030101);
1352 WREG32(mmTPC3_RTR_HBW_WR_RS_W_ARB, 0x01030401);
1354 WREG32(mmTPC4_RTR_HBW_RD_RQ_N_ARB, 0x01040101);
1355 WREG32(mmTPC4_RTR_HBW_RD_RQ_S_ARB, 0x01030101);
1356 WREG32(mmTPC4_RTR_HBW_RD_RQ_E_ARB, 0x01030401);
1357 WREG32(mmTPC4_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1358 WREG32(mmTPC4_RTR_HBW_WR_RQ_S_ARB, 0x01030101);
1359 WREG32(mmTPC4_RTR_HBW_WR_RQ_E_ARB, 0x02060702);
1360 WREG32(mmTPC4_RTR_HBW_RD_RS_N_ARB, 0x01060201);
1361 WREG32(mmTPC4_RTR_HBW_RD_RS_S_ARB, 0x01040201);
1362 WREG32(mmTPC4_RTR_HBW_RD_RS_W_ARB, 0x01040602);
1363 WREG32(mmTPC4_RTR_HBW_WR_RS_N_ARB, 0x01030101);
1364 WREG32(mmTPC4_RTR_HBW_WR_RS_S_ARB, 0x01020101);
1365 WREG32(mmTPC4_RTR_HBW_WR_RS_W_ARB, 0x01040301);
1367 WREG32(mmTPC5_RTR_HBW_RD_RQ_N_ARB, 0x01050101);
1368 WREG32(mmTPC5_RTR_HBW_RD_RQ_S_ARB, 0x01020101);
1369 WREG32(mmTPC5_RTR_HBW_RD_RQ_E_ARB, 0x01200501);
1370 WREG32(mmTPC5_RTR_HBW_WR_RQ_N_ARB, 0x02070102);
1371 WREG32(mmTPC5_RTR_HBW_WR_RQ_S_ARB, 0x01020101);
1372 WREG32(mmTPC5_RTR_HBW_WR_RQ_E_ARB, 0x02020602);
1373 WREG32(mmTPC5_RTR_HBW_RD_RS_N_ARB, 0x01070201);
1374 WREG32(mmTPC5_RTR_HBW_RD_RS_S_ARB, 0x01020201);
1375 WREG32(mmTPC5_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1376 WREG32(mmTPC5_RTR_HBW_WR_RS_N_ARB, 0x01040101);
1377 WREG32(mmTPC5_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1378 WREG32(mmTPC5_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1380 WREG32(mmTPC6_RTR_HBW_RD_RQ_N_ARB, 0x01010101);
1381 WREG32(mmTPC6_RTR_HBW_RD_RQ_S_ARB, 0x01010101);
1382 WREG32(mmTPC6_RTR_HBW_RD_RQ_E_ARB, 0x01010601);
1383 WREG32(mmTPC6_RTR_HBW_WR_RQ_N_ARB, 0x01010101);
1384 WREG32(mmTPC6_RTR_HBW_WR_RQ_S_ARB, 0x01010101);
1385 WREG32(mmTPC6_RTR_HBW_WR_RQ_E_ARB, 0x02020702);
1386 WREG32(mmTPC6_RTR_HBW_RD_RS_N_ARB, 0x01010101);
1387 WREG32(mmTPC6_RTR_HBW_RD_RS_S_ARB, 0x01010101);
1388 WREG32(mmTPC6_RTR_HBW_RD_RS_W_ARB, 0x01020702);
1389 WREG32(mmTPC6_RTR_HBW_WR_RS_N_ARB, 0x01050101);
1390 WREG32(mmTPC6_RTR_HBW_WR_RS_S_ARB, 0x01010101);
1391 WREG32(mmTPC6_RTR_HBW_WR_RS_W_ARB, 0x01010501);
1393 for (i = 0, offset = 0 ; i < 10 ; i++, offset += 4) {
1394 WREG32(mmMME1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1395 WREG32(mmMME2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1396 WREG32(mmMME3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1397 WREG32(mmMME4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1398 WREG32(mmMME5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1399 WREG32(mmMME6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1401 WREG32(mmTPC0_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1402 WREG32(mmTPC1_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1403 WREG32(mmTPC2_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1404 WREG32(mmTPC3_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1405 WREG32(mmTPC4_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1406 WREG32(mmTPC5_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1407 WREG32(mmTPC6_RTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1408 WREG32(mmTPC7_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1410 WREG32(mmPCI_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1411 WREG32(mmDMA_NRTR_SPLIT_COEF_0 + offset, polynom[i] >> 7);
1414 for (i = 0, offset = 0 ; i < 6 ; i++, offset += 0x40000) {
1415 WREG32(mmMME1_RTR_SCRAMB_EN + offset,
1416 1 << MME1_RTR_SCRAMB_EN_VAL_SHIFT);
1417 WREG32(mmMME1_RTR_NON_LIN_SCRAMB + offset,
1418 1 << MME1_RTR_NON_LIN_SCRAMB_EN_SHIFT);
1421 for (i = 0, offset = 0 ; i < 8 ; i++, offset += 0x40000) {
1423 * Workaround for Bug H2 #2441 :
1424 * "ST.NOP set trace event illegal opcode"
1426 WREG32(mmTPC0_CFG_TPC_INTR_MASK + offset, tpc_intr_mask);
1428 WREG32(mmTPC0_NRTR_SCRAMB_EN + offset,
1429 1 << TPC0_NRTR_SCRAMB_EN_VAL_SHIFT);
1430 WREG32(mmTPC0_NRTR_NON_LIN_SCRAMB + offset,
1431 1 << TPC0_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1434 WREG32(mmDMA_NRTR_SCRAMB_EN, 1 << DMA_NRTR_SCRAMB_EN_VAL_SHIFT);
1435 WREG32(mmDMA_NRTR_NON_LIN_SCRAMB,
1436 1 << DMA_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
1438 WREG32(mmPCI_NRTR_SCRAMB_EN, 1 << PCI_NRTR_SCRAMB_EN_VAL_SHIFT);
1439 WREG32(mmPCI_NRTR_NON_LIN_SCRAMB,
1440 1 << PCI_NRTR_NON_LIN_SCRAMB_EN_SHIFT);
	/*
	 * Workaround for H2 #HW-23 bug:
	 * Set the DMA max outstanding read requests to 240 on DMA CH 1.
	 * This limitation is still large enough not to affect Gen4 bandwidth.
	 * We only need to limit this DMA channel because the user can only
	 * read from the Host using DMA CH 1.
	 */
1449 WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);
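	/*
	 * Decoding the value above: the low byte 0xF0 is 240, which is the
	 * outstanding-read limit mentioned in the comment; the remaining
	 * fields are assumed to be left at their default (maximum) values.
	 */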
1451 WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);
1453 goya->hw_cap_initialized |= HW_CAP_GOLDEN;
1456 static void goya_init_mme_qman(struct hl_device *hdev)
1458 u32 mtr_base_lo, mtr_base_hi;
1459 u32 so_base_lo, so_base_hi;
1460 u32 gic_base_lo, gic_base_hi;
1463 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1464 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1465 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1466 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1469 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1471 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1473 qman_base_addr = hdev->asic_prop.sram_base_address +
1474 MME_QMAN_BASE_OFFSET;
1476 WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr));
1477 WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr));
1478 WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH));
1479 WREG32(mmMME_QM_PQ_PI, 0);
1480 WREG32(mmMME_QM_PQ_CI, 0);
1481 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0);
1482 WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4);
1483 WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8);
1484 WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC);
1486 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1487 WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1488 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1489 WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1491 /* QMAN CQ has 8 cache lines */
1492 WREG32(mmMME_QM_CQ_CFG1, 0x00080008);
1494 WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo);
1495 WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi);
1497 WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM);
1499 WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN);
1501 WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT);
1503 WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE);
1506 static void goya_init_mme_cmdq(struct hl_device *hdev)
1508 u32 mtr_base_lo, mtr_base_hi;
1509 u32 so_base_lo, so_base_hi;
1510 u32 gic_base_lo, gic_base_hi;
1513 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1514 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1515 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1516 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1519 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1521 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1523 qman_base_addr = hdev->asic_prop.sram_base_address +
1524 MME_QMAN_BASE_OFFSET;
1526 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo);
1527 WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi);
1528 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo);
1529 WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi);
1531 /* CMDQ CQ has 20 cache lines */
1532 WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014);
1534 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo);
1535 WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi);
1537 WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ);
1539 WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN);
1541 WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT);
1543 WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE);
1546 void goya_init_mme_qmans(struct hl_device *hdev)
1548 struct goya_device *goya = hdev->asic_specific;
1549 u32 so_base_lo, so_base_hi;
1551 if (goya->hw_cap_initialized & HW_CAP_MME)
1554 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1555 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1557 WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo);
1558 WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi);
1560 goya_init_mme_qman(hdev);
1561 goya_init_mme_cmdq(hdev);
1563 goya->hw_cap_initialized |= HW_CAP_MME;
1566 static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id)
1568 u32 mtr_base_lo, mtr_base_hi;
1569 u32 so_base_lo, so_base_hi;
1570 u32 gic_base_lo, gic_base_hi;
1572 u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI);
1574 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1575 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1576 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1577 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1580 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1582 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1584 qman_base_addr = hdev->asic_prop.sram_base_address + base_off;
1586 WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr));
1587 WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr));
1588 WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH));
1589 WREG32(mmTPC0_QM_PQ_PI + reg_off, 0);
1590 WREG32(mmTPC0_QM_PQ_CI + reg_off, 0);
1591 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0);
1592 WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4);
1593 WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8);
1594 WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC);
1596 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1597 WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1598 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1599 WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1601 WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008);
1603 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1604 WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1606 WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off,
1607 GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id);
1609 WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN);
1611 WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT);
1613 WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE);
1616 static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id)
1618 u32 mtr_base_lo, mtr_base_hi;
1619 u32 so_base_lo, so_base_hi;
1620 u32 gic_base_lo, gic_base_hi;
1621 u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1);
1623 mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1624 mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0);
1625 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1626 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1629 lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1631 upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR);
1633 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo);
1634 WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi);
1635 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo);
1636 WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi);
1638 WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014);
1640 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo);
1641 WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi);
1643 WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off,
1644 GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id);
1646 WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN);
1648 WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT);
1650 WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE);
1653 void goya_init_tpc_qmans(struct hl_device *hdev)
1655 struct goya_device *goya = hdev->asic_specific;
1656 u32 so_base_lo, so_base_hi;
1657 u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW -
1658 mmTPC0_CFG_SM_BASE_ADDRESS_LOW;
1661 if (goya->hw_cap_initialized & HW_CAP_TPC)
1664 so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1665 so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
1667 for (i = 0 ; i < TPC_MAX_NUM ; i++) {
		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off,
				so_base_lo);
		WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off,
				so_base_hi);
	}
1674 goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0);
1675 goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1);
1676 goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2);
1677 goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3);
1678 goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4);
1679 goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5);
1680 goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6);
1681 goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7);
1683 for (i = 0 ; i < TPC_MAX_NUM ; i++)
1684 goya_init_tpc_cmdq(hdev, i);
1686 goya->hw_cap_initialized |= HW_CAP_TPC;
1690 * goya_disable_internal_queues - Disable internal queues
1692 * @hdev: pointer to hl_device structure
1695 static void goya_disable_internal_queues(struct hl_device *hdev)
1697 WREG32(mmMME_QM_GLBL_CFG0, 0);
1698 WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
1700 WREG32(mmTPC0_QM_GLBL_CFG0, 0);
1701 WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
1703 WREG32(mmTPC1_QM_GLBL_CFG0, 0);
1704 WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0);
1706 WREG32(mmTPC2_QM_GLBL_CFG0, 0);
1707 WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0);
1709 WREG32(mmTPC3_QM_GLBL_CFG0, 0);
1710 WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0);
1712 WREG32(mmTPC4_QM_GLBL_CFG0, 0);
1713 WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0);
1715 WREG32(mmTPC5_QM_GLBL_CFG0, 0);
1716 WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0);
1718 WREG32(mmTPC6_QM_GLBL_CFG0, 0);
1719 WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0);
1721 WREG32(mmTPC7_QM_GLBL_CFG0, 0);
1722 WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0);
1726 * goya_stop_internal_queues - Stop internal queues
1728 * @hdev: pointer to hl_device structure
1730 * Returns 0 on success
1733 static int goya_stop_internal_queues(struct hl_device *hdev)
	/*
	 * Each queue (QMAN) is a separate H/W logic block. That means each
	 * QMAN can be stopped independently, and a failure to stop one does
	 * not prevent us from trying to stop the rest.
	 */
1743 rc = goya_stop_queue(hdev,
1746 mmMME_QM_GLBL_STS0);
1749 dev_err(hdev->dev, "failed to stop MME QMAN\n");
1753 rc = goya_stop_queue(hdev,
1754 mmMME_CMDQ_GLBL_CFG1,
1756 mmMME_CMDQ_GLBL_STS0);
1759 dev_err(hdev->dev, "failed to stop MME CMDQ\n");
1763 rc = goya_stop_queue(hdev,
1764 mmTPC0_QM_GLBL_CFG1,
1766 mmTPC0_QM_GLBL_STS0);
1769 dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n");
1773 rc = goya_stop_queue(hdev,
1774 mmTPC0_CMDQ_GLBL_CFG1,
1776 mmTPC0_CMDQ_GLBL_STS0);
1779 dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n");
1783 rc = goya_stop_queue(hdev,
1784 mmTPC1_QM_GLBL_CFG1,
1786 mmTPC1_QM_GLBL_STS0);
1789 dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n");
1793 rc = goya_stop_queue(hdev,
1794 mmTPC1_CMDQ_GLBL_CFG1,
1796 mmTPC1_CMDQ_GLBL_STS0);
1799 dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n");
1803 rc = goya_stop_queue(hdev,
1804 mmTPC2_QM_GLBL_CFG1,
1806 mmTPC2_QM_GLBL_STS0);
1809 dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n");
1813 rc = goya_stop_queue(hdev,
1814 mmTPC2_CMDQ_GLBL_CFG1,
1816 mmTPC2_CMDQ_GLBL_STS0);
1819 dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n");
1823 rc = goya_stop_queue(hdev,
1824 mmTPC3_QM_GLBL_CFG1,
1826 mmTPC3_QM_GLBL_STS0);
1829 dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n");
1833 rc = goya_stop_queue(hdev,
1834 mmTPC3_CMDQ_GLBL_CFG1,
1836 mmTPC3_CMDQ_GLBL_STS0);
1839 dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n");
1843 rc = goya_stop_queue(hdev,
1844 mmTPC4_QM_GLBL_CFG1,
1846 mmTPC4_QM_GLBL_STS0);
1849 dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n");
1853 rc = goya_stop_queue(hdev,
1854 mmTPC4_CMDQ_GLBL_CFG1,
1856 mmTPC4_CMDQ_GLBL_STS0);
1859 dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n");
1863 rc = goya_stop_queue(hdev,
1864 mmTPC5_QM_GLBL_CFG1,
1866 mmTPC5_QM_GLBL_STS0);
1869 dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n");
1873 rc = goya_stop_queue(hdev,
1874 mmTPC5_CMDQ_GLBL_CFG1,
1876 mmTPC5_CMDQ_GLBL_STS0);
1879 dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n");
1883 rc = goya_stop_queue(hdev,
1884 mmTPC6_QM_GLBL_CFG1,
1886 mmTPC6_QM_GLBL_STS0);
1889 dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n");
1893 rc = goya_stop_queue(hdev,
1894 mmTPC6_CMDQ_GLBL_CFG1,
1896 mmTPC6_CMDQ_GLBL_STS0);
1899 dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n");
1903 rc = goya_stop_queue(hdev,
1904 mmTPC7_QM_GLBL_CFG1,
1906 mmTPC7_QM_GLBL_STS0);
1909 dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n");
1913 rc = goya_stop_queue(hdev,
1914 mmTPC7_CMDQ_GLBL_CFG1,
1916 mmTPC7_CMDQ_GLBL_STS0);
1919 dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n");
1926 static void goya_dma_stall(struct hl_device *hdev)
1928 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
1929 WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
1930 WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
1931 WREG32(mmDMA_QM_3_GLBL_CFG1, 1 << DMA_QM_3_GLBL_CFG1_DMA_STOP_SHIFT);
1932 WREG32(mmDMA_QM_4_GLBL_CFG1, 1 << DMA_QM_4_GLBL_CFG1_DMA_STOP_SHIFT);
1935 static void goya_tpc_stall(struct hl_device *hdev)
1937 WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
1938 WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
1939 WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
1940 WREG32(mmTPC3_CFG_TPC_STALL, 1 << TPC3_CFG_TPC_STALL_V_SHIFT);
1941 WREG32(mmTPC4_CFG_TPC_STALL, 1 << TPC4_CFG_TPC_STALL_V_SHIFT);
1942 WREG32(mmTPC5_CFG_TPC_STALL, 1 << TPC5_CFG_TPC_STALL_V_SHIFT);
1943 WREG32(mmTPC6_CFG_TPC_STALL, 1 << TPC6_CFG_TPC_STALL_V_SHIFT);
1944 WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC7_CFG_TPC_STALL_V_SHIFT);
1947 static void goya_mme_stall(struct hl_device *hdev)
1949 WREG32(mmMME_STALL, 0xFFFFFFFF);
1952 static int goya_enable_msix(struct hl_device *hdev)
1954 struct goya_device *goya = hdev->asic_specific;
1955 int cq_cnt = hdev->asic_prop.completion_queues_count;
1956 int rc, i, irq_cnt_init, irq;
1958 if (goya->hw_cap_initialized & HW_CAP_MSIX)
1961 rc = pci_alloc_irq_vectors(hdev->pdev, GOYA_MSIX_ENTRIES,
1962 GOYA_MSIX_ENTRIES, PCI_IRQ_MSIX);
1965 "MSI-X: Failed to enable support -- %d/%d\n",
1966 GOYA_MSIX_ENTRIES, rc);
1970 for (i = 0, irq_cnt_init = 0 ; i < cq_cnt ; i++, irq_cnt_init++) {
1971 irq = pci_irq_vector(hdev->pdev, i);
1972 rc = request_irq(irq, hl_irq_handler_cq, 0, goya_irq_name[i],
1973 &hdev->completion_queue[i]);
1975 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
1980 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
1982 rc = request_irq(irq, hl_irq_handler_eq, 0,
1983 goya_irq_name[GOYA_EVENT_QUEUE_MSIX_IDX],
1984 &hdev->event_queue);
1986 dev_err(hdev->dev, "Failed to request IRQ %d", irq);
1990 goya->hw_cap_initialized |= HW_CAP_MSIX;
1994 for (i = 0 ; i < irq_cnt_init ; i++)
1995 free_irq(pci_irq_vector(hdev->pdev, i),
1996 &hdev->completion_queue[i]);
1998 pci_free_irq_vectors(hdev->pdev);
2002 static void goya_sync_irqs(struct hl_device *hdev)
2004 struct goya_device *goya = hdev->asic_specific;
2007 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2010 /* Wait for all pending IRQs to be finished */
2011 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2012 synchronize_irq(pci_irq_vector(hdev->pdev, i));
2014 synchronize_irq(pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX));
2017 static void goya_disable_msix(struct hl_device *hdev)
2019 struct goya_device *goya = hdev->asic_specific;
2022 if (!(goya->hw_cap_initialized & HW_CAP_MSIX))
2025 goya_sync_irqs(hdev);
2027 irq = pci_irq_vector(hdev->pdev, GOYA_EVENT_QUEUE_MSIX_IDX);
2028 free_irq(irq, &hdev->event_queue);
2030 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
2031 irq = pci_irq_vector(hdev->pdev, i);
2032 free_irq(irq, &hdev->completion_queue[i]);
2035 pci_free_irq_vectors(hdev->pdev);
2037 goya->hw_cap_initialized &= ~HW_CAP_MSIX;
2040 static void goya_halt_engines(struct hl_device *hdev, bool hard_reset)
2042 u32 wait_timeout_ms, cpu_timeout_ms;
2045 "Halting compute engines and disabling interrupts\n");
2048 wait_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2049 cpu_timeout_ms = GOYA_PLDM_RESET_WAIT_MSEC;
2051 wait_timeout_ms = GOYA_RESET_WAIT_MSEC;
2052 cpu_timeout_ms = GOYA_CPU_RESET_WAIT_MSEC;
2057	 * The state of the CPU is unknown at this point, so make sure it is
2058	 * stopped by any means necessary
2060 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_GOTO_WFE);
2061 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2062 GOYA_ASYNC_EVENT_ID_HALT_MACHINE);
2063 msleep(cpu_timeout_ms);
2066 goya_stop_external_queues(hdev);
2067 goya_stop_internal_queues(hdev);
2069 msleep(wait_timeout_ms);
2071 goya_dma_stall(hdev);
2072 goya_tpc_stall(hdev);
2073 goya_mme_stall(hdev);
2075 msleep(wait_timeout_ms);
2077 goya_disable_external_queues(hdev);
2078 goya_disable_internal_queues(hdev);
2081 goya_disable_msix(hdev);
2082 goya_mmu_remove_device_cpu_mappings(hdev);
2084 goya_sync_irqs(hdev);
2089 * goya_push_uboot_to_device() - Push u-boot FW code to device.
2090 * @hdev: Pointer to hl_device structure.
2092 * Copy u-boot fw code from firmware file to SRAM BAR.
2094 * Return: 0 on success, non-zero for failure.
2096 static int goya_push_uboot_to_device(struct hl_device *hdev)
2101 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin");
2102 dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET;
2104 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2108 * goya_push_linux_to_device() - Push LINUX FW code to device.
2109 * @hdev: Pointer to hl_device structure.
2111 * Copy LINUX fw code from firmware file to HBM BAR.
2113 * Return: 0 on success, non-zero for failure.
2115 static int goya_push_linux_to_device(struct hl_device *hdev)
2120 snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb");
2121 dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET;
2123 return hl_fw_push_fw_to_device(hdev, fw_name, dst);
2126 static int goya_pldm_init_cpu(struct hl_device *hdev)
2128 u32 val, unit_rst_val;
2131 /* Must initialize SRAM scrambler before pushing u-boot to SRAM */
2132 goya_init_golden_registers(hdev);
2134 /* Put ARM cores into reset */
2135 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT);
2136 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2138 /* Reset the CA53 MACRO */
2139 unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2140 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET);
2141 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2142 WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val);
2143 val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N);
2145 rc = goya_push_uboot_to_device(hdev);
2149 rc = goya_push_linux_to_device(hdev);
2153 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2154 WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA);
2156 WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0,
2157 lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2158 WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0,
2159 upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET));
2161 /* Release ARM core 0 from reset */
2162 WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL,
2163 CPU_RESET_CORE0_DEASSERT);
2164 val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL);
2170	 * The FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx.
2171	 * The version string is located at that offset.
2173 static void goya_read_device_fw_version(struct hl_device *hdev,
2174 enum goya_fw_component fwc)
2182 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29);
2183 dest = hdev->asic_prop.uboot_ver;
2186 case FW_COMP_PREBOOT:
2187 ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28);
2188 dest = hdev->asic_prop.preboot_ver;
2192 dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2196 ver_off &= ~((u32)SRAM_BASE_ADDR);
2198 if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) {
2199 memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off,
2202 dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
2204 strcpy(dest, "unavailable");
2208 static int goya_init_cpu(struct hl_device *hdev, u32 cpu_timeout)
2210 struct goya_device *goya = hdev->asic_specific;
2214 if (!hdev->cpu_enable)
2217 if (goya->hw_cap_initialized & HW_CAP_CPU)
2221	 * Before pushing u-boot/linux to the device, we need to set the DDR bar
2222	 * to the base address of DRAM
2224 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) {
2226 "failed to map DDR bar to DRAM base address\n");
2231 rc = goya_pldm_init_cpu(hdev);
2238 /* Make sure CPU boot-loader is running */
2239 rc = hl_poll_timeout(
2241 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2243 (status == CPU_BOOT_STATUS_DRAM_RDY) ||
2244 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2249 dev_err(hdev->dev, "Error in ARM u-boot!");
2251 case CPU_BOOT_STATUS_NA:
2253 "ARM status %d - BTL did NOT run\n", status);
2255 case CPU_BOOT_STATUS_IN_WFE:
2257 "ARM status %d - Inside WFE loop\n", status);
2259 case CPU_BOOT_STATUS_IN_BTL:
2261 "ARM status %d - Stuck in BTL\n", status);
2263 case CPU_BOOT_STATUS_IN_PREBOOT:
2265 "ARM status %d - Stuck in Preboot\n", status);
2267 case CPU_BOOT_STATUS_IN_SPL:
2269 "ARM status %d - Stuck in SPL\n", status);
2271 case CPU_BOOT_STATUS_IN_UBOOT:
2273 "ARM status %d - Stuck in u-boot\n", status);
2275 case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
2277 "ARM status %d - DDR initialization failed\n",
2280 case CPU_BOOT_STATUS_UBOOT_NOT_READY:
2282 "ARM status %d - u-boot stopped by user\n",
2287 "ARM status %d - Invalid status code\n",
2294 /* Read U-Boot version now in case we will later fail */
2295 goya_read_device_fw_version(hdev, FW_COMP_UBOOT);
2296 goya_read_device_fw_version(hdev, FW_COMP_PREBOOT);
2298 if (!hdev->fw_loading) {
2299 dev_info(hdev->dev, "Skip loading FW\n");
2303 if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
2306 rc = goya_push_linux_to_device(hdev);
2310 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY);
2312 rc = hl_poll_timeout(
2314 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2316 (status == CPU_BOOT_STATUS_SRAM_AVAIL),
2321 if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2323 "ARM u-boot reports FIT image is corrupted\n");
2326 "ARM Linux failed to load, %d\n", status);
2327 WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_NA);
2331 dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2334 goya->hw_cap_initialized |= HW_CAP_CPU;
2339 static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid,
2342 u32 status, timeout_usec;
2346 timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
2348 timeout_usec = MMU_CONFIG_TIMEOUT_USEC;
2350 WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT);
2351 WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT);
2352 WREG32(MMU_ASID_BUSY, 0x80000000 | asid);
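	/*
	 * Bit 31 of MMU_ASID_BUSY appears to act as a busy flag: the hardware
	 * clears it once the hop0 address for this ASID has been latched, which
	 * is exactly what the poll below waits for.
	 */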
2354 rc = hl_poll_timeout(
2358 !(status & 0x80000000),
2364 "Timeout during MMU hop0 config of asid %d\n", asid);
2371 int goya_mmu_init(struct hl_device *hdev)
2373 struct asic_fixed_properties *prop = &hdev->asic_prop;
2374 struct goya_device *goya = hdev->asic_specific;
2378 if (!hdev->mmu_enable)
2381 if (goya->hw_cap_initialized & HW_CAP_MMU)
2384 hdev->dram_supports_virtual_memory = true;
2385 hdev->dram_default_page_mapping = true;
2387 for (i = 0 ; i < prop->max_asid ; i++) {
2388 hop0_addr = prop->mmu_pgt_addr +
2389 (i * prop->mmu_hop_table_size);
2391 rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr);
2394 "failed to set hop0 addr for asid %d\n", i);
2399 goya->hw_cap_initialized |= HW_CAP_MMU;
2401	/* Init the MMU cache management page */
2402 WREG32(mmSTLB_CACHE_INV_BASE_39_8,
2403 lower_32_bits(MMU_CACHE_MNG_ADDR >> 8));
2404 WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR >> 40);
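	/*
	 * The cache-management base address is wider than 32 bits, so it is
	 * split across two registers: bits 39:8 go to CACHE_INV_BASE_39_8 and
	 * bits 49:40 to CACHE_INV_BASE_49_40.
	 */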
2406	/* Disable the follower feature due to a performance bug */
2407 WREG32_AND(mmSTLB_STLB_FEATURE_EN,
2408 (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
2410 hdev->asic_funcs->mmu_invalidate_cache(hdev, true);
2412 WREG32(mmMMU_MMU_ENABLE, 1);
2413 WREG32(mmMMU_SPI_MASK, 0xF);
2422 * goya_hw_init - Goya hardware initialization code
2424 * @hdev: pointer to hl_device structure
2426 * Returns 0 on success
2429 static int goya_hw_init(struct hl_device *hdev)
2431 struct asic_fixed_properties *prop = &hdev->asic_prop;
2435 dev_info(hdev->dev, "Starting initialization of H/W\n");
2437 /* Perform read from the device to make sure device is up */
2438 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2441 * Let's mark in the H/W that we have reached this point. We check
2442 * this value in the reset_before_init function to understand whether
2443 * we need to reset the chip before doing H/W init. This register is
2444 * cleared by the H/W upon H/W reset
2446 WREG32(mmPSOC_GLOBAL_CONF_APP_STATUS, HL_DEVICE_HW_STATE_DIRTY);
2448 rc = goya_init_cpu(hdev, GOYA_CPU_TIMEOUT_USEC);
2450 dev_err(hdev->dev, "failed to initialize CPU\n");
2454 goya_tpc_mbist_workaround(hdev);
2456 goya_init_golden_registers(hdev);
2459 * After CPU initialization is finished, change DDR bar mapping inside
2460 * iATU to point to the start address of the MMU page tables
2462 if (goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE +
2463 (MMU_PAGE_TABLES_ADDR &
2464 ~(prop->dram_pci_bar_size - 0x1ull))) == U64_MAX) {
2466 "failed to map DDR bar to MMU page tables\n");
2470 rc = goya_mmu_init(hdev);
2474 goya_init_security(hdev);
2476 goya_init_dma_qmans(hdev);
2478 goya_init_mme_qmans(hdev);
2480 goya_init_tpc_qmans(hdev);
2482 /* MSI-X must be enabled before CPU queues are initialized */
2483 rc = goya_enable_msix(hdev);
2485 goto disable_queues;
2487 /* Perform read from the device to flush all MSI-X configuration */
2488 val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
2493 goya_disable_internal_queues(hdev);
2494 goya_disable_external_queues(hdev);
2500 * goya_hw_fini - Goya hardware tear-down code
2502 * @hdev: pointer to hl_device structure
2503 * @hard_reset: should we do hard reset to all engines or just reset the
2504 * compute/dma engines
2506 static void goya_hw_fini(struct hl_device *hdev, bool hard_reset)
2508 struct goya_device *goya = hdev->asic_specific;
2509 u32 reset_timeout_ms, status;
2512 reset_timeout_ms = GOYA_PLDM_RESET_TIMEOUT_MSEC;
2514 reset_timeout_ms = GOYA_RESET_TIMEOUT_MSEC;
2517 goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE);
2518 goya_disable_clk_rlx(hdev);
2519 goya_set_pll_refclk(hdev);
2521 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, RESET_ALL);
2523 "Issued HARD reset command, going to wait %dms\n",
2526 WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST_CFG, DMA_MME_TPC_RESET);
2528 "Issued SOFT reset command, going to wait %dms\n",
2533 * After hard reset, we can't poll the BTM_FSM register because the PSOC
2534 * itself is in reset. In either reset we need to wait until the reset
2537 msleep(reset_timeout_ms);
2539 status = RREG32(mmPSOC_GLOBAL_CONF_BTM_FSM);
2540 if (status & PSOC_GLOBAL_CONF_BTM_FSM_STATE_MASK)
2542 "Timeout while waiting for device to reset 0x%x\n",
2546 goya->hw_cap_initialized &= ~(HW_CAP_DMA | HW_CAP_MME |
2547 HW_CAP_GOLDEN | HW_CAP_TPC);
2548 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2549 GOYA_ASYNC_EVENT_ID_SOFT_RESET);
2553 /* Chicken bit to re-initiate boot sequencer flow */
2554 WREG32(mmPSOC_GLOBAL_CONF_BOOT_SEQ_RE_START,
2555 1 << PSOC_GLOBAL_CONF_BOOT_SEQ_RE_START_IND_SHIFT);
2556 /* Move boot manager FSM to pre boot sequencer init state */
2557 WREG32(mmPSOC_GLOBAL_CONF_SW_BTM_FSM,
2558 0xA << PSOC_GLOBAL_CONF_SW_BTM_FSM_CTRL_SHIFT);
2560 goya->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q |
2561 HW_CAP_DDR_0 | HW_CAP_DDR_1 |
2562 HW_CAP_DMA | HW_CAP_MME |
2563 HW_CAP_MMU | HW_CAP_TPC_MBIST |
2564 HW_CAP_GOLDEN | HW_CAP_TPC);
2565 memset(goya->events_stat, 0, sizeof(goya->events_stat));
2569		/* In case we are running inside a VM and the VM is
2570		 * shutting down, we need to make sure the CPU boot-loader
2571 * is running before we can continue the VM shutdown.
2572 * That is because the VM will send an FLR signal that
2576 "Going to wait up to %ds for CPU boot loader\n",
2577 GOYA_CPU_TIMEOUT_USEC / 1000 / 1000);
2579 rc = hl_poll_timeout(
2581 mmPSOC_GLOBAL_CONF_WARM_REBOOT,
2583 (status == CPU_BOOT_STATUS_DRAM_RDY),
2585 GOYA_CPU_TIMEOUT_USEC);
2588 "failed to wait for CPU boot loader\n");
2592 int goya_suspend(struct hl_device *hdev)
2596 rc = hl_fw_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2598 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
2603 int goya_resume(struct hl_device *hdev)
2605 return goya_init_iatu(hdev);
2608 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
2609 u64 kaddress, phys_addr_t paddress, u32 size)
2613 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
2614 VM_DONTCOPY | VM_NORESERVE;
2616 rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT,
2617 size, vma->vm_page_prot);
2619 dev_err(hdev->dev, "remap_pfn_range error %d", rc);
2624 void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi)
2626 u32 db_reg_offset, db_value;
2628 switch (hw_queue_id) {
2629 case GOYA_QUEUE_ID_DMA_0:
2630 db_reg_offset = mmDMA_QM_0_PQ_PI;
2633 case GOYA_QUEUE_ID_DMA_1:
2634 db_reg_offset = mmDMA_QM_1_PQ_PI;
2637 case GOYA_QUEUE_ID_DMA_2:
2638 db_reg_offset = mmDMA_QM_2_PQ_PI;
2641 case GOYA_QUEUE_ID_DMA_3:
2642 db_reg_offset = mmDMA_QM_3_PQ_PI;
2645 case GOYA_QUEUE_ID_DMA_4:
2646 db_reg_offset = mmDMA_QM_4_PQ_PI;
2649 case GOYA_QUEUE_ID_CPU_PQ:
2650 db_reg_offset = mmCPU_IF_PF_PQ_PI;
2653 case GOYA_QUEUE_ID_MME:
2654 db_reg_offset = mmMME_QM_PQ_PI;
2657 case GOYA_QUEUE_ID_TPC0:
2658 db_reg_offset = mmTPC0_QM_PQ_PI;
2661 case GOYA_QUEUE_ID_TPC1:
2662 db_reg_offset = mmTPC1_QM_PQ_PI;
2665 case GOYA_QUEUE_ID_TPC2:
2666 db_reg_offset = mmTPC2_QM_PQ_PI;
2669 case GOYA_QUEUE_ID_TPC3:
2670 db_reg_offset = mmTPC3_QM_PQ_PI;
2673 case GOYA_QUEUE_ID_TPC4:
2674 db_reg_offset = mmTPC4_QM_PQ_PI;
2677 case GOYA_QUEUE_ID_TPC5:
2678 db_reg_offset = mmTPC5_QM_PQ_PI;
2681 case GOYA_QUEUE_ID_TPC6:
2682 db_reg_offset = mmTPC6_QM_PQ_PI;
2685 case GOYA_QUEUE_ID_TPC7:
2686 db_reg_offset = mmTPC7_QM_PQ_PI;
2690 /* Should never get here */
2691 dev_err(hdev->dev, "H/W queue %d is invalid. Can't set pi\n",
2698 /* ring the doorbell */
2699 WREG32(db_reg_offset, db_value);
2701 if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ)
2702 WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR,
2703 GOYA_ASYNC_EVENT_ID_PI_UPDATE);
2706 void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val)
2708 /* Not needed in Goya */
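/*
 * Host memory is visible to the device at an offset of HOST_PHYS_BASE, so the
 * DMA helpers below add HOST_PHYS_BASE to every address handed to the
 * hardware and strip it again before giving addresses back to the kernel
 * DMA API.
 */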
2711 static void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size,
2712 dma_addr_t *dma_handle, gfp_t flags)
2714 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size,
2717 /* Shift to the device's base physical address of host memory */
2719 *dma_handle += HOST_PHYS_BASE;
2724 static void goya_dma_free_coherent(struct hl_device *hdev, size_t size,
2725 void *cpu_addr, dma_addr_t dma_handle)
2727 /* Cancel the device's base physical address of host memory */
2728 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE;
2730 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle);
2733 void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
2734 dma_addr_t *dma_handle, u16 *queue_len)
2739 *dma_handle = hdev->asic_prop.sram_base_address;
2741 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
2744 case GOYA_QUEUE_ID_MME:
2745 offset = MME_QMAN_BASE_OFFSET;
2746 *queue_len = MME_QMAN_LENGTH;
2748 case GOYA_QUEUE_ID_TPC0:
2749 offset = TPC0_QMAN_BASE_OFFSET;
2750 *queue_len = TPC_QMAN_LENGTH;
2752 case GOYA_QUEUE_ID_TPC1:
2753 offset = TPC1_QMAN_BASE_OFFSET;
2754 *queue_len = TPC_QMAN_LENGTH;
2756 case GOYA_QUEUE_ID_TPC2:
2757 offset = TPC2_QMAN_BASE_OFFSET;
2758 *queue_len = TPC_QMAN_LENGTH;
2760 case GOYA_QUEUE_ID_TPC3:
2761 offset = TPC3_QMAN_BASE_OFFSET;
2762 *queue_len = TPC_QMAN_LENGTH;
2764 case GOYA_QUEUE_ID_TPC4:
2765 offset = TPC4_QMAN_BASE_OFFSET;
2766 *queue_len = TPC_QMAN_LENGTH;
2768 case GOYA_QUEUE_ID_TPC5:
2769 offset = TPC5_QMAN_BASE_OFFSET;
2770 *queue_len = TPC_QMAN_LENGTH;
2772 case GOYA_QUEUE_ID_TPC6:
2773 offset = TPC6_QMAN_BASE_OFFSET;
2774 *queue_len = TPC_QMAN_LENGTH;
2776 case GOYA_QUEUE_ID_TPC7:
2777 offset = TPC7_QMAN_BASE_OFFSET;
2778 *queue_len = TPC_QMAN_LENGTH;
2781 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id);
2786 *dma_handle += offset;
2791 static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
2793 struct packet_msg_prot *fence_pkt;
2795 dma_addr_t fence_dma_addr;
2802 timeout = GOYA_PLDM_QMAN0_TIMEOUT_USEC;
2804 timeout = HL_DEVICE_TIMEOUT_USEC;
2806 if (!hdev->asic_funcs->is_device_idle(hdev, buf, sizeof(buf))) {
2807 dev_err_ratelimited(hdev->dev,
2808 "Can't send KMD job on QMAN0 because %s is busy\n",
2813 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2817 "Failed to allocate fence memory for QMAN0\n");
2821 goya_qman0_set_security(hdev, true);
2823 cb = job->patched_cb;
2825 fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address +
2826 job->job_cb_size - sizeof(struct packet_msg_prot));
2828 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2829 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2830 (1 << GOYA_PKT_CTL_MB_SHIFT);
2831 fence_pkt->ctl = cpu_to_le32(tmp);
2832 fence_pkt->value = cpu_to_le32(GOYA_QMAN0_FENCE_VAL);
2833 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
2835 rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0,
2836 job->job_cb_size, cb->bus_address);
2838 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc);
2839 goto free_fence_ptr;
2842 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
2843 (tmp == GOYA_QMAN0_FENCE_VAL), 1000, timeout);
2845 hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
2847 if (rc == -ETIMEDOUT) {
2848 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
2849 goto free_fence_ptr;
2853 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
2856 goya_qman0_set_security(hdev, false);
2861 int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
2862 u32 timeout, long *result)
2864 struct goya_device *goya = hdev->asic_specific;
2866 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) {
2872 return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
2876 int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
2878 struct packet_msg_prot *fence_pkt;
2879 dma_addr_t pkt_dma_addr;
2881 dma_addr_t fence_dma_addr;
2885 fence_val = GOYA_QMAN0_FENCE_VAL;
2887 fence_ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL,
2891 "Failed to allocate memory for queue testing\n");
2897 fence_pkt = hdev->asic_funcs->asic_dma_pool_zalloc(hdev,
2898 sizeof(struct packet_msg_prot),
2899 GFP_KERNEL, &pkt_dma_addr);
2902 "Failed to allocate packet for queue testing\n");
2904 goto free_fence_ptr;
2907 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
2908 (1 << GOYA_PKT_CTL_EB_SHIFT) |
2909 (1 << GOYA_PKT_CTL_MB_SHIFT);
2910 fence_pkt->ctl = cpu_to_le32(tmp);
2911 fence_pkt->value = cpu_to_le32(fence_val);
2912 fence_pkt->addr = cpu_to_le64(fence_dma_addr);
2914 rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id,
2915 sizeof(struct packet_msg_prot),
2919 "Failed to send fence packet\n");
2923 rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
2924 1000, GOYA_TEST_QUEUE_WAIT_USEC);
2926 hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
2928 if (rc == -ETIMEDOUT) {
2930 "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
2931 hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
2934 dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
2939 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_pkt,
2942 hdev->asic_funcs->asic_dma_pool_free(hdev, (void *) fence_ptr,
2947 int goya_test_cpu_queue(struct hl_device *hdev)
2949 struct goya_device *goya = hdev->asic_specific;
2952	 * check the capability here, as send_cpu_message() won't update the result
2953	 * value if the capability is not set
2955 if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
2958 return hl_fw_test_cpu_queue(hdev);
2961 int goya_test_queues(struct hl_device *hdev)
2963 int i, rc, ret_val = 0;
2965 for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) {
2966 rc = goya_test_queue(hdev, i);
2974 static void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size,
2975 gfp_t mem_flags, dma_addr_t *dma_handle)
2979 if (size > GOYA_DMA_POOL_BLK_SIZE)
2982 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
2984 /* Shift to the device's base physical address of host memory */
2986 *dma_handle += HOST_PHYS_BASE;
2991 static void goya_dma_pool_free(struct hl_device *hdev, void *vaddr,
2992 dma_addr_t dma_addr)
2994 /* Cancel the device's base physical address of host memory */
2995 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE;
2997 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr);
3000 void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3001 dma_addr_t *dma_handle)
3005 vaddr = hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
3006 *dma_handle = (*dma_handle) - hdev->cpu_accessible_dma_address +
3007 VA_CPU_ACCESSIBLE_MEM_ADDR;
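	/*
	 * The pool itself sits in host memory; the handle is rebased from the
	 * host physical address of the pool to VA_CPU_ACCESSIBLE_MEM_ADDR,
	 * which is (presumably) where the device CPU maps this region.
	 */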
3012 void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3015 hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr);
3018 static int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sgl,
3019 int nents, enum dma_data_direction dir)
3021 struct scatterlist *sg;
3024 if (!dma_map_sg(&hdev->pdev->dev, sgl, nents, dir))
3027 /* Shift to the device's base physical address of host memory */
3028 for_each_sg(sgl, sg, nents, i)
3029 sg->dma_address += HOST_PHYS_BASE;
3034 static void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sgl,
3035 int nents, enum dma_data_direction dir)
3037 struct scatterlist *sg;
3040 /* Cancel the device's base physical address of host memory */
3041 for_each_sg(sgl, sg, nents, i)
3042 sg->dma_address -= HOST_PHYS_BASE;
3044 dma_unmap_sg(&hdev->pdev->dev, sgl, nents, dir);
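/*
 * Estimate the size, in bytes, of the LIN_DMA descriptor list that a
 * scatter-gather table will expand to. Consecutive SG entries whose DMA
 * addresses are contiguous are merged into one descriptor as long as the
 * combined length does not exceed DMA_MAX_TRANSFER_SIZE; the result lets the
 * parser size the patched CB before the packets are actually built.
 */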
3047 u32 goya_get_dma_desc_list_size(struct hl_device *hdev, struct sg_table *sgt)
3049 struct scatterlist *sg, *sg_next_iter;
3050 u32 count, dma_desc_cnt;
3052 dma_addr_t addr, addr_next;
3056 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3058 len = sg_dma_len(sg);
3059 addr = sg_dma_address(sg);
3064 while ((count + 1) < sgt->nents) {
3065 sg_next_iter = sg_next(sg);
3066 len_next = sg_dma_len(sg_next_iter);
3067 addr_next = sg_dma_address(sg_next_iter);
3072 if ((addr + len == addr_next) &&
3073 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3085 return dma_desc_cnt * sizeof(struct packet_lin_dma);
3088 static int goya_pin_memory_before_cs(struct hl_device *hdev,
3089 struct hl_cs_parser *parser,
3090 struct packet_lin_dma *user_dma_pkt,
3091 u64 addr, enum dma_data_direction dir)
3093 struct hl_userptr *userptr;
3096 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3097 parser->job_userptr_list, &userptr))
3098 goto already_pinned;
3100 userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC);
3104 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize),
3109 list_add_tail(&userptr->job_node, parser->job_userptr_list);
3111 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
3112 userptr->sgt->nents, dir);
3114 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
3118 userptr->dma_mapped = true;
3122 parser->patched_cb_size +=
3123 goya_get_dma_desc_list_size(hdev, userptr->sgt);
3128 hl_unpin_host_memory(hdev, userptr);
3134 static int goya_validate_dma_pkt_host(struct hl_device *hdev,
3135 struct hl_cs_parser *parser,
3136 struct packet_lin_dma *user_dma_pkt)
3138 u64 device_memory_addr, addr;
3139 enum dma_data_direction dir;
3140 enum goya_dma_direction user_dir;
3141 bool sram_addr = true;
3142 bool skip_host_mem_pin = false;
3147 ctl = le32_to_cpu(user_dma_pkt->ctl);
3149 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3150 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3152 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3153 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3156 case DMA_HOST_TO_DRAM:
3157 dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
3158 dir = DMA_TO_DEVICE;
3160 addr = le64_to_cpu(user_dma_pkt->src_addr);
3161 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3163 skip_host_mem_pin = true;
3166 case DMA_DRAM_TO_HOST:
3167 dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
3168 dir = DMA_FROM_DEVICE;
3170 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3171 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3174 case DMA_HOST_TO_SRAM:
3175 dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
3176 dir = DMA_TO_DEVICE;
3177 addr = le64_to_cpu(user_dma_pkt->src_addr);
3178 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3180 skip_host_mem_pin = true;
3183 case DMA_SRAM_TO_HOST:
3184 dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
3185 dir = DMA_FROM_DEVICE;
3186 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3187 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3190 dev_err(hdev->dev, "DMA direction is undefined\n");
3195 if (!hl_mem_area_inside_range(device_memory_addr,
3196 le32_to_cpu(user_dma_pkt->tsize),
3197 hdev->asic_prop.sram_user_base_address,
3198 hdev->asic_prop.sram_end_address)) {
3201 "SRAM address 0x%llx + 0x%x is invalid\n",
3203 user_dma_pkt->tsize);
3207 if (!hl_mem_area_inside_range(device_memory_addr,
3208 le32_to_cpu(user_dma_pkt->tsize),
3209 hdev->asic_prop.dram_user_base_address,
3210 hdev->asic_prop.dram_end_address)) {
3213 "DRAM address 0x%llx + 0x%x is invalid\n",
3215 user_dma_pkt->tsize);
3220 if (skip_host_mem_pin)
3221 parser->patched_cb_size += sizeof(*user_dma_pkt);
3223 if ((dir == DMA_TO_DEVICE) &&
3224 (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
3226			"Can't DMA from host on queue other than 1\n");
3230 rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
3237 static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
3238 struct hl_cs_parser *parser,
3239 struct packet_lin_dma *user_dma_pkt)
3241 u64 sram_memory_addr, dram_memory_addr;
3242 enum goya_dma_direction user_dir;
3245 ctl = le32_to_cpu(user_dma_pkt->ctl);
3246 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3247 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3249 if (user_dir == DMA_DRAM_TO_SRAM) {
3250 dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
3251 dram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3252 sram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3254 dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
3255 sram_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3256 dram_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3259 if (!hl_mem_area_inside_range(sram_memory_addr,
3260 le32_to_cpu(user_dma_pkt->tsize),
3261 hdev->asic_prop.sram_user_base_address,
3262 hdev->asic_prop.sram_end_address)) {
3263 dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n",
3264 sram_memory_addr, user_dma_pkt->tsize);
3268 if (!hl_mem_area_inside_range(dram_memory_addr,
3269 le32_to_cpu(user_dma_pkt->tsize),
3270 hdev->asic_prop.dram_user_base_address,
3271 hdev->asic_prop.dram_end_address)) {
3272 dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n",
3273 dram_memory_addr, user_dma_pkt->tsize);
3277 parser->patched_cb_size += sizeof(*user_dma_pkt);
3282 static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev,
3283 struct hl_cs_parser *parser,
3284 struct packet_lin_dma *user_dma_pkt)
3286 enum goya_dma_direction user_dir;
3290 dev_dbg(hdev->dev, "DMA packet details:\n");
3291 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3292 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3293 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3295 ctl = le32_to_cpu(user_dma_pkt->ctl);
3296 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3297 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3300 * Special handling for DMA with size 0. The H/W has a bug where
3301 * this can cause the QMAN DMA to get stuck, so block it here.
3303 if (user_dma_pkt->tsize == 0) {
3305 "Got DMA with size 0, might reset the device\n");
3309 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM))
3310 rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt);
3312 rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt);
3317 static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
3318 struct hl_cs_parser *parser,
3319 struct packet_lin_dma *user_dma_pkt)
3321 dev_dbg(hdev->dev, "DMA packet details:\n");
3322 dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr);
3323 dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr);
3324 dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize);
3328 * We can't allow user to read from Host using QMANs other than 1.
3330 if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
3331 hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
3332 le32_to_cpu(user_dma_pkt->tsize),
3333 hdev->asic_prop.va_space_host_start_address,
3334 hdev->asic_prop.va_space_host_end_address)) {
3336			"Can't DMA from host on queue other than 1\n");
3340 if (user_dma_pkt->tsize == 0) {
3342 "Got DMA with size 0, might reset the device\n");
3346 parser->patched_cb_size += sizeof(*user_dma_pkt);
3351 static int goya_validate_wreg32(struct hl_device *hdev,
3352 struct hl_cs_parser *parser,
3353 struct packet_wreg32 *wreg_pkt)
3355 struct goya_device *goya = hdev->asic_specific;
3356 u32 sob_start_addr, sob_end_addr;
3359 reg_offset = le32_to_cpu(wreg_pkt->ctl) &
3360 GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
3362 dev_dbg(hdev->dev, "WREG32 packet details:\n");
3363 dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
3364 dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
3366 if (reg_offset != (mmDMA_CH_0_WR_COMP_ADDR_LO & 0x1FFF)) {
3367 dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
3373	 * With MMU, DMA channels are not secured, so it doesn't matter where
3374	 * the WR COMP will be written, because it will go out with a
3375	 * non-secured property
3377 if (goya->hw_cap_initialized & HW_CAP_MMU)
3380 sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
3381 sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
3383 if ((le32_to_cpu(wreg_pkt->value) < sob_start_addr) ||
3384 (le32_to_cpu(wreg_pkt->value) > sob_end_addr)) {
3386 dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
3394 static int goya_validate_cb(struct hl_device *hdev,
3395 struct hl_cs_parser *parser, bool is_mmu)
3397 u32 cb_parsed_length = 0;
3400 parser->patched_cb_size = 0;
3402	/* cb_user_size is larger than 0, so the loop will always be executed */
3403 while (cb_parsed_length < parser->user_cb_size) {
3404 enum packet_id pkt_id;
3408 user_pkt = (void *) (uintptr_t)
3409 (parser->user_cb->kernel_address + cb_parsed_length);
3411 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3412 PACKET_HEADER_PACKET_ID_MASK) >>
3413 PACKET_HEADER_PACKET_ID_SHIFT);
3415 pkt_size = goya_packet_sizes[pkt_id];
3416 cb_parsed_length += pkt_size;
3417 if (cb_parsed_length > parser->user_cb_size) {
3419 "packet 0x%x is out of CB boundary\n", pkt_id);
3425 case PACKET_WREG_32:
3427			 * Although it is validated after copy in patch_cb(),
3428			 * we need to validate here as well because patch_cb() is
3429			 * not called in the MMU path while this function is called
3431 rc = goya_validate_wreg32(hdev, parser, user_pkt);
3434 case PACKET_WREG_BULK:
3436 "User not allowed to use WREG_BULK\n");
3440 case PACKET_MSG_PROT:
3442 "User not allowed to use MSG_PROT\n");
3447 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3452 dev_err(hdev->dev, "User not allowed to use STOP\n");
3456 case PACKET_LIN_DMA:
3458 rc = goya_validate_dma_pkt_mmu(hdev, parser,
3461 rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
3465 case PACKET_MSG_LONG:
3466 case PACKET_MSG_SHORT:
3469 parser->patched_cb_size += pkt_size;
3473 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3484 * The new CB should have space at the end for two MSG_PROT packets:
3485 * 1. A packet that will act as a completion packet
3486 * 2. A packet that will generate MSI-X interrupt
3488 parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2;
3493 static int goya_patch_dma_packet(struct hl_device *hdev,
3494 struct hl_cs_parser *parser,
3495 struct packet_lin_dma *user_dma_pkt,
3496 struct packet_lin_dma *new_dma_pkt,
3497 u32 *new_dma_pkt_size)
3499 struct hl_userptr *userptr;
3500 struct scatterlist *sg, *sg_next_iter;
3501 u32 count, dma_desc_cnt;
3503 dma_addr_t dma_addr, dma_addr_next;
3504 enum goya_dma_direction user_dir;
3505 u64 device_memory_addr, addr;
3506 enum dma_data_direction dir;
3507 struct sg_table *sgt;
3508 bool skip_host_mem_pin = false;
3510 u32 user_rdcomp_mask, user_wrcomp_mask, ctl;
3512 ctl = le32_to_cpu(user_dma_pkt->ctl);
3514 user_dir = (ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
3515 GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
3517 user_memset = (ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
3518 GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
3520 if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) ||
3521 (user_dma_pkt->tsize == 0)) {
3522 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt));
3523 *new_dma_pkt_size = sizeof(*new_dma_pkt);
3527 if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) {
3528 addr = le64_to_cpu(user_dma_pkt->src_addr);
3529 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr);
3530 dir = DMA_TO_DEVICE;
3532 skip_host_mem_pin = true;
3534 addr = le64_to_cpu(user_dma_pkt->dst_addr);
3535 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr);
3536 dir = DMA_FROM_DEVICE;
3539 if ((!skip_host_mem_pin) &&
3540 (hl_userptr_is_pinned(hdev, addr,
3541 le32_to_cpu(user_dma_pkt->tsize),
3542 parser->job_userptr_list, &userptr) == false)) {
3543 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n",
3544 addr, user_dma_pkt->tsize);
3548 if ((user_memset) && (dir == DMA_TO_DEVICE)) {
3549 memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt));
3550 *new_dma_pkt_size = sizeof(*user_dma_pkt);
3554 user_rdcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK;
3556 user_wrcomp_mask = ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK;
3561 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
3562 len = sg_dma_len(sg);
3563 dma_addr = sg_dma_address(sg);
3568 while ((count + 1) < sgt->nents) {
3569 sg_next_iter = sg_next(sg);
3570 len_next = sg_dma_len(sg_next_iter);
3571 dma_addr_next = sg_dma_address(sg_next_iter);
3576 if ((dma_addr + len == dma_addr_next) &&
3577 (len + len_next <= DMA_MAX_TRANSFER_SIZE)) {
3586 ctl = le32_to_cpu(user_dma_pkt->ctl);
3587 if (likely(dma_desc_cnt))
3588 ctl &= ~GOYA_PKT_CTL_EB_MASK;
3589 ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK |
3590 GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK);
3591 new_dma_pkt->ctl = cpu_to_le32(ctl);
3592 new_dma_pkt->tsize = cpu_to_le32((u32) len);
3594 if (dir == DMA_TO_DEVICE) {
3595 new_dma_pkt->src_addr = cpu_to_le64(dma_addr);
3596 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr);
3598 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr);
3599 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr);
3603 device_memory_addr += len;
3608 if (!dma_desc_cnt) {
3610 "Error of 0 SG entries when patching DMA packet\n");
3614 /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */
3616 new_dma_pkt->ctl |= cpu_to_le32(user_rdcomp_mask | user_wrcomp_mask);
3618 *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma);
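/*
 * Second pass over the user CB: the allowed packets are copied verbatim into
 * the patched CB, while every host-bound LIN_DMA packet is expanded (via
 * goya_patch_dma_packet() above) into one descriptor per merged SG segment,
 * matching the size computed during the validation pass.
 */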
3623 static int goya_patch_cb(struct hl_device *hdev,
3624 struct hl_cs_parser *parser)
3626 u32 cb_parsed_length = 0;
3627 u32 cb_patched_cur_length = 0;
3630	/* cb_user_size is larger than 0, so the loop will always be executed */
3631 while (cb_parsed_length < parser->user_cb_size) {
3632 enum packet_id pkt_id;
3634 u32 new_pkt_size = 0;
3635 void *user_pkt, *kernel_pkt;
3637 user_pkt = (void *) (uintptr_t)
3638 (parser->user_cb->kernel_address + cb_parsed_length);
3639 kernel_pkt = (void *) (uintptr_t)
3640 (parser->patched_cb->kernel_address +
3641 cb_patched_cur_length);
3643 pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
3644 PACKET_HEADER_PACKET_ID_MASK) >>
3645 PACKET_HEADER_PACKET_ID_SHIFT);
3647 pkt_size = goya_packet_sizes[pkt_id];
3648 cb_parsed_length += pkt_size;
3649 if (cb_parsed_length > parser->user_cb_size) {
3651 "packet 0x%x is out of CB boundary\n", pkt_id);
3657 case PACKET_LIN_DMA:
3658 rc = goya_patch_dma_packet(hdev, parser, user_pkt,
3659 kernel_pkt, &new_pkt_size);
3660 cb_patched_cur_length += new_pkt_size;
3663 case PACKET_WREG_32:
3664 memcpy(kernel_pkt, user_pkt, pkt_size);
3665 cb_patched_cur_length += pkt_size;
3666 rc = goya_validate_wreg32(hdev, parser, kernel_pkt);
3669 case PACKET_WREG_BULK:
3671 "User not allowed to use WREG_BULK\n");
3675 case PACKET_MSG_PROT:
3677 "User not allowed to use MSG_PROT\n");
3682 dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
3687 dev_err(hdev->dev, "User not allowed to use STOP\n");
3691 case PACKET_MSG_LONG:
3692 case PACKET_MSG_SHORT:
3695 memcpy(kernel_pkt, user_pkt, pkt_size);
3696 cb_patched_cur_length += pkt_size;
3700 dev_err(hdev->dev, "Invalid packet header 0x%x\n",
3713 static int goya_parse_cb_mmu(struct hl_device *hdev,
3714 struct hl_cs_parser *parser)
3716 u64 patched_cb_handle;
3717 u32 patched_cb_size;
3718 struct hl_cb *user_cb;
3722 * The new CB should have space at the end for two MSG_PROT pkt:
3723 * 1. A packet that will act as a completion packet
3724 * 2. A packet that will generate MSI-X interrupt
3726 parser->patched_cb_size = parser->user_cb_size +
3727 sizeof(struct packet_msg_prot) * 2;
3729 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
3730 parser->patched_cb_size,
3731 &patched_cb_handle, HL_KERNEL_ASID_ID);
3735 "Failed to allocate patched CB for DMA CS %d\n",
3740 patched_cb_handle >>= PAGE_SHIFT;
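	/*
	 * hl_cb_create() returns a page-shifted handle (it also serves as an
	 * mmap offset), so the raw handle computed above is what hl_cb_get()
	 * and hl_cb_destroy() expect.
	 */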
3741 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3742 (u32) patched_cb_handle);
3743 /* hl_cb_get should never fail here so use kernel WARN */
3744 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3745 (u32) patched_cb_handle);
3746 if (!parser->patched_cb) {
3752 * The check that parser->user_cb_size <= parser->user_cb->size was done
3753 * in validate_queue_index().
3755 memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
3756 (void *) (uintptr_t) parser->user_cb->kernel_address,
3757 parser->user_cb_size);
3759 patched_cb_size = parser->patched_cb_size;
3761 /* validate patched CB instead of user CB */
3762 user_cb = parser->user_cb;
3763 parser->user_cb = parser->patched_cb;
3764 rc = goya_validate_cb(hdev, parser, true);
3765 parser->user_cb = user_cb;
3768 hl_cb_put(parser->patched_cb);
3772 if (patched_cb_size != parser->patched_cb_size) {
3773 dev_err(hdev->dev, "user CB size mismatch\n");
3774 hl_cb_put(parser->patched_cb);
3781	 * Always call cb destroy here because we still hold one reference
3782	 * to it from the earlier cb_get call. After the job is completed,
3783	 * cb_put will release it, but here we want to remove it from the
3786 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3787 patched_cb_handle << PAGE_SHIFT);
3792 static int goya_parse_cb_no_mmu(struct hl_device *hdev,
3793 struct hl_cs_parser *parser)
3795 u64 patched_cb_handle;
3798 rc = goya_validate_cb(hdev, parser, false);
3803 rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
3804 parser->patched_cb_size,
3805 &patched_cb_handle, HL_KERNEL_ASID_ID);
3808 "Failed to allocate patched CB for DMA CS %d\n", rc);
3812 patched_cb_handle >>= PAGE_SHIFT;
3813 parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
3814 (u32) patched_cb_handle);
3815 /* hl_cb_get should never fail here so use kernel WARN */
3816 WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
3817 (u32) patched_cb_handle);
3818 if (!parser->patched_cb) {
3823 rc = goya_patch_cb(hdev, parser);
3826 hl_cb_put(parser->patched_cb);
3830	 * Always call cb destroy here because we still hold one reference
3831	 * to it from the earlier cb_get call. After the job is completed,
3832	 * cb_put will release it, but here we want to remove it from the
3835 hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
3836 patched_cb_handle << PAGE_SHIFT);
3840 hl_userptr_delete_list(hdev, parser->job_userptr_list);
3844 static int goya_parse_cb_no_ext_queue(struct hl_device *hdev,
3845 struct hl_cs_parser *parser)
3847 struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
3848 struct goya_device *goya = hdev->asic_specific;
3850 if (goya->hw_cap_initialized & HW_CAP_MMU)
3853 /* For internal queue jobs, just check if CB address is valid */
3854 if (hl_mem_area_inside_range(
3855 (u64) (uintptr_t) parser->user_cb,
3856 parser->user_cb_size,
3857 asic_prop->sram_user_base_address,
3858 asic_prop->sram_end_address))
3861 if (hl_mem_area_inside_range(
3862 (u64) (uintptr_t) parser->user_cb,
3863 parser->user_cb_size,
3864 asic_prop->dram_user_base_address,
3865 asic_prop->dram_end_address))
3869		"Internal CB address %px + 0x%x is neither in SRAM nor in DRAM\n",
3870 parser->user_cb, parser->user_cb_size);
3875 int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
3877 struct goya_device *goya = hdev->asic_specific;
3879 if (!parser->ext_queue)
3880 return goya_parse_cb_no_ext_queue(hdev, parser);
3882 if (goya->hw_cap_initialized & HW_CAP_MMU)
3883 return goya_parse_cb_mmu(hdev, parser);
3885 return goya_parse_cb_no_mmu(hdev, parser);
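/*
 * Append the two MSG_PROT packets that the parser reserved space for at the
 * end of the patched CB: the first writes cq_val to cq_addr to signal
 * completion, the second writes the MSI-X vector number to the PCIe MSI-X
 * doorbell register to raise the host interrupt.
 */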
3888 void goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address,
3889 u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec)
3891 struct packet_msg_prot *cq_pkt;
3894 cq_pkt = (struct packet_msg_prot *) (uintptr_t)
3895 (kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
3897 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3898 (1 << GOYA_PKT_CTL_EB_SHIFT) |
3899 (1 << GOYA_PKT_CTL_MB_SHIFT);
3900 cq_pkt->ctl = cpu_to_le32(tmp);
3901 cq_pkt->value = cpu_to_le32(cq_val);
3902 cq_pkt->addr = cpu_to_le64(cq_addr);
3906 tmp = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
3907 (1 << GOYA_PKT_CTL_MB_SHIFT);
3908 cq_pkt->ctl = cpu_to_le32(tmp);
3909 cq_pkt->value = cpu_to_le32(msix_vec & 0x7FF);
3910 cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF);
3913 void goya_update_eq_ci(struct hl_device *hdev, u32 val)
3915 WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
3918 void goya_restore_phase_topology(struct hl_device *hdev)
3923 static void goya_clear_sm_regs(struct hl_device *hdev)
3925 int i, num_of_sob_in_longs, num_of_mon_in_longs;
3927 num_of_sob_in_longs =
3928 ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4);
3930 num_of_mon_in_longs =
3931 ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4);
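	/*
	 * Each SOB / monitor-status register is 4 bytes wide; the '+ 4' makes
	 * the byte ranges inclusive of the last register so the loops below
	 * clear every one of them.
	 */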
3933 for (i = 0 ; i < num_of_sob_in_longs ; i += 4)
3934 WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0);
3936 for (i = 0 ; i < num_of_mon_in_longs ; i += 4)
3937 WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0);
3939 /* Flush all WREG to prevent race */
3940 i = RREG32(mmSYNC_MNGR_SOB_OBJ_0);
3944 * goya_debugfs_read32 - read a 32bit value from a given device address
3946 * @hdev: pointer to hl_device structure
3947 * @addr: address in device
3948 * @val: returned value
3950 * In case of a DDR address that is not mapped into the default aperture that
3951 * the DDR bar exposes, the function will configure the iATU so that the DDR
3952 * bar will be positioned at a base address that allows reading from the
3953 * required address. Configuring the iATU during normal operation can
3954 * lead to undefined behavior and should therefore be done with extreme care.
3957 static int goya_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
3959 struct asic_fixed_properties *prop = &hdev->asic_prop;
3963 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
3964 *val = RREG32(addr - CFG_BASE);
3966 } else if ((addr >= SRAM_BASE_ADDR) &&
3967 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
3969 *val = readl(hdev->pcie_bar[SRAM_CFG_BAR_ID] +
3970 (addr - SRAM_BASE_ADDR));
3972 } else if ((addr >= DRAM_PHYS_BASE) &&
3973 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
3975 u64 bar_base_addr = DRAM_PHYS_BASE +
3976 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
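		/*
		 * bar_base_addr is the requested address aligned down to the DDR
		 * BAR size, so once the BAR is moved there the address is
		 * guaranteed to fall inside the BAR window.
		 */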
3978 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
3979 if (ddr_bar_addr != U64_MAX) {
3980 *val = readl(hdev->pcie_bar[DDR_BAR_ID] +
3981 (addr - bar_base_addr));
3983 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
3986 if (ddr_bar_addr == U64_MAX)
3996 * goya_debugfs_write32 - write a 32bit value to a given device address
3998 * @hdev: pointer to hl_device structure
3999 * @addr: address in device
4000 * @val: value to write
4002 * In case of a DDR address that is not mapped into the default aperture that
4003 * the DDR bar exposes, the function will configure the iATU so that the DDR
4004 * bar will be positioned at a base address that allows writing to the
4005 * required address. Configuring the iATU during normal operation can
4006 * lead to undefined behavior and should therefore be done with extreme care.
4009 static int goya_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
4011 struct asic_fixed_properties *prop = &hdev->asic_prop;
4015 if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
4016 WREG32(addr - CFG_BASE, val);
4018 } else if ((addr >= SRAM_BASE_ADDR) &&
4019 (addr < SRAM_BASE_ADDR + SRAM_SIZE)) {
4021 writel(val, hdev->pcie_bar[SRAM_CFG_BAR_ID] +
4022 (addr - SRAM_BASE_ADDR));
4024 } else if ((addr >= DRAM_PHYS_BASE) &&
4025 (addr < DRAM_PHYS_BASE + hdev->asic_prop.dram_size)) {
4027 u64 bar_base_addr = DRAM_PHYS_BASE +
4028 (addr & ~(prop->dram_pci_bar_size - 0x1ull));
4030 ddr_bar_addr = goya_set_ddr_bar_base(hdev, bar_base_addr);
4031 if (ddr_bar_addr != U64_MAX) {
4032 writel(val, hdev->pcie_bar[DDR_BAR_ID] +
4033 (addr - bar_base_addr));
4035 ddr_bar_addr = goya_set_ddr_bar_base(hdev,
4038 if (ddr_bar_addr == U64_MAX)
4047 static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
4049 struct goya_device *goya = hdev->asic_specific;
4051 if (hdev->hard_reset_pending)
4054 return readq(hdev->pcie_bar[DDR_BAR_ID] +
4055 (addr - goya->ddr_bar_cur_addr));
4058 static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
4060 struct goya_device *goya = hdev->asic_specific;
4062 if (hdev->hard_reset_pending)
4065 writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
4066 (addr - goya->ddr_bar_cur_addr));
4069 static const char *_goya_get_event_desc(u16 event_type)
4071 switch (event_type) {
4072 case GOYA_ASYNC_EVENT_ID_PCIE_IF:
4074 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4075 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4076 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4077 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4078 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4079 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4080 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4081 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4083 case GOYA_ASYNC_EVENT_ID_MME_ECC:
4085 case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
4086 return "MME_ecc_ext";
4087 case GOYA_ASYNC_EVENT_ID_MMU_ECC:
4089 case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
4091 case GOYA_ASYNC_EVENT_ID_DMA_ECC:
4093 case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
4094 return "CPU_if_ecc";
4095 case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
4097 case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
4098 return "PSOC_coresight";
4099 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4101 case GOYA_ASYNC_EVENT_ID_GIC500:
4103 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4105 case GOYA_ASYNC_EVENT_ID_AXI_ECC:
4107 case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
4108 return "L2_ram_ecc";
4109 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
4110 return "PSOC_gpio_05_sw_reset";
4111 case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
4112 return "PSOC_gpio_10_vrhot_icrit";
4113 case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
4115 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4116 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4117 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4118 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4119 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4120 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4121 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4122 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4124 case GOYA_ASYNC_EVENT_ID_MME_WACS:
4126 case GOYA_ASYNC_EVENT_ID_MME_WACSD:
4128 case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
4129 return "CPU_axi_splitter";
4130 case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
4131 return "PSOC_axi_dec";
4132 case GOYA_ASYNC_EVENT_ID_PSOC:
4134 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4135 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4136 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4137 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4138 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4139 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4140 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4141 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4142 return "TPC%d_krn_err";
4143 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4145 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4147 case GOYA_ASYNC_EVENT_ID_MME_QM:
4149 case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
4151 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4153 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4155 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4156 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4157 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4158 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4159 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4160 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4161 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4162 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4163 return "TPC%d_bmon_spmu";
4164 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4165 return "DMA_bm_ch%d";
4171 static void goya_get_event_desc(u16 event_type, char *desc, size_t size)
4175 switch (event_type) {
4176 case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
4177 case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
4178 case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
4179 case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
4180 case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
4181 case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
4182 case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
4183 case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
4184 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_ECC) / 3;
4185 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4187 case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
4188 index = event_type - GOYA_ASYNC_EVENT_ID_SRAM0;
4189 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4191 case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
4192 index = event_type - GOYA_ASYNC_EVENT_ID_PLL0;
4193 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4195 case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
4196 case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
4197 case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
4198 case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
4199 case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
4200 case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
4201 case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
4202 case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
4203 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_DEC) / 3;
4204 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4206 case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
4207 case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
4208 case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
4209 case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
4210 case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
4211 case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
4212 case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
4213 case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
4214 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR) / 10;
4215 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4217 case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_CMDQ:
4218 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_CMDQ;
4219 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4221 case GOYA_ASYNC_EVENT_ID_TPC0_QM ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
4222 index = event_type - GOYA_ASYNC_EVENT_ID_TPC0_QM;
4223 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4225 case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
4226 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_QM;
4227 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4229 case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
4230 index = event_type - GOYA_ASYNC_EVENT_ID_DMA0_CH;
4231 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4233 case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
4234 case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
4235 case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
4236 case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
4237 case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
4238 case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
4239 case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
4240 case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
4241 index = (event_type - GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU) / 10;
4242 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4244 case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
4245 index = event_type - GOYA_ASYNC_EVENT_ID_DMA_BM_CH0;
4246 snprintf(desc, size, _goya_get_event_desc(event_type), index);
4249 snprintf(desc, size, _goya_get_event_desc(event_type));
4254 static void goya_print_razwi_info(struct hl_device *hdev)
4256 if (RREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD)) {
4257 dev_err(hdev->dev, "Illegal write to LBW\n");
4258 WREG32(mmDMA_MACRO_RAZWI_LBW_WT_VLD, 0);
4261 if (RREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD)) {
4262 dev_err(hdev->dev, "Illegal read from LBW\n");
4263 WREG32(mmDMA_MACRO_RAZWI_LBW_RD_VLD, 0);
4266 if (RREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD)) {
4267 dev_err(hdev->dev, "Illegal write to HBW\n");
4268 WREG32(mmDMA_MACRO_RAZWI_HBW_WT_VLD, 0);
4271 if (RREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD)) {
4272 dev_err(hdev->dev, "Illegal read from HBW\n");
4273 WREG32(mmDMA_MACRO_RAZWI_HBW_RD_VLD, 0);
4277 static void goya_print_mmu_error_info(struct hl_device *hdev)
4279 struct goya_device *goya = hdev->asic_specific;
4283 if (!(goya->hw_cap_initialized & HW_CAP_MMU))
4286 val = RREG32(mmMMU_PAGE_ERROR_CAPTURE);
4287 if (val & MMU_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
4288 addr = val & MMU_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
4290 addr |= RREG32(mmMMU_PAGE_ERROR_CAPTURE_VA);
4292 dev_err(hdev->dev, "MMU page fault on va 0x%llx\n", addr);
4294 WREG32(mmMMU_PAGE_ERROR_CAPTURE, 0);
4298 static void goya_print_irq_info(struct hl_device *hdev, u16 event_type,
4303 goya_get_event_desc(event_type, desc, sizeof(desc));
4304 dev_err(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
4308 goya_print_razwi_info(hdev);
4309 goya_print_mmu_error_info(hdev);
4313 static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
4314 size_t irq_arr_size)
4316 struct armcp_unmask_irq_arr_packet *pkt;
4317 size_t total_pkt_size;
4321 total_pkt_size = sizeof(struct armcp_unmask_irq_arr_packet) +
4324	/* data should be aligned to 8 bytes in order for ArmCP to copy it */
4325 total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
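	/* Rounds up to the next multiple of 8, e.g. (13 + 0x7) & ~0x7 == 16 */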
4327	/* total_pkt_size is cast to u16 later on */
4328 if (total_pkt_size > USHRT_MAX) {
4329 dev_err(hdev->dev, "too many elements in IRQ array\n");
4333 pkt = kzalloc(total_pkt_size, GFP_KERNEL);
4337 pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
4338 memcpy(&pkt->irqs, irq_arr, irq_arr_size);
4340 pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
4341 ARMCP_PKT_CTL_OPCODE_SHIFT);
4343 rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
4344 HL_DEVICE_TIMEOUT_USEC, &result);
4347 dev_err(hdev->dev, "failed to unmask IRQ array\n");
static int goya_soft_reset_late_init(struct hl_device *hdev)
{
	/*
	 * Unmask all IRQs since some could have been received
	 * during the soft reset
	 */
	return goya_unmask_irq_arr(hdev, goya_all_events,
			sizeof(goya_all_events));
}

static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
{
	struct armcp_packet pkt;
	long result;
	int rc;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ <<
				ARMCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(event_type);

	rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			HL_DEVICE_TIMEOUT_USEC, &result);
	if (rc)
		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);

	return rc;
}
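/*
 * goya_handle_eqe - dispatch a single event-queue entry received from ArmCP.
 *
 * Fatal events (ECC errors, GIC/AXI errors, SW-reset and VRHOT GPIOs) trigger
 * a hard reset of the device. Recoverable events print the interrupt details,
 * including the RAZWI/MMU capture registers where relevant, and then ask
 * ArmCP to unmask the event so it can be reported again.
 */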
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
	u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
	u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
			>> EQ_CTL_EVENT_TYPE_SHIFT);
	struct goya_device *goya = hdev->asic_specific;

	goya->events_stat[event_type]++;

	switch (event_type) {
	case GOYA_ASYNC_EVENT_ID_PCIE_IF:
	case GOYA_ASYNC_EVENT_ID_TPC0_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC1_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC2_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC3_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC4_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC5_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC6_ECC:
	case GOYA_ASYNC_EVENT_ID_TPC7_ECC:
	case GOYA_ASYNC_EVENT_ID_MME_ECC:
	case GOYA_ASYNC_EVENT_ID_MME_ECC_EXT:
	case GOYA_ASYNC_EVENT_ID_MMU_ECC:
	case GOYA_ASYNC_EVENT_ID_DMA_MACRO:
	case GOYA_ASYNC_EVENT_ID_DMA_ECC:
	case GOYA_ASYNC_EVENT_ID_CPU_IF_ECC:
	case GOYA_ASYNC_EVENT_ID_PSOC_MEM:
	case GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT:
	case GOYA_ASYNC_EVENT_ID_SRAM0 ... GOYA_ASYNC_EVENT_ID_SRAM29:
	case GOYA_ASYNC_EVENT_ID_GIC500:
	case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
	case GOYA_ASYNC_EVENT_ID_AXI_ECC:
	case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
	case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
		goya_print_irq_info(hdev, event_type, false);
		hl_device_reset(hdev, true, false);
		break;
	case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC0_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC1_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC2_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC3_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC4_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC5_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC6_DEC:
	case GOYA_ASYNC_EVENT_ID_TPC7_DEC:
	case GOYA_ASYNC_EVENT_ID_MME_WACS:
	case GOYA_ASYNC_EVENT_ID_MME_WACSD:
	case GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER:
	case GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC:
	case GOYA_ASYNC_EVENT_ID_PSOC:
	case GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR:
	case GOYA_ASYNC_EVENT_ID_TPC0_CMDQ ... GOYA_ASYNC_EVENT_ID_TPC7_QM:
	case GOYA_ASYNC_EVENT_ID_MME_QM:
	case GOYA_ASYNC_EVENT_ID_MME_CMDQ:
	case GOYA_ASYNC_EVENT_ID_DMA0_QM ... GOYA_ASYNC_EVENT_ID_DMA4_QM:
	case GOYA_ASYNC_EVENT_ID_DMA0_CH ... GOYA_ASYNC_EVENT_ID_DMA4_CH:
		goya_print_irq_info(hdev, event_type, true);
		goya_unmask_irq(hdev, event_type);
		break;
	case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU:
	case GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 ... GOYA_ASYNC_EVENT_ID_DMA_BM_CH4:
		goya_print_irq_info(hdev, event_type, false);
		goya_unmask_irq(hdev, event_type);
		break;

	default:
		dev_err(hdev->dev, "Received invalid H/W interrupt %d\n",
				event_type);
		break;
	}
}
void *goya_get_events_stat(struct hl_device *hdev, u32 *size)
{
	struct goya_device *goya = hdev->asic_specific;

	*size = (u32) sizeof(goya->events_stat);

	return goya->events_stat;
}
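/*
 * Fill a region of on-device memory (SRAM or DRAM) with a 64-bit pattern by
 * building LIN_DMA packets in "memset" mode inside a kernel-owned command
 * buffer and submitting them as a job on QMAN0, the DMA queue reserved for
 * the driver. Each LIN_DMA transfer is capped at 2GB, so the region is split
 * into as many packets as needed.
 */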
static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
					u64 val, bool is_dram)
{
	struct packet_lin_dma *lin_dma_pkt;
	struct hl_cs_job *job;
	u32 cb_size, ctl;
	struct hl_cb *cb;
	int rc, lin_dma_pkts_cnt;

	lin_dma_pkts_cnt = DIV_ROUND_UP_ULL(size, SZ_2G);
	cb_size = lin_dma_pkts_cnt * sizeof(struct packet_lin_dma) +
						sizeof(struct packet_msg_prot);
	cb = hl_cb_kernel_create(hdev, cb_size);
	if (!cb)
		return -ENOMEM;

	lin_dma_pkt = (struct packet_lin_dma *) (uintptr_t) cb->kernel_address;

	do {
		memset(lin_dma_pkt, 0, sizeof(*lin_dma_pkt));

		ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
				(1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
				(1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
				(1 << GOYA_PKT_CTL_RB_SHIFT) |
				(1 << GOYA_PKT_CTL_MB_SHIFT));
		ctl |= (is_dram ? DMA_HOST_TO_DRAM : DMA_HOST_TO_SRAM) <<
				GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
		lin_dma_pkt->ctl = cpu_to_le32(ctl);

		lin_dma_pkt->src_addr = cpu_to_le64(val);
		lin_dma_pkt->dst_addr = cpu_to_le64(addr);
		if (lin_dma_pkts_cnt > 1)
			lin_dma_pkt->tsize = cpu_to_le32(SZ_2G);
		else
			lin_dma_pkt->tsize = cpu_to_le32(size);

		size -= SZ_2G;
		addr += SZ_2G;
		lin_dma_pkt++;
	} while (--lin_dma_pkts_cnt);

	job = hl_cs_allocate_job(hdev, true);
	if (!job) {
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;
		goto release_cb;
	}

	job->id = 0;
	job->user_cb = cb;
	job->user_cb->cs_cnt++;
	job->user_cb_size = cb_size;
	job->hw_queue_id = GOYA_QUEUE_ID_DMA_0;
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size;

	hl_debugfs_add_job(hdev, job);

	rc = goya_send_job_on_qman0(hdev, job);

	hl_cb_put(job->patched_cb);

	hl_debugfs_remove_job(hdev, job);
	kfree(job);
	cb->cs_cnt--;

release_cb:
	hl_cb_put(cb);
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);

	return rc;
}
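/*
 * On a context switch the driver scrubs the SRAM with a known pattern,
 * restores the registers a user context is allowed to modify (the DMA
 * channel completion write-back addresses and the TPC PLL relaxation value),
 * programs the MMU registers with the new ASID and clears the sync manager
 * state, so the incoming context starts from a predictable baseline.
 */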
int goya_context_switch(struct hl_device *hdev, u32 asid)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 addr = prop->sram_base_address, sob_addr;
	u32 size = hdev->pldm ? 0x10000 : prop->sram_size;
	u64 val = 0x7777777777777777ull;
	int rc, dma_id;
	u32 channel_off = mmDMA_CH_1_WR_COMP_ADDR_LO -
			mmDMA_CH_0_WR_COMP_ADDR_LO;

	rc = goya_memset_device_memory(hdev, addr, size, val, false);
	if (rc) {
		dev_err(hdev->dev, "Failed to clear SRAM in context switch\n");
		return rc;
	}

	/* we need to reset registers that the user is allowed to change */
	sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1007;
	WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO, lower_32_bits(sob_addr));

	for (dma_id = 1 ; dma_id < NUMBER_OF_EXT_HW_QUEUES ; dma_id++) {
		sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 +
							(dma_id - 1) * 4;
		WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + channel_off * dma_id,
						lower_32_bits(sob_addr));
	}

	WREG32(mmTPC_PLL_CLK_RLX_0, 0x200020);

	goya_mmu_prepare(hdev, asid);

	goya_clear_sm_regs(hdev);

	return 0;
}
static int goya_mmu_clear_pgt_range(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	u64 addr = prop->mmu_pgt_addr;
	u32 size = prop->mmu_pgt_size + MMU_DRAM_DEFAULT_PAGE_SIZE +
			MMU_CACHE_MNG_SIZE;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	return goya_memset_device_memory(hdev, addr, size, 0, true);
}
static int goya_mmu_set_dram_default_page(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	u64 addr = hdev->asic_prop.mmu_dram_default_page_addr;
	u32 size = MMU_DRAM_DEFAULT_PAGE_SIZE;
	u64 val = 0x9999999999999999ull;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	return goya_memset_device_memory(hdev, addr, size, val, true);
}
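/*
 * Create the MMU mappings the device CPU needs once the MMU is enabled: the
 * firmware image region in DRAM is mapped 1:1 using 2MB pages, and the
 * host-resident CPU-accessible DMA region is mapped at a fixed device VA,
 * either as a single 2MB page when its physical address is 2MB aligned or as
 * a series of 4KB pages otherwise. On any failure, everything mapped so far
 * is unwound.
 */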
static int goya_mmu_add_mappings_for_device_cpu(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	s64 off, cpu_off;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return 0;

	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB) {
		rc = hl_mmu_map(hdev->kernel_ctx, prop->dram_base_address + off,
				prop->dram_base_address + off, PAGE_SIZE_2MB);
		if (rc) {
			dev_err(hdev->dev, "Map failed for address 0x%llx\n",
				prop->dram_base_address + off);
			goto unmap;
		}
	}

	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
		rc = hl_mmu_map(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
			hdev->cpu_accessible_dma_address, PAGE_SIZE_2MB);
		if (rc) {
			dev_err(hdev->dev,
				"Map failed for CPU accessible memory\n");
			off -= PAGE_SIZE_2MB;
			goto unmap;
		}
	} else {
		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB) {
			rc = hl_mmu_map(hdev->kernel_ctx,
				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
				hdev->cpu_accessible_dma_address + cpu_off,
				PAGE_SIZE_4KB);
			if (rc) {
				dev_err(hdev->dev,
					"Map failed for CPU accessible memory\n");
				cpu_off -= PAGE_SIZE_4KB;
				goto unmap_cpu;
			}
		}
	}

	goya_mmu_prepare_reg(hdev, mmCPU_IF_ARUSER_OVR, HL_KERNEL_ASID_ID);
	goya_mmu_prepare_reg(hdev, mmCPU_IF_AWUSER_OVR, HL_KERNEL_ASID_ID);
	WREG32(mmCPU_IF_ARUSER_OVR_EN, 0x7FF);
	WREG32(mmCPU_IF_AWUSER_OVR_EN, 0x7FF);

	/* Make sure configuration is flushed to device */
	RREG32(mmCPU_IF_AWUSER_OVR_EN);

	goya->device_cpu_mmu_mappings_done = true;

	return 0;

unmap_cpu:
	for (; cpu_off >= 0 ; cpu_off -= PAGE_SIZE_4KB)
		if (hl_mmu_unmap(hdev->kernel_ctx,
				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
				PAGE_SIZE_4KB))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap address 0x%llx\n",
				VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
unmap:
	for (; off >= 0 ; off -= PAGE_SIZE_2MB)
		if (hl_mmu_unmap(hdev->kernel_ctx,
				prop->dram_base_address + off, PAGE_SIZE_2MB))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap address 0x%llx\n",
				prop->dram_base_address + off);

	return rc;
}
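/* Tear down the device CPU MMU mappings created above, in reverse order. */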
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct goya_device *goya = hdev->asic_specific;
	u32 off, cpu_off;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	if (!goya->device_cpu_mmu_mappings_done)
		return;

	WREG32(mmCPU_IF_ARUSER_OVR_EN, 0);
	WREG32(mmCPU_IF_AWUSER_OVR_EN, 0);

	if (!(hdev->cpu_accessible_dma_address & (PAGE_SIZE_2MB - 1))) {
		if (hl_mmu_unmap(hdev->kernel_ctx, VA_CPU_ACCESSIBLE_MEM_ADDR,
				PAGE_SIZE_2MB))
			dev_warn(hdev->dev,
				"Failed to unmap CPU accessible memory\n");
	} else {
		for (cpu_off = 0 ; cpu_off < SZ_2M ; cpu_off += PAGE_SIZE_4KB)
			if (hl_mmu_unmap(hdev->kernel_ctx,
					VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off,
					PAGE_SIZE_4KB))
				dev_warn_ratelimited(hdev->dev,
					"failed to unmap address 0x%llx\n",
					VA_CPU_ACCESSIBLE_MEM_ADDR + cpu_off);
	}

	for (off = 0 ; off < CPU_FW_IMAGE_SIZE ; off += PAGE_SIZE_2MB)
		if (hl_mmu_unmap(hdev->kernel_ctx,
				prop->dram_base_address + off, PAGE_SIZE_2MB))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap address 0x%llx\n",
					prop->dram_base_address + off);

	goya->device_cpu_mmu_mappings_done = false;
}
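/*
 * Program the given ASID into every QMAN/engine secure-props register listed
 * in goya_mmu_regs[], so transactions from those engines are translated in
 * the context of that ASID (the write also clears the MMU-bypass bit, per
 * the comment below).
 */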
static void goya_mmu_prepare(struct hl_device *hdev, u32 asid)
{
	struct goya_device *goya = hdev->asic_specific;
	int i;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) {
		WARN(1, "asid %u is too big\n", asid);
		return;
	}

	/* zero the MMBP and ASID bits and then set the ASID */
	for (i = 0 ; i < GOYA_MMU_REGS_NUM ; i++)
		goya_mmu_prepare_reg(hdev, goya_mmu_regs[i], asid);
}
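/*
 * Full (hard) MMU cache invalidation: kick the STLB "invalidate all" trigger
 * and poll until the hardware clears it. Goya has no cheaper L1-only flush,
 * so soft invalidations are a no-op here.
 */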
static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 status, timeout_usec;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	/* no need in L1 only invalidation in Goya */
	if (!is_hard)
		return;

	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	mutex_lock(&hdev->mmu_cache_lock);

	/* L0 & L1 invalidation */
	WREG32(mmSTLB_INV_ALL_START, 1);

	rc = hl_poll_timeout(hdev, mmSTLB_INV_ALL_START, status, !status,
				1000, timeout_usec);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc)
		dev_notice_ratelimited(hdev->dev,
			"Timeout when waiting for MMU cache invalidation\n");
}
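/*
 * Range invalidation uses the STLB cache-invalidation queue: read the current
 * producer index, bump it (it wraps at 8 bits) and write it back, then poll
 * the consumer index until the hardware catches up. As the TODO below notes,
 * the ASID/VA/size parameters are not yet applied, so the whole cache is
 * invalidated.
 */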
static void goya_mmu_invalidate_cache_range(struct hl_device *hdev,
				bool is_hard, u32 asid, u64 va, u64 size)
{
	struct goya_device *goya = hdev->asic_specific;
	u32 status, timeout_usec, inv_data, pi;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_MMU))
		return;

	/* no need in L1 only invalidation in Goya */
	if (!is_hard)
		return;

	if (hdev->pldm)
		timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC;
	else
		timeout_usec = MMU_CONFIG_TIMEOUT_USEC;

	mutex_lock(&hdev->mmu_cache_lock);

	/*
	 * TODO: currently invalidate entire L0 & L1 as in regular hard
	 * invalidation. Need to apply invalidation of specific cache lines
	 * with mask of ASID & VA & size.
	 * Note that L1 will be flushed entirely in any case.
	 */

	/* L0 & L1 invalidation */
	inv_data = RREG32(mmSTLB_CACHE_INV);
	/* PI is 8 bit */
	pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF;
	WREG32(mmSTLB_CACHE_INV,
			(inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi);

	rc = hl_poll_timeout(hdev, mmSTLB_INV_CONSUMER_INDEX, status,
				status == pi, 1000, timeout_usec);

	mutex_unlock(&hdev->mmu_cache_lock);

	if (rc)
		dev_notice_ratelimited(hdev->dev,
			"Timeout when waiting for MMU cache invalidation\n");
}
int goya_send_heartbeat(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	return hl_fw_send_heartbeat(hdev);
}
int goya_armcp_info_get(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_size;
	int rc;

	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	rc = hl_fw_armcp_info_get(hdev);
	if (rc)
		return rc;

	dram_size = le64_to_cpu(prop->armcp_info.dram_size);
	if (dram_size) {
		if ((!is_power_of_2(dram_size)) ||
				(dram_size < DRAM_PHYS_DEFAULT_SIZE)) {
			dev_err(hdev->dev,
				"F/W reported invalid DRAM size %llu. Trying to use default size\n",
				dram_size);
			dram_size = DRAM_PHYS_DEFAULT_SIZE;
		}
		prop->dram_size = dram_size;
		prop->dram_end_address = prop->dram_base_address + dram_size;
	}

	return 0;
}
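/*
 * Report whether all compute and DMA engines are idle by checking the idle
 * bits in each queue-manager, command-queue and engine status register. The
 * first busy engine found is reported by name into the caller's buffer via
 * HL_ENG_BUSY().
 */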
static bool goya_is_device_idle(struct hl_device *hdev, char *buf, size_t size)
{
	u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg;
	int i;

	offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0;

	for (i = 0 ; i < DMA_MAX_NUM ; i++) {
		dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset;

		if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) !=
				DMA_QM_IDLE_MASK)
			return HL_ENG_BUSY(buf, size, "DMA%d_QM", i);
	}

	offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0;

	for (i = 0 ; i < TPC_MAX_NUM ; i++) {
		tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset;
		tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset;
		tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset;

		if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) !=
				TPC_QM_IDLE_MASK)
			return HL_ENG_BUSY(buf, size, "TPC%d_QM", i);

		if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) !=
				TPC_CMDQ_IDLE_MASK)
			return HL_ENG_BUSY(buf, size, "TPC%d_CMDQ", i);

		if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) !=
				TPC_CFG_IDLE_MASK)
			return HL_ENG_BUSY(buf, size, "TPC%d_CFG", i);
	}

	if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) !=
			MME_QM_IDLE_MASK)
		return HL_ENG_BUSY(buf, size, "MME_QM");

	if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) !=
			MME_CMDQ_IDLE_MASK)
		return HL_ENG_BUSY(buf, size, "MME_CMDQ");

	if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) !=
			MME_ARCH_IDLE_MASK)
		return HL_ENG_BUSY(buf, size, "MME_ARCH");

	if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK)
		return HL_ENG_BUSY(buf, size, "MME");

	return true;
}
static void goya_hw_queues_lock(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	spin_lock(&goya->hw_queues_lock);
}

static void goya_hw_queues_unlock(struct hl_device *hdev)
{
	struct goya_device *goya = hdev->asic_specific;

	spin_unlock(&goya->hw_queues_lock);
}

static u32 goya_get_pci_id(struct hl_device *hdev)
{
	return hdev->pdev->device;
}

static int goya_get_eeprom_data(struct hl_device *hdev, void *data,
				size_t max_size)
{
	struct goya_device *goya = hdev->asic_specific;

	if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
		return 0;

	return hl_fw_get_eeprom_data(hdev, data, max_size);
}

static enum hl_device_hw_state goya_get_hw_state(struct hl_device *hdev)
{
	return RREG32(mmPSOC_GLOBAL_CONF_APP_STATUS);
}
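/*
 * goya_funcs is the per-ASIC dispatch table used by the common habanalabs
 * code; goya_set_asic_funcs() below installs it on the device structure so
 * the driver core can call into the Goya-specific implementations.
 */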
static const struct hl_asic_funcs goya_funcs = {
	.early_init = goya_early_init,
	.early_fini = goya_early_fini,
	.late_init = goya_late_init,
	.late_fini = goya_late_fini,
	.sw_init = goya_sw_init,
	.sw_fini = goya_sw_fini,
	.hw_init = goya_hw_init,
	.hw_fini = goya_hw_fini,
	.halt_engines = goya_halt_engines,
	.suspend = goya_suspend,
	.resume = goya_resume,
	.cb_mmap = goya_cb_mmap,
	.ring_doorbell = goya_ring_doorbell,
	.flush_pq_write = goya_flush_pq_write,
	.asic_dma_alloc_coherent = goya_dma_alloc_coherent,
	.asic_dma_free_coherent = goya_dma_free_coherent,
	.get_int_queue_base = goya_get_int_queue_base,
	.test_queues = goya_test_queues,
	.asic_dma_pool_zalloc = goya_dma_pool_zalloc,
	.asic_dma_pool_free = goya_dma_pool_free,
	.cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc,
	.cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free,
	.hl_dma_unmap_sg = goya_dma_unmap_sg,
	.cs_parser = goya_cs_parser,
	.asic_dma_map_sg = goya_dma_map_sg,
	.get_dma_desc_list_size = goya_get_dma_desc_list_size,
	.add_end_of_cb_packets = goya_add_end_of_cb_packets,
	.update_eq_ci = goya_update_eq_ci,
	.context_switch = goya_context_switch,
	.restore_phase_topology = goya_restore_phase_topology,
	.debugfs_read32 = goya_debugfs_read32,
	.debugfs_write32 = goya_debugfs_write32,
	.add_device_attr = goya_add_device_attr,
	.handle_eqe = goya_handle_eqe,
	.set_pll_profile = goya_set_pll_profile,
	.get_events_stat = goya_get_events_stat,
	.read_pte = goya_read_pte,
	.write_pte = goya_write_pte,
	.mmu_invalidate_cache = goya_mmu_invalidate_cache,
	.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
	.send_heartbeat = goya_send_heartbeat,
	.debug_coresight = goya_debug_coresight,
	.is_device_idle = goya_is_device_idle,
	.soft_reset_late_init = goya_soft_reset_late_init,
	.hw_queues_lock = goya_hw_queues_lock,
	.hw_queues_unlock = goya_hw_queues_unlock,
	.get_pci_id = goya_get_pci_id,
	.get_eeprom_data = goya_get_eeprom_data,
	.send_cpu_message = goya_send_cpu_message,
	.get_hw_state = goya_get_hw_state,
	.pci_bars_map = goya_pci_bars_map,
	.set_dram_bar_base = goya_set_ddr_bar_base,
	.init_iatu = goya_init_iatu,
	.rreg = hl_rreg,
	.wreg = hl_wreg,
	.halt_coresight = goya_halt_coresight
};
/*
 * goya_set_asic_funcs - set Goya function pointers
 *
 * @hdev: pointer to hl_device structure
 *
 */
void goya_set_asic_funcs(struct hl_device *hdev)
{
	hdev->asic_funcs = &goya_funcs;
}