1 /* SPDX-License-Identifier: GPL-2.0
3 * Copyright 2016-2022 HabanaLabs, Ltd.
11 #include "../include/common/cpucp_if.h"
12 #include "../include/common/qman_if.h"
13 #include "../include/hw_ip/mmu/mmu_general.h"
14 #include <uapi/misc/habanalabs.h>
16 #include <linux/cdev.h>
17 #include <linux/iopoll.h>
18 #include <linux/irqreturn.h>
19 #include <linux/dma-direction.h>
20 #include <linux/scatterlist.h>
21 #include <linux/hashtable.h>
22 #include <linux/debugfs.h>
23 #include <linux/rwsem.h>
24 #include <linux/bitfield.h>
25 #include <linux/genalloc.h>
26 #include <linux/sched/signal.h>
27 #include <linux/io-64-nonatomic-lo-hi.h>
28 #include <linux/coresight.h>
29 #include <linux/dma-buf.h>
31 #define HL_NAME "habanalabs"
33 /* Use upper bits of mmap offset to store habana driver specific information.
34 * bits[63:61] - Encode mmap type
35 * bits[45:0] - mmap offset value
37 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
38 * defines are w.r.t. PAGE_SIZE
40 #define HL_MMAP_TYPE_SHIFT (61 - PAGE_SHIFT)
41 #define HL_MMAP_TYPE_MASK (0x7ull << HL_MMAP_TYPE_SHIFT)
42 #define HL_MMAP_TYPE_BLOCK (0x4ull << HL_MMAP_TYPE_SHIFT)
43 #define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT)
45 #define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT)
46 #define HL_MMAP_OFFSET_VALUE_GET(off) ((off) & HL_MMAP_OFFSET_VALUE_MASK)
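/*
 * Illustrative helper (a hedged sketch, not an API of this driver): shows how
 * an mmap handler would typically split vma->vm_pgoff into the encoded mmap
 * type and the byte offset using the masks above. Because vm_pgoff is in
 * pages, the type bits already sit at bit (61 - PAGE_SHIFT) and the offset is
 * converted back to bytes by shifting left by PAGE_SHIFT.
 */
static inline void hl_example_decode_vm_pgoff(unsigned long vm_pgoff,
					u64 *type, u64 *offset_bytes)
{
	/* compare *type against e.g. HL_MMAP_TYPE_CB or HL_MMAP_TYPE_BLOCK */
	*type = (u64) vm_pgoff & HL_MMAP_TYPE_MASK;
	*offset_bytes = HL_MMAP_OFFSET_VALUE_GET((u64) vm_pgoff) << PAGE_SHIFT;
}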
48 #define HL_PENDING_RESET_PER_SEC 10
49 #define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */
50 #define HL_PENDING_RESET_LONG_SEC 60
52 #define HL_HARD_RESET_MAX_TIMEOUT 120
53 #define HL_PLDM_HARD_RESET_MAX_TIMEOUT (HL_HARD_RESET_MAX_TIMEOUT * 3)
55 #define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
57 #define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
59 #define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
61 #define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
62 #define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
64 #define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */
66 #define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
68 #define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
70 #define HL_COMMON_USER_INTERRUPT_ID 0xFFF
72 #define HL_STATE_DUMP_HIST_LEN 5
74 /* Default value for device reset trigger, an invalid value */
75 #define HL_RESET_TRIGGER_DEFAULT 0xFF
77 #define OBJ_NAMES_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
78 #define SYNC_TO_ENGINE_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
81 #define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
84 #define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
87 * enum hl_mmu_page_table_location - mmu page table location
88 * @MMU_DR_PGT: page-table is located on device DRAM.
89 * @MMU_HR_PGT: page-table is located on host memory.
90 * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
92 enum hl_mmu_page_table_location {
93 MMU_DR_PGT = 0, /* device-dram-resident MMU PGT */
94 MMU_HR_PGT, /* host resident MMU PGT */
95 MMU_NUM_PGT_LOCATIONS /* num of PGT locations */
99 * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
100 * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
102 #define HL_RSVD_SOBS 2
103 #define HL_RSVD_MONS 1
106 * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream
108 #define HL_COLLECTIVE_RSVD_MSTR_MONS 2
110 #define HL_MAX_SOB_VAL (1 << 15)
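/*
 * Note (assumption based on the sync-object description below): a SOB is a
 * 15-bit H/W counter, so it can absorb at most HL_MAX_SOB_VAL - 1 signal
 * increments before the driver must switch to the other reserved SOB and
 * reset it (see @need_reset in struct hl_hw_sob).
 */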
112 #define IS_POWER_OF_2(n) ((n) != 0 && (((n) & ((n) - 1)) == 0))
113 #define IS_MAX_PENDING_CS_VALID(n) (IS_POWER_OF_2(n) && ((n) > 1))
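/*
 * For example: IS_POWER_OF_2(64) is true while IS_POWER_OF_2(0) and
 * IS_POWER_OF_2(96) are false, so IS_MAX_PENDING_CS_VALID(64) holds and
 * IS_MAX_PENDING_CS_VALID(1) does not, since max_pending_cs must be a
 * power of 2 that is greater than 1.
 */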
115 #define HL_PCI_NUM_BARS 6
117 #define HL_MAX_DCORES 4
122 * - HL_DRV_RESET_HARD
123 * If set, do hard reset of all engines. If not set, reset just the
124 * compute/DMA engines.
126 * - HL_DRV_RESET_FROM_RESET_THR
127 * Set if the caller is the hard-reset thread
129 * - HL_DRV_RESET_HEARTBEAT
130 * Set if reset is due to heartbeat
132 * - HL_DRV_RESET_TDR
133 * Set if reset is due to TDR
135 * - HL_DRV_RESET_DEV_RELEASE
136 * Set if reset is due to device release
138 * - HL_DRV_RESET_BYPASS_REQ_TO_FW
139 * F/W will perform the reset. No need to ask it to reset the device. This is relevant
140 * only when running with secured f/w
142 * - HL_DRV_RESET_FW_FATAL_ERR
143 * Set if reset is due to a fatal error from FW
146 #define HL_DRV_RESET_HARD (1 << 0)
147 #define HL_DRV_RESET_FROM_RESET_THR (1 << 1)
148 #define HL_DRV_RESET_HEARTBEAT (1 << 2)
149 #define HL_DRV_RESET_TDR (1 << 3)
150 #define HL_DRV_RESET_DEV_RELEASE (1 << 4)
151 #define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5)
152 #define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
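/*
 * Example (an illustrative combination, not a value taken from the driver):
 * a hard reset that was triggered by a missed heartbeat would typically be
 * requested with
 *
 *	flags = HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT;
 *
 * while leaving HL_DRV_RESET_HARD clear requests a reset of only the
 * compute/DMA engines.
 */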
154 #define HL_MAX_SOBS_PER_MONITOR 8
157 * struct hl_gen_wait_properties - properties for generating a wait CB
158 * @data: command buffer
159 * @q_idx: queue id is used to extract fence register address
160 * @size: offset in command buffer
161 * @sob_base: SOB base to use in this wait CB
162 * @sob_val: SOB value to wait for
163 * @mon_id: monitor to use in this wait CB
164 * @sob_mask: each bit represents a SOB offset from sob_base to be used
166 struct hl_gen_wait_properties {
177 * struct pgt_info - MMU hop page info.
178 * @node: hash linked-list node for the pgts shadow hash of pgts.
179 * @phys_addr: physical address of the pgt.
180 * @shadow_addr: shadow hop in the host.
181 * @ctx: pointer to the owner ctx.
182 * @num_of_ptes: indicates how many ptes are used in the pgt.
184 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
185 * is needed during mapping, a new page is allocated and this structure holds
186 * its essential information. During unmapping, if no valid PTEs remained in the
187 * page, it is freed with its pgt_info structure.
190 struct hlist_node node;
201 * enum hl_pci_match_mode - pci match mode per region
202 * @PCI_ADDRESS_MATCH_MODE: address match mode
203 * @PCI_BAR_MATCH_MODE: bar match mode
205 enum hl_pci_match_mode {
206 PCI_ADDRESS_MATCH_MODE,
211 * enum hl_fw_component - F/W components to read version through registers.
212 * @FW_COMP_BOOT_FIT: boot fit.
213 * @FW_COMP_PREBOOT: preboot.
214 * @FW_COMP_LINUX: linux.
216 enum hl_fw_component {
223 * enum hl_fw_types - F/W types present in the system
224 * @FW_TYPE_NONE: no FW component indication
225 * @FW_TYPE_LINUX: Linux image for device CPU
226 * @FW_TYPE_BOOT_CPU: Boot image for device CPU
227 * @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
228 * (preboot, ppboot etc...)
229 * @FW_TYPE_ALL_TYPES: Mask for all types
234 FW_TYPE_BOOT_CPU = 0x2,
235 FW_TYPE_PREBOOT_CPU = 0x4,
237 (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU | FW_TYPE_PREBOOT_CPU)
241 * enum hl_queue_type - Supported QUEUE types.
242 * @QUEUE_TYPE_NA: queue is not available.
243 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
245 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
246 * memories and/or operates the compute engines.
247 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
248 * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
249 * notifications are sent by H/W.
263 CS_TYPE_COLLECTIVE_WAIT,
269 * struct hl_inbound_pci_region - inbound region descriptor
270 * @mode: pci match mode for this region
271 * @addr: region target address
272 * @size: region size in bytes
273 * @offset_in_bar: offset within bar (address match mode)
276 struct hl_inbound_pci_region {
277 enum hl_pci_match_mode mode;
285 * struct hl_outbound_pci_region - outbound region descriptor
286 * @addr: region target address
287 * @size: region size in bytes
289 struct hl_outbound_pci_region {
295 * enum queue_cb_alloc_flags - Indicates queue support for CBs that are
296 * allocated by the Kernel or by the User
297 * @CB_ALLOC_KERNEL: support only CBs that are allocated by the Kernel
298 * @CB_ALLOC_USER: support only CBs that are allocated by the User
300 enum queue_cb_alloc_flags {
301 CB_ALLOC_KERNEL = 0x1,
306 * struct hl_hw_sob - H/W SOB info.
307 * @hdev: habanalabs device structure.
308 * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
309 * @sob_id: id of this SOB.
310 * @sob_addr: the sob offset from the base address.
311 * @q_idx: the H/W queue that uses this SOB.
312 * @need_reset: reset indication set when switching to the other sob.
315 struct hl_device *hdev;
323 enum hl_collective_mode {
324 HL_COLLECTIVE_NOT_SUPPORTED = 0x0,
325 HL_COLLECTIVE_MASTER = 0x1,
326 HL_COLLECTIVE_SLAVE = 0x2
330 * struct hw_queue_properties - queue information.
332 * @queue_cb_alloc_flags: bitmap which indicates if the hw queue supports CBs
333 * that are allocated by the Kernel driver and therefore,
334 * a CB handle can be provided for jobs on this queue.
335 * Otherwise, a CB address must be provided.
336 * @collective_mode: collective mode of current queue
337 * @driver_only: true if only the driver is allowed to send a job to this queue,
339 * @supports_sync_stream: True if queue supports sync stream
341 struct hw_queue_properties {
342 enum hl_queue_type type;
343 enum queue_cb_alloc_flags cb_alloc_flags;
344 enum hl_collective_mode collective_mode;
346 u8 supports_sync_stream;
350 * enum vm_type - virtual memory mapping request information.
351 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
352 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
355 VM_TYPE_USERPTR = 0x1,
356 VM_TYPE_PHYS_PACK = 0x2
360 * enum mmu_op_flags - mmu operation relevant information.
361 * @MMU_OP_USERPTR: operation on user memory (host resident).
362 * @MMU_OP_PHYS_PACK: operation on DRAM (device resident).
363 * @MMU_OP_CLEAR_MEMCACHE: operation has to clear memcache.
364 * @MMU_OP_SKIP_LOW_CACHE_INV: operation is allowed to skip parts of cache invalidation.
367 MMU_OP_USERPTR = 0x1,
368 MMU_OP_PHYS_PACK = 0x2,
369 MMU_OP_CLEAR_MEMCACHE = 0x4,
370 MMU_OP_SKIP_LOW_CACHE_INV = 0x8,
375 * enum hl_device_hw_state - H/W device state. use this to understand whether
376 * to do reset before hw_init or not
377 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
378 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
381 enum hl_device_hw_state {
382 HL_DEVICE_HW_STATE_CLEAN = 0,
383 HL_DEVICE_HW_STATE_DIRTY
386 #define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0
389 * struct hl_mmu_properties - ASIC specific MMU address translation properties.
390 * @start_addr: virtual start address of the memory region.
391 * @end_addr: virtual end address of the memory region.
392 * @hop0_shift: shift of hop 0 mask.
393 * @hop1_shift: shift of hop 1 mask.
394 * @hop2_shift: shift of hop 2 mask.
395 * @hop3_shift: shift of hop 3 mask.
396 * @hop4_shift: shift of hop 4 mask.
397 * @hop5_shift: shift of hop 5 mask.
398 * @hop0_mask: mask to get the PTE address in hop 0.
399 * @hop1_mask: mask to get the PTE address in hop 1.
400 * @hop2_mask: mask to get the PTE address in hop 2.
401 * @hop3_mask: mask to get the PTE address in hop 3.
402 * @hop4_mask: mask to get the PTE address in hop 4.
403 * @hop5_mask: mask to get the PTE address in hop 5.
404 * @last_mask: mask to get the bit indicating this is the last hop.
405 * @pgt_size: size for page tables.
406 * @page_size: default page size used to allocate memory.
407 * @num_hops: The amount of hops supported by the translation table.
408 * @hop_table_size: HOP table size.
409 * @hop0_tables_total_size: total size for all HOP0 tables.
410 * @host_resident: Should the MMU page table reside in host memory or in the
413 struct hl_mmu_properties {
433 u32 hop0_tables_total_size;
438 * struct hl_hints_range - hint addresses reserved va range.
439 * @start_addr: start address of the va range.
440 * @end_addr: end address of the va range.
442 struct hl_hints_range {
448 * struct asic_fixed_properties - ASIC specific immutable properties.
449 * @hw_queues_props: H/W queues properties.
450 * @cpucp_info: various information received from CPU-CP regarding the H/W, e.g.
452 * @uboot_ver: F/W U-boot version.
453 * @preboot_ver: F/W Preboot version.
454 * @dmmu: DRAM MMU address translation properties.
455 * @pmmu: PCI (host) MMU address translation properties.
456 * @pmmu_huge: PCI (host) MMU address translation properties for memory
457 * allocated with huge pages.
458 * @hints_dram_reserved_va_range: dram hint addresses reserved range.
459 * @hints_host_reserved_va_range: host hint addresses reserved range.
460 * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved
462 * @sram_base_address: SRAM physical start address.
463 * @sram_end_address: SRAM physical end address.
464 * @sram_user_base_address: SRAM physical start address for user access.
465 * @dram_base_address: DRAM physical start address.
466 * @dram_end_address: DRAM physical end address.
467 * @dram_user_base_address: DRAM physical start address for user access.
468 * @dram_size: DRAM total size.
469 * @dram_pci_bar_size: size of PCI bar towards DRAM.
470 * @max_power_default: max power of the device after reset
471 * @dc_power_default: power consumed by the device in idle mode.
472 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
474 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
475 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
476 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
477 * @mmu_dram_default_page_addr: DRAM default page physical address.
478 * @cb_va_start_addr: virtual start address of command buffers which are mapped
479 * to the device's MMU.
480 * @cb_va_end_addr: virtual end address of command buffers which are mapped to
482 * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
483 * for hints validity check.
484 * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
485 * to enable the device to access them.
486 * @max_freq_value: current max clk frequency.
487 * @clk_pll_index: clock PLL index that specify which PLL determines the clock
488 * we display to the user
489 * @mmu_pgt_size: MMU page tables total size.
490 * @mmu_pte_size: PTE size in MMU page tables.
491 * @mmu_hop_table_size: MMU hop table size.
492 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
493 * @dram_page_size: page size for MMU DRAM allocation.
494 * @cfg_size: configuration space size on SRAM.
495 * @sram_size: total size of SRAM.
496 * @max_asid: maximum number of open contexts (ASIDs).
497 * @num_of_events: number of possible internal H/W IRQs.
498 * @psoc_pci_pll_nr: PCI PLL NR value.
499 * @psoc_pci_pll_nf: PCI PLL NF value.
500 * @psoc_pci_pll_od: PCI PLL OD value.
501 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
502 * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
503 * @high_pll: high PLL frequency used by the device.
504 * @cb_pool_cb_cnt: number of CBs in the CB pool.
505 * @cb_pool_cb_size: size of each CB in the CB pool.
506 * @max_pending_cs: maximum number of concurrent pending command submissions
507 * @max_queues: maximum number of queues in the system
508 * @fw_preboot_cpu_boot_dev_sts0: bitmap representation of preboot cpu
509 * capabilities reported by FW, bit description
510 * can be found in CPU_BOOT_DEV_STS0
511 * @fw_preboot_cpu_boot_dev_sts1: bitmap representation of preboot cpu
512 * capabilities reported by FW, bit description
513 * can be found in CPU_BOOT_DEV_STS1
514 * @fw_bootfit_cpu_boot_dev_sts0: bitmap representation of boot cpu security
515 * status reported by FW, bit description can be
516 * found in CPU_BOOT_DEV_STS0
517 * @fw_bootfit_cpu_boot_dev_sts1: bitmap representation of boot cpu security
518 * status reported by FW, bit description can be
519 * found in CPU_BOOT_DEV_STS1
520 * @fw_app_cpu_boot_dev_sts0: bitmap representation of application security
521 * status reported by FW, bit description can be
522 * found in CPU_BOOT_DEV_STS0
523 * @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
524 * status reported by FW, bit description can be
525 * found in CPU_BOOT_DEV_STS1
526 * @collective_first_sob: first sync object available for collective use
527 * @collective_first_mon: first monitor available for collective use
528 * @sync_stream_first_sob: first sync object available for sync stream use
529 * @sync_stream_first_mon: first monitor available for sync stream use
530 * @first_available_user_sob: first sob available for the user
531 * @first_available_user_mon: first monitor available for the user
532 * @first_available_user_msix_interrupt: first available msix interrupt
533 * reserved for the user
534 * @first_available_cq: first available CQ for the user.
535 * @user_interrupt_count: number of user interrupts.
536 * @server_type: Server type that the ASIC is currently installed in.
537 * The value is according to enum hl_server_type in uapi file.
538 * @tpc_enabled_mask: which TPCs are enabled.
539 * @completion_queues_count: number of completion queues.
540 * @fw_security_enabled: true if security measures are enabled in firmware,
542 * @fw_cpu_boot_dev_sts0_valid: status bits are valid and can be fetched from
544 * @fw_cpu_boot_dev_sts1_valid: status bits are valid and can be fetched from
546 * @dram_supports_virtual_memory: is there an MMU towards the DRAM
547 * @hard_reset_done_by_fw: true if firmware is handling hard reset flow
548 * @num_functional_hbms: number of functional HBMs in each DCORE.
549 * @hints_range_reservation: device supports hint address range reservation.
550 * @iatu_done_by_fw: true if iATU configuration is being done by FW.
551 * @dynamic_fw_load: is dynamic FW load supported.
552 * @gic_interrupts_enable: true if FW is not blocking GIC controller,
554 * @use_get_power_for_reset_history: To support backward compatibility for Goya
556 * @supports_soft_reset: is soft reset supported.
557 * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
558 * initiated by user or TDR. This is only true
559 * in inference ASICs, as there is no real-world
560 * use-case of doing soft-reset in training (due
561 * to the fact that training runs on multiple
564 struct asic_fixed_properties {
565 struct hw_queue_properties *hw_queues_props;
566 struct cpucp_info cpucp_info;
567 char uboot_ver[VERSION_MAX_LEN];
568 char preboot_ver[VERSION_MAX_LEN];
569 struct hl_mmu_properties dmmu;
570 struct hl_mmu_properties pmmu;
571 struct hl_mmu_properties pmmu_huge;
572 struct hl_hints_range hints_dram_reserved_va_range;
573 struct hl_hints_range hints_host_reserved_va_range;
574 struct hl_hints_range hints_host_hpage_reserved_va_range;
575 u64 sram_base_address;
576 u64 sram_end_address;
577 u64 sram_user_base_address;
578 u64 dram_base_address;
579 u64 dram_end_address;
580 u64 dram_user_base_address;
582 u64 dram_pci_bar_size;
583 u64 max_power_default;
584 u64 dc_power_default;
585 u64 dram_size_for_default_page_mapping;
586 u64 pcie_dbi_base_address;
587 u64 pcie_aux_dbi_reg_addr;
589 u64 mmu_dram_default_page_addr;
590 u64 cb_va_start_addr;
592 u64 dram_hints_align_mask;
593 u64 device_dma_offset_for_host_access;
598 u32 mmu_hop_table_size;
599 u32 mmu_hop0_tables_total_size;
608 u32 psoc_pci_pll_div_factor;
609 u32 psoc_timestamp_frequency;
615 u32 fw_preboot_cpu_boot_dev_sts0;
616 u32 fw_preboot_cpu_boot_dev_sts1;
617 u32 fw_bootfit_cpu_boot_dev_sts0;
618 u32 fw_bootfit_cpu_boot_dev_sts1;
619 u32 fw_app_cpu_boot_dev_sts0;
620 u32 fw_app_cpu_boot_dev_sts1;
621 u16 collective_first_sob;
622 u16 collective_first_mon;
623 u16 sync_stream_first_sob;
624 u16 sync_stream_first_mon;
625 u16 first_available_user_sob[HL_MAX_DCORES];
626 u16 first_available_user_mon[HL_MAX_DCORES];
627 u16 first_available_user_msix_interrupt;
628 u16 first_available_cq[HL_MAX_DCORES];
629 u16 user_interrupt_count;
632 u8 completion_queues_count;
633 u8 fw_security_enabled;
634 u8 fw_cpu_boot_dev_sts0_valid;
635 u8 fw_cpu_boot_dev_sts1_valid;
636 u8 dram_supports_virtual_memory;
637 u8 hard_reset_done_by_fw;
638 u8 num_functional_hbms;
639 u8 hints_range_reservation;
642 u8 gic_interrupts_enable;
643 u8 use_get_power_for_reset_history;
644 u8 supports_soft_reset;
645 u8 allow_inference_soft_reset;
649 * struct hl_fence - software synchronization primitive
650 * @completion: fence is implemented using completion
651 * @refcount: refcount for this fence
652 * @cs_sequence: sequence of the corresponding command submission
653 * @stream_master_qid_map: streams masters QID bitmap to represent all streams
654 * masters QIDs that multi cs is waiting on
655 * @error: mark this fence with error
656 * @timestamp: timestamp upon completion
657 * @mcs_handling_done: indicates that corresponding command submission has
658 * finished mcs handling, this does not mean it was part
662 struct completion completion;
663 struct kref refcount;
665 u32 stream_master_qid_map;
668 u8 mcs_handling_done;
672 * struct hl_cs_compl - command submission completion object.
673 * @base_fence: hl fence object.
674 * @lock: spinlock to protect fence.
675 * @hdev: habanalabs device structure.
676 * @hw_sob: the H/W SOB used in this signal/wait CS.
677 * @encaps_sig_hdl: encaps signals handler.
678 * @cs_seq: command submission sequence number.
679 * @type: type of the CS - signal/wait.
680 * @sob_val: the SOB value that is used in this signal/wait CS.
681 * @sob_group: the SOB group that is used in this collective wait CS.
682 * @encaps_signals: indication whether it's a completion object of cs with
683 * encaps signals or not.
686 struct hl_fence base_fence;
688 struct hl_device *hdev;
689 struct hl_hw_sob *hw_sob;
690 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
692 enum hl_cs_type type;
703 * struct hl_cb_mgr - describes a Command Buffer Manager.
704 * @cb_lock: protects cb_handles.
705 * @cb_handles: an idr to hold all command buffer handles.
709 struct idr cb_handles; /* protected by cb_lock */
713 * struct hl_cb - describes a Command Buffer.
714 * @refcount: reference counter for usage of the CB.
715 * @hdev: pointer to device this CB belongs to.
716 * @ctx: pointer to the CB owner's context.
717 * @lock: spinlock to protect mmap flows.
718 * @debugfs_list: node in debugfs list of command buffers.
719 * @pool_list: node in pool list of command buffers.
720 * @va_block_list: list of virtual address blocks of the CB if it is mapped to
723 * @kernel_address: Holds the CB's kernel virtual address.
724 * @bus_address: Holds the CB's DMA address.
725 * @mmap_size: Holds the CB's size that was mmaped.
726 * @size: holds the CB's size.
727 * @cs_cnt: holds number of CS that this CB participates in.
728 * @mmap: true if the CB is currently mmaped to user.
729 * @is_pool: true if CB was acquired from the pool, false otherwise.
730 * @is_internal: internally allocated
731 * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
734 struct kref refcount;
735 struct hl_device *hdev;
738 struct list_head debugfs_list;
739 struct list_head pool_list;
740 struct list_head va_block_list;
742 void *kernel_address;
743 dma_addr_t bus_address;
761 /* Queue length of external and HW queues */
762 #define HL_QUEUE_LENGTH 4096
763 #define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
765 #if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
766 #error "HL_QUEUE_LENGTH must be greater than HL_MAX_JOBS_PER_CS"
769 /* HL_CQ_LENGTH is in units of struct hl_cq_entry */
770 #define HL_CQ_LENGTH HL_QUEUE_LENGTH
771 #define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
773 /* Must be power of 2 */
774 #define HL_EQ_LENGTH 64
775 #define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
777 /* Host <-> CPU-CP shared memory size */
778 #define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M
781 * struct hl_sync_stream_properties -
782 * describes a H/W queue sync stream properties
783 * @hw_sob: array of the used H/W SOBs by this H/W queue.
784 * @next_sob_val: the next value to use for the currently used SOB.
785 * @base_sob_id: the base SOB id of the SOBs used by this queue.
786 * @base_mon_id: the base MON id of the MONs used by this queue.
787 * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue
788 * in order to sync with all slave queues.
789 * @collective_slave_mon_id: the MON id used by this slave queue in order to
790 * sync with its master queue.
791 * @collective_sob_id: current SOB id used by this collective slave queue
792 * to signal its collective master queue upon completion.
793 * @curr_sob_offset: the id offset to the currently used SOB from the
794 * HL_RSVD_SOBS that are being used by this queue.
796 struct hl_sync_stream_properties {
797 struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
801 u16 collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS];
802 u16 collective_slave_mon_id;
803 u16 collective_sob_id;
808 * struct hl_encaps_signals_mgr - describes sync stream encapsulated signals
810 * @lock: protects handles.
811 * @handles: an idr to hold all encapsulated signals handles.
813 struct hl_encaps_signals_mgr {
819 * struct hl_hw_queue - describes a H/W transport queue.
820 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
821 * @sync_stream_prop: sync stream queue properties
822 * @queue_type: type of queue.
823 * @collective_mode: collective mode of current queue
824 * @kernel_address: holds the queue's kernel virtual address.
825 * @bus_address: holds the queue's DMA address.
826 * @pi: holds the queue's pi value.
827 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
828 * @hw_queue_id: the id of the H/W queue.
829 * @cq_id: the id for the corresponding CQ for this H/W queue.
830 * @msi_vec: the IRQ number of the H/W queue.
831 * @int_queue_len: length of internal queue (number of entries).
832 * @valid: is the queue valid (we have array of 32 queues, not all of them
834 * @supports_sync_stream: True if queue supports sync stream
837 struct hl_cs_job **shadow_queue;
838 struct hl_sync_stream_properties sync_stream_prop;
839 enum hl_queue_type queue_type;
840 enum hl_collective_mode collective_mode;
841 void *kernel_address;
842 dma_addr_t bus_address;
850 u8 supports_sync_stream;
854 * struct hl_cq - describes a completion queue
855 * @hdev: pointer to the device structure
856 * @kernel_address: holds the queue's kernel virtual address
857 * @bus_address: holds the queue's DMA address
858 * @cq_idx: completion queue index in array
859 * @hw_queue_id: the id of the matching H/W queue
860 * @ci: ci inside the queue
861 * @pi: pi inside the queue
862 * @free_slots_cnt: counter of free slots in queue
865 struct hl_device *hdev;
866 void *kernel_address;
867 dma_addr_t bus_address;
872 atomic_t free_slots_cnt;
876 * struct hl_user_interrupt - holds user interrupt information
877 * @hdev: pointer to the device structure
878 * @wait_list_head: head to the list of user threads pending on this interrupt
879 * @wait_list_lock: protects wait_list_head
880 * @interrupt_id: msix interrupt id
882 struct hl_user_interrupt {
883 struct hl_device *hdev;
884 struct list_head wait_list_head;
885 spinlock_t wait_list_lock;
890 * struct hl_user_pending_interrupt - holds a context to a user thread
891 * pending on an interrupt
892 * @wait_list_node: node in the list of user threads pending on an interrupt
893 * @fence: hl fence object for interrupt completion
894 * @cq_target_value: CQ target value
895 * @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
896 * handler for target value comparison
898 struct hl_user_pending_interrupt {
899 struct list_head wait_list_node;
900 struct hl_fence fence;
906 * struct hl_eq - describes the event queue (single one per device)
907 * @hdev: pointer to the device structure
908 * @kernel_address: holds the queue's kernel virtual address
909 * @bus_address: holds the queue's DMA address
910 * @ci: ci inside the queue
911 * @prev_eqe_index: the index of the previous event queue entry. The index
912 * of the current entry must be +1 of the previous one.
913 * @check_eqe_index: do we need to check the index of the current entry vs. the
914 * previous one. This is for backward compatibility with older
918 struct hl_device *hdev;
919 void *kernel_address;
920 dma_addr_t bus_address;
923 bool check_eqe_index;
932 * enum hl_asic_type - supported ASIC types.
933 * @ASIC_INVALID: Invalid ASIC type.
934 * @ASIC_GOYA: Goya device.
935 * @ASIC_GAUDI: Gaudi device.
936 * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
948 * enum hl_pm_mng_profile - power management profile.
949 * @PM_AUTO: internal clock is set by the Linux driver.
950 * @PM_MANUAL: internal clock is set by the user.
951 * @PM_LAST: last power management type.
953 enum hl_pm_mng_profile {
960 * enum hl_pll_frequency - PLL frequency.
961 * @PLL_HIGH: high frequency.
962 * @PLL_LOW: low frequency.
963 * @PLL_LAST: last frequency values that were configured by the user.
965 enum hl_pll_frequency {
971 #define PLL_REF_CLK 50
973 enum div_select_defs {
976 DIV_SEL_DIVIDED_REF = 2,
977 DIV_SEL_DIVIDED_PLL = 3,
989 * struct pci_mem_region - describe memory region in a PCI bar
990 * @region_base: region base address
991 * @region_size: region size
992 * @bar_size: size of the BAR
993 * @offset_in_bar: region offset into the bar
994 * @bar_id: bar ID of the region
995 * @used: if used 1, otherwise 0
997 struct pci_mem_region {
1007 * struct static_fw_load_mgr - static FW load manager
1008 * @preboot_version_max_off: max offset to preboot version
1009 * @boot_fit_version_max_off: max offset to boot fit version
1010 * @kmd_msg_to_cpu_reg: register address for KMD->CPU messages
1011 * @cpu_cmd_status_to_host_reg: register address for CPU command status response
1012 * @cpu_boot_status_reg: boot status register
1013 * @cpu_boot_dev_status0_reg: boot device status register 0
1014 * @cpu_boot_dev_status1_reg: boot device status register 1
1015 * @boot_err0_reg: boot error register 0
1016 * @boot_err1_reg: boot error register 1
1017 * @preboot_version_offset_reg: SRAM offset to preboot version register
1018 * @boot_fit_version_offset_reg: SRAM offset to boot fit version register
1019 * @sram_offset_mask: mask for getting offset into the SRAM
1020 * @cpu_reset_wait_msec: used when setting WFE via kmd_msg_to_cpu_reg
1022 struct static_fw_load_mgr {
1023 u64 preboot_version_max_off;
1024 u64 boot_fit_version_max_off;
1025 u32 kmd_msg_to_cpu_reg;
1026 u32 cpu_cmd_status_to_host_reg;
1027 u32 cpu_boot_status_reg;
1028 u32 cpu_boot_dev_status0_reg;
1029 u32 cpu_boot_dev_status1_reg;
1032 u32 preboot_version_offset_reg;
1033 u32 boot_fit_version_offset_reg;
1034 u32 sram_offset_mask;
1035 u32 cpu_reset_wait_msec;
1039 * struct fw_response - FW response to LKD command
1040 * @ram_offset: descriptor offset into the RAM
1041 * @ram_type: RAM type containing the descriptor (SRAM/DRAM)
1042 * @status: command status
1044 struct fw_response {
1051 * struct dynamic_fw_load_mgr - dynamic FW load manager
1052 * @response: FW to LKD response
1053 * @comm_desc: the communication descriptor with FW
1054 * @image_region: region to copy the FW image to
1055 * @fw_image_size: size of FW image to load
1056 * @wait_for_bl_timeout: timeout for waiting for boot loader to respond
1057 * @fw_desc_valid: true if FW descriptor has been validated and hence the data can be used
1059 struct dynamic_fw_load_mgr {
1060 struct fw_response response;
1061 struct lkd_fw_comms_desc comm_desc;
1062 struct pci_mem_region *image_region;
1063 size_t fw_image_size;
1064 u32 wait_for_bl_timeout;
1069 * struct fw_image_props - properties of FW image
1070 * @image_name: name of the image
1071 * @src_off: offset in src FW to copy from
1072 * @copy_size: amount of bytes to copy (0 to copy the whole binary)
1074 struct fw_image_props {
1081 * struct fw_load_mgr - manages the FW loading process
1082 * @dynamic_loader: specific structure for dynamic load
1083 * @static_loader: specific structure for static load
1084 * @boot_fit_img: boot fit image properties
1085 * @linux_img: linux image properties
1086 * @cpu_timeout: CPU response timeout in usec
1087 * @boot_fit_timeout: Boot fit load timeout in usec
1088 * @skip_bmc: should BMC be skipped
1089 * @sram_bar_id: SRAM bar ID
1090 * @dram_bar_id: DRAM bar ID
1091 * @fw_comp_loaded: bitmask of loaded FW components. A set bit means the
1092 * component was loaded. Values are set according to enum hl_fw_types.
1094 struct fw_load_mgr {
1096 struct dynamic_fw_load_mgr dynamic_loader;
1097 struct static_fw_load_mgr static_loader;
1099 struct fw_image_props boot_fit_img;
1100 struct fw_image_props linux_img;
1102 u32 boot_fit_timeout;
1110 * struct hl_asic_funcs - ASIC specific functions that can be called from
1112 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
1113 * @early_fini: tears down what was done in early_init.
1114 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
1115 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
1116 * @sw_init: sets up driver state, does not configure H/W.
1117 * @sw_fini: tears down driver state, does not configure H/W.
1118 * @hw_init: sets up the H/W state.
1119 * @hw_fini: tears down the H/W state.
1120 * @halt_engines: halt engines, needed for reset sequence. This also disables
1121 * interrupts from the device. Should be called before
1122 * hw_fini and before CS rollback.
1123 * @suspend: handles IP specific H/W or SW changes for suspend.
1124 * @resume: handles IP specific H/W or SW changes for resume.
1125 * @mmap: maps memory to a user's virtual address space.
1126 * @ring_doorbell: increment PI on a given QMAN.
1127 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
1128 * function because the PQs are located in different memory areas
1129 * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
1130 * writing the PQE must match the destination memory area
1132 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
1133 * dma_alloc_coherent(). This is ASIC function because
1134 * its implementation is not trivial when the driver
1135 * is loaded in simulation mode (not upstreamed).
1136 * @asic_dma_free_coherent: Free coherent DMA memory by calling
1137 * dma_free_coherent(). This is ASIC function because
1138 * its implementation is not trivial when the driver
1139 * is loaded in simulation mode (not upstreamed).
1140 * @scrub_device_mem: Scrub device memory given an address and size
1141 * @get_int_queue_base: get the internal queue base address.
1142 * @test_queues: run simple test on all queues for sanity check.
1143 * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
1144 * size of allocation is HL_DMA_POOL_BLK_SIZE.
1145 * @asic_dma_pool_free: free small DMA allocation from pool.
1146 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
1147 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
1148 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
1149 * @cs_parser: parse Command Submission.
1150 * @asic_dma_map_sg: DMA map scatter-gather list.
1151 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
1152 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
1153 * @update_eq_ci: update event queue CI.
1154 * @context_switch: called upon ASID context switch.
1155 * @restore_phase_topology: clear all SOBs and MONs.
1156 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
1157 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
1158 * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
1159 * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
1160 * @debugfs_read_dma: debug interface for reading up to 2MB from the device's
1161 * internal memory via DMA engine.
1162 * @add_device_attr: add ASIC specific device attributes.
1163 * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
1164 * @get_events_stat: retrieve event queue entries histogram.
1165 * @read_pte: read MMU page table entry from DRAM.
1166 * @write_pte: write MMU page table entry to DRAM.
1167 * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
1168 * (L1 only) or hard (L0 & L1) flush.
1169 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
1170 * ASID-VA-size mask.
1171 * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
1172 * @debug_coresight: perform certain actions on Coresight for debugging.
1173 * @is_device_idle: return true if device is idle, false otherwise.
1174 * @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
1175 * @hw_queues_lock: acquire H/W queues lock.
1176 * @hw_queues_unlock: release H/W queues lock.
1177 * @get_pci_id: retrieve PCI ID.
1178 * @get_eeprom_data: retrieve EEPROM data from F/W.
1179 * @send_cpu_message: send message to F/W. If the message times out, the
1180 * driver will eventually reset the device. The timeout can
1181 * be determined by the calling function or it can be 0 and
1182 * then the timeout is the default timeout for the specific
1184 * @get_hw_state: retrieve the H/W state
1185 * @pci_bars_map: Map PCI BARs.
1186 * @init_iatu: Initialize the iATU unit inside the PCI controller.
1187 * @rreg: Read a register. Needed for simulator support.
1188 * @wreg: Write a register. Needed for simulator support.
1189 * @halt_coresight: stop the ETF and ETR traces.
1190 * @ctx_init: context dependent initialization.
1191 * @ctx_fini: context dependent cleanup.
1192 * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
1193 * @load_firmware_to_device: load the firmware to the device's memory
1194 * @load_boot_fit_to_device: load boot fit to device's memory
1195 * @get_signal_cb_size: Get signal CB size.
1196 * @get_wait_cb_size: Get wait CB size.
1197 * @gen_signal_cb: Generate a signal CB.
1198 * @gen_wait_cb: Generate a wait CB.
1199 * @reset_sob: Reset a SOB.
1200 * @reset_sob_group: Reset SOB group
1201 * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
1202 * firmware configuration
1203 * @get_device_time: Get the device time.
1204 * @collective_wait_init_cs: Generate collective master/slave packets
1205 * and place them in the relevant cs jobs
1206 * @collective_wait_create_jobs: allocate collective wait cs jobs
1207 * @scramble_addr: Routine to scramble the address prior to mapping it
1209 * @descramble_addr: Routine to de-scramble the address prior to
1210 * showing it to users.
1211 * @ack_protection_bits_errors: ack and dump all security violations
1212 * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it.
1213 * also returns the size of the block if caller supplies
1214 * a valid pointer for it
1215 * @hw_block_mmap: mmap a HW block with a given id.
1216 * @enable_events_from_fw: send interrupt to firmware to notify it that the
1217 * driver is ready to receive asynchronous events. This
1218 * function should be called during the first init and
1219 * after every hard-reset of the device
1220 * @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
1221 * @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
1222 * generic f/w compatible PLL Indexes
1223 * @init_firmware_loader: initialize data for FW loader.
1224 * @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
1225 * @state_dump_init: initialize constants required for state dump
1226 * @get_sob_addr: get SOB base address offset.
1227 * @set_pci_memory_regions: setting properties of PCI memory regions
1228 * @get_stream_master_qid_arr: get pointer to stream masters QID array
1230 struct hl_asic_funcs {
1231 int (*early_init)(struct hl_device *hdev);
1232 int (*early_fini)(struct hl_device *hdev);
1233 int (*late_init)(struct hl_device *hdev);
1234 void (*late_fini)(struct hl_device *hdev);
1235 int (*sw_init)(struct hl_device *hdev);
1236 int (*sw_fini)(struct hl_device *hdev);
1237 int (*hw_init)(struct hl_device *hdev);
1238 void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
1239 void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
1240 int (*suspend)(struct hl_device *hdev);
1241 int (*resume)(struct hl_device *hdev);
1242 int (*mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
1243 void *cpu_addr, dma_addr_t dma_addr, size_t size);
1244 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
1245 void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
1247 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
1248 dma_addr_t *dma_handle, gfp_t flag);
1249 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
1250 void *cpu_addr, dma_addr_t dma_handle);
1251 int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
1252 void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
1253 dma_addr_t *dma_handle, u16 *queue_len);
1254 int (*test_queues)(struct hl_device *hdev);
1255 void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
1256 gfp_t mem_flags, dma_addr_t *dma_handle);
1257 void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
1258 dma_addr_t dma_addr);
1259 void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
1260 size_t size, dma_addr_t *dma_handle);
1261 void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
1262 size_t size, void *vaddr);
1263 void (*hl_dma_unmap_sg)(struct hl_device *hdev,
1264 struct scatterlist *sgl, int nents,
1265 enum dma_data_direction dir);
1266 int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
1267 int (*asic_dma_map_sg)(struct hl_device *hdev,
1268 struct scatterlist *sgl, int nents,
1269 enum dma_data_direction dir);
1270 u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
1271 struct sg_table *sgt);
1272 void (*add_end_of_cb_packets)(struct hl_device *hdev,
1273 void *kernel_address, u32 len,
1274 u64 cq_addr, u32 cq_val, u32 msix_num,
1276 void (*update_eq_ci)(struct hl_device *hdev, u32 val);
1277 int (*context_switch)(struct hl_device *hdev, u32 asid);
1278 void (*restore_phase_topology)(struct hl_device *hdev);
1279 int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
1280 bool user_address, u32 *val);
1281 int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
1282 bool user_address, u32 val);
1283 int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
1284 bool user_address, u64 *val);
1285 int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
1286 bool user_address, u64 val);
1287 int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
1289 void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
1290 struct attribute_group *dev_vrm_attr_grp);
1291 void (*handle_eqe)(struct hl_device *hdev,
1292 struct hl_eq_entry *eq_entry);
1293 void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
1295 u64 (*read_pte)(struct hl_device *hdev, u64 addr);
1296 void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
1297 int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
1299 int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
1300 u32 flags, u32 asid, u64 va, u64 size);
1301 int (*send_heartbeat)(struct hl_device *hdev);
1302 int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
1303 bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
1304 u8 mask_len, struct seq_file *s);
1305 int (*non_hard_reset_late_init)(struct hl_device *hdev);
1306 void (*hw_queues_lock)(struct hl_device *hdev);
1307 void (*hw_queues_unlock)(struct hl_device *hdev);
1308 u32 (*get_pci_id)(struct hl_device *hdev);
1309 int (*get_eeprom_data)(struct hl_device *hdev, void *data,
1311 int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
1312 u16 len, u32 timeout, u64 *result);
1313 int (*pci_bars_map)(struct hl_device *hdev);
1314 int (*init_iatu)(struct hl_device *hdev);
1315 u32 (*rreg)(struct hl_device *hdev, u32 reg);
1316 void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
1317 void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx);
1318 int (*ctx_init)(struct hl_ctx *ctx);
1319 void (*ctx_fini)(struct hl_ctx *ctx);
1320 u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
1321 int (*load_firmware_to_device)(struct hl_device *hdev);
1322 int (*load_boot_fit_to_device)(struct hl_device *hdev);
1323 u32 (*get_signal_cb_size)(struct hl_device *hdev);
1324 u32 (*get_wait_cb_size)(struct hl_device *hdev);
1325 u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
1327 u32 (*gen_wait_cb)(struct hl_device *hdev,
1328 struct hl_gen_wait_properties *prop);
1329 void (*reset_sob)(struct hl_device *hdev, void *data);
1330 void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
1331 void (*set_dma_mask_from_fw)(struct hl_device *hdev);
1332 u64 (*get_device_time)(struct hl_device *hdev);
1333 int (*collective_wait_init_cs)(struct hl_cs *cs);
1334 int (*collective_wait_create_jobs)(struct hl_device *hdev,
1335 struct hl_ctx *ctx, struct hl_cs *cs,
1336 u32 wait_queue_id, u32 collective_engine_id,
1337 u32 encaps_signal_offset);
1338 u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
1339 u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
1340 void (*ack_protection_bits_errors)(struct hl_device *hdev);
1341 int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr,
1342 u32 *block_size, u32 *block_id);
1343 int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
1344 u32 block_id, u32 block_size);
1345 void (*enable_events_from_fw)(struct hl_device *hdev);
1346 void (*get_msi_info)(__le32 *table);
1347 int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
1348 void (*init_firmware_loader)(struct hl_device *hdev);
1349 void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
1350 void (*state_dump_init)(struct hl_device *hdev);
1351 u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
1352 void (*set_pci_memory_regions)(struct hl_device *hdev);
1353 u32* (*get_stream_master_qid_arr)(void);
1361 #define HL_KERNEL_ASID_ID 0
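/*
 * Illustrative sketch (the hl_device field names are assumptions made for
 * the example): each ASIC back-end fills an hl_asic_funcs table and the
 * common code dispatches through it, roughly as follows:
 *
 *	hdev->asic_funcs->hw_queues_lock(hdev);
 *	hdev->asic_funcs->ring_doorbell(hdev, hw_queue_id, new_pi);
 *	hdev->asic_funcs->hw_queues_unlock(hdev);
 */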
1364 * enum hl_va_range_type - virtual address range type.
1365 * @HL_VA_RANGE_TYPE_HOST: range type of host pages
1366 * @HL_VA_RANGE_TYPE_HOST_HUGE: range type of host huge pages
1367 * @HL_VA_RANGE_TYPE_DRAM: range type of dram pages
1369 enum hl_va_range_type {
1370 HL_VA_RANGE_TYPE_HOST,
1371 HL_VA_RANGE_TYPE_HOST_HUGE,
1372 HL_VA_RANGE_TYPE_DRAM,
1373 HL_VA_RANGE_TYPE_MAX
1377 * struct hl_va_range - virtual addresses range.
1378 * @lock: protects the virtual addresses list.
1379 * @list: list of virtual addresses blocks available for mappings.
1380 * @start_addr: range start address.
1381 * @end_addr: range end address.
1382 * @page_size: page size of this va range.
1384 struct hl_va_range {
1386 struct list_head list;
1393 * struct hl_cs_counters_atomic - command submission counters
1394 * @out_of_mem_drop_cnt: dropped due to memory allocation issue
1395 * @parsing_drop_cnt: dropped due to error in packet parsing
1396 * @queue_full_drop_cnt: dropped due to queue full
1397 * @device_in_reset_drop_cnt: dropped due to device in reset
1398 * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
1399 * @validation_drop_cnt: dropped due to error in validation
1401 struct hl_cs_counters_atomic {
1402 atomic64_t out_of_mem_drop_cnt;
1403 atomic64_t parsing_drop_cnt;
1404 atomic64_t queue_full_drop_cnt;
1405 atomic64_t device_in_reset_drop_cnt;
1406 atomic64_t max_cs_in_flight_drop_cnt;
1407 atomic64_t validation_drop_cnt;
1411 * struct hl_dmabuf_priv - a dma-buf private object.
1412 * @dmabuf: pointer to dma-buf object.
1413 * @ctx: pointer to the dma-buf owner's context.
1414 * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported for
1415 * memory allocation handle.
1416 * @device_address: physical address of the device's memory. Relevant only
1417 * if phys_pg_pack is NULL (dma-buf was exported from address).
1418 * The total size can be taken from the dmabuf object.
1420 struct hl_dmabuf_priv {
1421 struct dma_buf *dmabuf;
1423 struct hl_vm_phys_pg_pack *phys_pg_pack;
1424 uint64_t device_address;
1428 * struct hl_ctx - user/kernel context.
1429 * @mem_hash: holds mapping from virtual address to virtual memory area
1430 * descriptor (hl_vm_phys_pg_list or hl_userptr).
1431 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
1432 * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
1433 * @hdev: pointer to the device structure.
1434 * @refcount: reference counter for the context. Context is released only when
1435 * this hits 0. It is incremented on CS and CS_WAIT.
1436 * @cs_pending: array of hl fence objects representing pending CS.
1437 * @va_range: holds available virtual addresses for host and dram mappings.
1438 * @mem_hash_lock: protects the mem_hash.
1439 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
1440 * MMU hash or walking the PGT requires taking this lock.
1441 * @hw_block_list_lock: protects the HW block memory list.
1442 * @debugfs_list: node in debugfs list of contexts.
1443 * @hw_block_mem_list: list of HW block virtual mapped addresses.
1444 * @cs_counters: context command submission counters.
1445 * @cb_va_pool: device VA pool for command buffers which are mapped to the
1447 * @sig_mgr: encaps signals handle manager.
1448 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
1449 * to user so user could inquire about CS. It is used as
1450 * index to cs_pending array.
1451 * @dram_default_hops: array that holds all hops addresses needed for default
1453 * @cs_lock: spinlock to protect cs_sequence.
1454 * @dram_phys_mem: amount of used physical DRAM memory by this context.
1455 * @thread_ctx_switch_token: token to prevent multiple threads of the same
1456 * context from running the context switch phase.
1457 * Only a single thread should run it.
1458 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
1459 * the context switch phase from moving to their
1460 * execution phase before the context switch phase
1462 * @asid: context's unique address space ID in the device's MMU.
1463 * @handle: context's opaque handle for user
1466 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
1467 DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
1468 struct hl_fpriv *hpriv;
1469 struct hl_device *hdev;
1470 struct kref refcount;
1471 struct hl_fence **cs_pending;
1472 struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
1473 struct mutex mem_hash_lock;
1474 struct mutex mmu_lock;
1475 struct mutex hw_block_list_lock;
1476 struct list_head debugfs_list;
1477 struct list_head hw_block_mem_list;
1478 struct hl_cs_counters_atomic cs_counters;
1479 struct gen_pool *cb_va_pool;
1480 struct hl_encaps_signals_mgr sig_mgr;
1482 u64 *dram_default_hops;
1484 atomic64_t dram_phys_mem;
1485 atomic_t thread_ctx_switch_token;
1486 u32 thread_ctx_switch_wait_token;
1492 * struct hl_ctx_mgr - for handling multiple contexts.
1493 * @ctx_lock: protects ctx_handles.
1494 * @ctx_handles: idr to hold all ctx handles.
1497 struct mutex ctx_lock;
1498 struct idr ctx_handles;
1504 * COMMAND SUBMISSIONS
1508 * struct hl_userptr - memory mapping chunk information
1509 * @vm_type: type of the VM.
1510 * @job_node: linked-list node for hanging the object on the Job's list.
1511 * @pages: pointer to struct page array
1512 * @npages: size of @pages array
1513 * @sgt: pointer to the scatter-gather table that holds the pages.
1514 * @dir: for DMA unmapping, the direction must be supplied, so save it.
1515 * @debugfs_list: node in debugfs list of command submissions.
1516 * @pid: the pid of the user process owning the memory
1517 * @addr: user-space virtual address of the start of the memory area.
1518 * @size: size of the memory area to pin & map.
1519 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
1522 enum vm_type vm_type; /* must be first */
1523 struct list_head job_node;
1524 struct page **pages;
1525 unsigned int npages;
1526 struct sg_table *sgt;
1527 enum dma_data_direction dir;
1528 struct list_head debugfs_list;
1536 * struct hl_cs - command submission.
1537 * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
1538 * @ctx: the context this CS belongs to.
1539 * @job_list: list of the CS's jobs in the various queues.
1540 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
1541 * @refcount: reference counter for usage of the CS.
1542 * @fence: pointer to the fence object of this CS.
1543 * @signal_fence: pointer to the fence object of the signal CS (used by wait
1545 * @finish_work: workqueue object to run when CS is completed by H/W.
1546 * @work_tdr: delayed work node for TDR.
1547 * @mirror_node: node in device mirror list of command submissions.
1548 * @staged_cs_node: node in the staged cs list.
1549 * @debugfs_list: node in debugfs list of command submissions.
1550 * @encaps_sig_hdl: holds the encaps signals handle.
1551 * @sequence: the sequence number of this CS.
1552 * @staged_sequence: the sequence of the staged submission this CS is part of,
1553 * relevant only if staged_cs is set.
1554 * @timeout_jiffies: cs timeout in jiffies.
1555 * @submission_time_jiffies: submission time of the cs
1557 * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
1558 * @sob_addr_offset: sob offset from the configuration base address.
1559 * @initial_sob_count: count of completed signals in SOB before current submission of signal or
1560 * cs with encaps signals.
1561 * @submitted: true if CS was submitted to H/W.
1562 * @completed: true if CS was completed by device.
1563 * @timedout: true if CS timed out.
1564 * @tdr_active: true if TDR was activated for this CS (to prevent
1565 * double TDR activation).
1566 * @aborted: true if CS was aborted due to some device error.
1567 * @timestamp: true if a timestamp must be captured upon completion.
1568 * @staged_last: true if this is the last staged CS and needs completion.
1569 * @staged_first: true if this is the first staged CS and we need to receive
1570 * timeout for this CS.
1571 * @staged_cs: true if this CS is part of a staged submission.
1572 * @skip_reset_on_timeout: true if we shall not reset the device in case
1573 * timeout occurs (debug scenario).
1574 * @encaps_signals: true if this CS has encaps reserved signals.
1577 u16 *jobs_in_queue_cnt;
1579 struct list_head job_list;
1580 spinlock_t job_lock;
1581 struct kref refcount;
1582 struct hl_fence *fence;
1583 struct hl_fence *signal_fence;
1584 struct work_struct finish_work;
1585 struct delayed_work work_tdr;
1586 struct list_head mirror_node;
1587 struct list_head staged_cs_node;
1588 struct list_head debugfs_list;
1589 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
1591 u64 staged_sequence;
1592 u64 timeout_jiffies;
1593 u64 submission_time_jiffies;
1594 enum hl_cs_type type;
1595 u32 encaps_sig_hdl_id;
1596 u32 sob_addr_offset;
1597 u16 initial_sob_count;
1607 u8 skip_reset_on_timeout;
1612 * struct hl_cs_job - command submission job.
1613 * @cs_node: the node to hang on the CS jobs list.
1614 * @cs: the CS this job belongs to.
1615 * @user_cb: the CB we got from the user.
1616 * @patched_cb: in case of patching, this is internal CB which is submitted on
1617 * the queue instead of the CB we got from the IOCTL.
1618 * @finish_work: workqueue object to run when job is completed.
1619 * @userptr_list: linked-list of userptr mappings that belong to this job and
1620 * wait for completion.
1621 * @debugfs_list: node in debugfs list of command submission jobs.
1622 * @refcount: reference counter for usage of the CS job.
1623 * @queue_type: the type of the H/W queue this job is submitted to.
1624 * @id: the id of this job inside a CS.
1625 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1626 * @user_cb_size: the actual size of the CB we got from the user.
1627 * @job_cb_size: the actual size of the CB that we put on the queue.
1628 * @encaps_sig_wait_offset: encapsulated signals offset, which allows the user
1629 * to wait on part of the reserved signals.
1630 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1631 * handle to a kernel-allocated CB object, false
1632 * otherwise (SRAM/DRAM/host address).
1633 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1634 * info is needed later, when adding the 2xMSG_PROT at the
1635 * end of the JOB, to know which barriers to put in the
1636 * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1637 * have streams so the engine can't be busy by another
1641 struct list_head cs_node;
1643 struct hl_cb *user_cb;
1644 struct hl_cb *patched_cb;
1645 struct work_struct finish_work;
1646 struct list_head userptr_list;
1647 struct list_head debugfs_list;
1648 struct kref refcount;
1649 enum hl_queue_type queue_type;
1654 u32 encaps_sig_wait_offset;
1655 u8 is_kernel_allocated_cb;
1656 u8 contains_dma_pkt;
1660 * struct hl_cs_parser - command submission parser properties.
1661 * @user_cb: the CB we got from the user.
1662 * @patched_cb: in case of patching, this is internal CB which is submitted on
1663 * the queue instead of the CB we got from the IOCTL.
1664 * @job_userptr_list: linked-list of userptr mappings that belong to the related
1665 * job and wait for completion.
1666 * @cs_sequence: the sequence number of the related CS.
1667 * @queue_type: the type of the H/W queue this job is submitted to.
1668 * @ctx_id: the ID of the context the related CS belongs to.
1669 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1670 * @user_cb_size: the actual size of the CB we got from the user.
1671 * @patched_cb_size: the size of the CB after parsing.
1672 * @job_id: the id of the related job inside the related CS.
1673 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1674 * handle to a kernel-allocated CB object, false
1675 * otherwise (SRAM/DRAM/host address).
1676 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1677 * info is needed later, when adding the 2xMSG_PROT at the
1678 * end of the JOB, to know which barriers to put in the
1679 * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1680 * have streams so the engine can't be busy by another stream.
1682 * @completion: true if we need completion for this CS.
1684 struct hl_cs_parser {
1685 struct hl_cb *user_cb;
1686 struct hl_cb *patched_cb;
1687 struct list_head *job_userptr_list;
1689 enum hl_queue_type queue_type;
1693 u32 patched_cb_size;
1695 u8 is_kernel_allocated_cb;
1696 u8 contains_dma_pkt;
1705 * struct hl_vm_hash_node - hash element from virtual address to virtual
1706 * memory area descriptor (hl_vm_phys_pg_list or hl_userptr).
1708 * @node: node to hang on the hash table in context object.
1709 * @vaddr: key virtual address.
1710 * @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr).
1712 struct hl_vm_hash_node {
1713 struct hlist_node node;
1719 * struct hl_vm_hw_block_list_node - list element from user virtual address to HW block id.
1721 * @node: node to hang on the list in context object.
1722 * @ctx: the context this node belongs to.
1723 * @vaddr: virtual address of the HW block.
1724 * @size: size of the block.
1725 * @id: HW block id (handle).
1727 struct hl_vm_hw_block_list_node {
1728 struct list_head node;
1730 unsigned long vaddr;
1736 * struct hl_vm_phys_pg_pack - physical page pack.
1737 * @vm_type: describes the type of the virtual area descriptor.
1738 * @pages: the physical page array.
1739 * @npages: num physical pages in the pack.
1740 * @total_size: total size of all the pages in this list.
1741 * @mapping_cnt: number of shared mappings.
1742 * @exporting_cnt: number of active dma-buf exports.
1743 * @asid: the context related to this list.
1744 * @page_size: size of each page in the pack.
1745 * @flags: HL_MEM_* flags related to this list.
1746 * @handle: the provided handle related to this list.
1747 * @offset: offset from the first page.
1748 * @contiguous: true if the physical memory is contiguous.
1749 * @created_from_userptr: true if created from a host virtual address (userptr).
1751 struct hl_vm_phys_pg_pack {
1752 enum vm_type vm_type; /* must be first */
1756 atomic_t mapping_cnt;
1764 u8 created_from_userptr;
1768 * struct hl_vm_va_block - virtual range block information.
1769 * @node: node to hang on the virtual range list in context object.
1770 * @start: virtual range start address.
1771 * @end: virtual range end address.
1772 * @size: virtual range size.
1774 struct hl_vm_va_block {
1775 struct list_head node;
1782 * struct hl_vm - virtual memory manager for MMU.
1783 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
1784 * @dram_pg_pool_refcount: reference counter for the pool usage.
1785 * @idr_lock: protects phys_pg_pack_handles.
1786 * @phys_pg_pack_handles: idr to hold all device allocations handles.
1787 * @init_done: whether initialization was done. We need this because VM
1788 * initialization might be skipped during device initialization.
1791 struct gen_pool *dram_pg_pool;
1792 struct kref dram_pg_pool_refcount;
1793 spinlock_t idr_lock;
1794 struct idr phys_pg_pack_handles;
1800 * DEBUG, PROFILING STRUCTURE
1804 * struct hl_debug_params - Coresight debug parameters.
1805 * @input: pointer to component specific input parameters.
1806 * @output: pointer to component specific output parameters.
1807 * @output_size: size of output buffer.
1808 * @reg_idx: relevant register ID.
1809 * @op: component operation to execute.
1810 * @enable: true to enable component debugging, false otherwise.
1812 struct hl_debug_params {
1822 * FILE PRIVATE STRUCTURE
1826 * struct hl_fpriv - process information stored in FD private data.
1827 * @hdev: habanalabs device structure.
1828 * @filp: pointer to the given file structure.
1829 * @taskpid: current process ID.
1830 * @ctx: current executing context. TODO: remove for multiple ctx per process
1831 * @ctx_mgr: context manager to handle multiple context for this FD.
1832 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
1833 * @debugfs_list: list of relevant ASIC debugfs.
1834 * @dev_node: node in the device list of file private data
1835 * @refcount: number of related contexts.
1836 * @restore_phase_mutex: lock for context switch and restore phase.
1839 struct hl_device *hdev;
1841 struct pid *taskpid;
1843 struct hl_ctx_mgr ctx_mgr;
1844 struct hl_cb_mgr cb_mgr;
1845 struct list_head debugfs_list;
1846 struct list_head dev_node;
1847 struct kref refcount;
1848 struct mutex restore_phase_mutex;
1857 * struct hl_info_list - debugfs file ops.
1859 * @show: function to output information.
1860 * @write: function to write to the file.
1862 struct hl_info_list {
1864 int (*show)(struct seq_file *s, void *data);
1865 ssize_t (*write)(struct file *file, const char __user *buf,
1866 size_t count, loff_t *f_pos);
1870 * struct hl_debugfs_entry - debugfs dentry wrapper.
1871 * @info_ent: dentry related ops.
1872 * @dev_entry: ASIC specific debugfs manager.
1874 struct hl_debugfs_entry {
1875 const struct hl_info_list *info_ent;
1876 struct hl_dbg_device_entry *dev_entry;
1880 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
1881 * @root: root dentry.
1882 * @hdev: habanalabs device structure.
1883 * @entry_arr: array of available hl_debugfs_entry.
1884 * @file_list: list of available debugfs files.
1885 * @file_mutex: protects file_list.
1886 * @cb_list: list of available CBs.
1887 * @cb_spinlock: protects cb_list.
1888 * @cs_list: list of available CSs.
1889 * @cs_spinlock: protects cs_list.
1890 * @cs_job_list: list of available CB jobs.
1891 * @cs_job_spinlock: protects cs_job_list.
1892 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
1893 * @userptr_spinlock: protects userptr_list.
1894 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
1895 * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
1896 * @blob_desc: descriptor of blob
1897 * @state_dump: data of the system states in case of a bad cs.
1898 * @state_dump_sem: protects state_dump.
1899 * @addr: next address to read/write from/to in read/write32.
1900 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
1901 * @userptr_lookup: the target user ptr to look up for on demand.
1902 * @mmu_asid: ASID to use while translating in mmu_show.
1903 * @state_dump_head: index of the latest state dump
1904 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
1905 * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
1906 * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
1907 * @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
1909 struct hl_dbg_device_entry {
1910 struct dentry *root;
1911 struct hl_device *hdev;
1912 struct hl_debugfs_entry *entry_arr;
1913 struct list_head file_list;
1914 struct mutex file_mutex;
1915 struct list_head cb_list;
1916 spinlock_t cb_spinlock;
1917 struct list_head cs_list;
1918 spinlock_t cs_spinlock;
1919 struct list_head cs_job_list;
1920 spinlock_t cs_job_spinlock;
1921 struct list_head userptr_list;
1922 spinlock_t userptr_spinlock;
1923 struct list_head ctx_mem_hash_list;
1924 spinlock_t ctx_mem_hash_spinlock;
1925 struct debugfs_blob_wrapper blob_desc;
1926 char *state_dump[HL_STATE_DUMP_HIST_LEN];
1927 struct rw_semaphore state_dump_sem;
1932 u32 state_dump_head;
1940 * struct hl_hw_obj_name_entry - single hw object name, member of
1941 * hl_state_dump_specs
1942 * @node: link to the containing hash table
1943 * @name: hw object name
1944 * @id: object identifier
1946 struct hl_hw_obj_name_entry {
1947 struct hlist_node node;
1952 enum hl_state_dump_specs_props {
1953 SP_SYNC_OBJ_BASE_ADDR,
1954 SP_NEXT_SYNC_OBJ_ADDR,
1956 SP_MON_OBJ_WR_ADDR_LOW,
1957 SP_MON_OBJ_WR_ADDR_HIGH,
1959 SP_MON_OBJ_ARM_DATA,
1970 SP_DMA_QUEUES_OFFSET,
1971 SP_NUM_OF_MME_ENGINES,
1973 SP_NUM_OF_DMA_ENGINES,
1974 SP_NUM_OF_TPC_ENGINES,
1975 SP_ENGINE_NUM_OF_QUEUES,
1976 SP_ENGINE_NUM_OF_STREAMS,
1977 SP_ENGINE_NUM_OF_FENCES,
1978 SP_FENCE0_CNT_OFFSET,
1979 SP_FENCE0_RDATA_OFFSET,
1986 enum hl_sync_engine_type {
1993 * struct hl_mon_state_dump - represents a state dump of a single monitor
1995 * @wr_addr_low: address monitor will write to, low bits
1996 * @wr_addr_high: address monitor will write to, high bits
1997 * @wr_data: data monitor will write
1998 * @arm_data: register value containing monitor configuration
1999 * @status: monitor status
2001 struct hl_mon_state_dump {
2011 * struct hl_sync_to_engine_map_entry - sync object id to engine mapping entry
2012 * @engine_type: type of the engine
2013 * @engine_id: id of the engine
2014 * @sync_id: id of the sync object
2016 struct hl_sync_to_engine_map_entry {
2017 struct hlist_node node;
2018 enum hl_sync_engine_type engine_type;
2024 * struct hl_sync_to_engine_map - maps sync object id to associated engine id
2025 * @tb: hash table containing the mapping, each element is of type
2026 * struct hl_sync_to_engine_map_entry
2028 struct hl_sync_to_engine_map {
2029 DECLARE_HASHTABLE(tb, SYNC_TO_ENGINE_HASH_TABLE_BITS);
2033 * struct hl_state_dump_specs_funcs - virtual functions used by the state dump
2034 * @gen_sync_to_engine_map: generate a hash map from sync obj id to its engine
2035 * @print_single_monitor: format monitor data as string
2036 * @monitor_valid: return true if given monitor dump is valid
2037 * @print_fences_single_engine: format fences data as string
2039 struct hl_state_dump_specs_funcs {
2040 int (*gen_sync_to_engine_map)(struct hl_device *hdev,
2041 struct hl_sync_to_engine_map *map);
2042 int (*print_single_monitor)(char **buf, size_t *size, size_t *offset,
2043 struct hl_device *hdev,
2044 struct hl_mon_state_dump *mon);
2045 int (*monitor_valid)(struct hl_mon_state_dump *mon);
2046 int (*print_fences_single_engine)(struct hl_device *hdev,
2048 u64 status_base_offset,
2049 enum hl_sync_engine_type engine_type,
2050 u32 engine_id, char **buf,
2051 size_t *size, size_t *offset);
2055 * struct hl_state_dump_specs - defines ASIC known hw objects names
2056 * @so_id_to_str_tb: sync objects names index table
2057 * @monitor_id_to_str_tb: monitors names index table
2058 * @funcs: virtual functions used for state dump
2059 * @sync_namager_names: readable names for sync manager if available (ex: N_E)
2060 * @props: pointer to a per asic const props array required for state dump
2062 struct hl_state_dump_specs {
2063 DECLARE_HASHTABLE(so_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
2064 DECLARE_HASHTABLE(monitor_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
2065 struct hl_state_dump_specs_funcs funcs;
2066 const char * const *sync_namager_names;
2075 #define HL_STR_MAX 32
2077 #define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
2079 /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
2080 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
2082 #define HL_MAX_MINORS 256
2085 * Registers read & write functions.
2088 u32 hl_rreg(struct hl_device *hdev, u32 reg);
2089 void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
2091 #define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
2092 #define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
2093 #define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
2094 hdev->asic_funcs->rreg(hdev, (reg)))
2096 #define WREG32_P(reg, val, mask)				\
2097 	do {							\
2098 		u32 tmp_ = RREG32(reg);				\
2099 		tmp_ &= (mask);					\
2100 		tmp_ |= ((val) & ~(mask));			\
2101 		WREG32(reg, tmp_);				\
2102 	} while (0)
2103 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2104 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2106 #define RMWREG32(reg, val, mask)				\
2107 	do {							\
2108 		u32 tmp_ = RREG32(reg);				\
2109 		tmp_ &= ~(mask);				\
2110 		tmp_ |= ((val) << __ffs(mask));			\
2111 		WREG32(reg, tmp_);				\
2112 	} while (0)
2114 #define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
2116 #define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
2117 #define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
2118 #define WREG32_FIELD(reg, offset, field, val) \
2119 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
2120 ~REG_FIELD_MASK(reg, field)) | \
2121 (val) << REG_FIELD_SHIFT(reg, field))
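
/*
 * Example (illustrative sketch, not part of the driver): typical use of the
 * read-modify-write helpers above. The hl_example_* names are hypothetical;
 * the register offset, field mask and field value are supplied by the caller.
 */
static inline void hl_example_write_field(struct hl_device *hdev, u32 reg,
						u32 field_mask, u32 field_val)
{
	/* Update only the bits covered by field_mask; field_val is unshifted */
	RMWREG32(reg, field_val, field_mask);
}

static inline u32 hl_example_read_field(struct hl_device *hdev, u32 reg,
					u32 field_mask)
{
	/* Read back just the field, shifted down to bit 0 */
	return RREG32_MASK(reg, field_mask);
}
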
2123 /* Timeout should be longer when working with simulator but cap the
2124 * increased timeout to some maximum
2126 #define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
2128 ktime_t __timeout; \
2130 __timeout = ktime_add_us(ktime_get(), timeout_us); \
2132 __timeout = ktime_add_us(ktime_get(),\
2133 min((u64)(timeout_us * 10), \
2134 (u64) HL_SIM_MAX_TIMEOUT_US)); \
2135 might_sleep_if(sleep_us); \
2137 (val) = RREG32(addr); \
2140 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2141 (val) = RREG32(addr); \
2145 usleep_range((sleep_us >> 2) + 1, sleep_us); \
2147 (cond) ? 0 : -ETIMEDOUT; \
2151 * address in this macro points always to a memory location in the
2152 * host's (server's) memory. That location is updated asynchronously
2153 * either by the direct access of the device or by another core.
2155 * To work both in LE and BE architectures, we need to distinguish between the
2156 * two states (device or another core updates the memory location). Therefore,
2157 * if mem_written_by_device is true, the host memory being polled will be
2158 * updated directly by the device. If false, the host memory being polled will
2159 * be updated by host CPU. Required so host knows whether or not the memory
2160 * might need to be byte-swapped before returning value to caller.
2162 #define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
2163 mem_written_by_device) \
2165 ktime_t __timeout; \
2167 __timeout = ktime_add_us(ktime_get(), timeout_us); \
2169 __timeout = ktime_add_us(ktime_get(),\
2170 min((u64)(timeout_us * 10), \
2171 (u64) HL_SIM_MAX_TIMEOUT_US)); \
2172 might_sleep_if(sleep_us); \
2174 /* Verify we read updates done by other cores or by device */ \
2176 (val) = *((u32 *)(addr)); \
2177 if (mem_written_by_device) \
2178 (val) = le32_to_cpu(*(__le32 *) &(val)); \
2181 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2182 (val) = *((u32 *)(addr)); \
2183 if (mem_written_by_device) \
2184 (val) = le32_to_cpu(*(__le32 *) &(val)); \
2188 usleep_range((sleep_us >> 2) + 1, sleep_us); \
2190 (cond) ? 0 : -ETIMEDOUT; \
2193 #define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
2196 ktime_t __timeout; \
2198 __timeout = ktime_add_us(ktime_get(), timeout_us); \
2200 __timeout = ktime_add_us(ktime_get(),\
2201 min((u64)(timeout_us * 10), \
2202 (u64) HL_SIM_MAX_TIMEOUT_US)); \
2203 might_sleep_if(sleep_us); \
2205 (val) = readl(addr); \
2208 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2209 (val) = readl(addr); \
2213 usleep_range((sleep_us >> 2) + 1, sleep_us); \
2215 (cond) ? 0 : -ETIMEDOUT; \
2218 struct hwmon_chip_info;
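
/*
 * Example (illustrative sketch, not part of the driver): waiting for a bit to
 * be set in a device register with hl_poll_timeout(). The hl_example_* name,
 * register offset and ready bit are hypothetical; the macro evaluates to 0
 * once the condition is met and to -ETIMEDOUT otherwise.
 */
static inline int hl_example_wait_for_ready(struct hl_device *hdev,
						u32 status_reg, u32 ready_bit)
{
	u32 status;

	/* Poll roughly every 1ms, give up after HL_DEVICE_TIMEOUT_USEC */
	return hl_poll_timeout(hdev, status_reg, status, (status & ready_bit),
				1000, HL_DEVICE_TIMEOUT_USEC);
}
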
2221 * struct hl_device_reset_work - reset workqueue task wrapper.
2222 * @wq: work queue for device reset procedure.
2223 * @reset_work: reset work to be done.
2224 * @hdev: habanalabs device structure.
2225 * @flags: reset flags.
2227 struct hl_device_reset_work {
2228 struct workqueue_struct *wq;
2229 struct delayed_work reset_work;
2230 struct hl_device *hdev;
2235 * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop information.
2237 * @virt_addr: the virtual address of the hop.
2238 * @phys_addr: the physical address of the hop (used by the device-mmu).
2239 * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
2241 struct hr_mmu_hop_addrs {
2248 * struct hl_mmu_hr_priv - used for holding per-device mmu host-resident
2249 * page-table internal information.
2250 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
2251 * @mmu_shadow_hop0: shadow array of hop0 tables.
2253 struct hl_mmu_hr_priv {
2254 struct gen_pool *mmu_pgt_pool;
2255 struct hr_mmu_hop_addrs *mmu_shadow_hop0;
2259 * struct hl_mmu_dr_priv - used for holding per-device mmu device-resident
2260 * page-table internal information.
2261 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
2262 * @mmu_shadow_hop0: shadow array of hop0 tables.
2264 struct hl_mmu_dr_priv {
2265 struct gen_pool *mmu_pgt_pool;
2266 void *mmu_shadow_hop0;
2270 * struct hl_mmu_priv - used for holding per-device mmu internal information.
2271 * @dr: information on the device-resident MMU, when exists.
2272 * @hr: information on the host-resident MMU, when exists.
2274 struct hl_mmu_priv {
2275 struct hl_mmu_dr_priv dr;
2276 struct hl_mmu_hr_priv hr;
2280 * struct hl_mmu_per_hop_info - A structure describing one TLB HOP and its entry
2281 * that was created in order to translate a virtual address to a physical one.
2283 * @hop_addr: The address of the hop.
2284 * @hop_pte_addr: The address of the hop entry.
2285 * @hop_pte_val: The value in the hop entry.
2287 struct hl_mmu_per_hop_info {
2294 * struct hl_mmu_hop_info - A structure describing the TLB hops and their
2295 * hop-entries that were created in order to translate a virtual address to a physical one.
2297 * @scrambled_vaddr: The value of the virtual address after scrambling. This
2298 * address replaces the original virtual-address when mapped
2299 * in the MMU tables.
2300 * @unscrambled_paddr: The un-scrambled physical address.
2301 * @hop_info: Array holding the per-hop information used for the translation.
2302 * @used_hops: The number of hops used for the translation.
2303 * @range_type: virtual address range type.
2305 struct hl_mmu_hop_info {
2306 u64 scrambled_vaddr;
2307 u64 unscrambled_paddr;
2308 struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
2310 enum hl_va_range_type range_type;
2314 * struct hl_mmu_funcs - Device related MMU functions.
2315 * @init: initialize the MMU module.
2316 * @fini: release the MMU module.
2317 * @ctx_init: Initialize a context for using the MMU module.
2318 * @ctx_fini: disable a ctx from using the mmu module.
2319 * @map: maps a virtual address to physical address for a context.
2320 * @unmap: unmap a virtual address of a context.
2321 * @flush: flush all writes from all cores to reach device MMU.
2322 * @swap_out: marks all mappings of the given context as swapped out.
2323 * @swap_in: marks all mappings of the given context as swapped in.
2324 * @get_tlb_info: returns the list of hops and hop-entries used that were
2325 * created in order to translate the given virtual address to a physical one.
2328 struct hl_mmu_funcs {
2329 int (*init)(struct hl_device *hdev);
2330 void (*fini)(struct hl_device *hdev);
2331 int (*ctx_init)(struct hl_ctx *ctx);
2332 void (*ctx_fini)(struct hl_ctx *ctx);
2333 int (*map)(struct hl_ctx *ctx,
2334 u64 virt_addr, u64 phys_addr, u32 page_size,
2336 int (*unmap)(struct hl_ctx *ctx,
2337 u64 virt_addr, bool is_dram_addr);
2338 void (*flush)(struct hl_ctx *ctx);
2339 void (*swap_out)(struct hl_ctx *ctx);
2340 void (*swap_in)(struct hl_ctx *ctx);
2341 int (*get_tlb_info)(struct hl_ctx *ctx,
2342 u64 virt_addr, struct hl_mmu_hop_info *hops);
2346 * number of user contexts allowed to call wait_for_multi_cs ioctl in parallel.
2349 #define MULTI_CS_MAX_USER_CTX 2
2352 * struct multi_cs_completion - multi CS wait completion.
2353 * @completion: completion of any of the CS in the list
2354 * @lock: spinlock for the completion structure
2355 * @timestamp: timestamp for the multi-CS completion
2356 * @stream_master_qid_map: bitmap of all stream masters on which the multi-CS is waiting.
2358 * @used: 1 if in use, otherwise 0
2360 struct multi_cs_completion {
2361 struct completion completion;
2364 u32 stream_master_qid_map;
2369 * struct multi_cs_data - internal data for multi CS call
2370 * @ctx: pointer to the context structure
2371 * @fence_arr: array of fences of all CSs
2372 * @seq_arr: array of CS sequence numbers
2373 * @timeout_jiffies: timeout in jiffies for waiting for CS to complete
2374 * @timestamp: timestamp of first completed CS
2375 * @wait_status: wait for CS status
2376 * @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
2377 * @arr_len: fence_arr and seq_arr array length
2378 * @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
2379 * @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
2381 struct multi_cs_data {
2383 struct hl_fence **fence_arr;
2385 s64 timeout_jiffies;
2388 u32 completion_bitmap;
2395 * struct hl_clk_throttle_timestamp - current/last clock throttling timestamp
2396 * @start: timestamp taken when 'start' event is received in driver
2397 * @end: timestamp taken when 'end' event is received in driver
2399 struct hl_clk_throttle_timestamp {
2405 * struct hl_clk_throttle - keeps current/last clock throttling timestamps
2406 * @timestamp: timestamp taken by driver and firmware, index 0 refers to POWER
2407 * index 1 refers to THERMAL
2408 * @lock: protects this structure as it can be accessed from both event queue
2409 * context and info_ioctl context
2410 * @current_reason: bitmask represents the current clk throttling reasons
2411 * @aggregated_reason: bitmask represents aggregated clk throttling reasons since driver load
2413 struct hl_clk_throttle {
2414 struct hl_clk_throttle_timestamp timestamp[HL_CLK_THROTTLE_TYPE_MAX];
2417 u32 aggregated_reason;
2421 * struct last_error_session_info - info about last session in which CS timeout or
2422 * razwi error occurred.
2423 * @open_dev_timestamp: device open timestamp.
2424 * @cs_timeout_timestamp: CS timeout timestamp.
2425 * @razwi_timestamp: razwi timestamp.
2426 * @cs_write_disable: if set writing to CS parameters in the structure is disabled so the
2427 * first (root cause) CS timeout will not be overwritten.
2428 * @razwi_write_disable: if set writing to razwi parameters in the structure is disabled so the
2429 * first (root cause) razwi will not be overwritten.
2430 * @cs_timeout_seq: CS timeout sequence number.
2431 * @razwi_addr: address that caused razwi.
2432 * @razwi_engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
2433 * not have engine id it will be set to U16_MAX.
2434 * @razwi_engine_id_2: second engine id of the razwi initiator. A razwi may have two possible
2435 * initiator engines, only one of which actually caused it. In that case, this field
2436 * holds the second possible engine id, otherwise it is set to U16_MAX.
2437 * @razwi_non_engine_initiator: set if the razwi initiator does not have an engine id.
2438 * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
2440 struct last_error_session_info {
2441 ktime_t open_dev_timestamp;
2442 ktime_t cs_timeout_timestamp;
2443 ktime_t razwi_timestamp;
2444 atomic_t cs_write_disable;
2445 atomic_t razwi_write_disable;
2448 u16 razwi_engine_id_1;
2449 u16 razwi_engine_id_2;
2450 u8 razwi_non_engine_initiator;
2455 * struct hl_reset_info - holds current device reset information.
2456 * @lock: lock to protect critical reset flows.
2457 * @soft_reset_cnt: number of soft reset since the driver was loaded.
2458 * @hard_reset_cnt: number of hard reset since the driver was loaded.
2459 * @hard_reset_schedule_flags: hard reset is scheduled to after current soft reset,
2460 * here we hold the hard reset flags.
2461 * @in_reset: is device in reset flow.
2462 * @is_in_soft_reset: Device is currently in soft reset process.
2463 * @needs_reset: true if reset_on_lockup is false and the device should be reset due to a lockup.
2465 * @hard_reset_pending: is there a hard reset work pending.
2466 * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
2467 * triggered, and cleared after it is shared with preboot.
2468 * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
2469 * with a new value on next reset
2470 * @reset_trigger_repeated: set if device reset is triggered more than once with the same cause.
2472 * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to finish.
2475 struct hl_reset_info {
2479 u32 hard_reset_schedule_flags;
2481 u8 is_in_soft_reset;
2483 u8 hard_reset_pending;
2485 u8 curr_reset_cause;
2486 u8 prev_reset_trigger;
2487 u8 reset_trigger_repeated;
2489 u8 skip_reset_on_timeout;
2493 * struct hl_device - habanalabs device structure.
2494 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
2495 * @pcie_bar_phys: array of available PCIe bars physical addresses.
2496 * (required only for PCI address match mode)
2497 * @pcie_bar: array of available PCIe bars virtual addresses.
2498 * @rmmio: configuration area address on SRAM.
2499 * @cdev: related char device.
2500 * @cdev_ctrl: char device for control operations only (INFO IOCTL)
2501 * @dev: related kernel basic device structure.
2502 * @dev_ctrl: related kernel device structure for the control device
2503 * @work_heartbeat: delayed work for CPU-CP is-alive check.
2504 * @device_reset_work: delayed work which performs hard reset
2505 * @asic_name: ASIC specific name.
2506 * @asic_type: ASIC specific type.
2507 * @completion_queue: array of hl_cq.
2508 * @user_interrupt: array of hl_user_interrupt. upon the corresponding user
2509 * interrupt, driver will monitor the list of fences
2510 * registered to this interrupt.
2511 * @common_user_interrupt: common user interrupt for all user interrupts.
2512 * upon any user interrupt, driver will monitor the
2513 * list of fences registered to this common structure.
2514 * @cq_wq: work queues of completion queues for executing work in process context.
2516 * @eq_wq: work queue of event queue for executing work in process context.
2517 * @sob_reset_wq: work queue for sob reset executions.
2518 * @kernel_ctx: Kernel driver context structure.
2519 * @kernel_queues: array of hl_hw_queue.
2520 * @cs_mirror_list: CS mirror list for TDR.
2521 * @cs_mirror_lock: protects cs_mirror_list.
2522 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
2523 * @event_queue: event queue for IRQ from CPU-CP.
2524 * @dma_pool: DMA pool for small allocations.
2525 * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
2526 * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address.
2527 * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool.
2528 * @asid_bitmap: holds used/available ASIDs.
2529 * @asid_mutex: protects asid_bitmap.
2530 * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
2531 * @debug_lock: protects critical section of setting debug mode for device
2532 * @asic_prop: ASIC specific immutable properties.
2533 * @asic_funcs: ASIC specific functions.
2534 * @asic_specific: ASIC specific information to use only from ASIC files.
2535 * @vm: virtual memory manager for MMU.
2536 * @hwmon_dev: H/W monitor device.
2537 * @hl_chip_info: ASIC's sensors information.
2538 * @device_status_description: device status description.
2539 * @hl_debugfs: device's debugfs manager.
2540 * @cb_pool: list of preallocated CBs.
2541 * @cb_pool_lock: protects the CB pool.
2542 * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
2543 * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
2544 * @internal_cb_pool: internal command buffer memory pool.
2545 * @internal_cb_va_base: internal cb pool mmu virtual address base
2546 * @fpriv_list: list of file private data structures. Each structure is created
2547 * when a user opens the device
2548 * @fpriv_ctrl_list: list of file private data structures. Each structure is created
2549 * when a user opens the control device
2550 * @fpriv_list_lock: protects the fpriv_list
2551 * @fpriv_ctrl_list_lock: protects the fpriv_ctrl_list
2552 * @aggregated_cs_counters: aggregated cs counters among all contexts
2553 * @mmu_priv: device-specific MMU data.
2554 * @mmu_func: device-related MMU functions.
2555 * @fw_loader: FW loader manager.
2556 * @pci_mem_region: array of memory regions in the PCI
2557 * @state_dump_specs: constants and dictionaries needed to dump system state.
2558 * @multi_cs_completion: array of multi-CS completion.
2559 * @clk_throttling: holds information about current/previous clock throttling events
2560 * @reset_info: holds current device reset information.
2561 * @last_error: holds information about last session in which CS timeout or razwi error occurred.
2562 * @stream_master_qid_arr: pointer to array with QIDs of master streams.
2563 * @dram_used_mem: current DRAM memory consumption.
2564 * @timeout_jiffies: device CS timeout value.
2565 * @max_power: the max power of the device, as configured by the sysadmin. This
2566 * value is saved so in case of hard-reset, the driver will restore
2567 * this value and update the F/W after the re-initialization
2568 * @boot_error_status_mask: contains a mask of the device boot error status.
2569 * Each bit represents a different error, according to
2570 * the defines in hl_boot_if.h. If the bit is cleared,
2571 * the error will be ignored by the driver during
2572 * device initialization. Mainly used to debug and
2573 * workaround firmware bugs
2574 * @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
2575 * @last_successful_open_ktime: timestamp (ktime) of the last successful device open.
2576 * @last_successful_open_jif: timestamp (jiffies) of the last successful device open.
2578 * @last_open_session_duration_jif: duration (jiffies) of the last device open session.
2580 * @open_counter: number of successful device open operations.
2581 * @fw_poll_interval_usec: FW status poll interval in usec.
2582 * @card_type: Various ASICs have several card types. This indicates the card
2583 * type of the current device.
2584 * @major: habanalabs kernel driver major.
2585 * @high_pll: high PLL profile frequency.
2586 * @id: device minor.
2587 * @id_control: minor of the control device
2588 * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit PCI addresses.
2590 * @disabled: is device disabled.
2591 * @late_init_done: true if the late init stage was done during initialization.
2592 * @hwmon_initialized: true if the H/W monitor sensors were initialized.
2593 * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
2594 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false otherwise.
2596 * @dram_default_page_mapping: is DRAM default page mapping enabled.
2597 * @memory_scrub: true to perform device memory scrub in various locations,
2598 * such as context-switch, context close, page free, etc.
2599 * @pmmu_huge_range: true if a different virtual address range is used for PMMU with huge pages.
2601 * @init_done: is the initialization of the device done.
2602 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
2603 * @dma_mask: the dma mask that was set for this device
2604 * @in_debug: whether the device is in a state where the profiling/tracing infrastructure
2605 * can be used. This indication is needed because in some ASICs we need to do
2606 * specific operations to enable that infrastructure.
2607 * @cdev_sysfs_created: were char devices and sysfs nodes created.
2608 * @stop_on_err: true if engines should stop on error.
2609 * @supports_sync_stream: is sync stream supported.
2610 * @sync_stream_queue_idx: helper index for sync stream queues initialization.
2611 * @collective_mon_idx: helper index for collective initialization
2612 * @supports_coresight: is CoreSight supported.
2613 * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
2614 * @process_kill_trial_cnt: number of times the reset thread tried killing the user processes.
2616 * @device_fini_pending: true if device_fini was called and might be
2617 * waiting for the reset thread to finish
2618 * @supports_staged_submission: true if staged submissions are supported
2619 * @device_cpu_is_halted: Flag to indicate whether the device CPU was already
2620 * halted. We can't halt it again because the COMMS
2621 * protocol will throw an error. Relevant only for
2622 * cases where Linux was not loaded to device CPU
2623 * @supports_wait_for_multi_cs: true if wait for multi CS is supported
2624 * @is_compute_ctx_active: Whether there is an active compute context executing.
2627 struct pci_dev *pdev;
2628 u64 pcie_bar_phys[HL_PCI_NUM_BARS];
2629 void __iomem *pcie_bar[HL_PCI_NUM_BARS];
2630 void __iomem *rmmio;
2632 struct cdev cdev_ctrl;
2634 struct device *dev_ctrl;
2635 struct delayed_work work_heartbeat;
2636 struct hl_device_reset_work device_reset_work;
2637 char asic_name[HL_STR_MAX];
2638 char status[HL_DEV_STS_MAX][HL_STR_MAX];
2639 enum hl_asic_type asic_type;
2640 struct hl_cq *completion_queue;
2641 struct hl_user_interrupt *user_interrupt;
2642 struct hl_user_interrupt common_user_interrupt;
2643 struct workqueue_struct **cq_wq;
2644 struct workqueue_struct *eq_wq;
2645 struct workqueue_struct *sob_reset_wq;
2646 struct hl_ctx *kernel_ctx;
2647 struct hl_hw_queue *kernel_queues;
2648 struct list_head cs_mirror_list;
2649 spinlock_t cs_mirror_lock;
2650 struct hl_cb_mgr kernel_cb_mgr;
2651 struct hl_eq event_queue;
2652 struct dma_pool *dma_pool;
2653 void *cpu_accessible_dma_mem;
2654 dma_addr_t cpu_accessible_dma_address;
2655 struct gen_pool *cpu_accessible_dma_pool;
2656 unsigned long *asid_bitmap;
2657 struct mutex asid_mutex;
2658 struct mutex send_cpu_message_lock;
2659 struct mutex debug_lock;
2660 struct asic_fixed_properties asic_prop;
2661 const struct hl_asic_funcs *asic_funcs;
2662 void *asic_specific;
2664 struct device *hwmon_dev;
2665 struct hwmon_chip_info *hl_chip_info;
2667 struct hl_dbg_device_entry hl_debugfs;
2669 struct list_head cb_pool;
2670 spinlock_t cb_pool_lock;
2672 void *internal_cb_pool_virt_addr;
2673 dma_addr_t internal_cb_pool_dma_addr;
2674 struct gen_pool *internal_cb_pool;
2675 u64 internal_cb_va_base;
2677 struct list_head fpriv_list;
2678 struct list_head fpriv_ctrl_list;
2679 struct mutex fpriv_list_lock;
2680 struct mutex fpriv_ctrl_list_lock;
2682 struct hl_cs_counters_atomic aggregated_cs_counters;
2684 struct hl_mmu_priv mmu_priv;
2685 struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
2687 struct fw_load_mgr fw_loader;
2689 struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
2691 struct hl_state_dump_specs state_dump_specs;
2693 struct multi_cs_completion multi_cs_completion[
2694 MULTI_CS_MAX_USER_CTX];
2695 struct hl_clk_throttle clk_throttling;
2696 struct last_error_session_info last_error;
2698 struct hl_reset_info reset_info;
2700 u32 *stream_master_qid_arr;
2701 atomic64_t dram_used_mem;
2702 u64 timeout_jiffies;
2704 u64 boot_error_status_mask;
2705 u64 dram_pci_bar_start;
2706 u64 last_successful_open_jif;
2707 u64 last_open_session_duration_jif;
2709 u64 fw_poll_interval_usec;
2710 ktime_t last_successful_open_ktime;
2711 enum cpucp_card_types card_type;
2716 u16 cpu_pci_msb_addr;
2719 u8 hwmon_initialized;
2722 u8 dram_default_page_mapping;
2726 u8 device_cpu_disabled;
2729 u8 cdev_sysfs_created;
2731 u8 supports_sync_stream;
2732 u8 sync_stream_queue_idx;
2733 u8 collective_mon_idx;
2734 u8 supports_coresight;
2735 u8 supports_cb_mapping;
2736 u8 process_kill_trial_cnt;
2737 u8 device_fini_pending;
2738 u8 supports_staged_submission;
2739 u8 device_cpu_is_halted;
2740 u8 supports_wait_for_multi_cs;
2741 u8 stream_master_qid_arr_size;
2742 u8 is_compute_ctx_active;
2744 /* Parameters for bring-up */
2748 u8 mmu_huge_page_opt;
2750 u8 cpu_queues_enable;
2753 u8 sram_scrambler_enable;
2754 u8 dram_scrambler_enable;
2755 u8 hard_reset_on_fw_events;
2758 u8 reset_on_preboot_fail;
2759 u8 reset_upon_device_release;
2760 u8 reset_if_device_not_idle;
2765 * struct hl_cs_encaps_sig_handle - encapsulated signals handle structure
2766 * @refcount: refcount used to protect removing this id when several
2767 * wait CSs are used to wait on the reserved encaps signals.
2768 * @hdev: pointer to habanalabs device structure.
2769 * @hw_sob: pointer to H/W SOB used in the reservation.
2770 * @ctx: pointer to the user's context data structure
2771 * @cs_seq: staged cs sequence which contains encapsulated signals
2772 * @id: idr handler id to be used to fetch the handler info
2773 * @q_idx: stream queue index
2774 * @pre_sob_val: current SOB value before reservation
2775 * @count: signals number
2777 struct hl_cs_encaps_sig_handle {
2778 struct kref refcount;
2779 struct hl_device *hdev;
2780 struct hl_hw_sob *hw_sob;
2794 * typedef hl_ioctl_t - typedef for ioctl function in the driver
2795 * @hpriv: pointer to the FD's private data, which contains state of the device.
2797 * @data: pointer to the input/output arguments structure of the IOCTL
2799 * Return: 0 for success, negative value for error
2801 typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
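
/*
 * Example (illustrative sketch, not a real driver entry point): a handler
 * matching the hl_ioctl_t prototype above. The hl_example_* name is
 * hypothetical; the INFO/CB/CS/WAIT/MEM handlers declared at the end of this
 * file follow the same convention.
 */
static inline int hl_example_noop_ioctl(struct hl_fpriv *hpriv, void *data)
{
	/* A real handler casts @data to its IOCTL-specific arguments struct */
	if (!hpriv || !data)
		return -EINVAL;

	return 0;
}
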
2804 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
2805 * @cmd: the IOCTL code as created by the kernel macros.
2806 * @func: pointer to the driver's function that should be called for this IOCTL.
2808 struct hl_ioctl_desc {
2815 * Kernel module functions that can be accessed by entire module
2819 * hl_get_sg_info() - get number of pages and the DMA address from SG list.
2821 * @dma_addr: pointer to DMA address to return.
2823 * Calculate the number of consecutive pages described by the SG list. Take the
2824 * offset of the address in the first page, add to it the length and round it up
2825 * to the number of needed pages.
2827 static inline u32 hl_get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
2828 {
2829 	*dma_addr = sg_dma_address(sg);
2831 	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
2832 			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
2833 }
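
/*
 * Example (illustrative sketch): summing the number of DMA pages described by
 * an already DMA-mapped scatter-gather table using hl_get_sg_info() above.
 * The hl_example_* name is hypothetical.
 */
static inline u32 hl_example_count_dma_pages(struct sg_table *sgt)
{
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	u32 npages = 0;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i)
		npages += hl_get_sg_info(sg, &dma_addr);

	return npages;
}
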
2836 * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
2837 * @address: The start address of the area we want to validate.
2838 * @size: The size in bytes of the area we want to validate.
2839 * @range_start_address: The start address of the valid range.
2840 * @range_end_address: The end address of the valid range.
2842 * Return: true if the area is inside the valid range, false otherwise.
2844 static inline bool hl_mem_area_inside_range(u64 address, u64 size,
2845 				u64 range_start_address, u64 range_end_address)
2846 {
2847 	u64 end_address = address + size;
2849 	if ((address >= range_start_address) &&
2850 			(end_address <= range_end_address) &&
2851 			(end_address > address))
2852 		return true;
2854 	return false;
2855 }
2858 * hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
2859 * @address: The start address of the area we want to validate.
2860 * @size: The size in bytes of the area we want to validate.
2861 * @range_start_address: The start address of the valid range.
2862 * @range_end_address: The end address of the valid range.
2864 * Return: true if the area overlaps part or all of the valid range, false otherwise.
2867 static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
2868 				u64 range_start_address, u64 range_end_address)
2869 {
2870 	u64 end_address = address + size - 1;
2872 	return ((address <= range_end_address) && (range_start_address <= end_address));
2873 }
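
/*
 * Example (illustrative sketch): validating a user-supplied area against a
 * virtual address range using the two helpers above. The hl_example_* name is
 * hypothetical, and the reserved window is just an example of a region the
 * area must not overlap.
 */
static inline int hl_example_validate_area(u64 addr, u32 size,
					u64 va_start, u64 va_end,
					u64 reserved_start, u64 reserved_end)
{
	/* The area must sit entirely inside the valid VA range... */
	if (!hl_mem_area_inside_range(addr, size, va_start, va_end))
		return -EINVAL;

	/* ...and must not overlap the reserved window */
	if (hl_mem_area_crosses_range(addr, size, reserved_start, reserved_end))
		return -EINVAL;

	return 0;
}
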
2875 int hl_device_open(struct inode *inode, struct file *filp);
2876 int hl_device_open_ctrl(struct inode *inode, struct file *filp);
2877 bool hl_device_operational(struct hl_device *hdev,
2878 enum hl_device_status *status);
2879 enum hl_device_status hl_device_status(struct hl_device *hdev);
2880 int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable);
2881 int hl_hw_queues_create(struct hl_device *hdev);
2882 void hl_hw_queues_destroy(struct hl_device *hdev);
2883 int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
2884 u32 cb_size, u64 cb_ptr);
2885 void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
2886 u32 ctl, u32 len, u64 ptr);
2887 int hl_hw_queue_schedule_cs(struct hl_cs *cs);
2888 u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
2889 void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
2890 void hl_hw_queue_update_ci(struct hl_cs *cs);
2891 void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
2893 #define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
2894 #define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1))
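
/*
 * Example (illustrative sketch): advancing a queue pointer and converting the
 * producer index to a ring-buffer offset with the helpers above. The
 * hl_example_* name is hypothetical.
 */
static inline u32 hl_example_next_pi_offset(u32 pi)
{
	/* Increment with wrap handling, then mask down to a queue offset */
	return hl_pi_2_offset(hl_queue_inc_ptr(pi));
}
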
2896 int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
2897 void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
2898 int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
2899 void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
2900 void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
2901 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
2902 irqreturn_t hl_irq_handler_cq(int irq, void *arg);
2903 irqreturn_t hl_irq_handler_eq(int irq, void *arg);
2904 irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
2905 irqreturn_t hl_irq_handler_default(int irq, void *arg);
2906 u32 hl_cq_inc_ptr(u32 ptr);
2908 int hl_asid_init(struct hl_device *hdev);
2909 void hl_asid_fini(struct hl_device *hdev);
2910 unsigned long hl_asid_alloc(struct hl_device *hdev);
2911 void hl_asid_free(struct hl_device *hdev, unsigned long asid);
2913 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
2914 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
2915 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
2916 void hl_ctx_do_release(struct kref *ref);
2917 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
2918 int hl_ctx_put(struct hl_ctx *ctx);
2919 struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
2920 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
2921 int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
2922 struct hl_fence **fence, u32 arr_len);
2923 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
2924 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
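
/*
 * Example (illustrative sketch): the reference get/put pattern used with the
 * compute context. This assumes hl_get_compute_ctx() returns a referenced
 * context (or NULL), which must be released with hl_ctx_put(). The
 * hl_example_* name is hypothetical.
 */
static inline bool hl_example_compute_ctx_exists(struct hl_device *hdev)
{
	struct hl_ctx *ctx = hl_get_compute_ctx(hdev);

	if (!ctx)
		return false;

	hl_ctx_put(ctx);

	return true;
}
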
2926 int hl_device_init(struct hl_device *hdev, struct class *hclass);
2927 void hl_device_fini(struct hl_device *hdev);
2928 int hl_device_suspend(struct hl_device *hdev);
2929 int hl_device_resume(struct hl_device *hdev);
2930 int hl_device_reset(struct hl_device *hdev, u32 flags);
2931 void hl_hpriv_get(struct hl_fpriv *hpriv);
2932 int hl_hpriv_put(struct hl_fpriv *hpriv);
2933 int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
2935 int hl_build_hwmon_channel_info(struct hl_device *hdev,
2936 struct cpucp_sensor *sensors_arr);
2938 int hl_sysfs_init(struct hl_device *hdev);
2939 void hl_sysfs_fini(struct hl_device *hdev);
2941 int hl_hwmon_init(struct hl_device *hdev);
2942 void hl_hwmon_fini(struct hl_device *hdev);
2944 int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
2945 struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
2946 bool map_cb, u64 *handle);
2947 int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
2948 int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
2949 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
2950 struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
2952 void hl_cb_put(struct hl_cb *cb);
2953 void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
2954 void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
2955 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
2957 int hl_cb_pool_init(struct hl_device *hdev);
2958 int hl_cb_pool_fini(struct hl_device *hdev);
2959 int hl_cb_va_pool_init(struct hl_ctx *ctx);
2960 void hl_cb_va_pool_fini(struct hl_ctx *ctx);
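
/*
 * Example (illustrative sketch): allocating a kernel-owned command buffer and
 * releasing it. This assumes hl_cb_kernel_create() takes (hdev, size,
 * internal_cb) and returns NULL on failure. The hl_example_* name is
 * hypothetical.
 */
static inline int hl_example_with_kernel_cb(struct hl_device *hdev, u32 size)
{
	struct hl_cb *cb;

	cb = hl_cb_kernel_create(hdev, size, false);
	if (!cb)
		return -ENOMEM;

	/* ... fill the CB with packets and submit it to a H/W queue ... */

	hl_cb_put(cb);

	return 0;
}
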
2962 void hl_cs_rollback_all(struct hl_device *hdev);
2963 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
2964 enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
2965 void hl_sob_reset_error(struct kref *ref);
2966 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
2967 void hl_fence_put(struct hl_fence *fence);
2968 void hl_fences_put(struct hl_fence **fence, int len);
2969 void hl_fence_get(struct hl_fence *fence);
2970 void cs_get(struct hl_cs *cs);
2971 bool cs_needs_completion(struct hl_cs *cs);
2972 bool cs_needs_timeout(struct hl_cs *cs);
2973 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
2974 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
2975 void hl_multi_cs_completion_init(struct hl_device *hdev);
2977 void goya_set_asic_funcs(struct hl_device *hdev);
2978 void gaudi_set_asic_funcs(struct hl_device *hdev);
2980 int hl_vm_ctx_init(struct hl_ctx *ctx);
2981 void hl_vm_ctx_fini(struct hl_ctx *ctx);
2983 int hl_vm_init(struct hl_device *hdev);
2984 void hl_vm_fini(struct hl_device *hdev);
2986 void hl_hw_block_mem_init(struct hl_ctx *ctx);
2987 void hl_hw_block_mem_fini(struct hl_ctx *ctx);
2989 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
2990 enum hl_va_range_type type, u32 size, u32 alignment);
2991 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
2992 u64 start_addr, u64 size);
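
/*
 * Example (illustrative sketch): reserving a virtual address block and
 * releasing it again. Assumes a return value of 0 from hl_reserve_va_block()
 * means the reservation failed; the hl_example_* name and PAGE_SIZE alignment
 * are hypothetical choices.
 */
static inline int hl_example_reserve_and_release_va(struct hl_device *hdev,
						struct hl_ctx *ctx,
						enum hl_va_range_type type,
						u32 size)
{
	u64 va;

	va = hl_reserve_va_block(hdev, ctx, type, size, PAGE_SIZE);
	if (!va)
		return -ENOMEM;

	/* ... map pages into the reserved block here ... */

	return hl_unreserve_va_block(hdev, ctx, va, size);
}
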
2993 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2994 struct hl_userptr *userptr);
2995 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
2996 void hl_userptr_delete_list(struct hl_device *hdev,
2997 struct list_head *userptr_list);
2998 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
2999 struct list_head *userptr_list,
3000 struct hl_userptr **userptr);
3002 int hl_mmu_init(struct hl_device *hdev);
3003 void hl_mmu_fini(struct hl_device *hdev);
3004 int hl_mmu_ctx_init(struct hl_ctx *ctx);
3005 void hl_mmu_ctx_fini(struct hl_ctx *ctx);
3006 int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
3007 u32 page_size, bool flush_pte);
3008 int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
3010 int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
3011 u64 phys_addr, u32 size);
3012 int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
3013 int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
3014 int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
3015 u32 flags, u32 asid, u64 va, u64 size);
3016 u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
3017 u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
3018 u8 hop_idx, u64 hop_addr, u64 virt_addr);
3019 void hl_mmu_swap_out(struct hl_ctx *ctx);
3020 void hl_mmu_swap_in(struct hl_ctx *ctx);
3021 int hl_mmu_if_set_funcs(struct hl_device *hdev);
3022 void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
3023 int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
3024 int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
3025 struct hl_mmu_hop_info *hops);
3026 u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
3027 u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
3028 bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
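
/*
 * Example (illustrative sketch): mapping and unmapping a single page through
 * the MMU helpers above. The final argument of hl_mmu_unmap_page() is assumed
 * to request a PTE flush, mirroring hl_mmu_map_page(); the hl_example_* name
 * is hypothetical.
 */
static inline int hl_example_map_one_page(struct hl_ctx *ctx, u64 va, u64 pa,
						u32 page_size)
{
	int rc;

	/* Map and flush the PTE so the device sees the new translation */
	rc = hl_mmu_map_page(ctx, va, pa, page_size, true);
	if (rc)
		return rc;

	/* ... access the mapping ... */

	return hl_mmu_unmap_page(ctx, va, page_size, true);
}
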
3030 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
3031 void __iomem *dst, u32 src_offset, u32 size);
3032 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
3033 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
3034 u16 len, u32 timeout, u64 *result);
3035 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
3036 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
3037 size_t irq_arr_size);
3038 int hl_fw_test_cpu_queue(struct hl_device *hdev);
3039 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3040 dma_addr_t *dma_handle);
3041 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3043 int hl_fw_send_heartbeat(struct hl_device *hdev);
3044 int hl_fw_cpucp_info_get(struct hl_device *hdev,
3045 u32 sts_boot_dev_sts0_reg,
3046 u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3048 int hl_fw_cpucp_handshake(struct hl_device *hdev,
3049 u32 sts_boot_dev_sts0_reg,
3050 u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3052 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
3053 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
3054 struct hl_info_pci_counters *counters);
3055 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
3057 int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
3058 enum pll_index *pll_index);
3059 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
3061 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
3062 void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
3063 void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
3064 int hl_fw_init_cpu(struct hl_device *hdev);
3065 int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
3066 u32 sts_boot_dev_sts0_reg,
3067 u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3068 u32 boot_err1_reg, u32 timeout);
3069 int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
3070 struct fw_load_mgr *fw_loader,
3071 enum comms_cmd cmd, unsigned int size,
3072 bool wait_ok, u32 timeout);
3073 int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
3074 struct cpucp_hbm_row_info *info);
3075 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
3076 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
3077 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
3079 int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
3080 int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
3081 int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
3082 struct hl_inbound_pci_region *pci_region);
3083 int hl_pci_set_outbound_region(struct hl_device *hdev,
3084 struct hl_outbound_pci_region *pci_region);
3085 enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr);
3086 int hl_pci_init(struct hl_device *hdev);
3087 void hl_pci_fini(struct hl_device *hdev);
3089 long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
3090 void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
3091 int hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3092 int hl_set_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3093 int hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3094 int hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3095 int hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3096 int hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3097 void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3098 u64 hl_fw_get_max_power(struct hl_device *hdev);
3099 void hl_fw_set_max_power(struct hl_device *hdev);
3100 int hl_set_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3101 int hl_set_current(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3102 int hl_set_power(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3103 int hl_get_power(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3104 int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
3105 void hl_fw_set_pll_profile(struct hl_device *hdev);
3106 void hl_sysfs_add_dev_clk_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp);
3107 void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *dev_vrm_attr_grp);
3109 void hw_sob_get(struct hl_hw_sob *hw_sob);
3110 void hw_sob_put(struct hl_hw_sob *hw_sob);
3111 void hl_encaps_handle_do_release(struct kref *ref);
3112 void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
3113 struct hl_cs *cs, struct hl_cs_job *job,
3114 struct hl_cs_compl *cs_cmpl);
3115 void hl_release_pending_user_interrupts(struct hl_device *hdev);
3116 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
3117 struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
3119 int hl_state_dump(struct hl_device *hdev);
3120 const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id);
3121 const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
3122 struct hl_mon_state_dump *mon);
3123 void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map);
3124 __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
3125 const char *format, ...);
3126 char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
3127 const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
3129 #ifdef CONFIG_DEBUG_FS
3131 void hl_debugfs_init(void);
3132 void hl_debugfs_fini(void);
3133 void hl_debugfs_add_device(struct hl_device *hdev);
3134 void hl_debugfs_remove_device(struct hl_device *hdev);
3135 void hl_debugfs_add_file(struct hl_fpriv *hpriv);
3136 void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
3137 void hl_debugfs_add_cb(struct hl_cb *cb);
3138 void hl_debugfs_remove_cb(struct hl_cb *cb);
3139 void hl_debugfs_add_cs(struct hl_cs *cs);
3140 void hl_debugfs_remove_cs(struct hl_cs *cs);
3141 void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
3142 void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
3143 void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
3144 void hl_debugfs_remove_userptr(struct hl_device *hdev,
3145 struct hl_userptr *userptr);
3146 void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
3147 void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
3148 void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
3149 unsigned long length);
3153 static inline void __init hl_debugfs_init(void)
3157 static inline void hl_debugfs_fini(void)
3161 static inline void hl_debugfs_add_device(struct hl_device *hdev)
3165 static inline void hl_debugfs_remove_device(struct hl_device *hdev)
3169 static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
3173 static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
3177 static inline void hl_debugfs_add_cb(struct hl_cb *cb)
3181 static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
3185 static inline void hl_debugfs_add_cs(struct hl_cs *cs)
3189 static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
3193 static inline void hl_debugfs_add_job(struct hl_device *hdev,
3194 struct hl_cs_job *job)
3198 static inline void hl_debugfs_remove_job(struct hl_device *hdev,
3199 struct hl_cs_job *job)
3203 static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
3204 struct hl_userptr *userptr)
3208 static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
3209 struct hl_userptr *userptr)
3213 static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
3218 static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
3223 static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
3224 char *data, unsigned long length)
3231 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
3232 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
3233 int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
3234 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
3235 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data);
3236 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
3238 #endif /* HABANALABSP_H_ */