1 /* SPDX-License-Identifier: GPL-2.0
3 * Copyright 2016-2019 HabanaLabs, Ltd.
11 #include "../include/common/cpucp_if.h"
12 #include "../include/common/qman_if.h"
13 #include <uapi/misc/habanalabs.h>
15 #include <linux/cdev.h>
16 #include <linux/iopoll.h>
17 #include <linux/irqreturn.h>
18 #include <linux/dma-direction.h>
19 #include <linux/scatterlist.h>
20 #include <linux/hashtable.h>
21 #include <linux/bitfield.h>
22 #include <linux/genalloc.h>
23 #include <linux/sched/signal.h>
24 #include <linux/io-64-nonatomic-lo-hi.h>
25 #include <linux/coresight.h>
27 #define HL_NAME "habanalabs"
29 /* Use upper bits of mmap offset to store habana driver specific information.
30 * bits[63:62] - Encode mmap type
31 * bits[45:0] - mmap offset value
33 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
34 * defines are w.r.t to PAGE_SIZE
36 #define HL_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT)
37 #define HL_MMAP_TYPE_MASK (0x3ull << HL_MMAP_TYPE_SHIFT)
38 #define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT)
40 #define HL_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFull >> PAGE_SHIFT)
41 #define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK)
43 #define HL_PENDING_RESET_PER_SEC 30
45 #define HL_HARD_RESET_MAX_TIMEOUT 120
47 #define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
49 #define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
51 #define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
53 #define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
54 #define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
56 #define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
58 #define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
60 #define HL_IDLE_BUSY_TS_ARR_SIZE 4096
63 #define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
66 #define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
69 * enum hl_mmu_page_table_locaion - mmu page table location
70 * @MMU_DR_PGT: page-table is located on device DRAM.
71 * @MMU_HR_PGT: page-table is located on host memory.
72 * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
74 enum hl_mmu_page_table_location {
75 MMU_DR_PGT = 0, /* device-dram-resident MMU PGT */
76 MMU_HR_PGT, /* host resident MMU PGT */
77 MMU_NUM_PGT_LOCATIONS /* num of PGT locations */
81 * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
82 * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
84 #define HL_RSVD_SOBS 2
85 #define HL_RSVD_MONS 1
88 * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream
90 #define HL_COLLECTIVE_RSVD_MSTR_MONS 2
92 #define HL_MAX_SOB_VAL (1 << 15)
94 #define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0))
95 #define IS_MAX_PENDING_CS_VALID(n) (IS_POWER_OF_2(n) && (n > 1))
97 #define HL_PCI_NUM_BARS 6
99 #define HL_MAX_DCORES 4
101 #define HL_MAX_SOBS_PER_MONITOR 8
104 * struct hl_gen_wait_properties - properties for generating a wait CB
105 * @data: command buffer
106 * @q_idx: queue id is used to extract fence register address
107 * @size: offset in command buffer
108 * @sob_base: SOB base to use in this wait CB
109 * @sob_val: SOB value to wait for
110 * @mon_id: monitor to use in this wait CB
111 * @sob_mask: each bit represents a SOB offset from sob_base to be used
113 struct hl_gen_wait_properties {
124 * struct pgt_info - MMU hop page info.
125 * @node: hash linked-list node for the pgts shadow hash of pgts.
126 * @phys_addr: physical address of the pgt.
127 * @shadow_addr: shadow hop in the host.
128 * @ctx: pointer to the owner ctx.
129 * @num_of_ptes: indicates how many ptes are used in the pgt.
131 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
132 * is needed during mapping, a new page is allocated and this structure holds
133 * its essential information. During unmapping, if no valid PTEs remained in the
134 * page, it is freed with its pgt_info structure.
137 struct hlist_node node;
148 * enum hl_pci_match_mode - pci match mode per region
149 * @PCI_ADDRESS_MATCH_MODE: address match mode
150 * @PCI_BAR_MATCH_MODE: bar match mode
152 enum hl_pci_match_mode {
153 PCI_ADDRESS_MATCH_MODE,
158 * enum hl_fw_component - F/W components to read version through registers.
159 * @FW_COMP_UBOOT: u-boot.
160 * @FW_COMP_PREBOOT: preboot.
162 enum hl_fw_component {
168 * enum hl_fw_types - F/W types to load
169 * @FW_TYPE_LINUX: Linux image for device CPU
170 * @FW_TYPE_BOOT_CPU: Boot image for device CPU
171 * @FW_TYPE_ALL_TYPES: Mask for all types
175 FW_TYPE_BOOT_CPU = 0x2,
176 FW_TYPE_ALL_TYPES = (FW_TYPE_LINUX | FW_TYPE_BOOT_CPU)
180 * enum hl_queue_type - Supported QUEUE types.
181 * @QUEUE_TYPE_NA: queue is not available.
182 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
184 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
185 * memories and/or operates the compute engines.
186 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
187 * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
188 * notifications are sent by H/W.
202 CS_TYPE_COLLECTIVE_WAIT
206 * struct hl_inbound_pci_region - inbound region descriptor
207 * @mode: pci match mode for this region
208 * @addr: region target address
209 * @size: region size in bytes
210 * @offset_in_bar: offset within bar (address match mode)
213 struct hl_inbound_pci_region {
214 enum hl_pci_match_mode mode;
222 * struct hl_outbound_pci_region - outbound region descriptor
223 * @addr: region target address
224 * @size: region size in bytes
226 struct hl_outbound_pci_region {
232 * enum queue_cb_alloc_flags - Indicates queue support for CBs that
233 * allocated by Kernel or by User
234 * @CB_ALLOC_KERNEL: support only CBs that allocated by Kernel
235 * @CB_ALLOC_USER: support only CBs that allocated by User
237 enum queue_cb_alloc_flags {
238 CB_ALLOC_KERNEL = 0x1,
243 * struct hl_hw_sob - H/W SOB info.
244 * @hdev: habanalabs device structure.
245 * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
246 * @sob_id: id of this SOB.
247 * @q_idx: the H/W queue that uses this SOB.
250 struct hl_device *hdev;
256 enum hl_collective_mode {
257 HL_COLLECTIVE_NOT_SUPPORTED = 0x0,
258 HL_COLLECTIVE_MASTER = 0x1,
259 HL_COLLECTIVE_SLAVE = 0x2
263 * struct hw_queue_properties - queue information.
265 * @queue_cb_alloc_flags: bitmap which indicates if the hw queue supports CB
266 * that allocated by the Kernel driver and therefore,
267 * a CB handle can be provided for jobs on this queue.
268 * Otherwise, a CB address must be provided.
269 * @collective_mode: collective mode of current queue
270 * @driver_only: true if only the driver is allowed to send a job to this queue,
272 * @supports_sync_stream: True if queue supports sync stream
274 struct hw_queue_properties {
275 enum hl_queue_type type;
276 enum queue_cb_alloc_flags cb_alloc_flags;
277 enum hl_collective_mode collective_mode;
279 u8 supports_sync_stream;
283 * enum vm_type_t - virtual memory mapping request information.
284 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
285 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
288 VM_TYPE_USERPTR = 0x1,
289 VM_TYPE_PHYS_PACK = 0x2
293 * enum hl_device_hw_state - H/W device state. use this to understand whether
294 * to do reset before hw_init or not
295 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
296 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
299 enum hl_device_hw_state {
300 HL_DEVICE_HW_STATE_CLEAN = 0,
301 HL_DEVICE_HW_STATE_DIRTY
305 * struct hl_mmu_properties - ASIC specific MMU address translation properties.
306 * @start_addr: virtual start address of the memory region.
307 * @end_addr: virtual end address of the memory region.
308 * @hop0_shift: shift of hop 0 mask.
309 * @hop1_shift: shift of hop 1 mask.
310 * @hop2_shift: shift of hop 2 mask.
311 * @hop3_shift: shift of hop 3 mask.
312 * @hop4_shift: shift of hop 4 mask.
313 * @hop5_shift: shift of hop 5 mask.
314 * @hop0_mask: mask to get the PTE address in hop 0.
315 * @hop1_mask: mask to get the PTE address in hop 1.
316 * @hop2_mask: mask to get the PTE address in hop 2.
317 * @hop3_mask: mask to get the PTE address in hop 3.
318 * @hop4_mask: mask to get the PTE address in hop 4.
319 * @hop5_mask: mask to get the PTE address in hop 5.
320 * @page_size: default page size used to allocate memory.
321 * @num_hops: The amount of hops supported by the translation table.
322 * @host_resident: Should the MMU page table reside in host memory or in the
325 struct hl_mmu_properties {
346 * struct asic_fixed_properties - ASIC specific immutable properties.
347 * @hw_queues_props: H/W queues properties.
348 * @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
350 * @uboot_ver: F/W U-boot version.
351 * @preboot_ver: F/W Preboot version.
352 * @dmmu: DRAM MMU address translation properties.
353 * @pmmu: PCI (host) MMU address translation properties.
354 * @pmmu_huge: PCI (host) MMU address translation properties for memory
355 * allocated with huge pages.
356 * @sram_base_address: SRAM physical start address.
357 * @sram_end_address: SRAM physical end address.
358 * @sram_user_base_address - SRAM physical start address for user access.
359 * @dram_base_address: DRAM physical start address.
360 * @dram_end_address: DRAM physical end address.
361 * @dram_user_base_address: DRAM physical start address for user access.
362 * @dram_size: DRAM total size.
363 * @dram_pci_bar_size: size of PCI bar towards DRAM.
364 * @max_power_default: max power of the device after reset
365 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
367 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
368 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
369 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
370 * @mmu_dram_default_page_addr: DRAM default page physical address.
371 * @cb_va_start_addr: virtual start address of command buffers which are mapped
372 * to the device's MMU.
373 * @cb_va_end_addr: virtual end address of command buffers which are mapped to
375 * @mmu_pgt_size: MMU page tables total size.
376 * @mmu_pte_size: PTE size in MMU page tables.
377 * @mmu_hop_table_size: MMU hop table size.
378 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
379 * @dram_page_size: page size for MMU DRAM allocation.
380 * @cfg_size: configuration space size on SRAM.
381 * @sram_size: total size of SRAM.
382 * @max_asid: maximum number of open contexts (ASIDs).
383 * @num_of_events: number of possible internal H/W IRQs.
384 * @psoc_pci_pll_nr: PCI PLL NR value.
385 * @psoc_pci_pll_nf: PCI PLL NF value.
386 * @psoc_pci_pll_od: PCI PLL OD value.
387 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
388 * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
389 * @high_pll: high PLL frequency used by the device.
390 * @cb_pool_cb_cnt: number of CBs in the CB pool.
391 * @cb_pool_cb_size: size of each CB in the CB pool.
392 * @max_pending_cs: maximum of concurrent pending command submissions
393 * @max_queues: maximum amount of queues in the system
394 * @fw_boot_cpu_security_map: bitmap representation of boot cpu security status
395 * reported by FW, bit description can be found in
397 * @fw_app_security_map: bitmap representation of application security status
398 * reported by FW, bit description can be found in
400 * @collective_first_sob: first sync object available for collective use
401 * @collective_first_mon: first monitor available for collective use
402 * @sync_stream_first_sob: first sync object available for sync stream use
403 * @sync_stream_first_mon: first monitor available for sync stream use
404 * @first_available_user_sob: first sob available for the user
405 * @first_available_user_mon: first monitor available for the user
406 * @tpc_enabled_mask: which TPCs are enabled.
407 * @completion_queues_count: number of completion queues.
408 * @fw_security_disabled: true if security measures are disabled in firmware,
410 * @fw_security_status_valid: security status bits are valid and can be fetched
413 struct asic_fixed_properties {
414 struct hw_queue_properties *hw_queues_props;
415 struct cpucp_info cpucp_info;
416 char uboot_ver[VERSION_MAX_LEN];
417 char preboot_ver[VERSION_MAX_LEN];
418 struct hl_mmu_properties dmmu;
419 struct hl_mmu_properties pmmu;
420 struct hl_mmu_properties pmmu_huge;
421 u64 sram_base_address;
422 u64 sram_end_address;
423 u64 sram_user_base_address;
424 u64 dram_base_address;
425 u64 dram_end_address;
426 u64 dram_user_base_address;
428 u64 dram_pci_bar_size;
429 u64 max_power_default;
430 u64 dram_size_for_default_page_mapping;
431 u64 pcie_dbi_base_address;
432 u64 pcie_aux_dbi_reg_addr;
434 u64 mmu_dram_default_page_addr;
435 u64 cb_va_start_addr;
439 u32 mmu_hop_table_size;
440 u32 mmu_hop0_tables_total_size;
449 u32 psoc_pci_pll_div_factor;
450 u32 psoc_timestamp_frequency;
456 u32 fw_boot_cpu_security_map;
457 u32 fw_app_security_map;
458 u16 collective_first_sob;
459 u16 collective_first_mon;
460 u16 sync_stream_first_sob;
461 u16 sync_stream_first_mon;
462 u16 first_available_user_sob[HL_MAX_DCORES];
463 u16 first_available_user_mon[HL_MAX_DCORES];
465 u8 completion_queues_count;
466 u8 fw_security_disabled;
467 u8 fw_security_status_valid;
471 * struct hl_fence - software synchronization primitive
472 * @completion: fence is implemented using completion
473 * @refcount: refcount for this fence
474 * @error: mark this fence with error
478 struct completion completion;
479 struct kref refcount;
484 * struct hl_cs_compl - command submission completion object.
485 * @base_fence: hl fence object.
486 * @lock: spinlock to protect fence.
487 * @hdev: habanalabs device structure.
488 * @hw_sob: the H/W SOB used in this signal/wait CS.
489 * @cs_seq: command submission sequence number.
490 * @type: type of the CS - signal/wait.
491 * @sob_val: the SOB value that is used in this signal/wait CS.
492 * @sob_group: the SOB group that is used in this collective wait CS.
495 struct hl_fence base_fence;
497 struct hl_device *hdev;
498 struct hl_hw_sob *hw_sob;
500 enum hl_cs_type type;
510 * struct hl_cb_mgr - describes a Command Buffer Manager.
511 * @cb_lock: protects cb_handles.
512 * @cb_handles: an idr to hold all command buffer handles.
516 struct idr cb_handles; /* protected by cb_lock */
520 * struct hl_cb - describes a Command Buffer.
521 * @refcount: reference counter for usage of the CB.
522 * @hdev: pointer to device this CB belongs to.
523 * @ctx: pointer to the CB owner's context.
524 * @lock: spinlock to protect mmap/cs flows.
525 * @debugfs_list: node in debugfs list of command buffers.
526 * @pool_list: node in pool list of command buffers.
527 * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
530 * @kernel_address: Holds the CB's kernel virtual address.
531 * @bus_address: Holds the CB's DMA address.
532 * @mmap_size: Holds the CB's size that was mmaped.
533 * @size: holds the CB's size.
534 * @cs_cnt: holds number of CS that this CB participates in.
535 * @mmap: true if the CB is currently mmaped to user.
536 * @is_pool: true if CB was acquired from the pool, false otherwise.
537 * @is_internal: internaly allocated
538 * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
541 struct kref refcount;
542 struct hl_device *hdev;
545 struct list_head debugfs_list;
546 struct list_head pool_list;
547 struct list_head va_block_list;
549 void *kernel_address;
550 dma_addr_t bus_address;
568 /* Queue length of external and HW queues */
569 #define HL_QUEUE_LENGTH 4096
570 #define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
572 #if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
573 #error "HL_QUEUE_LENGTH must be greater than HL_MAX_JOBS_PER_CS"
576 /* HL_CQ_LENGTH is in units of struct hl_cq_entry */
577 #define HL_CQ_LENGTH HL_QUEUE_LENGTH
578 #define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
580 /* Must be power of 2 */
581 #define HL_EQ_LENGTH 64
582 #define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
584 /* Host <-> CPU-CP shared memory size */
585 #define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M
588 * struct hl_sync_stream_properties -
589 * describes a H/W queue sync stream properties
590 * @hw_sob: array of the used H/W SOBs by this H/W queue.
591 * @next_sob_val: the next value to use for the currently used SOB.
592 * @base_sob_id: the base SOB id of the SOBs used by this queue.
593 * @base_mon_id: the base MON id of the MONs used by this queue.
594 * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue
595 * in order to sync with all slave queues.
596 * @collective_slave_mon_id: the MON id used by this slave queue in order to
597 * sync with its master queue.
598 * @collective_sob_id: current SOB id used by this collective slave queue
599 * to signal its collective master queue upon completion.
600 * @curr_sob_offset: the id offset to the currently used SOB from the
601 * HL_RSVD_SOBS that are being used by this queue.
603 struct hl_sync_stream_properties {
604 struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
608 u16 collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS];
609 u16 collective_slave_mon_id;
610 u16 collective_sob_id;
615 * struct hl_hw_queue - describes a H/W transport queue.
616 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
617 * @sync_stream_prop: sync stream queue properties
618 * @queue_type: type of queue.
619 * @collective_mode: collective mode of current queue
620 * @kernel_address: holds the queue's kernel virtual address.
621 * @bus_address: holds the queue's DMA address.
622 * @pi: holds the queue's pi value.
623 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
624 * @hw_queue_id: the id of the H/W queue.
625 * @cq_id: the id for the corresponding CQ for this H/W queue.
626 * @msi_vec: the IRQ number of the H/W queue.
627 * @int_queue_len: length of internal queue (number of entries).
628 * @valid: is the queue valid (we have array of 32 queues, not all of them
630 * @supports_sync_stream: True if queue supports sync stream
633 struct hl_cs_job **shadow_queue;
634 struct hl_sync_stream_properties sync_stream_prop;
635 enum hl_queue_type queue_type;
636 enum hl_collective_mode collective_mode;
637 void *kernel_address;
638 dma_addr_t bus_address;
646 u8 supports_sync_stream;
650 * struct hl_cq - describes a completion queue
651 * @hdev: pointer to the device structure
652 * @kernel_address: holds the queue's kernel virtual address
653 * @bus_address: holds the queue's DMA address
654 * @cq_idx: completion queue index in array
655 * @hw_queue_id: the id of the matching H/W queue
656 * @ci: ci inside the queue
657 * @pi: pi inside the queue
658 * @free_slots_cnt: counter of free slots in queue
661 struct hl_device *hdev;
662 void *kernel_address;
663 dma_addr_t bus_address;
668 atomic_t free_slots_cnt;
672 * struct hl_eq - describes the event queue (single one per device)
673 * @hdev: pointer to the device structure
674 * @kernel_address: holds the queue's kernel virtual address
675 * @bus_address: holds the queue's DMA address
676 * @ci: ci inside the queue
679 struct hl_device *hdev;
680 void *kernel_address;
681 dma_addr_t bus_address;
691 * enum hl_asic_type - supported ASIC types.
692 * @ASIC_INVALID: Invalid ASIC type.
693 * @ASIC_GOYA: Goya device.
694 * @ASIC_GAUDI: Gaudi device.
705 * enum hl_pm_mng_profile - power management profile.
706 * @PM_AUTO: internal clock is set by the Linux driver.
707 * @PM_MANUAL: internal clock is set by the user.
708 * @PM_LAST: last power management type.
710 enum hl_pm_mng_profile {
717 * enum hl_pll_frequency - PLL frequency.
718 * @PLL_HIGH: high frequency.
719 * @PLL_LOW: low frequency.
720 * @PLL_LAST: last frequency values that were configured by the user.
722 enum hl_pll_frequency {
728 #define PLL_REF_CLK 50
730 enum div_select_defs {
733 DIV_SEL_DIVIDED_REF = 2,
734 DIV_SEL_DIVIDED_PLL = 3,
738 * struct hl_asic_funcs - ASIC specific functions that are can be called from
740 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
741 * @early_fini: tears down what was done in early_init.
742 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
743 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
744 * @sw_init: sets up driver state, does not configure H/W.
745 * @sw_fini: tears down driver state, does not configure H/W.
746 * @hw_init: sets up the H/W state.
747 * @hw_fini: tears down the H/W state.
748 * @halt_engines: halt engines, needed for reset sequence. This also disables
749 * interrupts from the device. Should be called before
750 * hw_fini and before CS rollback.
751 * @suspend: handles IP specific H/W or SW changes for suspend.
752 * @resume: handles IP specific H/W or SW changes for resume.
753 * @cb_mmap: maps a CB.
754 * @ring_doorbell: increment PI on a given QMAN.
755 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
756 * function because the PQs are located in different memory areas
757 * per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
758 * writing the PQE must match the destination memory area
760 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
761 * dma_alloc_coherent(). This is ASIC function because
762 * its implementation is not trivial when the driver
763 * is loaded in simulation mode (not upstreamed).
764 * @asic_dma_free_coherent: Free coherent DMA memory by calling
765 * dma_free_coherent(). This is ASIC function because
766 * its implementation is not trivial when the driver
767 * is loaded in simulation mode (not upstreamed).
768 * @scrub_device_mem: Scrub device memory given an address and size
769 * @get_int_queue_base: get the internal queue base address.
770 * @test_queues: run simple test on all queues for sanity check.
771 * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
772 * size of allocation is HL_DMA_POOL_BLK_SIZE.
773 * @asic_dma_pool_free: free small DMA allocation from pool.
774 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
775 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
776 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
777 * @cs_parser: parse Command Submission.
778 * @asic_dma_map_sg: DMA map scatter-gather list.
779 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
780 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
781 * @update_eq_ci: update event queue CI.
782 * @context_switch: called upon ASID context switch.
783 * @restore_phase_topology: clear all SOBs amd MONs.
784 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
785 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
786 * @add_device_attr: add ASIC specific device attributes.
787 * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
788 * @set_pll_profile: change PLL profile (manual/automatic).
789 * @get_events_stat: retrieve event queue entries histogram.
790 * @read_pte: read MMU page table entry from DRAM.
791 * @write_pte: write MMU page table entry to DRAM.
792 * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
793 * (L1 only) or hard (L0 & L1) flush.
794 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
796 * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
797 * @set_clock_gating: enable/disable clock gating per engine according to
798 * clock gating mask in hdev
799 * @disable_clock_gating: disable clock gating completely
800 * @debug_coresight: perform certain actions on Coresight for debugging.
801 * @is_device_idle: return true if device is idle, false otherwise.
802 * @soft_reset_late_init: perform certain actions needed after soft reset.
803 * @hw_queues_lock: acquire H/W queues lock.
804 * @hw_queues_unlock: release H/W queues lock.
805 * @get_pci_id: retrieve PCI ID.
806 * @get_eeprom_data: retrieve EEPROM data from F/W.
807 * @send_cpu_message: send message to F/W. If the message is timedout, the
808 * driver will eventually reset the device. The timeout can
809 * be determined by the calling function or it can be 0 and
810 * then the timeout is the default timeout for the specific
812 * @get_hw_state: retrieve the H/W state
813 * @pci_bars_map: Map PCI BARs.
814 * @init_iatu: Initialize the iATU unit inside the PCI controller.
815 * @rreg: Read a register. Needed for simulator support.
816 * @wreg: Write a register. Needed for simulator support.
817 * @halt_coresight: stop the ETF and ETR traces.
818 * @ctx_init: context dependent initialization.
819 * @ctx_fini: context dependent cleanup.
820 * @get_clk_rate: Retrieve the ASIC current and maximum clock rate in MHz
821 * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
822 * @read_device_fw_version: read the device's firmware versions that are
823 * contained in registers
824 * @load_firmware_to_device: load the firmware to the device's memory
825 * @load_boot_fit_to_device: load boot fit to device's memory
826 * @get_signal_cb_size: Get signal CB size.
827 * @get_wait_cb_size: Get wait CB size.
828 * @gen_signal_cb: Generate a signal CB.
829 * @gen_wait_cb: Generate a wait CB.
830 * @reset_sob: Reset a SOB.
831 * @reset_sob_group: Reset SOB group
832 * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
833 * firmware configuration
834 * @get_device_time: Get the device time.
835 * @collective_wait_init_cs: Generate collective master/slave packets
836 * and place them in the relevant cs jobs
837 * @collective_wait_create_jobs: allocate collective wait cs jobs
839 struct hl_asic_funcs {
840 int (*early_init)(struct hl_device *hdev);
841 int (*early_fini)(struct hl_device *hdev);
842 int (*late_init)(struct hl_device *hdev);
843 void (*late_fini)(struct hl_device *hdev);
844 int (*sw_init)(struct hl_device *hdev);
845 int (*sw_fini)(struct hl_device *hdev);
846 int (*hw_init)(struct hl_device *hdev);
847 void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
848 void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
849 int (*suspend)(struct hl_device *hdev);
850 int (*resume)(struct hl_device *hdev);
851 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
852 void *cpu_addr, dma_addr_t dma_addr, size_t size);
853 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
854 void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
856 void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
857 dma_addr_t *dma_handle, gfp_t flag);
858 void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
859 void *cpu_addr, dma_addr_t dma_handle);
860 int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
861 void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
862 dma_addr_t *dma_handle, u16 *queue_len);
863 int (*test_queues)(struct hl_device *hdev);
864 void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
865 gfp_t mem_flags, dma_addr_t *dma_handle);
866 void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
867 dma_addr_t dma_addr);
868 void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
869 size_t size, dma_addr_t *dma_handle);
870 void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
871 size_t size, void *vaddr);
872 void (*hl_dma_unmap_sg)(struct hl_device *hdev,
873 struct scatterlist *sgl, int nents,
874 enum dma_data_direction dir);
875 int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
876 int (*asic_dma_map_sg)(struct hl_device *hdev,
877 struct scatterlist *sgl, int nents,
878 enum dma_data_direction dir);
879 u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
880 struct sg_table *sgt);
881 void (*add_end_of_cb_packets)(struct hl_device *hdev,
882 void *kernel_address, u32 len,
883 u64 cq_addr, u32 cq_val, u32 msix_num,
885 void (*update_eq_ci)(struct hl_device *hdev, u32 val);
886 int (*context_switch)(struct hl_device *hdev, u32 asid);
887 void (*restore_phase_topology)(struct hl_device *hdev);
888 int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
889 int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
890 int (*debugfs_read64)(struct hl_device *hdev, u64 addr, u64 *val);
891 int (*debugfs_write64)(struct hl_device *hdev, u64 addr, u64 val);
892 void (*add_device_attr)(struct hl_device *hdev,
893 struct attribute_group *dev_attr_grp);
894 void (*handle_eqe)(struct hl_device *hdev,
895 struct hl_eq_entry *eq_entry);
896 void (*set_pll_profile)(struct hl_device *hdev,
897 enum hl_pll_frequency freq);
898 void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
900 u64 (*read_pte)(struct hl_device *hdev, u64 addr);
901 void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
902 int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
904 int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
905 u32 asid, u64 va, u64 size);
906 int (*send_heartbeat)(struct hl_device *hdev);
907 void (*set_clock_gating)(struct hl_device *hdev);
908 void (*disable_clock_gating)(struct hl_device *hdev);
909 int (*debug_coresight)(struct hl_device *hdev, void *data);
910 bool (*is_device_idle)(struct hl_device *hdev, u64 *mask,
912 int (*soft_reset_late_init)(struct hl_device *hdev);
913 void (*hw_queues_lock)(struct hl_device *hdev);
914 void (*hw_queues_unlock)(struct hl_device *hdev);
915 u32 (*get_pci_id)(struct hl_device *hdev);
916 int (*get_eeprom_data)(struct hl_device *hdev, void *data,
918 int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
919 u16 len, u32 timeout, long *result);
920 enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
921 int (*pci_bars_map)(struct hl_device *hdev);
922 int (*init_iatu)(struct hl_device *hdev);
923 u32 (*rreg)(struct hl_device *hdev, u32 reg);
924 void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
925 void (*halt_coresight)(struct hl_device *hdev);
926 int (*ctx_init)(struct hl_ctx *ctx);
927 void (*ctx_fini)(struct hl_ctx *ctx);
928 int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
929 u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
930 void (*read_device_fw_version)(struct hl_device *hdev,
931 enum hl_fw_component fwc);
932 int (*load_firmware_to_device)(struct hl_device *hdev);
933 int (*load_boot_fit_to_device)(struct hl_device *hdev);
934 u32 (*get_signal_cb_size)(struct hl_device *hdev);
935 u32 (*get_wait_cb_size)(struct hl_device *hdev);
936 u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
938 u32 (*gen_wait_cb)(struct hl_device *hdev,
939 struct hl_gen_wait_properties *prop);
940 void (*reset_sob)(struct hl_device *hdev, void *data);
941 void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
942 void (*set_dma_mask_from_fw)(struct hl_device *hdev);
943 u64 (*get_device_time)(struct hl_device *hdev);
944 void (*collective_wait_init_cs)(struct hl_cs *cs);
945 int (*collective_wait_create_jobs)(struct hl_device *hdev,
946 struct hl_ctx *ctx, struct hl_cs *cs, u32 wait_queue_id,
947 u32 collective_engine_id);
955 #define HL_KERNEL_ASID_ID 0
958 * struct hl_va_range - virtual addresses range.
959 * @lock: protects the virtual addresses list.
960 * @list: list of virtual addresses blocks available for mappings.
961 * @start_addr: range start address.
962 * @end_addr: range end address.
966 struct list_head list;
972 * struct hl_cs_counters_atomic - command submission counters
973 * @out_of_mem_drop_cnt: dropped due to memory allocation issue
974 * @parsing_drop_cnt: dropped due to error in packet parsing
975 * @queue_full_drop_cnt: dropped due to queue full
976 * @device_in_reset_drop_cnt: dropped due to device in reset
977 * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
979 struct hl_cs_counters_atomic {
980 atomic64_t out_of_mem_drop_cnt;
981 atomic64_t parsing_drop_cnt;
982 atomic64_t queue_full_drop_cnt;
983 atomic64_t device_in_reset_drop_cnt;
984 atomic64_t max_cs_in_flight_drop_cnt;
988 * struct hl_ctx - user/kernel context.
989 * @mem_hash: holds mapping from virtual address to virtual memory area
990 * descriptor (hl_vm_phys_pg_list or hl_userptr).
991 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
992 * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
993 * @hdev: pointer to the device structure.
994 * @refcount: reference counter for the context. Context is released only when
995 * this hits 0l. It is incremented on CS and CS_WAIT.
996 * @cs_pending: array of hl fence objects representing pending CS.
997 * @host_va_range: holds available virtual addresses for host mappings.
998 * @host_huge_va_range: holds available virtual addresses for host mappings
1000 * @dram_va_range: holds available virtual addresses for DRAM mappings.
1001 * @mem_hash_lock: protects the mem_hash.
1002 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
1003 * MMU hash or walking the PGT requires talking this lock.
1004 * @debugfs_list: node in debugfs list of contexts.
1005 * @cs_counters: context command submission counters.
1006 * @cb_va_pool: device VA pool for command buffers which are mapped to the
1008 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
1009 * to user so user could inquire about CS. It is used as
1010 * index to cs_pending array.
1011 * @dram_default_hops: array that holds all hops addresses needed for default
1013 * @cs_lock: spinlock to protect cs_sequence.
1014 * @dram_phys_mem: amount of used physical DRAM memory by this context.
1015 * @thread_ctx_switch_token: token to prevent multiple threads of the same
1016 * context from running the context switch phase.
1017 * Only a single thread should run it.
1018 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
1019 * the context switch phase from moving to their
1020 * execution phase before the context switch phase
1022 * @asid: context's unique address space ID in the device's MMU.
1023 * @handle: context's opaque handle for user
1026 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
1027 DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
1028 struct hl_fpriv *hpriv;
1029 struct hl_device *hdev;
1030 struct kref refcount;
1031 struct hl_fence **cs_pending;
1032 struct hl_va_range *host_va_range;
1033 struct hl_va_range *host_huge_va_range;
1034 struct hl_va_range *dram_va_range;
1035 struct mutex mem_hash_lock;
1036 struct mutex mmu_lock;
1037 struct list_head debugfs_list;
1038 struct hl_cs_counters_atomic cs_counters;
1039 struct gen_pool *cb_va_pool;
1041 u64 *dram_default_hops;
1043 atomic64_t dram_phys_mem;
1044 atomic_t thread_ctx_switch_token;
1045 u32 thread_ctx_switch_wait_token;
1051 * struct hl_ctx_mgr - for handling multiple contexts.
1052 * @ctx_lock: protects ctx_handles.
1053 * @ctx_handles: idr to hold all ctx handles.
1056 struct mutex ctx_lock;
1057 struct idr ctx_handles;
1063 * COMMAND SUBMISSIONS
1067 * struct hl_userptr - memory mapping chunk information
1068 * @vm_type: type of the VM.
1069 * @job_node: linked-list node for hanging the object on the Job's list.
1070 * @vec: pointer to the frame vector.
1071 * @sgt: pointer to the scatter-gather table that holds the pages.
1072 * @dir: for DMA unmapping, the direction must be supplied, so save it.
1073 * @debugfs_list: node in debugfs list of command submissions.
1074 * @addr: user-space virtual address of the start of the memory area.
1075 * @size: size of the memory area to pin & map.
1076 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
1079 enum vm_type_t vm_type; /* must be first */
1080 struct list_head job_node;
1081 struct frame_vector *vec;
1082 struct sg_table *sgt;
1083 enum dma_data_direction dir;
1084 struct list_head debugfs_list;
1091 * struct hl_cs - command submission.
1092 * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
1093 * @ctx: the context this CS belongs to.
1094 * @job_list: list of the CS's jobs in the various queues.
1095 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
1096 * @refcount: reference counter for usage of the CS.
1097 * @fence: pointer to the fence object of this CS.
1098 * @signal_fence: pointer to the fence object of the signal CS (used by wait
1100 * @finish_work: workqueue object to run when CS is completed by H/W.
1101 * @work_tdr: delayed work node for TDR.
1102 * @mirror_node : node in device mirror list of command submissions.
1103 * @debugfs_list: node in debugfs list of command submissions.
1104 * @sequence: the sequence number of this CS.
1106 * @submitted: true if CS was submitted to H/W.
1107 * @completed: true if CS was completed by device.
1108 * @timedout : true if CS was timedout.
1109 * @tdr_active: true if TDR was activated for this CS (to prevent
1110 * double TDR activation).
1111 * @aborted: true if CS was aborted due to some device error.
1114 u16 *jobs_in_queue_cnt;
1116 struct list_head job_list;
1117 spinlock_t job_lock;
1118 struct kref refcount;
1119 struct hl_fence *fence;
1120 struct hl_fence *signal_fence;
1121 struct work_struct finish_work;
1122 struct delayed_work work_tdr;
1123 struct list_head mirror_node;
1124 struct list_head debugfs_list;
1126 enum hl_cs_type type;
1135 * struct hl_cs_job - command submission job.
1136 * @cs_node: the node to hang on the CS jobs list.
1137 * @cs: the CS this job belongs to.
1138 * @user_cb: the CB we got from the user.
1139 * @patched_cb: in case of patching, this is internal CB which is submitted on
1140 * the queue instead of the CB we got from the IOCTL.
1141 * @finish_work: workqueue object to run when job is completed.
1142 * @userptr_list: linked-list of userptr mappings that belong to this job and
1143 * wait for completion.
1144 * @debugfs_list: node in debugfs list of command submission jobs.
1145 * @queue_type: the type of the H/W queue this job is submitted to.
1146 * @id: the id of this job inside a CS.
1147 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1148 * @user_cb_size: the actual size of the CB we got from the user.
1149 * @job_cb_size: the actual size of the CB that we put on the queue.
1150 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1151 * handle to a kernel-allocated CB object, false
1152 * otherwise (SRAM/DRAM/host address).
1153 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1154 * info is needed later, when adding the 2xMSG_PROT at the
1155 * end of the JOB, to know which barriers to put in the
1156 * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1157 * have streams so the engine can't be busy by another
1161 struct list_head cs_node;
1163 struct hl_cb *user_cb;
1164 struct hl_cb *patched_cb;
1165 struct work_struct finish_work;
1166 struct list_head userptr_list;
1167 struct list_head debugfs_list;
1168 enum hl_queue_type queue_type;
1173 u8 is_kernel_allocated_cb;
1174 u8 contains_dma_pkt;
1178 * struct hl_cs_parser - command submission parser properties.
1179 * @user_cb: the CB we got from the user.
1180 * @patched_cb: in case of patching, this is internal CB which is submitted on
1181 * the queue instead of the CB we got from the IOCTL.
1182 * @job_userptr_list: linked-list of userptr mappings that belong to the related
1183 * job and wait for completion.
1184 * @cs_sequence: the sequence number of the related CS.
1185 * @queue_type: the type of the H/W queue this job is submitted to.
1186 * @ctx_id: the ID of the context the related CS belongs to.
1187 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1188 * @user_cb_size: the actual size of the CB we got from the user.
1189 * @patched_cb_size: the size of the CB after parsing.
1190 * @job_id: the id of the related job inside the related CS.
1191 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1192 * handle to a kernel-allocated CB object, false
1193 * otherwise (SRAM/DRAM/host address).
1194 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1195 * info is needed later, when adding the 2xMSG_PROT at the
1196 * end of the JOB, to know which barriers to put in the
1197 * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1198 * have streams so the engine can't be busy by another
1201 struct hl_cs_parser {
1202 struct hl_cb *user_cb;
1203 struct hl_cb *patched_cb;
1204 struct list_head *job_userptr_list;
1206 enum hl_queue_type queue_type;
1210 u32 patched_cb_size;
1212 u8 is_kernel_allocated_cb;
1213 u8 contains_dma_pkt;
1221 * struct hl_vm_hash_node - hash element from virtual address to virtual
1222 * memory area descriptor (hl_vm_phys_pg_list or
1224 * @node: node to hang on the hash table in context object.
1225 * @vaddr: key virtual address.
1226 * @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr).
1228 struct hl_vm_hash_node {
1229 struct hlist_node node;
1235 * struct hl_vm_phys_pg_pack - physical page pack.
1236 * @vm_type: describes the type of the virtual area descriptor.
1237 * @pages: the physical page array.
1238 * @npages: num physical pages in the pack.
1239 * @total_size: total size of all the pages in this list.
1240 * @mapping_cnt: number of shared mappings.
1241 * @asid: the context related to this list.
1242 * @page_size: size of each page in the pack.
1243 * @flags: HL_MEM_* flags related to this list.
1244 * @handle: the provided handle related to this list.
1245 * @offset: offset from the first page.
1246 * @contiguous: is contiguous physical memory.
1247 * @created_from_userptr: is product of host virtual address.
1249 struct hl_vm_phys_pg_pack {
1250 enum vm_type_t vm_type; /* must be first */
1254 atomic_t mapping_cnt;
1261 u8 created_from_userptr;
1265 * struct hl_vm_va_block - virtual range block information.
1266 * @node: node to hang on the virtual range list in context object.
1267 * @start: virtual range start address.
1268 * @end: virtual range end address.
1269 * @size: virtual range size.
1271 struct hl_vm_va_block {
1272 struct list_head node;
1279 * struct hl_vm - virtual memory manager for MMU.
1280 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
1281 * @dram_pg_pool_refcount: reference counter for the pool usage.
1282 * @idr_lock: protects the phys_pg_list_handles.
1283 * @phys_pg_pack_handles: idr to hold all device allocations handles.
1284 * @init_done: whether initialization was done. We need this because VM
1285 * initialization might be skipped during device initialization.
1288 struct gen_pool *dram_pg_pool;
1289 struct kref dram_pg_pool_refcount;
1290 spinlock_t idr_lock;
1291 struct idr phys_pg_pack_handles;
1297 * DEBUG, PROFILING STRUCTURE
1301 * struct hl_debug_params - Coresight debug parameters.
1302 * @input: pointer to component specific input parameters.
1303 * @output: pointer to component specific output parameters.
1304 * @output_size: size of output buffer.
1305 * @reg_idx: relevant register ID.
1306 * @op: component operation to execute.
1307 * @enable: true if to enable component debugging, false otherwise.
1309 struct hl_debug_params {
1319 * FILE PRIVATE STRUCTURE
1323 * struct hl_fpriv - process information stored in FD private data.
1324 * @hdev: habanalabs device structure.
1325 * @filp: pointer to the given file structure.
1326 * @taskpid: current process ID.
1327 * @ctx: current executing context. TODO: remove for multiple ctx per process
1328 * @ctx_mgr: context manager to handle multiple context for this FD.
1329 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
1330 * @debugfs_list: list of relevant ASIC debugfs.
1331 * @dev_node: node in the device list of file private data
1332 * @refcount: number of related contexts.
1333 * @restore_phase_mutex: lock for context switch and restore phase.
1334 * @is_control: true for control device, false otherwise
1337 struct hl_device *hdev;
1339 struct pid *taskpid;
1341 struct hl_ctx_mgr ctx_mgr;
1342 struct hl_cb_mgr cb_mgr;
1343 struct list_head debugfs_list;
1344 struct list_head dev_node;
1345 struct kref refcount;
1346 struct mutex restore_phase_mutex;
1356 * struct hl_info_list - debugfs file ops.
1358 * @show: function to output information.
1359 * @write: function to write to the file.
1361 struct hl_info_list {
1363 int (*show)(struct seq_file *s, void *data);
1364 ssize_t (*write)(struct file *file, const char __user *buf,
1365 size_t count, loff_t *f_pos);
1369 * struct hl_debugfs_entry - debugfs dentry wrapper.
1370 * @dent: base debugfs entry structure.
1371 * @info_ent: dentry realted ops.
1372 * @dev_entry: ASIC specific debugfs manager.
1374 struct hl_debugfs_entry {
1375 struct dentry *dent;
1376 const struct hl_info_list *info_ent;
1377 struct hl_dbg_device_entry *dev_entry;
1381 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
1382 * @root: root dentry.
1383 * @hdev: habanalabs device structure.
1384 * @entry_arr: array of available hl_debugfs_entry.
1385 * @file_list: list of available debugfs files.
1386 * @file_mutex: protects file_list.
1387 * @cb_list: list of available CBs.
1388 * @cb_spinlock: protects cb_list.
1389 * @cs_list: list of available CSs.
1390 * @cs_spinlock: protects cs_list.
1391 * @cs_job_list: list of available CB jobs.
1392 * @cs_job_spinlock: protects cs_job_list.
1393 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
1394 * @userptr_spinlock: protects userptr_list.
1395 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
1396 * @ctx_mem_hash_spinlock: protects cb_list.
1397 * @addr: next address to read/write from/to in read/write32.
1398 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
1399 * @mmu_asid: ASID to use while translating in mmu_show.
1400 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
1401 * @i2c_bus: generic u8 debugfs file for address value to use in i2c_data_read.
1402 * @i2c_bus: generic u8 debugfs file for register value to use in i2c_data_read.
1404 struct hl_dbg_device_entry {
1405 struct dentry *root;
1406 struct hl_device *hdev;
1407 struct hl_debugfs_entry *entry_arr;
1408 struct list_head file_list;
1409 struct mutex file_mutex;
1410 struct list_head cb_list;
1411 spinlock_t cb_spinlock;
1412 struct list_head cs_list;
1413 spinlock_t cs_spinlock;
1414 struct list_head cs_job_list;
1415 spinlock_t cs_job_spinlock;
1416 struct list_head userptr_list;
1417 spinlock_t userptr_spinlock;
1418 struct list_head ctx_mem_hash_list;
1419 spinlock_t ctx_mem_hash_spinlock;
1433 /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
1434 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
1436 #define HL_MAX_MINORS 256
1439 * Registers read & write functions.
1442 u32 hl_rreg(struct hl_device *hdev, u32 reg);
1443 void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1445 #define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
1446 #define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
1447 #define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
1448 hdev->asic_funcs->rreg(hdev, (reg)))
1450 #define WREG32_P(reg, val, mask) \
1452 u32 tmp_ = RREG32(reg); \
1454 tmp_ |= ((val) & ~(mask)); \
1455 WREG32(reg, tmp_); \
1457 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1458 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
1460 #define RMWREG32(reg, val, mask) \
1462 u32 tmp_ = RREG32(reg); \
1464 tmp_ |= ((val) << __ffs(mask)); \
1465 WREG32(reg, tmp_); \
1468 #define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
1470 #define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
1471 #define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
1472 #define WREG32_FIELD(reg, offset, field, val) \
1473 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
1474 ~REG_FIELD_MASK(reg, field)) | \
1475 (val) << REG_FIELD_SHIFT(reg, field))
1477 /* Timeout should be longer when working with simulator but cap the
1478 * increased timeout to some maximum
1480 #define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
1482 ktime_t __timeout; \
1484 __timeout = ktime_add_us(ktime_get(), timeout_us); \
1486 __timeout = ktime_add_us(ktime_get(),\
1487 min((u64)(timeout_us * 10), \
1488 (u64) HL_SIM_MAX_TIMEOUT_US)); \
1489 might_sleep_if(sleep_us); \
1491 (val) = RREG32(addr); \
1494 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1495 (val) = RREG32(addr); \
1499 usleep_range((sleep_us >> 2) + 1, sleep_us); \
1501 (cond) ? 0 : -ETIMEDOUT; \
1505 * address in this macro points always to a memory location in the
1506 * host's (server's) memory. That location is updated asynchronously
1507 * either by the direct access of the device or by another core.
1509 * To work both in LE and BE architectures, we need to distinguish between the
1510 * two states (device or another core updates the memory location). Therefore,
1511 * if mem_written_by_device is true, the host memory being polled will be
1512 * updated directly by the device. If false, the host memory being polled will
1513 * be updated by host CPU. Required so host knows whether or not the memory
1514 * might need to be byte-swapped before returning value to caller.
1516 #define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
1517 mem_written_by_device) \
1519 ktime_t __timeout; \
1521 __timeout = ktime_add_us(ktime_get(), timeout_us); \
1523 __timeout = ktime_add_us(ktime_get(),\
1524 min((u64)(timeout_us * 10), \
1525 (u64) HL_SIM_MAX_TIMEOUT_US)); \
1526 might_sleep_if(sleep_us); \
1528 /* Verify we read updates done by other cores or by device */ \
1530 (val) = *((u32 *)(addr)); \
1531 if (mem_written_by_device) \
1532 (val) = le32_to_cpu(*(__le32 *) &(val)); \
1535 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1536 (val) = *((u32 *)(addr)); \
1537 if (mem_written_by_device) \
1538 (val) = le32_to_cpu(*(__le32 *) &(val)); \
1542 usleep_range((sleep_us >> 2) + 1, sleep_us); \
1544 (cond) ? 0 : -ETIMEDOUT; \
1547 #define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
1550 ktime_t __timeout; \
1552 __timeout = ktime_add_us(ktime_get(), timeout_us); \
1554 __timeout = ktime_add_us(ktime_get(),\
1555 min((u64)(timeout_us * 10), \
1556 (u64) HL_SIM_MAX_TIMEOUT_US)); \
1557 might_sleep_if(sleep_us); \
1559 (val) = readl(addr); \
1562 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1563 (val) = readl(addr); \
1567 usleep_range((sleep_us >> 2) + 1, sleep_us); \
1569 (cond) ? 0 : -ETIMEDOUT; \
1572 struct hwmon_chip_info;
1575 * struct hl_device_reset_work - reset workqueue task wrapper.
1576 * @reset_work: reset work to be done.
1577 * @hdev: habanalabs device structure.
1579 struct hl_device_reset_work {
1580 struct work_struct reset_work;
1581 struct hl_device *hdev;
1585 * struct hl_device_idle_busy_ts - used for calculating device utilization rate.
1586 * @idle_to_busy_ts: timestamp where device changed from idle to busy.
1587 * @busy_to_idle_ts: timestamp where device changed from busy to idle.
1589 struct hl_device_idle_busy_ts {
1590 ktime_t idle_to_busy_ts;
1591 ktime_t busy_to_idle_ts;
1595 * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
1597 * @virt_addr: the virtual address of the hop.
1598 * @phys-addr: the physical address of the hop (used by the device-mmu).
1599 * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
1601 struct hr_mmu_hop_addrs {
1608 * struct hl_mmu_hr_pgt_priv - used for holding per-device mmu host-resident
1609 * page-table internal information.
1610 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
1611 * @mmu_shadow_hop0: shadow array of hop0 tables.
1613 struct hl_mmu_hr_priv {
1614 struct gen_pool *mmu_pgt_pool;
1615 struct hr_mmu_hop_addrs *mmu_shadow_hop0;
1619 * struct hl_mmu_dr_pgt_priv - used for holding per-device mmu device-resident
1620 * page-table internal information.
1621 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
1622 * @mmu_shadow_hop0: shadow array of hop0 tables.
1624 struct hl_mmu_dr_priv {
1625 struct gen_pool *mmu_pgt_pool;
1626 void *mmu_shadow_hop0;
1630 * struct hl_mmu_priv - used for holding per-device mmu internal information.
1631 * @dr: information on the device-resident MMU, when exists.
1632 * @hr: information on the host-resident MMU, when exists.
1634 struct hl_mmu_priv {
1635 struct hl_mmu_dr_priv dr;
1636 struct hl_mmu_hr_priv hr;
1640 * struct hl_mmu_funcs - Device related MMU functions.
1641 * @init: initialize the MMU module.
1642 * @fini: release the MMU module.
1643 * @ctx_init: Initialize a context for using the MMU module.
1644 * @ctx_fini: disable a ctx from using the mmu module.
1645 * @map: maps a virtual address to physical address for a context.
1646 * @unmap: unmap a virtual address of a context.
1647 * @flush: flush all writes from all cores to reach device MMU.
1648 * @swap_out: marks all mapping of the given context as swapped out.
1649 * @swap_in: marks all mapping of the given context as swapped in.
1651 struct hl_mmu_funcs {
1652 int (*init)(struct hl_device *hdev);
1653 void (*fini)(struct hl_device *hdev);
1654 int (*ctx_init)(struct hl_ctx *ctx);
1655 void (*ctx_fini)(struct hl_ctx *ctx);
1656 int (*map)(struct hl_ctx *ctx,
1657 u64 virt_addr, u64 phys_addr, u32 page_size,
1659 int (*unmap)(struct hl_ctx *ctx,
1660 u64 virt_addr, bool is_dram_addr);
1661 void (*flush)(struct hl_ctx *ctx);
1662 void (*swap_out)(struct hl_ctx *ctx);
1663 void (*swap_in)(struct hl_ctx *ctx);
1667 * struct hl_device - habanalabs device structure.
1668 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
1669 * @pcie_bar_phys: array of available PCIe bars physical addresses.
1670 * (required only for PCI address match mode)
1671 * @pcie_bar: array of available PCIe bars virtual addresses.
1672 * @rmmio: configuration area address on SRAM.
1673 * @cdev: related char device.
1674 * @cdev_ctrl: char device for control operations only (INFO IOCTL)
1675 * @dev: related kernel basic device structure.
1676 * @dev_ctrl: related kernel device structure for the control device
1677 * @work_freq: delayed work to lower device frequency if possible.
1678 * @work_heartbeat: delayed work for CPU-CP is-alive check.
1679 * @asic_name: ASIC specific name.
1680 * @asic_type: ASIC specific type.
1681 * @completion_queue: array of hl_cq.
1682 * @cq_wq: work queues of completion queues for executing work in process
1684 * @eq_wq: work queue of event queue for executing work in process context.
1685 * @kernel_ctx: Kernel driver context structure.
1686 * @kernel_queues: array of hl_hw_queue.
1687 * @hw_queues_mirror_list: CS mirror list for TDR.
1688 * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
1689 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
1690 * @event_queue: event queue for IRQ from CPU-CP.
1691 * @dma_pool: DMA pool for small allocations.
1692 * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
1693 * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address.
1694 * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool.
1695 * @asid_bitmap: holds used/available ASIDs.
1696 * @asid_mutex: protects asid_bitmap.
1697 * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
1698 * @debug_lock: protects critical section of setting debug mode for device
1699 * @asic_prop: ASIC specific immutable properties.
1700 * @asic_funcs: ASIC specific functions.
1701 * @asic_specific: ASIC specific information to use only from ASIC files.
1702 * @vm: virtual memory manager for MMU.
1703 * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
1704 * @hwmon_dev: H/W monitor device.
1705 * @pm_mng_profile: current power management profile.
1706 * @hl_chip_info: ASIC's sensors information.
1707 * @hl_debugfs: device's debugfs manager.
1708 * @cb_pool: list of preallocated CBs.
1709 * @cb_pool_lock: protects the CB pool.
1710 * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
1711 * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
1712 * @internal_cb_pool: internal command buffer memory pool.
1713 * @internal_cb_va_base: internal cb pool mmu virtual address base
1714 * @fpriv_list: list of file private data structures. Each structure is created
1715 * when a user opens the device
1716 * @fpriv_list_lock: protects the fpriv_list
1717 * @compute_ctx: current compute context executing.
1718 * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy
1720 * @aggregated_cs_counters: aggregated cs counters among all contexts
1721 * @mmu_priv: device-specific MMU data.
1722 * @mmu_func: device-related MMU functions.
1723 * @dram_used_mem: current DRAM memory consumption.
1724 * @timeout_jiffies: device CS timeout value.
1725 * @max_power: the max power of the device, as configured by the sysadmin. This
1726 * value is saved so in case of hard-reset, the driver will restore
1727 * this value and update the F/W after the re-initialization
1728 * @clock_gating_mask: is clock gating enabled. bitmask that represents the
1729 * different engines. See debugfs-driver-habanalabs for
1731 * @in_reset: is device in reset flow.
1732 * @curr_pll_profile: current PLL profile.
1733 * @card_type: Various ASICs have several card types. This indicates the card
1734 * type of the current device.
1735 * @cs_active_cnt: number of active command submissions on this device (active
1736 * means already in H/W queues)
1737 * @major: habanalabs kernel driver major.
1738 * @high_pll: high PLL profile frequency.
1739 * @soft_reset_cnt: number of soft reset since the driver was loaded.
1740 * @hard_reset_cnt: number of hard reset since the driver was loaded.
1741 * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr
1742 * @clk_throttling_reason: bitmask represents the current clk throttling reasons
1743 * @id: device minor.
1744 * @id_control: minor of the control device
1745 * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
1747 * @disabled: is device disabled.
1748 * @late_init_done: is late init stage was done during initialization.
1749 * @hwmon_initialized: is H/W monitor sensors was initialized.
1750 * @hard_reset_pending: is there a hard reset work pending.
1751 * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
1752 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
1754 * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
1755 * @dram_default_page_mapping: is DRAM default page mapping enabled.
1756 * @memory_scrub: true to perform device memory scrub in various locations,
1757 * such as context-switch, context close, page free, etc.
1758 * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
1760 * @init_done: is the initialization of the device done.
1761 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
1762 * @dma_mask: the dma mask that was set for this device
1763 * @in_debug: is device under debug. This, together with fpriv_list, enforces
1764 * that only a single user is configuring the debug infrastructure.
1765 * @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
1766 * only to POWER9 machines.
1767 * @cdev_sysfs_created: were char devices and sysfs nodes created.
1768 * @stop_on_err: true if engines should stop on error.
1769 * @supports_sync_stream: is sync stream supported.
1770 * @sync_stream_queue_idx: helper index for sync stream queues initialization.
1771 * @collective_mon_idx: helper index for collective initialization
1772 * @supports_coresight: is CoreSight supported.
1773 * @supports_soft_reset: is soft reset supported.
1774 * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
1777 struct pci_dev *pdev;
1778 u64 pcie_bar_phys[HL_PCI_NUM_BARS];
1779 void __iomem *pcie_bar[HL_PCI_NUM_BARS];
1780 void __iomem *rmmio;
1782 struct cdev cdev_ctrl;
1784 struct device *dev_ctrl;
1785 struct delayed_work work_freq;
1786 struct delayed_work work_heartbeat;
1788 enum hl_asic_type asic_type;
1789 struct hl_cq *completion_queue;
1790 struct workqueue_struct **cq_wq;
1791 struct workqueue_struct *eq_wq;
1792 struct hl_ctx *kernel_ctx;
1793 struct hl_hw_queue *kernel_queues;
1794 struct list_head hw_queues_mirror_list;
1795 spinlock_t hw_queues_mirror_lock;
1796 struct hl_cb_mgr kernel_cb_mgr;
1797 struct hl_eq event_queue;
1798 struct dma_pool *dma_pool;
1799 void *cpu_accessible_dma_mem;
1800 dma_addr_t cpu_accessible_dma_address;
1801 struct gen_pool *cpu_accessible_dma_pool;
1802 unsigned long *asid_bitmap;
1803 struct mutex asid_mutex;
1804 struct mutex send_cpu_message_lock;
1805 struct mutex debug_lock;
1806 struct asic_fixed_properties asic_prop;
1807 const struct hl_asic_funcs *asic_funcs;
1808 void *asic_specific;
1810 struct mutex mmu_cache_lock;
1811 struct device *hwmon_dev;
1812 enum hl_pm_mng_profile pm_mng_profile;
1813 struct hwmon_chip_info *hl_chip_info;
1815 struct hl_dbg_device_entry hl_debugfs;
1817 struct list_head cb_pool;
1818 spinlock_t cb_pool_lock;
1820 void *internal_cb_pool_virt_addr;
1821 dma_addr_t internal_cb_pool_dma_addr;
1822 struct gen_pool *internal_cb_pool;
1823 u64 internal_cb_va_base;
1825 struct list_head fpriv_list;
1826 struct mutex fpriv_list_lock;
1828 struct hl_ctx *compute_ctx;
1830 struct hl_device_idle_busy_ts *idle_busy_ts_arr;
1832 struct hl_cs_counters_atomic aggregated_cs_counters;
1834 struct hl_mmu_priv mmu_priv;
1835 struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
1837 atomic64_t dram_used_mem;
1838 u64 timeout_jiffies;
1840 u64 clock_gating_mask;
1842 enum hl_pll_frequency curr_pll_profile;
1843 enum cpucp_card_types card_type;
1849 u32 idle_busy_ts_idx;
1850 u32 clk_throttling_reason;
1853 u16 cpu_pci_msb_addr;
1856 u8 hwmon_initialized;
1857 u8 hard_reset_pending;
1860 u8 dram_supports_virtual_memory;
1861 u8 dram_default_page_mapping;
1865 u8 device_cpu_disabled;
1868 u8 power9_64bit_dma_enable;
1869 u8 cdev_sysfs_created;
1871 u8 supports_sync_stream;
1872 u8 sync_stream_queue_idx;
1873 u8 collective_mon_idx;
1874 u8 supports_coresight;
1875 u8 supports_soft_reset;
1876 u8 supports_cb_mapping;
1878 /* Parameters for bring-up */
1882 u8 mmu_huge_page_opt;
1885 u8 cpu_queues_enable;
1888 u8 sram_scrambler_enable;
1889 u8 dram_scrambler_enable;
1890 u8 hard_reset_on_fw_events;
1901 * typedef hl_ioctl_t - typedef for ioctl function in the driver
1902 * @hpriv: pointer to the FD's private data, which contains state of
1904 * @data: pointer to the input/output arguments structure of the IOCTL
1906 * Return: 0 for success, negative value for error
1908 typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
1911 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
1912 * @cmd: the IOCTL code as created by the kernel macros.
1913 * @func: pointer to the driver's function that should be called for this IOCTL.
1915 struct hl_ioctl_desc {
1922 * Kernel module functions that can be accessed by entire module
1926 * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
1927 * @address: The start address of the area we want to validate.
1928 * @size: The size in bytes of the area we want to validate.
1929 * @range_start_address: The start address of the valid range.
1930 * @range_end_address: The end address of the valid range.
1932 * Return: true if the area is inside the valid range, false otherwise.
1934 static inline bool hl_mem_area_inside_range(u64 address, u64 size,
1935 u64 range_start_address, u64 range_end_address)
1937 u64 end_address = address + size;
1939 if ((address >= range_start_address) &&
1940 (end_address <= range_end_address) &&
1941 (end_address > address))
1948 * hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
1949 * @address: The start address of the area we want to validate.
1950 * @size: The size in bytes of the area we want to validate.
1951 * @range_start_address: The start address of the valid range.
1952 * @range_end_address: The end address of the valid range.
1954 * Return: true if the area overlaps part or all of the valid range,
1957 static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
1958 u64 range_start_address, u64 range_end_address)
1960 u64 end_address = address + size;
1962 if ((address >= range_start_address) &&
1963 (address < range_end_address))
1966 if ((end_address >= range_start_address) &&
1967 (end_address < range_end_address))
1970 if ((address < range_start_address) &&
1971 (end_address >= range_end_address))
1977 int hl_device_open(struct inode *inode, struct file *filp);
1978 int hl_device_open_ctrl(struct inode *inode, struct file *filp);
1979 bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
1980 enum hl_device_status hl_device_status(struct hl_device *hdev);
1981 int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
1982 int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
1983 enum hl_asic_type asic_type, int minor);
1984 void destroy_hdev(struct hl_device *hdev);
1985 int hl_hw_queues_create(struct hl_device *hdev);
1986 void hl_hw_queues_destroy(struct hl_device *hdev);
1987 int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
1988 u32 cb_size, u64 cb_ptr);
1989 int hl_hw_queue_schedule_cs(struct hl_cs *cs);
1990 u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
1991 void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
1992 void hl_int_hw_queue_update_ci(struct hl_cs *cs);
1993 void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
1995 #define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
1996 #define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1))
1998 int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
1999 void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
2000 int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
2001 void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
2002 void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
2003 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
2004 irqreturn_t hl_irq_handler_cq(int irq, void *arg);
2005 irqreturn_t hl_irq_handler_eq(int irq, void *arg);
2006 u32 hl_cq_inc_ptr(u32 ptr);
2008 int hl_asid_init(struct hl_device *hdev);
2009 void hl_asid_fini(struct hl_device *hdev);
2010 unsigned long hl_asid_alloc(struct hl_device *hdev);
2011 void hl_asid_free(struct hl_device *hdev, unsigned long asid);
2013 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
2014 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
2015 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
2016 void hl_ctx_do_release(struct kref *ref);
2017 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
2018 int hl_ctx_put(struct hl_ctx *ctx);
2019 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
2020 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
2021 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
2023 int hl_device_init(struct hl_device *hdev, struct class *hclass);
2024 void hl_device_fini(struct hl_device *hdev);
2025 int hl_device_suspend(struct hl_device *hdev);
2026 int hl_device_resume(struct hl_device *hdev);
2027 int hl_device_reset(struct hl_device *hdev, bool hard_reset,
2028 bool from_hard_reset_thread);
2029 void hl_hpriv_get(struct hl_fpriv *hpriv);
2030 void hl_hpriv_put(struct hl_fpriv *hpriv);
2031 int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
2032 uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms);
2034 int hl_build_hwmon_channel_info(struct hl_device *hdev,
2035 struct cpucp_sensor *sensors_arr);
2037 int hl_sysfs_init(struct hl_device *hdev);
2038 void hl_sysfs_fini(struct hl_device *hdev);
2040 int hl_hwmon_init(struct hl_device *hdev);
2041 void hl_hwmon_fini(struct hl_device *hdev);
2043 int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
2044 struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
2045 bool map_cb, u64 *handle);
2046 int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
2047 int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
2048 struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
2050 void hl_cb_put(struct hl_cb *cb);
2051 void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
2052 void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
2053 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
2055 int hl_cb_pool_init(struct hl_device *hdev);
2056 int hl_cb_pool_fini(struct hl_device *hdev);
2057 int hl_cb_va_pool_init(struct hl_ctx *ctx);
2058 void hl_cb_va_pool_fini(struct hl_ctx *ctx);
2060 void hl_cs_rollback_all(struct hl_device *hdev);
2061 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
2062 enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
2063 void hl_sob_reset_error(struct kref *ref);
2064 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
2065 void hl_fence_put(struct hl_fence *fence);
2066 void hl_fence_get(struct hl_fence *fence);
2067 void cs_get(struct hl_cs *cs);
2069 void goya_set_asic_funcs(struct hl_device *hdev);
2070 void gaudi_set_asic_funcs(struct hl_device *hdev);
2072 int hl_vm_ctx_init(struct hl_ctx *ctx);
2073 void hl_vm_ctx_fini(struct hl_ctx *ctx);
2075 int hl_vm_init(struct hl_device *hdev);
2076 void hl_vm_fini(struct hl_device *hdev);
2078 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2079 struct hl_userptr *userptr);
2080 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
2081 void hl_userptr_delete_list(struct hl_device *hdev,
2082 struct list_head *userptr_list);
2083 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
2084 struct list_head *userptr_list,
2085 struct hl_userptr **userptr);
2087 int hl_mmu_init(struct hl_device *hdev);
2088 void hl_mmu_fini(struct hl_device *hdev);
2089 int hl_mmu_ctx_init(struct hl_ctx *ctx);
2090 void hl_mmu_ctx_fini(struct hl_ctx *ctx);
2091 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
2092 u32 page_size, bool flush_pte);
2093 int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
2095 void hl_mmu_swap_out(struct hl_ctx *ctx);
2096 void hl_mmu_swap_in(struct hl_ctx *ctx);
2097 int hl_mmu_if_set_funcs(struct hl_device *hdev);
2098 void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
2100 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
2101 void __iomem *dst, u32 src_offset, u32 size);
2102 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
2103 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
2104 u16 len, u32 timeout, long *result);
2105 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
2106 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
2107 size_t irq_arr_size);
2108 int hl_fw_test_cpu_queue(struct hl_device *hdev);
2109 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
2110 dma_addr_t *dma_handle);
2111 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
2113 int hl_fw_send_heartbeat(struct hl_device *hdev);
2114 int hl_fw_cpucp_info_get(struct hl_device *hdev);
2115 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
2116 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
2117 struct hl_info_pci_counters *counters);
2118 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
2120 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev,
2121 enum cpucp_pll_type_attributes pll_type,
2122 enum cpucp_pll_reg_attributes pll_reg,
2124 int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
2125 u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
2126 u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
2127 bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout);
2128 int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
2129 u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
2132 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
2134 int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
2135 int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
2136 struct hl_inbound_pci_region *pci_region);
2137 int hl_pci_set_outbound_region(struct hl_device *hdev,
2138 struct hl_outbound_pci_region *pci_region);
2139 int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
2140 u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
2141 u32 preboot_ver_timeout);
2142 void hl_pci_fini(struct hl_device *hdev);
2144 long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
2145 void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
2146 int hl_get_temperature(struct hl_device *hdev,
2147 int sensor_index, u32 attr, long *value);
2148 int hl_set_temperature(struct hl_device *hdev,
2149 int sensor_index, u32 attr, long value);
2150 int hl_get_voltage(struct hl_device *hdev,
2151 int sensor_index, u32 attr, long *value);
2152 int hl_get_current(struct hl_device *hdev,
2153 int sensor_index, u32 attr, long *value);
2154 int hl_get_fan_speed(struct hl_device *hdev,
2155 int sensor_index, u32 attr, long *value);
2156 int hl_get_pwm_info(struct hl_device *hdev,
2157 int sensor_index, u32 attr, long *value);
2158 void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
2160 u64 hl_get_max_power(struct hl_device *hdev);
2161 void hl_set_max_power(struct hl_device *hdev);
2162 int hl_set_voltage(struct hl_device *hdev,
2163 int sensor_index, u32 attr, long value);
2164 int hl_set_current(struct hl_device *hdev,
2165 int sensor_index, u32 attr, long value);
2167 #ifdef CONFIG_DEBUG_FS
2169 void hl_debugfs_init(void);
2170 void hl_debugfs_fini(void);
2171 void hl_debugfs_add_device(struct hl_device *hdev);
2172 void hl_debugfs_remove_device(struct hl_device *hdev);
2173 void hl_debugfs_add_file(struct hl_fpriv *hpriv);
2174 void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
2175 void hl_debugfs_add_cb(struct hl_cb *cb);
2176 void hl_debugfs_remove_cb(struct hl_cb *cb);
2177 void hl_debugfs_add_cs(struct hl_cs *cs);
2178 void hl_debugfs_remove_cs(struct hl_cs *cs);
2179 void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
2180 void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
2181 void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
2182 void hl_debugfs_remove_userptr(struct hl_device *hdev,
2183 struct hl_userptr *userptr);
2184 void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
2185 void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
2189 static inline void __init hl_debugfs_init(void)
2193 static inline void hl_debugfs_fini(void)
2197 static inline void hl_debugfs_add_device(struct hl_device *hdev)
2201 static inline void hl_debugfs_remove_device(struct hl_device *hdev)
2205 static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
2209 static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
2213 static inline void hl_debugfs_add_cb(struct hl_cb *cb)
2217 static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
2221 static inline void hl_debugfs_add_cs(struct hl_cs *cs)
2225 static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
2229 static inline void hl_debugfs_add_job(struct hl_device *hdev,
2230 struct hl_cs_job *job)
2234 static inline void hl_debugfs_remove_job(struct hl_device *hdev,
2235 struct hl_cs_job *job)
2239 static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
2240 struct hl_userptr *userptr)
2244 static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
2245 struct hl_userptr *userptr)
2249 static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
2254 static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
2262 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
2263 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
2264 int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
2265 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
2266 int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
2267 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
2269 #endif /* HABANALABSP_H_ */