1 /* SPDX-License-Identifier: GPL-2.0
3 * Copyright 2016-2019 HabanaLabs, Ltd.
11 #include "include/armcp_if.h"
12 #include "include/qman_if.h"
14 #include <linux/cdev.h>
15 #include <linux/iopoll.h>
16 #include <linux/irqreturn.h>
17 #include <linux/dma-fence.h>
18 #include <linux/dma-direction.h>
19 #include <linux/scatterlist.h>
20 #include <linux/hashtable.h>
22 #define HL_NAME "habanalabs"
24 #define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT)
26 #define HL_PENDING_RESET_PER_SEC 5
28 #define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */
30 #define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
32 #define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */
34 #define HL_ARMCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
35 #define HL_ARMCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
37 #define HL_MAX_QUEUES 128
39 #define HL_MAX_JOBS_PER_CS 64
41 /* MUST BE POWER OF 2 and larger than 1 */
42 #define HL_MAX_PENDING_CS 64
45 #define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
48 #define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
51 * struct pgt_info - MMU hop page info.
52 * @node: hash linked-list node for the pgts shadow hash of pgts.
53 * @phys_addr: physical address of the pgt.
54 * @shadow_addr: shadow hop in the host.
55 * @ctx: pointer to the owner ctx.
56 * @num_of_ptes: indicates how many ptes are used in the pgt.
58 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
59 * is needed during mapping, a new page is allocated and this structure holds
60 * its essential information. During unmapping, if no valid PTEs remained in the
61 * page, it is freed with its pgt_info structure.
64 struct hlist_node node;
75 * enum hl_queue_type - Supported QUEUE types.
76 * @QUEUE_TYPE_NA: queue is not available.
77 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
79 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
80 * memories and/or operates the compute engines.
81 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
91 * struct hw_queue_properties - queue information.
93 * @kmd_only: true if only KMD is allowed to send a job to this queue, false
96 struct hw_queue_properties {
97 enum hl_queue_type type;
102 * enum vm_type_t - virtual memory mapping request information.
103 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
104 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
112 * enum hl_device_hw_state - H/W device state. use this to understand whether
113 * to do reset before hw_init or not
114 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
115 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
118 enum hl_device_hw_state {
119 HL_DEVICE_HW_STATE_CLEAN = 0,
120 HL_DEVICE_HW_STATE_DIRTY
124 * struct asic_fixed_properties - ASIC specific immutable properties.
125 * @hw_queues_props: H/W queues properties.
126 * @armcp_info: received various information from ArmCP regarding the H/W. e.g.
128 * @uboot_ver: F/W U-boot version.
129 * @preboot_ver: F/W Preboot version.
130 * @sram_base_address: SRAM physical start address.
131 * @sram_end_address: SRAM physical end address.
132 * @sram_user_base_address - SRAM physical start address for user access.
133 * @dram_base_address: DRAM physical start address.
134 * @dram_end_address: DRAM physical end address.
135 * @dram_user_base_address: DRAM physical start address for user access.
136 * @dram_size: DRAM total size.
137 * @dram_pci_bar_size: size of PCI bar towards DRAM.
138 * @host_phys_base_address: base physical address of host memory for
139 * transactions that the device generates.
140 * @max_power_default: max power of the device after reset
141 * @va_space_host_start_address: base address of virtual memory range for
142 * mapping host memory.
143 * @va_space_host_end_address: end address of virtual memory range for
144 * mapping host memory.
145 * @va_space_dram_start_address: base address of virtual memory range for
146 * mapping DRAM memory.
147 * @va_space_dram_end_address: end address of virtual memory range for
148 * mapping DRAM memory.
149 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
151 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
152 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
153 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
154 * @mmu_dram_default_page_addr: DRAM default page physical address.
155 * @mmu_pgt_size: MMU page tables total size.
156 * @mmu_pte_size: PTE size in MMU page tables.
157 * @mmu_hop_table_size: MMU hop table size.
158 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
159 * @dram_page_size: page size for MMU DRAM allocation.
160 * @cfg_size: configuration space size on SRAM.
161 * @sram_size: total size of SRAM.
162 * @max_asid: maximum number of open contexts (ASIDs).
163 * @num_of_events: number of possible internal H/W IRQs.
164 * @psoc_pci_pll_nr: PCI PLL NR value.
165 * @psoc_pci_pll_nf: PCI PLL NF value.
166 * @psoc_pci_pll_od: PCI PLL OD value.
167 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
168 * @completion_queues_count: number of completion queues.
169 * @high_pll: high PLL frequency used by the device.
170 * @cb_pool_cb_cnt: number of CBs in the CB pool.
171 * @cb_pool_cb_size: size of each CB in the CB pool.
172 * @tpc_enabled_mask: which TPCs are enabled.
174 struct asic_fixed_properties {
175 struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES];
176 struct armcp_info armcp_info;
177 char uboot_ver[VERSION_MAX_LEN];
178 char preboot_ver[VERSION_MAX_LEN];
179 u64 sram_base_address;
180 u64 sram_end_address;
181 u64 sram_user_base_address;
182 u64 dram_base_address;
183 u64 dram_end_address;
184 u64 dram_user_base_address;
186 u64 dram_pci_bar_size;
187 u64 host_phys_base_address;
188 u64 max_power_default;
189 u64 va_space_host_start_address;
190 u64 va_space_host_end_address;
191 u64 va_space_dram_start_address;
192 u64 va_space_dram_end_address;
193 u64 dram_size_for_default_page_mapping;
194 u64 pcie_dbi_base_address;
195 u64 pcie_aux_dbi_reg_addr;
197 u64 mmu_dram_default_page_addr;
200 u32 mmu_hop_table_size;
201 u32 mmu_hop0_tables_total_size;
210 u32 psoc_pci_pll_div_factor;
214 u8 completion_queues_count;
219 * struct hl_dma_fence - wrapper for fence object used by command submissions.
220 * @base_fence: kernel fence object.
221 * @lock: spinlock to protect fence.
222 * @hdev: habanalabs device structure.
223 * @cs_seq: command submission sequence number.
225 struct hl_dma_fence {
226 struct dma_fence base_fence;
228 struct hl_device *hdev;
236 #define HL_MAX_CB_SIZE 0x200000 /* 2MB */
239 * struct hl_cb_mgr - describes a Command Buffer Manager.
240 * @cb_lock: protects cb_handles.
241 * @cb_handles: an idr to hold all command buffer handles.
245 struct idr cb_handles; /* protected by cb_lock */
249 * struct hl_cb - describes a Command Buffer.
250 * @refcount: reference counter for usage of the CB.
251 * @hdev: pointer to device this CB belongs to.
252 * @lock: spinlock to protect mmap/cs flows.
253 * @debugfs_list: node in debugfs list of command buffers.
254 * @pool_list: node in pool list of command buffers.
255 * @kernel_address: Holds the CB's kernel virtual address.
256 * @bus_address: Holds the CB's DMA address.
257 * @mmap_size: Holds the CB's size that was mmaped.
258 * @size: holds the CB's size.
260 * @cs_cnt: holds number of CS that this CB participates in.
261 * @ctx_id: holds the ID of the owner's context.
262 * @mmap: true if the CB is currently mmaped to user.
263 * @is_pool: true if CB was acquired from the pool, false otherwise.
266 struct kref refcount;
267 struct hl_device *hdev;
269 struct list_head debugfs_list;
270 struct list_head pool_list;
272 dma_addr_t bus_address;
290 * Currently, there are two limitations on the maximum length of a queue:
292 * 1. The memory footprint of the queue. The current allocated space for the
293 * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE,
294 * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE,
295 * which currently is 4096/16 = 256 entries.
297 * To increase that, we need either to decrease the size of the
298 * BD (difficult), or allocate more than a single page (easier).
300 * 2. Because the size of the JOB handle field in the BD CTL / completion queue
301 * is 10-bit, we can have up to 1024 open jobs per hardware queue.
302 * Therefore, each queue can hold up to 1024 entries.
304 * HL_QUEUE_LENGTH is in units of struct hl_bd.
305 * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE
308 #define HL_PAGE_SIZE 4096 /* minimum page size */
309 /* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */
310 #define HL_QUEUE_LENGTH 256
311 #define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)
314 * HL_CQ_LENGTH is in units of struct hl_cq_entry.
315 * HL_CQ_LENGTH should be <= HL_PAGE_SIZE
317 #define HL_CQ_LENGTH HL_QUEUE_LENGTH
318 #define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
320 /* Must be power of 2 (HL_PAGE_SIZE / HL_EQ_ENTRY_SIZE) */
321 #define HL_EQ_LENGTH 64
322 #define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
324 #define HL_CPU_PKT_SHIFT 5
325 #define HL_CPU_PKT_SIZE (1 << HL_CPU_PKT_SHIFT)
326 #define HL_CPU_PKT_MASK (~((1 << HL_CPU_PKT_SHIFT) - 1))
327 #define HL_CPU_MAX_PKTS_IN_CB 32
328 #define HL_CPU_CB_SIZE (HL_CPU_PKT_SIZE * \
329 HL_CPU_MAX_PKTS_IN_CB)
330 #define HL_CPU_CB_QUEUE_SIZE (HL_QUEUE_LENGTH * HL_CPU_CB_SIZE)
332 /* KMD <-> ArmCP shared memory size (EQ + PQ + CPU CB queue) */
333 #define HL_CPU_ACCESSIBLE_MEM_SIZE (HL_EQ_SIZE_IN_BYTES + \
334 HL_QUEUE_SIZE_IN_BYTES + \
335 HL_CPU_CB_QUEUE_SIZE)
338 * struct hl_hw_queue - describes a H/W transport queue.
339 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
340 * @queue_type: type of queue.
341 * @kernel_address: holds the queue's kernel virtual address.
342 * @bus_address: holds the queue's DMA address.
343 * @pi: holds the queue's pi value.
344 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
345 * @hw_queue_id: the id of the H/W queue.
346 * @int_queue_len: length of internal queue (number of entries).
347 * @valid: is the queue valid (we have array of 32 queues, not all of them
351 struct hl_cs_job **shadow_queue;
352 enum hl_queue_type queue_type;
354 dma_addr_t bus_address;
363 * struct hl_cq - describes a completion queue
364 * @hdev: pointer to the device structure
365 * @kernel_address: holds the queue's kernel virtual address
366 * @bus_address: holds the queue's DMA address
367 * @hw_queue_id: the id of the matching H/W queue
368 * @ci: ci inside the queue
369 * @pi: pi inside the queue
370 * @free_slots_cnt: counter of free slots in queue
373 struct hl_device *hdev;
375 dma_addr_t bus_address;
379 atomic_t free_slots_cnt;
383 * struct hl_eq - describes the event queue (single one per device)
384 * @hdev: pointer to the device structure
385 * @kernel_address: holds the queue's kernel virtual address
386 * @bus_address: holds the queue's DMA address
387 * @ci: ci inside the queue
390 struct hl_device *hdev;
392 dma_addr_t bus_address;
402 * enum hl_asic_type - supported ASIC types.
403 * @ASIC_INVALID: Invalid ASIC type.
404 * @ASIC_GOYA: Goya device.
414 * enum hl_pm_mng_profile - power management profile.
415 * @PM_AUTO: internal clock is set by KMD.
416 * @PM_MANUAL: internal clock is set by the user.
417 * @PM_LAST: last power management type.
419 enum hl_pm_mng_profile {
426 * enum hl_pll_frequency - PLL frequency.
427 * @PLL_HIGH: high frequency.
428 * @PLL_LOW: low frequency.
429 * @PLL_LAST: last frequency values that were configured by the user.
431 enum hl_pll_frequency {
438 * struct hl_asic_funcs - ASIC specific functions that are can be called from
440 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
441 * @early_fini: tears down what was done in early_init.
442 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
443 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
444 * @sw_init: sets up driver state, does not configure H/W.
445 * @sw_fini: tears down driver state, does not configure H/W.
446 * @hw_init: sets up the H/W state.
447 * @hw_fini: tears down the H/W state.
448 * @halt_engines: halt engines, needed for reset sequence. This also disables
449 * interrupts from the device. Should be called before
450 * hw_fini and before CS rollback.
451 * @suspend: handles IP specific H/W or SW changes for suspend.
452 * @resume: handles IP specific H/W or SW changes for resume.
453 * @cb_mmap: maps a CB.
454 * @ring_doorbell: increment PI on a given QMAN.
455 * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed.
456 * @dma_alloc_coherent: Allocate coherent DMA memory by calling
457 * dma_alloc_coherent(). This is ASIC function because its
458 * implementation is not trivial when the driver is loaded
459 * in simulation mode (not upstreamed).
460 * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent().
461 * This is ASIC function because its implementation is not
462 * trivial when the driver is loaded in simulation mode
464 * @get_int_queue_base: get the internal queue base address.
465 * @test_queues: run simple test on all queues for sanity check.
466 * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
467 * size of allocation is HL_DMA_POOL_BLK_SIZE.
468 * @dma_pool_free: free small DMA allocation from pool.
469 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
470 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
471 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
472 * @cs_parser: parse Command Submission.
473 * @asic_dma_map_sg: DMA map scatter-gather list.
474 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
475 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
476 * @update_eq_ci: update event queue CI.
477 * @context_switch: called upon ASID context switch.
478 * @restore_phase_topology: clear all SOBs amd MONs.
479 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM.
480 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM.
481 * @add_device_attr: add ASIC specific device attributes.
482 * @handle_eqe: handle event queue entry (IRQ) from ArmCP.
483 * @set_pll_profile: change PLL profile (manual/automatic).
484 * @get_events_stat: retrieve event queue entries histogram.
485 * @read_pte: read MMU page table entry from DRAM.
486 * @write_pte: write MMU page table entry to DRAM.
487 * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or
488 * hard (L0 & L1) flush.
489 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
491 * @send_heartbeat: send is-alive packet to ArmCP and verify response.
492 * @debug_coresight: perform certain actions on Coresight for debugging.
493 * @is_device_idle: return true if device is idle, false otherwise.
494 * @soft_reset_late_init: perform certain actions needed after soft reset.
495 * @hw_queues_lock: acquire H/W queues lock.
496 * @hw_queues_unlock: release H/W queues lock.
497 * @get_pci_id: retrieve PCI ID.
498 * @get_eeprom_data: retrieve EEPROM data from F/W.
499 * @send_cpu_message: send buffer to ArmCP.
500 * @get_hw_state: retrieve the H/W state
501 * @pci_bars_map: Map PCI BARs.
502 * @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
503 * old address the bar pointed to or U64_MAX for failure
504 * @init_iatu: Initialize the iATU unit inside the PCI controller.
505 * @rreg: Read a register. Needed for simulator support.
506 * @wreg: Write a register. Needed for simulator support.
508 struct hl_asic_funcs {
509 int (*early_init)(struct hl_device *hdev);
510 int (*early_fini)(struct hl_device *hdev);
511 int (*late_init)(struct hl_device *hdev);
512 void (*late_fini)(struct hl_device *hdev);
513 int (*sw_init)(struct hl_device *hdev);
514 int (*sw_fini)(struct hl_device *hdev);
515 int (*hw_init)(struct hl_device *hdev);
516 void (*hw_fini)(struct hl_device *hdev, bool hard_reset);
517 void (*halt_engines)(struct hl_device *hdev, bool hard_reset);
518 int (*suspend)(struct hl_device *hdev);
519 int (*resume)(struct hl_device *hdev);
520 int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
521 u64 kaddress, phys_addr_t paddress, u32 size);
522 void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
523 void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val);
524 void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size,
525 dma_addr_t *dma_handle, gfp_t flag);
526 void (*dma_free_coherent)(struct hl_device *hdev, size_t size,
527 void *cpu_addr, dma_addr_t dma_handle);
528 void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
529 dma_addr_t *dma_handle, u16 *queue_len);
530 int (*test_queues)(struct hl_device *hdev);
531 void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size,
532 gfp_t mem_flags, dma_addr_t *dma_handle);
533 void (*dma_pool_free)(struct hl_device *hdev, void *vaddr,
534 dma_addr_t dma_addr);
535 void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
536 size_t size, dma_addr_t *dma_handle);
537 void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
538 size_t size, void *vaddr);
539 void (*hl_dma_unmap_sg)(struct hl_device *hdev,
540 struct scatterlist *sg, int nents,
541 enum dma_data_direction dir);
542 int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
543 int (*asic_dma_map_sg)(struct hl_device *hdev,
544 struct scatterlist *sg, int nents,
545 enum dma_data_direction dir);
546 u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
547 struct sg_table *sgt);
548 void (*add_end_of_cb_packets)(u64 kernel_address, u32 len, u64 cq_addr,
549 u32 cq_val, u32 msix_num);
550 void (*update_eq_ci)(struct hl_device *hdev, u32 val);
551 int (*context_switch)(struct hl_device *hdev, u32 asid);
552 void (*restore_phase_topology)(struct hl_device *hdev);
553 int (*debugfs_read32)(struct hl_device *hdev, u64 addr, u32 *val);
554 int (*debugfs_write32)(struct hl_device *hdev, u64 addr, u32 val);
555 void (*add_device_attr)(struct hl_device *hdev,
556 struct attribute_group *dev_attr_grp);
557 void (*handle_eqe)(struct hl_device *hdev,
558 struct hl_eq_entry *eq_entry);
559 void (*set_pll_profile)(struct hl_device *hdev,
560 enum hl_pll_frequency freq);
561 void* (*get_events_stat)(struct hl_device *hdev, u32 *size);
562 u64 (*read_pte)(struct hl_device *hdev, u64 addr);
563 void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
564 void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard);
565 void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
566 u32 asid, u64 va, u64 size);
567 int (*send_heartbeat)(struct hl_device *hdev);
568 int (*debug_coresight)(struct hl_device *hdev, void *data);
569 bool (*is_device_idle)(struct hl_device *hdev, char *buf, size_t size);
570 int (*soft_reset_late_init)(struct hl_device *hdev);
571 void (*hw_queues_lock)(struct hl_device *hdev);
572 void (*hw_queues_unlock)(struct hl_device *hdev);
573 u32 (*get_pci_id)(struct hl_device *hdev);
574 int (*get_eeprom_data)(struct hl_device *hdev, void *data,
576 int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
577 u16 len, u32 timeout, long *result);
578 enum hl_device_hw_state (*get_hw_state)(struct hl_device *hdev);
579 int (*pci_bars_map)(struct hl_device *hdev);
580 u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
581 int (*init_iatu)(struct hl_device *hdev);
582 u32 (*rreg)(struct hl_device *hdev, u32 reg);
583 void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
591 #define HL_KERNEL_ASID_ID 0
594 * struct hl_va_range - virtual addresses range.
595 * @lock: protects the virtual addresses list.
596 * @list: list of virtual addresses blocks available for mappings.
597 * @start_addr: range start address.
598 * @end_addr: range end address.
602 struct list_head list;
608 * struct hl_ctx - user/kernel context.
609 * @mem_hash: holds mapping from virtual address to virtual memory area
610 * descriptor (hl_vm_phys_pg_list or hl_userptr).
611 * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure.
612 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
613 * @hpriv: pointer to the private (KMD) data of the process (fd).
614 * @hdev: pointer to the device structure.
615 * @refcount: reference counter for the context. Context is released only when
616 * this hits 0l. It is incremented on CS and CS_WAIT.
617 * @cs_pending: array of DMA fence objects representing pending CS.
618 * @host_va_range: holds available virtual addresses for host mappings.
619 * @dram_va_range: holds available virtual addresses for DRAM mappings.
620 * @mem_hash_lock: protects the mem_hash.
621 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifing the
622 * MMU hash or walking the PGT requires talking this lock
623 * @debugfs_list: node in debugfs list of contexts.
624 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
625 * to user so user could inquire about CS. It is used as
626 * index to cs_pending array.
627 * @dram_default_hops: array that holds all hops addresses needed for default
629 * @cs_lock: spinlock to protect cs_sequence.
630 * @dram_phys_mem: amount of used physical DRAM memory by this context.
631 * @thread_ctx_switch_token: token to prevent multiple threads of the same
632 * context from running the context switch phase.
633 * Only a single thread should run it.
634 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
635 * the context switch phase from moving to their
636 * execution phase before the context switch phase
638 * @asid: context's unique address space ID in the device's MMU.
641 DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
642 DECLARE_HASHTABLE(mmu_phys_hash, MMU_HASH_TABLE_BITS);
643 DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
644 struct hl_fpriv *hpriv;
645 struct hl_device *hdev;
646 struct kref refcount;
647 struct dma_fence *cs_pending[HL_MAX_PENDING_CS];
648 struct hl_va_range host_va_range;
649 struct hl_va_range dram_va_range;
650 struct mutex mem_hash_lock;
651 struct mutex mmu_lock;
652 struct list_head debugfs_list;
654 u64 *dram_default_hops;
656 atomic64_t dram_phys_mem;
657 atomic_t thread_ctx_switch_token;
658 u32 thread_ctx_switch_wait_token;
663 * struct hl_ctx_mgr - for handling multiple contexts.
664 * @ctx_lock: protects ctx_handles.
665 * @ctx_handles: idr to hold all ctx handles.
668 struct mutex ctx_lock;
669 struct idr ctx_handles;
675 * COMMAND SUBMISSIONS
679 * struct hl_userptr - memory mapping chunk information
680 * @vm_type: type of the VM.
681 * @job_node: linked-list node for hanging the object on the Job's list.
682 * @vec: pointer to the frame vector.
683 * @sgt: pointer to the scatter-gather table that holds the pages.
684 * @dir: for DMA unmapping, the direction must be supplied, so save it.
685 * @debugfs_list: node in debugfs list of command submissions.
686 * @addr: user-space virtual pointer to the start of the memory area.
687 * @size: size of the memory area to pin & map.
688 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
691 enum vm_type_t vm_type; /* must be first */
692 struct list_head job_node;
693 struct frame_vector *vec;
694 struct sg_table *sgt;
695 enum dma_data_direction dir;
696 struct list_head debugfs_list;
703 * struct hl_cs - command submission.
704 * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
705 * @ctx: the context this CS belongs to.
706 * @job_list: list of the CS's jobs in the various queues.
707 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
708 * @refcount: reference counter for usage of the CS.
709 * @fence: pointer to the fence object of this CS.
710 * @work_tdr: delayed work node for TDR.
711 * @mirror_node : node in device mirror list of command submissions.
712 * @debugfs_list: node in debugfs list of command submissions.
713 * @sequence: the sequence number of this CS.
714 * @submitted: true if CS was submitted to H/W.
715 * @completed: true if CS was completed by device.
716 * @timedout : true if CS was timedout.
717 * @tdr_active: true if TDR was activated for this CS (to prevent
718 * double TDR activation).
719 * @aborted: true if CS was aborted due to some device error.
722 u8 jobs_in_queue_cnt[HL_MAX_QUEUES];
724 struct list_head job_list;
726 struct kref refcount;
727 struct dma_fence *fence;
728 struct delayed_work work_tdr;
729 struct list_head mirror_node;
730 struct list_head debugfs_list;
740 * struct hl_cs_job - command submission job.
741 * @cs_node: the node to hang on the CS jobs list.
742 * @cs: the CS this job belongs to.
743 * @user_cb: the CB we got from the user.
744 * @patched_cb: in case of patching, this is internal CB which is submitted on
745 * the queue instead of the CB we got from the IOCTL.
746 * @finish_work: workqueue object to run when job is completed.
747 * @userptr_list: linked-list of userptr mappings that belong to this job and
748 * wait for completion.
749 * @debugfs_list: node in debugfs list of command submission jobs.
750 * @id: the id of this job inside a CS.
751 * @hw_queue_id: the id of the H/W queue this job is submitted to.
752 * @user_cb_size: the actual size of the CB we got from the user.
753 * @job_cb_size: the actual size of the CB that we put on the queue.
754 * @ext_queue: whether the job is for external queue or internal queue.
757 struct list_head cs_node;
759 struct hl_cb *user_cb;
760 struct hl_cb *patched_cb;
761 struct work_struct finish_work;
762 struct list_head userptr_list;
763 struct list_head debugfs_list;
772 * struct hl_cs_parser - command submission paerser properties.
773 * @user_cb: the CB we got from the user.
774 * @patched_cb: in case of patching, this is internal CB which is submitted on
775 * the queue instead of the CB we got from the IOCTL.
776 * @job_userptr_list: linked-list of userptr mappings that belong to the related
777 * job and wait for completion.
778 * @cs_sequence: the sequence number of the related CS.
779 * @ctx_id: the ID of the context the related CS belongs to.
780 * @hw_queue_id: the id of the H/W queue this job is submitted to.
781 * @user_cb_size: the actual size of the CB we got from the user.
782 * @patched_cb_size: the size of the CB after parsing.
783 * @ext_queue: whether the job is for external queue or internal queue.
784 * @job_id: the id of the related job inside the related CS.
785 * @use_virt_addr: whether to treat the addresses in the CB as virtual during
788 struct hl_cs_parser {
789 struct hl_cb *user_cb;
790 struct hl_cb *patched_cb;
791 struct list_head *job_userptr_list;
808 * struct hl_vm_hash_node - hash element from virtual address to virtual
809 * memory area descriptor (hl_vm_phys_pg_list or
811 * @node: node to hang on the hash table in context object.
812 * @vaddr: key virtual address.
813 * @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr).
815 struct hl_vm_hash_node {
816 struct hlist_node node;
822 * struct hl_vm_phys_pg_pack - physical page pack.
823 * @vm_type: describes the type of the virtual area descriptor.
824 * @pages: the physical page array.
825 * @npages: num physical pages in the pack.
826 * @total_size: total size of all the pages in this list.
827 * @mapping_cnt: number of shared mappings.
828 * @asid: the context related to this list.
829 * @page_size: size of each page in the pack.
830 * @flags: HL_MEM_* flags related to this list.
831 * @handle: the provided handle related to this list.
832 * @offset: offset from the first page.
833 * @contiguous: is contiguous physical memory.
834 * @created_from_userptr: is product of host virtual address.
836 struct hl_vm_phys_pg_pack {
837 enum vm_type_t vm_type; /* must be first */
841 atomic_t mapping_cnt;
848 u8 created_from_userptr;
852 * struct hl_vm_va_block - virtual range block information.
853 * @node: node to hang on the virtual range list in context object.
854 * @start: virtual range start address.
855 * @end: virtual range end address.
856 * @size: virtual range size.
858 struct hl_vm_va_block {
859 struct list_head node;
866 * struct hl_vm - virtual memory manager for MMU.
867 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
868 * @dram_pg_pool_refcount: reference counter for the pool usage.
869 * @idr_lock: protects the phys_pg_list_handles.
870 * @phys_pg_pack_handles: idr to hold all device allocations handles.
871 * @init_done: whether initialization was done. We need this because VM
872 * initialization might be skipped during device initialization.
875 struct gen_pool *dram_pg_pool;
876 struct kref dram_pg_pool_refcount;
878 struct idr phys_pg_pack_handles;
884 * DEBUG, PROFILING STRUCTURE
888 * struct hl_debug_params - Coresight debug parameters.
889 * @input: pointer to component specific input parameters.
890 * @output: pointer to component specific output parameters.
891 * @output_size: size of output buffer.
892 * @reg_idx: relevant register ID.
893 * @op: component operation to execute.
894 * @enable: true if to enable component debugging, false otherwise.
896 struct hl_debug_params {
906 * FILE PRIVATE STRUCTURE
910 * struct hl_fpriv - process information stored in FD private data.
911 * @hdev: habanalabs device structure.
912 * @filp: pointer to the given file structure.
913 * @taskpid: current process ID.
914 * @ctx: current executing context.
915 * @ctx_mgr: context manager to handle multiple context for this FD.
916 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
917 * @debugfs_list: list of relevant ASIC debugfs.
918 * @refcount: number of related contexts.
919 * @restore_phase_mutex: lock for context switch and restore phase.
922 struct hl_device *hdev;
925 struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
926 struct hl_ctx_mgr ctx_mgr;
927 struct hl_cb_mgr cb_mgr;
928 struct list_head debugfs_list;
929 struct kref refcount;
930 struct mutex restore_phase_mutex;
939 * struct hl_info_list - debugfs file ops.
941 * @show: function to output information.
942 * @write: function to write to the file.
944 struct hl_info_list {
946 int (*show)(struct seq_file *s, void *data);
947 ssize_t (*write)(struct file *file, const char __user *buf,
948 size_t count, loff_t *f_pos);
952 * struct hl_debugfs_entry - debugfs dentry wrapper.
953 * @dent: base debugfs entry structure.
954 * @info_ent: dentry realted ops.
955 * @dev_entry: ASIC specific debugfs manager.
957 struct hl_debugfs_entry {
959 const struct hl_info_list *info_ent;
960 struct hl_dbg_device_entry *dev_entry;
964 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
965 * @root: root dentry.
966 * @hdev: habanalabs device structure.
967 * @entry_arr: array of available hl_debugfs_entry.
968 * @file_list: list of available debugfs files.
969 * @file_mutex: protects file_list.
970 * @cb_list: list of available CBs.
971 * @cb_spinlock: protects cb_list.
972 * @cs_list: list of available CSs.
973 * @cs_spinlock: protects cs_list.
974 * @cs_job_list: list of available CB jobs.
975 * @cs_job_spinlock: protects cs_job_list.
976 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
977 * @userptr_spinlock: protects userptr_list.
978 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
979 * @ctx_mem_hash_spinlock: protects cb_list.
980 * @addr: next address to read/write from/to in read/write32.
981 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
982 * @mmu_asid: ASID to use while translating in mmu_show.
983 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
984 * @i2c_bus: generic u8 debugfs file for address value to use in i2c_data_read.
985 * @i2c_bus: generic u8 debugfs file for register value to use in i2c_data_read.
987 struct hl_dbg_device_entry {
989 struct hl_device *hdev;
990 struct hl_debugfs_entry *entry_arr;
991 struct list_head file_list;
992 struct mutex file_mutex;
993 struct list_head cb_list;
994 spinlock_t cb_spinlock;
995 struct list_head cs_list;
996 spinlock_t cs_spinlock;
997 struct list_head cs_job_list;
998 spinlock_t cs_job_spinlock;
999 struct list_head userptr_list;
1000 spinlock_t userptr_spinlock;
1001 struct list_head ctx_mem_hash_list;
1002 spinlock_t ctx_mem_hash_spinlock;
1016 /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
1017 * x16 cards. In extereme cases, there are hosts that can accommodate 16 cards
1019 #define HL_MAX_MINORS 256
1022 * Registers read & write functions.
1025 u32 hl_rreg(struct hl_device *hdev, u32 reg);
1026 void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
1028 #define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
1029 #define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
1030 #define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
1031 hdev->asic_funcs->rreg(hdev, (reg)))
1033 #define WREG32_P(reg, val, mask) \
1035 u32 tmp_ = RREG32(reg); \
1037 tmp_ |= ((val) & ~(mask)); \
1038 WREG32(reg, tmp_); \
1040 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
1041 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
1043 #define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
1044 #define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
1045 #define WREG32_FIELD(reg, field, val) \
1046 WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \
1047 (val) << REG_FIELD_SHIFT(reg, field))
1049 #define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
1051 ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us); \
1052 might_sleep_if(sleep_us); \
1054 (val) = RREG32(addr); \
1057 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
1058 (val) = RREG32(addr); \
1062 usleep_range((sleep_us >> 2) + 1, sleep_us); \
1064 (cond) ? 0 : -ETIMEDOUT; \
1068 #define HL_ENG_BUSY(buf, size, fmt, ...) ({ \
1070 snprintf(buf, size, fmt, ##__VA_ARGS__); \
1074 struct hwmon_chip_info;
1077 * struct hl_device_reset_work - reset workqueue task wrapper.
1078 * @reset_work: reset work to be done.
1079 * @hdev: habanalabs device structure.
1081 struct hl_device_reset_work {
1082 struct work_struct reset_work;
1083 struct hl_device *hdev;
1087 * struct hl_device - habanalabs device structure.
1088 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
1089 * @pcie_bar: array of available PCIe bars.
1090 * @rmmio: configuration area address on SRAM.
1091 * @cdev: related char device.
1092 * @dev: realted kernel basic device structure.
1093 * @work_freq: delayed work to lower device frequency if possible.
1094 * @work_heartbeat: delayed work for ArmCP is-alive check.
1095 * @asic_name: ASIC specific nmae.
1096 * @asic_type: ASIC specific type.
1097 * @completion_queue: array of hl_cq.
1098 * @cq_wq: work queue of completion queues for executing work in process context
1099 * @eq_wq: work queue of event queue for executing work in process context.
1100 * @kernel_ctx: KMD context structure.
1101 * @kernel_queues: array of hl_hw_queue.
1102 * @hw_queues_mirror_list: CS mirror list for TDR.
1103 * @hw_queues_mirror_lock: protects hw_queues_mirror_list.
1104 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs.
1105 * @event_queue: event queue for IRQ from ArmCP.
1106 * @dma_pool: DMA pool for small allocations.
1107 * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
1108 * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
1109 * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool.
1110 * @asid_bitmap: holds used/available ASIDs.
1111 * @asid_mutex: protects asid_bitmap.
1112 * @fd_open_cnt_lock: lock for updating fd_open_cnt in hl_device_open. Although
1113 * fd_open_cnt is atomic, we need this lock to serialize
1114 * the open function because the driver currently supports
1115 * only a single process at a time. In addition, we need a
1116 * lock here so we can flush user processes which are opening
1117 * the device while we are trying to hard reset it
1118 * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue.
1119 * @asic_prop: ASIC specific immutable properties.
1120 * @asic_funcs: ASIC specific functions.
1121 * @asic_specific: ASIC specific information to use only from ASIC files.
1122 * @mmu_pgt_pool: pool of available MMU hops.
1123 * @vm: virtual memory manager for MMU.
1124 * @mmu_cache_lock: protects MMU cache invalidation as it can serve one context.
1125 * @mmu_shadow_hop0: shadow mapping of the MMU hop 0 zone.
1126 * @hwmon_dev: H/W monitor device.
1127 * @pm_mng_profile: current power management profile.
1128 * @hl_chip_info: ASIC's sensors information.
1129 * @hl_debugfs: device's debugfs manager.
1130 * @cb_pool: list of preallocated CBs.
1131 * @cb_pool_lock: protects the CB pool.
1132 * @user_ctx: current user context executing.
1133 * @dram_used_mem: current DRAM memory consumption.
1134 * @timeout_jiffies: device CS timeout value.
1135 * @max_power: the max power of the device, as configured by the sysadmin. This
1136 * value is saved so in case of hard-reset, KMD will restore this
1137 * value and update the F/W after the re-initialization
1138 * @in_reset: is device in reset flow.
1139 * @curr_pll_profile: current PLL profile.
1140 * @fd_open_cnt: number of open user processes.
1141 * @cs_active_cnt: number of active command submissions on this device (active
1142 * means already in H/W queues)
1143 * @major: habanalabs KMD major.
1144 * @high_pll: high PLL profile frequency.
1145 * @soft_reset_cnt: number of soft reset since KMD loading.
1146 * @hard_reset_cnt: number of hard reset since KMD loading.
1147 * @id: device minor.
1148 * @disabled: is device disabled.
1149 * @late_init_done: is late init stage was done during initialization.
1150 * @hwmon_initialized: is H/W monitor sensors was initialized.
1151 * @hard_reset_pending: is there a hard reset work pending.
1152 * @heartbeat: is heartbeat sanity check towards ArmCP enabled.
1153 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
1155 * @dram_supports_virtual_memory: is MMU enabled towards DRAM.
1156 * @dram_default_page_mapping: is DRAM default page mapping enabled.
1157 * @init_done: is the initialization of the device done.
1158 * @mmu_enable: is MMU enabled.
1159 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
1160 * @dma_mask: the dma mask that was set for this device
1163 struct pci_dev *pdev;
1164 void __iomem *pcie_bar[6];
1165 void __iomem *rmmio;
1168 struct delayed_work work_freq;
1169 struct delayed_work work_heartbeat;
1171 enum hl_asic_type asic_type;
1172 struct hl_cq *completion_queue;
1173 struct workqueue_struct *cq_wq;
1174 struct workqueue_struct *eq_wq;
1175 struct hl_ctx *kernel_ctx;
1176 struct hl_hw_queue *kernel_queues;
1177 struct list_head hw_queues_mirror_list;
1178 spinlock_t hw_queues_mirror_lock;
1179 struct hl_cb_mgr kernel_cb_mgr;
1180 struct hl_eq event_queue;
1181 struct dma_pool *dma_pool;
1182 void *cpu_accessible_dma_mem;
1183 dma_addr_t cpu_accessible_dma_address;
1184 struct gen_pool *cpu_accessible_dma_pool;
1185 unsigned long *asid_bitmap;
1186 struct mutex asid_mutex;
1187 /* TODO: remove fd_open_cnt_lock for multiple process support */
1188 struct mutex fd_open_cnt_lock;
1189 struct mutex send_cpu_message_lock;
1190 struct asic_fixed_properties asic_prop;
1191 const struct hl_asic_funcs *asic_funcs;
1192 void *asic_specific;
1193 struct gen_pool *mmu_pgt_pool;
1195 struct mutex mmu_cache_lock;
1196 void *mmu_shadow_hop0;
1197 struct device *hwmon_dev;
1198 enum hl_pm_mng_profile pm_mng_profile;
1199 struct hwmon_chip_info *hl_chip_info;
1201 struct hl_dbg_device_entry hl_debugfs;
1203 struct list_head cb_pool;
1204 spinlock_t cb_pool_lock;
1206 /* TODO: remove user_ctx for multiple process support */
1207 struct hl_ctx *user_ctx;
1209 atomic64_t dram_used_mem;
1210 u64 timeout_jiffies;
1213 atomic_t curr_pll_profile;
1214 atomic_t fd_open_cnt;
1215 atomic_t cs_active_cnt;
1223 u8 hwmon_initialized;
1224 u8 hard_reset_pending;
1227 u8 dram_supports_virtual_memory;
1228 u8 dram_default_page_mapping;
1230 u8 device_cpu_disabled;
1233 /* Parameters for bring-up */
1237 u8 cpu_queues_enable;
1248 * typedef hl_ioctl_t - typedef for ioctl function in the driver
1249 * @hpriv: pointer to the FD's private data, which contains state of
1251 * @data: pointer to the input/output arguments structure of the IOCTL
1253 * Return: 0 for success, negative value for error
1255 typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
1258 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
1259 * @cmd: the IOCTL code as created by the kernel macros.
1260 * @func: pointer to the driver's function that should be called for this IOCTL.
1262 struct hl_ioctl_desc {
1269 * Kernel module functions that can be accessed by entire module
1273 * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
1274 * @address: The start address of the area we want to validate.
1275 * @size: The size in bytes of the area we want to validate.
1276 * @range_start_address: The start address of the valid range.
1277 * @range_end_address: The end address of the valid range.
1279 * Return: true if the area is inside the valid range, false otherwise.
1281 static inline bool hl_mem_area_inside_range(u64 address, u32 size,
1282 u64 range_start_address, u64 range_end_address)
1284 u64 end_address = address + size;
1286 if ((address >= range_start_address) &&
1287 (end_address <= range_end_address) &&
1288 (end_address > address))
1295 * hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
1296 * @address: The start address of the area we want to validate.
1297 * @size: The size in bytes of the area we want to validate.
1298 * @range_start_address: The start address of the valid range.
1299 * @range_end_address: The end address of the valid range.
1301 * Return: true if the area overlaps part or all of the valid range,
1304 static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
1305 u64 range_start_address, u64 range_end_address)
1307 u64 end_address = address + size;
1309 if ((address >= range_start_address) &&
1310 (address < range_end_address))
1313 if ((end_address >= range_start_address) &&
1314 (end_address < range_end_address))
1317 if ((address < range_start_address) &&
1318 (end_address >= range_end_address))
1324 int hl_device_open(struct inode *inode, struct file *filp);
1325 bool hl_device_disabled_or_in_reset(struct hl_device *hdev);
1326 enum hl_device_status hl_device_status(struct hl_device *hdev);
1327 int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
1328 enum hl_asic_type asic_type, int minor);
1329 void destroy_hdev(struct hl_device *hdev);
1330 int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us,
1332 int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
1333 u32 timeout_us, u32 *val);
1334 int hl_hw_queues_create(struct hl_device *hdev);
1335 void hl_hw_queues_destroy(struct hl_device *hdev);
1336 int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
1337 u32 cb_size, u64 cb_ptr);
1338 int hl_hw_queue_schedule_cs(struct hl_cs *cs);
1339 u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
1340 void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
1341 void hl_int_hw_queue_update_ci(struct hl_cs *cs);
1342 void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
1344 #define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
1345 #define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1))
1347 int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
1348 void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
1349 int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
1350 void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
1351 void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
1352 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
1353 irqreturn_t hl_irq_handler_cq(int irq, void *arg);
1354 irqreturn_t hl_irq_handler_eq(int irq, void *arg);
1355 u32 hl_cq_inc_ptr(u32 ptr);
1357 int hl_asid_init(struct hl_device *hdev);
1358 void hl_asid_fini(struct hl_device *hdev);
1359 unsigned long hl_asid_alloc(struct hl_device *hdev);
1360 void hl_asid_free(struct hl_device *hdev, unsigned long asid);
1362 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
1363 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
1364 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
1365 void hl_ctx_do_release(struct kref *ref);
1366 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
1367 int hl_ctx_put(struct hl_ctx *ctx);
1368 struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
1369 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
1370 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
1372 int hl_device_init(struct hl_device *hdev, struct class *hclass);
1373 void hl_device_fini(struct hl_device *hdev);
1374 int hl_device_suspend(struct hl_device *hdev);
1375 int hl_device_resume(struct hl_device *hdev);
1376 int hl_device_reset(struct hl_device *hdev, bool hard_reset,
1377 bool from_hard_reset_thread);
1378 void hl_hpriv_get(struct hl_fpriv *hpriv);
1379 void hl_hpriv_put(struct hl_fpriv *hpriv);
1380 int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
1382 int hl_build_hwmon_channel_info(struct hl_device *hdev,
1383 struct armcp_sensor *sensors_arr);
1385 int hl_sysfs_init(struct hl_device *hdev);
1386 void hl_sysfs_fini(struct hl_device *hdev);
1388 int hl_hwmon_init(struct hl_device *hdev);
1389 void hl_hwmon_fini(struct hl_device *hdev);
1391 int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size,
1392 u64 *handle, int ctx_id);
1393 int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
1394 int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
1395 struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
1397 void hl_cb_put(struct hl_cb *cb);
1398 void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
1399 void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
1400 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size);
1401 int hl_cb_pool_init(struct hl_device *hdev);
1402 int hl_cb_pool_fini(struct hl_device *hdev);
1404 void hl_cs_rollback_all(struct hl_device *hdev);
1405 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue);
1407 void goya_set_asic_funcs(struct hl_device *hdev);
1409 int hl_vm_ctx_init(struct hl_ctx *ctx);
1410 void hl_vm_ctx_fini(struct hl_ctx *ctx);
1412 int hl_vm_init(struct hl_device *hdev);
1413 void hl_vm_fini(struct hl_device *hdev);
1415 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1416 struct hl_userptr *userptr);
1417 int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
1418 void hl_userptr_delete_list(struct hl_device *hdev,
1419 struct list_head *userptr_list);
1420 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
1421 struct list_head *userptr_list,
1422 struct hl_userptr **userptr);
1424 int hl_mmu_init(struct hl_device *hdev);
1425 void hl_mmu_fini(struct hl_device *hdev);
1426 int hl_mmu_ctx_init(struct hl_ctx *ctx);
1427 void hl_mmu_ctx_fini(struct hl_ctx *ctx);
1428 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
1429 int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
1430 void hl_mmu_swap_out(struct hl_ctx *ctx);
1431 void hl_mmu_swap_in(struct hl_ctx *ctx);
1433 int hl_fw_push_fw_to_device(struct hl_device *hdev, const char *fw_name,
1435 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
1436 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
1437 u16 len, u32 timeout, long *result);
1438 int hl_fw_test_cpu_queue(struct hl_device *hdev);
1439 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
1440 dma_addr_t *dma_handle);
1441 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
1443 int hl_fw_send_heartbeat(struct hl_device *hdev);
1444 int hl_fw_armcp_info_get(struct hl_device *hdev);
1445 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
1447 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
1449 int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
1450 int hl_pci_set_dram_bar_base(struct hl_device *hdev, u8 inbound_region, u8 bar,
1452 int hl_pci_init_iatu(struct hl_device *hdev, u64 sram_base_address,
1453 u64 dram_base_address, u64 host_phys_size);
1454 int hl_pci_init(struct hl_device *hdev, u8 dma_mask);
1455 void hl_pci_fini(struct hl_device *hdev);
1456 int hl_pci_set_dma_mask(struct hl_device *hdev, u8 dma_mask);
1458 long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
1459 void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
1460 long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr);
1461 long hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr);
1462 long hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr);
1463 long hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr);
1464 long hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr);
1465 void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
1467 u64 hl_get_max_power(struct hl_device *hdev);
1468 void hl_set_max_power(struct hl_device *hdev, u64 value);
1470 #ifdef CONFIG_DEBUG_FS
1472 void hl_debugfs_init(void);
1473 void hl_debugfs_fini(void);
1474 void hl_debugfs_add_device(struct hl_device *hdev);
1475 void hl_debugfs_remove_device(struct hl_device *hdev);
1476 void hl_debugfs_add_file(struct hl_fpriv *hpriv);
1477 void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
1478 void hl_debugfs_add_cb(struct hl_cb *cb);
1479 void hl_debugfs_remove_cb(struct hl_cb *cb);
1480 void hl_debugfs_add_cs(struct hl_cs *cs);
1481 void hl_debugfs_remove_cs(struct hl_cs *cs);
1482 void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
1483 void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
1484 void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
1485 void hl_debugfs_remove_userptr(struct hl_device *hdev,
1486 struct hl_userptr *userptr);
1487 void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
1488 void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
1492 static inline void __init hl_debugfs_init(void)
1496 static inline void hl_debugfs_fini(void)
1500 static inline void hl_debugfs_add_device(struct hl_device *hdev)
1504 static inline void hl_debugfs_remove_device(struct hl_device *hdev)
1508 static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
1512 static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
1516 static inline void hl_debugfs_add_cb(struct hl_cb *cb)
1520 static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
1524 static inline void hl_debugfs_add_cs(struct hl_cs *cs)
1528 static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
1532 static inline void hl_debugfs_add_job(struct hl_device *hdev,
1533 struct hl_cs_job *job)
1537 static inline void hl_debugfs_remove_job(struct hl_device *hdev,
1538 struct hl_cs_job *job)
1542 static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
1543 struct hl_userptr *userptr)
1547 static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
1548 struct hl_userptr *userptr)
1552 static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
1557 static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
1565 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
1566 int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
1567 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
1568 int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data);
1569 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
1571 #endif /* HABANALABSP_H_ */