// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/genalloc.h>

#define HL_MMU_DEBUG	0

/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be
 * created), the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */
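
/*
 * Illustrative walk-through of the scheme above (example values, not taken
 * from a real ASIC): assume a fresh range holding the single chunk
 * 0x10000000 - 0x1fffffff. Mapping 0x2000 bytes splits it - the chunk
 * 0x10000000 - 0x10001fff is handed to the caller and
 * 0x10002000 - 0x1fffffff stays on the list. Unmapping it later re-inserts
 * 0x10000000 - 0x10001fff; its end + 1 equals the start of the remaining
 * chunk, so the two are merged back into the original single chunk.
 */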

/*
 * alloc_device_memory - allocate device memory
 *
 * @ctx : current context
 * @args : host parameters containing the requested size
 * @ret_handle : result handle
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 2MB pages
 * - Return unique handle
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs, page_size, page_shift;
	int handle, rc;
	bool contiguous;

	num_curr_pgs = 0;
	page_size = hdev->asic_prop.dram_page_size;
	page_shift = __ffs(page_size);
	num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
	total_size = num_pgs << page_shift;

	if (!total_size) {
		dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
		return -EINVAL;
	}

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

	if (contiguous) {
		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
		if (!paddr) {
			dev_err(hdev->dev,
				"failed to allocate %llu huge contiguous pages\n",
				num_pgs);
			return -ENOMEM;
		}
	}

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack) {
		rc = -ENOMEM;
		goto pages_pack_err;
	}

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto pages_arr_err;
	}

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
	} else {
		for (i = 0 ; i < num_pgs ; i++) {
			phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
							vm->dram_pg_pool,
							page_size);
			if (!phys_pg_pack->pages[i]) {
				dev_err(hdev->dev,
					"Failed to allocate device memory (out of memory)\n");
				rc = -ENOMEM;
				goto page_err;
			}

			num_curr_pgs++;
		}
	}

	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
				GFP_ATOMIC);
	spin_unlock(&vm->idr_lock);

	if (handle < 0) {
		dev_err(hdev->dev, "Failed to get handle for page\n");
		rc = -EFAULT;
		goto idr_err;
	}

	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	return 0;

idr_err:
page_err:
	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
					page_size);

	kvfree(phys_pg_pack->pages);
pages_arr_err:
	kfree(phys_pg_pack);
pages_pack_err:
	if (contiguous)
		gen_pool_free(vm->dram_pg_pool, paddr, total_size);

	return rc;
}
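
/*
 * Worked example for the rounding above (illustrative): with
 * dram_page_size = 2MB, page_shift = __ffs(0x200000) = 21. A request of
 * mem_size = 5MB yields num_pgs = (0x500000 + 0x1fffff) >> 21 = 3 and
 * total_size = 3 << 21 = 6MB, i.e. the allocation is rounded up to whole
 * DRAM pages.
 */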

/*
 * dma_map_host_va - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @p_userptr: pointer to result userptr structure
 *
 * This function does the following:
 * - Allocate userptr structure
 * - Pin the given host memory using the userptr structure
 * - Perform DMA mapping to have the DMA addresses of the pages
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;
	int rc;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
	if (!userptr) {
		rc = -ENOMEM;
		goto userptr_err;
	}

	rc = hl_pin_host_memory(hdev, addr, size, userptr);
	if (rc) {
		dev_err(hdev->dev, "Failed to pin host memory\n");
		goto pin_err;
	}

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
					userptr->sgt->nents, DMA_BIDIRECTIONAL);
	if (rc) {
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
		goto dma_map_err;
	}

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	return 0;

dma_map_err:
	hl_unpin_host_memory(hdev, userptr);
pin_err:
	kfree(userptr);
userptr_err:
	*p_userptr = NULL;

	return rc;
}

/*
 * dma_unmap_host_va - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @userptr: userptr to free
 *
 * This function does the following:
 * - Unpins the physical pages
 * - Frees the userptr structure
 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
	kfree(userptr);
}

/*
 * dram_pg_pool_do_release - free DRAM pages pool
 *
 * @ref : pointer to reference object
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles
 * - Frees the generic pool of DRAM physical pages
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
			dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}

/*
 * free_phys_pg_pack - free physical page pack
 * @hdev: habanalabs device structure
 * @phys_pg_pack: physical page pack to free
 *
 * This function does the following:
 * - For DRAM memory only, iterate over the pack and free each physical block
 *   structure by returning it to the general pool
 * - Free the hl_vm_phys_pg_pack structure
 */
static void free_phys_pg_pack(struct hl_device *hdev,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;
	u64 i;

	if (!phys_pg_pack->created_from_userptr) {
		if (phys_pg_pack->contiguous) {
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
					phys_pg_pack->total_size);

			for (i = 0; i < phys_pg_pack->npages ; i++)
				kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);
		} else {
			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
				gen_pool_free(vm->dram_pg_pool,
						phys_pg_pack->pages[i],
						phys_pg_pack->page_size);
				kref_put(&vm->dram_pg_pool_refcount,
						dram_pg_pool_do_release);
			}
		}
	}

	kvfree(phys_pg_pack->pages);
	kfree(phys_pg_pack);
}
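
/*
 * Refcount lifecycle note (summary of the flows above): every DRAM page
 * allocated in alloc_device_memory() takes a reference on
 * dram_pg_pool_refcount and every page freed in free_phys_pg_pack() drops
 * one. hl_vm_fini() drops the initial reference, so
 * dram_pg_pool_do_release() - and with it idr_destroy() and
 * gen_pool_destroy() - runs only after the last outstanding page is
 * returned.
 */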

/*
 * free_device_memory - free device memory
 *
 * @ctx : current context
 * @handle : handle of the memory chunk to free
 *
 * This function does the following:
 * - Free the device memory related to the given handle
 */
static int free_device_memory(struct hl_ctx *ctx, u32 handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (phys_pg_pack) {
		if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
			dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
				handle);
			spin_unlock(&vm->idr_lock);
			return -EINVAL;
		}

		/*
		 * must remove from idr before the freeing of the physical
		 * pages as the refcount of the pool is also the trigger of the
		 * idr destroy
		 */
		idr_remove(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);

		atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
		atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

		free_phys_pg_pack(hdev, phys_pg_pack);
	} else {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev,
			"free device memory failed, no match for handle %u\n",
			handle);
		return -EINVAL;
	}

	return 0;
}

/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev : habanalabs device structure
 * @va_list : list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev : habanalabs device structure
 * @va_list : list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual addresses block
 *
 * This function should be called only when va_list lock is taken
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
#if HL_MMU_DEBUG
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
		dev_dbg(hdev->dev,
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
#endif
}

/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_list : pointer to the virtual addresses block list
 * @va_block : virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given blocks with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);
		kfree(va_block);
		va_block = prev;
	}

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
		kfree(va_block);
	}
}

/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_list : pointer to the virtual addresses block list
 * @start : start virtual address
 * @end : end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other
 *   blocks if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon matureness */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				va_block->end)) {
			dev_err(hdev->dev,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);
			return -EINVAL;
		}

		if (va_block->end < start)
			res = va_block;
	}

	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
	if (!va_block)
		return -ENOMEM;

	va_block->start = start;
	va_block->end = end;
	va_block->size = size;

	if (!res)
		list_add(&va_block->node, va_list);
	else
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);

	return 0;
}
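
/*
 * Insertion example (illustrative): the loop above remembers in 'res' the
 * last block ending below 'start', so the new block is linked right after
 * it and the list stays sorted by address. Inserting 0x3000 - 0x3fff into
 * [0x1000 - 0x1fff, 0x5000 - 0x5fff] places it between the two;
 * merge_va_blocks_locked() then finds no touching neighbor and all three
 * blocks remain separate.
 */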

/*
 * add_va_block - wrapper for add_va_block_locked
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_range : pointer to the virtual addresses range
 * @start : start virtual address
 * @end : end virtual address
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}

/*
 * get_va_block - get a virtual block with the requested size
 *
 * @hdev : pointer to the habanalabs device structure
 * @va_range : pointer to the virtual addresses range
 * @size : requested block size
 * @hint_addr : hint address requested by the user
 * @is_userptr : true if the mapping is for host memory, false for DRAM
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   requested size
 * - Reserve the requested block and update the list
 * - Return the start address of the virtual block
 */
static u64 get_va_block(struct hl_device *hdev,
			struct hl_va_range *va_range, u64 size, u64 hint_addr,
			bool is_userptr)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	u64 valid_start, valid_size, prev_start, prev_end, page_mask,
		res_valid_start = 0, res_valid_size = 0;
	u32 page_size;
	bool add_prev = false;

	if (is_userptr)
		/*
		 * We cannot know if the user allocated memory with huge pages
		 * or not, hence we continue with the biggest possible
		 * granularity.
		 */
		page_size = hdev->asic_prop.pmmu_huge.page_size;
	else
		page_size = hdev->asic_prop.dmmu.page_size;

	page_mask = ~((u64)page_size - 1);

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* calc the first possible aligned addr */
		valid_start = va_block->start;

		if (valid_start & (page_size - 1)) {
			valid_start &= page_mask;
			valid_start += page_size;
			if (valid_start > va_block->end)
				continue;
		}

		valid_size = va_block->end - valid_start;

		if (valid_size >= size &&
			(!new_va_block || valid_size < res_valid_size)) {
			new_va_block = va_block;
			res_valid_start = valid_start;
			res_valid_size = valid_size;
		}

		if (hint_addr && hint_addr >= valid_start &&
				((hint_addr + size) <= va_block->end)) {
			new_va_block = va_block;
			res_valid_start = hint_addr;
			res_valid_size = valid_size;
			break;
		}
	}

	if (!new_va_block) {
		dev_err(hdev->dev, "no available va block for size %llu\n",
				size);
		goto out;
	}

	if (res_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = res_valid_start - 1;

		new_va_block->start = res_valid_start;
		new_va_block->size = res_valid_size;

		add_prev = true;
	}

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start;
	} else {
		list_del(&new_va_block->node);
		kfree(new_va_block);
	}

	if (add_prev)
		add_va_block_locked(hdev, &va_range->list, prev_start,
				prev_end);

	print_va_list_locked(hdev, &va_range->list);
out:
	mutex_unlock(&va_range->lock);

	return res_valid_start;
}
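
/*
 * Worked example (illustrative): for a host mapping with
 * pmmu_huge.page_size = 2MB, a block starting at 0x10100000 is first
 * aligned up to valid_start = 0x10200000. Among all blocks large enough
 * for the request, the one with the smallest valid_size wins (best fit),
 * unless hint_addr together with the requested size fits inside a block,
 * in which case the hint is returned as-is and the search stops.
 */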

/*
 * get_sg_info - get number of pages and the DMA address from SG list
 *
 * @sg : the SG list
 * @dma_addr : pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG list. Take
 * the offset of the address in the first page, add the length to it and
 * round up to the number of needed pages.
 */
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
	*dma_addr = sg_dma_address(sg);

	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}
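
/*
 * Worked example (illustrative): with 4KB pages, a segment whose DMA
 * address is 0x10000f00 and whose length is 0x2200 yields
 * (0xf00 + 0x2200 + 0xfff) >> 12 = 4, matching the four pages the segment
 * actually touches: the in-page offset pulls in a partial first page and
 * the round-up covers the partial tail.
 */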

/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 * @ctx: current context
 * @userptr: userptr to initialize from
 * @pphys_pg_pack: result pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
				struct hl_userptr *userptr,
				struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct scatterlist *sg;
	dma_addr_t dma_addr;
	u64 page_mask, total_npages;
	u32 npages, page_size = PAGE_SIZE,
		huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
	bool first = true, is_huge_page_opt = true;
	int rc, i, j;
	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
	if (!phys_pg_pack)
		return -ENOMEM;

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * consecutive block.
	 */
	total_npages = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if ((npages % pgs_in_huge_page) ||
					(dma_addr & (huge_page_size - 1)))
			is_huge_page_opt = false;
	}

	if (is_huge_page_opt) {
		page_size = huge_page_size;
		do_div(total_npages, pgs_in_huge_page);
	}

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
						GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		rc = -ENOMEM;
		goto page_pack_arr_mem_err;
	}

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

	j = 0;
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
		if (first) {
			first = false;
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;
		}

		while (npages) {
			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= pgs_in_huge_page;
			else
				npages--;
		}
	}

	*pphys_pg_pack = phys_pg_pack;

	return 0;

page_pack_arr_mem_err:
	kfree(phys_pg_pack);

	return rc;
}
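
/*
 * Illustrative example of the 2MB optimization above: with a 4KB base
 * page, pgs_in_huge_page = 512. An SG list whose segments are all
 * 2MB-aligned and whose page counts are multiples of 512 is described
 * with 2MB entries, shrinking the pages array by 512x. A single 4MB
 * segment at DMA address 0x100200000 qualifies; the same segment at
 * 0x100100000 is misaligned, so the pack falls back to 4KB pages.
 */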

/*
 * map_phys_pg_pack - maps the physical page pack.
 * @ctx: current context
 * @vaddr: start address of the virtual area to map from
 * @phys_pg_pack: the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk
 * - Rolls back the already-mapped pages on failure
 * - Returns 0 on success, error code otherwise
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;
	int rc = 0;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
				(i + 1) == phys_pg_pack->npages);
		if (rc) {
			dev_err(hdev->dev,
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,
				mapped_pg_cnt);
			goto err;
		}

		mapped_pg_cnt++;
		next_vaddr += page_size;
	}

	return 0;

err:
	next_vaddr = vaddr;
	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap(ctx, next_vaddr, page_size,
					(i + 1) == mapped_pg_cnt))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
					phys_pg_pack->handle, next_vaddr,
					phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;
	}

	return rc;
}

/*
 * unmap_phys_pg_pack - unmaps the physical page pack
 * @ctx: current context
 * @vaddr: start address of the virtual area to unmap
 * @phys_pg_pack: the pack of physical pages to unmap
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
				struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr, i;
	u32 page_size;

	page_size = phys_pg_pack->page_size;
	next_vaddr = vaddr;

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
		if (hl_mmu_unmap(ctx, next_vaddr, page_size,
				(i + 1) == phys_pg_pack->npages))
			dev_warn_ratelimited(hdev->dev,
			"unmap failed for vaddr: 0x%llx\n", next_vaddr);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 */
		if (hdev->pldm)
			usleep_range(500, 1000);
	}
}

static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
				u64 *paddr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle;

	handle = lower_32_bits(args->map_device.handle);
	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
	if (!phys_pg_pack) {
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "no match for handle %u\n", handle);
		return -EFAULT;
	}

	*paddr = phys_pg_pack->pages[0];

	spin_unlock(&vm->idr_lock);

	return 0;
}

/*
 * map_device_va - map the given memory
 *
 * @ctx : current context
 * @args : host parameters with handle/host virtual address
 * @device_addr : pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
		u64 *device_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_userptr *userptr = NULL;
	struct hl_vm_hash_node *hnode;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	u64 ret_vaddr, hint_addr;
	u32 handle = 0;
	int rc;
	bool is_userptr = args->flags & HL_MEM_USERPTR;

	/* Assume failure */
	*device_addr = 0;

	if (is_userptr) {
		u64 addr = args->map_host.host_virt_addr,
			size = args->map_host.mem_size;

		rc = dma_map_host_va(hdev, addr, size, &userptr);
		if (rc) {
			dev_err(hdev->dev, "failed to get userptr from va\n");
			return rc;
		}

		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				addr);
			goto init_page_pack_err;
		}

		vm_type = (enum vm_type_t *) userptr;
		hint_addr = args->map_host.hint_addr;
		handle = phys_pg_pack->handle;
	} else {
		handle = lower_32_bits(args->map_device.handle);

		spin_lock(&vm->idr_lock);
		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		if (!phys_pg_pack) {
			spin_unlock(&vm->idr_lock);
			dev_err(hdev->dev,
				"no match for handle %u\n", handle);
			return -EINVAL;
		}

		/* increment now to avoid freeing device memory while mapping */
		atomic_inc(&phys_pg_pack->mapping_cnt);

		spin_unlock(&vm->idr_lock);

		vm_type = (enum vm_type_t *) phys_pg_pack;

		hint_addr = args->map_device.hint_addr;
	}

	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
			phys_pg_pack->asid != ctx->asid) {
		dev_err(hdev->dev,
			"Failed to map memory, handle %u is not shared\n",
			handle);
		rc = -EPERM;
		goto shared_err;
	}

	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
	if (!hnode) {
		rc = -ENOMEM;
		goto hnode_err;
	}

	if (is_userptr)
		if (phys_pg_pack->page_size == hdev->asic_prop.pmmu.page_size)
			va_range = ctx->host_va_range;
		else
			va_range = ctx->host_huge_va_range;
	else
		va_range = ctx->dram_va_range;

	ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
					hint_addr, is_userptr);
	if (!ret_vaddr) {
		dev_err(hdev->dev, "no available va block for handle %u\n",
				handle);
		rc = -ENOMEM;
		goto va_block_err;
	}

	mutex_lock(&ctx->mmu_lock);

	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
	if (rc) {
		mutex_unlock(&ctx->mmu_lock);
		dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
				handle);
		goto map_err;
	}

	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);

	mutex_unlock(&ctx->mmu_lock);

	if (rc) {
		dev_err(hdev->dev,
			"mapping handle %u failed due to MMU cache invalidation\n",
			handle);
		goto map_err;
	}

	ret_vaddr += phys_pg_pack->offset;

	hnode->ptr = vm_type;
	hnode->vaddr = ret_vaddr;

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	*device_addr = ret_vaddr;

	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);

	return 0;

map_err:
	if (add_va_block(hdev, va_range, ret_vaddr,
				ret_vaddr + phys_pg_pack->total_size - 1))
		dev_warn(hdev->dev,
			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
				handle, ret_vaddr);

va_block_err:
	kfree(hnode);
hnode_err:
shared_err:
	atomic_dec(&phys_pg_pack->mapping_cnt);
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
	if (is_userptr)
		dma_unmap_host_va(hdev, userptr);

	return rc;
}

/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx : current context
 * @vaddr : device virtual address to unmap
 * @ctx_free : true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
 */
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_vm_hash_node *hnode = NULL;
	struct hl_userptr *userptr = NULL;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	bool is_userptr;
	int rc = 0;

	/* protect from double entrance */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
		if (vaddr == hnode->vaddr)
			break;

	if (!hnode) {
		mutex_unlock(&ctx->mem_hash_lock);
		dev_err(hdev->dev,
			"unmap failed, no mem hnode for vaddr 0x%llx\n",
			vaddr);
		return -EINVAL;
	}

	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	vm_type = hnode->ptr;

	if (*vm_type == VM_TYPE_USERPTR) {
		is_userptr = true;
		userptr = hnode->ptr;
		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
							&phys_pg_pack);
		if (rc) {
			dev_err(hdev->dev,
				"unable to init page pack for vaddr 0x%llx\n",
				vaddr);
			goto vm_type_err;
		}

		if (phys_pg_pack->page_size ==
					hdev->asic_prop.pmmu.page_size)
			va_range = ctx->host_va_range;
		else
			va_range = ctx->host_huge_va_range;
	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
		is_userptr = false;
		va_range = ctx->dram_va_range;
		phys_pg_pack = hnode->ptr;
	} else {
		dev_warn(hdev->dev,
			"unmap failed, unknown vm desc for vaddr 0x%llx\n",
				vaddr);
		rc = -EFAULT;
		goto vm_type_err;
	}

	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
		rc = -EINVAL;
		goto mapping_cnt_err;
	}

	vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);

	mutex_lock(&ctx->mmu_lock);

	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

	/*
	 * During context free this function is called in a loop to clean all
	 * the context mappings. Hence the cache invalidation can be called once
	 * at the loop end rather than for each iteration
	 */
	if (!ctx_free)
		rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
								*vm_type);

	mutex_unlock(&ctx->mmu_lock);

	/*
	 * If the context is closing we don't need to check for the MMU cache
	 * invalidation return code and update the VA free list as in this flow
	 * we invalidate the MMU cache outside of this unmap function and the VA
	 * free list will be freed anyway.
	 */
	if (!ctx_free) {
		int tmp_rc;

		if (rc)
			dev_err(hdev->dev,
				"unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
				vaddr);

		tmp_rc = add_va_block(hdev, va_range, vaddr,
					vaddr + phys_pg_pack->total_size - 1);
		if (tmp_rc) {
			dev_warn(hdev->dev,
					"add va block failed for vaddr: 0x%llx\n",
					vaddr);
			if (!rc)
				rc = tmp_rc;
		}
	}

	atomic_dec(&phys_pg_pack->mapping_cnt);
	kfree(hnode);

	if (is_userptr) {
		free_phys_pg_pack(hdev, phys_pg_pack);
		dma_unmap_host_va(hdev, userptr);
	}

	return rc;

mapping_cnt_err:
	if (is_userptr)
		free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	return rc;
}

static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}

		/* Force contiguous as there are no real MMU
		 * translations to overcome physical memory gaps
		 */
		args->in.flags |= HL_MEM_CONTIGUOUS;
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		if (args->in.flags & HL_MEM_USERPTR) {
			device_addr = args->in.map_host.host_virt_addr;
			rc = 0;
		} else {
			rc = get_paddr_from_handle(ctx, &args->in,
					&device_addr);
		}

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = 0;
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}

int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute MEMORY IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	if (!hdev->mmu_enable)
		return mem_ioctl_no_mmu(hpriv, args);

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (!hdev->dram_supports_virtual_memory) {
			dev_err(hdev->dev, "DRAM alloc is not supported\n");
			rc = -EINVAL;
			goto out;
		}

		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		rc = map_device_va(ctx, &args->in, &device_addr);

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
					false);
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}
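
/*
 * Hedged userspace usage sketch (for orientation only; assumes the
 * HL_IOCTL_MEMORY request macro from uapi/misc/habanalabs.h and an
 * already-open device fd):
 *
 *	union hl_mem_args args = {0};
 *	__u64 handle, device_va;
 *
 *	args.in.op = HL_MEM_OP_ALLOC;
 *	args.in.alloc.mem_size = 2 * 1024 * 1024;
 *	if (ioctl(fd, HL_IOCTL_MEMORY, &args))	// allocate DRAM
 *		return -1;
 *	handle = args.out.handle;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = HL_MEM_OP_MAP;
 *	args.in.map_device.handle = handle;
 *	if (ioctl(fd, HL_IOCTL_MEMORY, &args))	// map to device VA
 *		return -1;
 *	device_va = args.out.device_virt_addr;
 */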

static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	int rc;

	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}

	userptr->vec = frame_vector_create(npages);
	if (!userptr->vec) {
		dev_err(hdev->dev, "Failed to create frame vector\n");
		return -ENOMEM;
	}

	rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
				userptr->vec);

	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed to map host memory, user ptr probably wrong\n");
		if (rc < 0)
			goto destroy_framevec;
		rc = -EFAULT;
		goto put_framevec;
	}

	if (frame_vector_to_pages(userptr->vec) < 0) {
		dev_err(hdev->dev,
			"Failed to translate frame vector to pages\n");
		rc = -EFAULT;
		goto put_framevec;
	}

	rc = sg_alloc_table_from_pages(userptr->sgt,
					frame_vector_pages(userptr->vec),
					npages, offset, size, GFP_ATOMIC);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto put_framevec;
	}

	return 0;

put_framevec:
	put_vaddr_frames(userptr->vec);
destroy_framevec:
	frame_vector_destroy(userptr->vec);
	return rc;
}

/*
 * hl_pin_host_memory - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Create an SG list from those pages
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
					struct hl_userptr *userptr)
{
	u64 start, end;
	u32 npages, offset;
	int rc;

	if (!size) {
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
		return -EINVAL;
	}

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
		dev_err(hdev->dev,
			"user pointer 0x%llx + %llu causes integer overflow\n",
			addr, size);
		return -EINVAL;
	}

	/*
	 * This function can be called also from data path, hence use atomic
	 * always as it is not a big allocation.
	 */
	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
	if (!userptr->sgt)
		return -ENOMEM;

	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;

	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	rc = get_user_memory(hdev, addr, size, npages, start, offset,
				userptr);
	if (rc) {
		dev_err(hdev->dev,
			"failed to get user memory for address 0x%llx\n",
			addr);
		goto free_sgt;
	}

	hl_debugfs_add_userptr(hdev, userptr);

	return 0;

free_sgt:
	kfree(userptr->sgt);
	return rc;
}
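
/*
 * Worked example (illustrative): pinning addr = 0x7f001234, size = 0x3000
 * with 4KB pages gives start = 0x7f001000, offset = 0x234,
 * end = PAGE_ALIGN(0x7f004234) = 0x7f005000 and npages = 4, so the
 * partially covered first and last pages are pinned along with the fully
 * covered ones.
 */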

/*
 * hl_unpin_host_memory - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Free the SG list
 */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct page **pages;

	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
							userptr->sgt->nents,
							userptr->dir);

	pages = frame_vector_pages(userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

		for (i = 0; i < frame_vector_count(userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(userptr->vec);
	frame_vector_destroy(userptr->vec);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);
}

/*
 * hl_userptr_delete_list - clear userptr list
 *
 * @hdev : pointer to the habanalabs device structure
 * @userptr_list : pointer to the list to clear
 *
 * This function does the following:
 * - Iterates over the list, unpins the host memory and frees the userptr
 *   structure for each entry
 */
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);
		kfree(userptr);
	}

	INIT_LIST_HEAD(userptr_list);
}

/*
 * hl_userptr_is_pinned - returns whether the given userptr is pinned
 *
 * @hdev : pointer to the habanalabs device structure
 * @addr : user address to check
 * @size : user memory size to check
 * @userptr_list : pointer to the list to search
 * @userptr : pointer to userptr to check
 *
 * This function does the following:
 * - Iterates over the list and checks if the given userptr is in it, meaning
 *   it is pinned. If so, returns true, otherwise returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
			return true;
	}

	return false;
}

/*
 * va_range_init - initialize virtual addresses range
 * @hdev: pointer to the habanalabs device structure
 * @va_range: pointer to the range to initialize
 * @start: range start address
 * @end: range end address
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
			u64 start, u64 end)
{
	int rc;

	INIT_LIST_HEAD(&va_range->list);

	/* PAGE_SIZE alignment */

	if (start & (PAGE_SIZE - 1)) {
		start &= PAGE_MASK;
		start += PAGE_SIZE;
	}

	if (end & (PAGE_SIZE - 1))
		end &= PAGE_MASK;

	if (start >= end) {
		dev_err(hdev->dev, "too small vm range for va list\n");
		return -EFAULT;
	}

	rc = add_va_block(hdev, va_range, start, end);

	if (rc) {
		dev_err(hdev->dev, "Failed to init host va list\n");
		return rc;
	}

	va_range->start_addr = start;
	va_range->end_addr = end;

	return 0;
}

/*
 * va_range_fini() - clear a virtual addresses range
 * @hdev: pointer to the habanalabs structure
 * @va_range: pointer to virtual addresses range
 *
 * This function does the following:
 * - Frees the virtual addresses block list and its lock
 */
static void va_range_fini(struct hl_device *hdev,
		struct hl_va_range *va_range)
{
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

	mutex_destroy(&va_range->lock);
	kfree(va_range);
}

/*
 * vm_ctx_init_with_ranges() - initialize virtual memory for context
 * @ctx: pointer to the habanalabs context structure
 * @host_range_start: host virtual addresses range start.
 * @host_range_end: host virtual addresses range end.
 * @host_huge_range_start: host virtual addresses range start for memory
 *                         allocated with huge pages.
 * @host_huge_range_end: host virtual addresses range end for memory allocated
 *                       with huge pages.
 * @dram_range_start: dram virtual addresses range start.
 * @dram_range_end: dram virtual addresses range end.
 *
 * This function initializes the following:
 * - MMU for context
 * - Virtual address to area descriptor hashtable
 * - Virtual block list of available virtual memory
 */
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
					u64 host_range_start,
					u64 host_range_end,
					u64 host_huge_range_start,
					u64 host_huge_range_end,
					u64 dram_range_start,
					u64 dram_range_end)
{
	struct hl_device *hdev = ctx->hdev;
	int rc;

	ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
	if (!ctx->host_va_range)
		return -ENOMEM;

	ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
						GFP_KERNEL);
	if (!ctx->host_huge_va_range) {
		rc = -ENOMEM;
		goto host_huge_va_range_err;
	}

	ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
	if (!ctx->dram_va_range) {
		rc = -ENOMEM;
		goto dram_va_range_err;
	}

	rc = hl_mmu_ctx_init(ctx);
	if (rc) {
		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
		goto mmu_ctx_err;
	}

	mutex_init(&ctx->mem_hash_lock);
	hash_init(ctx->mem_hash);

	mutex_init(&ctx->host_va_range->lock);

	rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
				host_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init host vm range\n");
		goto host_page_range_err;
	}

	if (hdev->pmmu_huge_range) {
		mutex_init(&ctx->host_huge_va_range->lock);

		rc = va_range_init(hdev, ctx->host_huge_va_range,
					host_huge_range_start,
					host_huge_range_end);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init host huge vm range\n");
			goto host_hpage_range_err;
		}
	} else {
		ctx->host_huge_va_range = ctx->host_va_range;
	}

	mutex_init(&ctx->dram_va_range->lock);

	rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
			dram_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init dram vm range\n");
		goto dram_vm_err;
	}

	hl_debugfs_add_ctx_mem_hash(hdev, ctx);

	return 0;

dram_vm_err:
	mutex_destroy(&ctx->dram_va_range->lock);

	if (hdev->pmmu_huge_range) {
		mutex_lock(&ctx->host_huge_va_range->lock);
		clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
		mutex_unlock(&ctx->host_huge_va_range->lock);
	}
host_hpage_range_err:
	if (hdev->pmmu_huge_range)
		mutex_destroy(&ctx->host_huge_va_range->lock);
	mutex_lock(&ctx->host_va_range->lock);
	clear_va_list_locked(hdev, &ctx->host_va_range->list);
	mutex_unlock(&ctx->host_va_range->lock);
host_page_range_err:
	mutex_destroy(&ctx->host_va_range->lock);
	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
mmu_ctx_err:
	kfree(ctx->dram_va_range);
dram_va_range_err:
	kfree(ctx->host_huge_va_range);
host_huge_va_range_err:
	kfree(ctx->host_va_range);

	return rc;
}

int hl_vm_ctx_init(struct hl_ctx *ctx)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 host_range_start, host_range_end, host_huge_range_start,
		host_huge_range_end, dram_range_start, dram_range_end;

	atomic64_set(&ctx->dram_phys_mem, 0);

	/*
	 * - If MMU is enabled, init the ranges as usual.
	 * - If MMU is disabled, in case of host mapping, the returned address
	 *   is the given one.
	 *   In case of DRAM mapping, the returned address is the physical
	 *   address of the memory related to the given handle.
	 */
	if (ctx->hdev->mmu_enable) {
		dram_range_start = prop->dmmu.start_addr;
		dram_range_end = prop->dmmu.end_addr;
		host_range_start = prop->pmmu.start_addr;
		host_range_end = prop->pmmu.end_addr;
		host_huge_range_start = prop->pmmu_huge.start_addr;
		host_huge_range_end = prop->pmmu_huge.end_addr;
	} else {
		dram_range_start = prop->dram_user_base_address;
		dram_range_end = prop->dram_end_address;
		host_range_start = prop->dram_user_base_address;
		host_range_end = prop->dram_end_address;
		host_huge_range_start = prop->dram_user_base_address;
		host_huge_range_end = prop->dram_end_address;
	}

	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
					host_huge_range_start,
					host_huge_range_end,
					dram_range_start,
					dram_range_end);
}

/*
 * hl_vm_ctx_fini - virtual memory teardown of context
 *
 * @ctx : pointer to the habanalabs context structure
 *
 * This function tears down the following:
 * - Virtual block list of available virtual memory
 * - Virtual address to area descriptor hashtable
 * - MMU for context
 *
 * In addition this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - This function checks the virtual block list for correctness. At this point
 *   the list should contain one element which describes the whole virtual
 *   memory range of the context. Otherwise, a warning is printed.
 */
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_list;
	struct hl_vm_hash_node *hnode;
	struct hlist_node *tmp_node;
	int i;

	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);

	/*
	 * Clearly something went wrong on hard reset so no point in printing
	 * another side effect error
	 */
	if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
		dev_notice(hdev->dev,
			"user released device without removing its memory mappings\n");

	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
		dev_dbg(hdev->dev,
			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
			hnode->vaddr, ctx->asid);
		unmap_device_va(ctx, hnode->vaddr, true);
	}

	/* invalidate the cache once after the unmapping loop */
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);

	spin_lock(&vm->idr_lock);
	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
		if (phys_pg_list->asid == ctx->asid) {
			dev_dbg(hdev->dev,
				"page list 0x%px of asid %d is still alive\n",
				phys_pg_list, ctx->asid);
			atomic64_sub(phys_pg_list->total_size,
					&hdev->dram_used_mem);
			free_phys_pg_pack(hdev, phys_pg_list);
			idr_remove(&vm->phys_pg_pack_handles, i);
		}
	spin_unlock(&vm->idr_lock);

	va_range_fini(hdev, ctx->dram_va_range);
	if (hdev->pmmu_huge_range)
		va_range_fini(hdev, ctx->host_huge_va_range);
	va_range_fini(hdev, ctx->host_va_range);

	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
}

/*
 * hl_vm_init - initialize virtual memory module
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function initializes the following:
 * - MMU module
 * - DRAM physical pages pool of 2MB
 * - Idr for device memory allocation handles
 */
int hl_vm_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm *vm = &hdev->vm;
	int rc;

	vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
	if (!vm->dram_pg_pool) {
		dev_err(hdev->dev, "Failed to create dram page pool\n");
		return -ENOMEM;
	}

	kref_init(&vm->dram_pg_pool_refcount);

	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
			prop->dram_end_address - prop->dram_user_base_address,
			-1);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to dram page pool %d\n", rc);
		goto pool_add_err;
	}

	spin_lock_init(&vm->idr_lock);
	idr_init(&vm->phys_pg_pack_handles);

	atomic64_set(&hdev->dram_used_mem, 0);

	vm->init_done = true;

	return 0;

pool_add_err:
	gen_pool_destroy(vm->dram_pg_pool);

	return rc;
}

/*
 * hl_vm_fini - virtual memory module teardown
 *
 * @hdev : pointer to the habanalabs device structure
 *
 * This function tears down the following:
 * - Idr for device memory allocation handles
 * - DRAM physical pages pool of 2MB
 * - MMU module
 */
void hl_vm_fini(struct hl_device *hdev)
{
	struct hl_vm *vm = &hdev->vm;

	if (!vm->init_done)
		return;

	/*
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use. Hence the DRAM pool should be freed here.
	 */
	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
				__func__);

	vm->init_done = false;
}