// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_MMU_DEBUG	0
/*
 * The va ranges in the context object contain a list with the available chunks
 * of device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range, which is half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as the result and a remainder that stays in the
 * list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. If its edges match the edges of the adjacent chunks
 * (meaning a contiguous chunk can be created), the chunks are merged.
 *
 * On finish, the list is checked to contain only one chunk covering the whole
 * relevant virtual range (which is half of the total device virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
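 *
 * For example (illustrative addresses only), if a range initially holds the
 * single chunk [0x1000000, 0x1ffffff] and a 0x100000-byte mapping is
 * requested, the chunk is split: [0x1000000, 0x10fffff] is handed out and
 * [0x1100000, 0x1ffffff] stays in the list. When that mapping is later
 * unmapped, the returned chunk's end + 1 equals the remaining chunk's start,
 * so the two are merged back into the original chunk.
 */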
41 * alloc_device_memory - allocate device memory
43 * @ctx : current context
44 * @args : host parameters containing the requested size
45 * @ret_handle : result handle
47 * This function does the following:
48 * - Allocate the requested size rounded up to 'dram_page_size' pages
49 * - Return unique handle
51 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
54 struct hl_device *hdev = ctx->hdev;
55 struct hl_vm *vm = &hdev->vm;
56 struct hl_vm_phys_pg_pack *phys_pg_pack;
57 u64 paddr = 0, total_size, num_pgs, i;
58 u32 num_curr_pgs, page_size, page_shift;
63 page_size = hdev->asic_prop.dram_page_size;
64 page_shift = __ffs(page_size);
65 num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
66 total_size = num_pgs << page_shift;
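	/*
	 * For example (illustrative numbers), with a 2MB dram_page_size
	 * (page_shift = 21), a 5MB request rounds up to num_pgs = 3 and
	 * total_size = 6MB.
	 */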
69 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
73 contiguous = args->flags & HL_MEM_CONTIGUOUS;
76 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
79 "failed to allocate %llu contiguous pages with total size of %llu\n",
84 if (hdev->memory_scrub) {
85 rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr,
89 "Failed to scrub contiguous device memory\n");
95 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
101 phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
102 phys_pg_pack->asid = ctx->asid;
103 phys_pg_pack->npages = num_pgs;
104 phys_pg_pack->page_size = page_size;
105 phys_pg_pack->total_size = total_size;
106 phys_pg_pack->flags = args->flags;
107 phys_pg_pack->contiguous = contiguous;
109 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
110 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
115 if (phys_pg_pack->contiguous) {
116 for (i = 0 ; i < num_pgs ; i++)
117 phys_pg_pack->pages[i] = paddr + i * page_size;
119 for (i = 0 ; i < num_pgs ; i++) {
120 phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
123 if (!phys_pg_pack->pages[i]) {
125 "Failed to allocate device memory (out of memory)\n");
130 if (hdev->memory_scrub) {
131 rc = hdev->asic_funcs->scrub_device_mem(hdev,
132 phys_pg_pack->pages[i],
136 "Failed to scrub device memory\n");
145 spin_lock(&vm->idr_lock);
146 handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
148 spin_unlock(&vm->idr_lock);
151 dev_err(hdev->dev, "Failed to get handle for page\n");
156 for (i = 0 ; i < num_pgs ; i++)
157 kref_get(&vm->dram_pg_pool_refcount);
159 phys_pg_pack->handle = handle;
161 atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
162 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
164 *ret_handle = handle;
170 if (!phys_pg_pack->contiguous)
171 for (i = 0 ; i < num_curr_pgs ; i++)
172 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
175 kvfree(phys_pg_pack->pages);
180 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
186 * dma_map_host_va - DMA mapping of the given host virtual address.
187 * @hdev: habanalabs device structure
188 * @addr: the host virtual address of the memory area
189 * @size: the size of the memory area
190 * @p_userptr: pointer to result userptr structure
192 * This function does the following:
193 * - Allocate userptr structure
194 * - Pin the given host memory using the userptr structure
195 * - Perform DMA mapping to have the DMA addresses of the pages
197 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
198 struct hl_userptr **p_userptr)
200 struct hl_userptr *userptr;
203 userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
209 rc = hl_pin_host_memory(hdev, addr, size, userptr);
211 dev_err(hdev->dev, "Failed to pin host memory\n");
215 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
216 userptr->sgt->nents, DMA_BIDIRECTIONAL);
218 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
222 userptr->dma_mapped = true;
223 userptr->dir = DMA_BIDIRECTIONAL;
224 userptr->vm_type = VM_TYPE_USERPTR;
226 *p_userptr = userptr;
231 hl_unpin_host_memory(hdev, userptr);
240 * dma_unmap_host_va - DMA unmapping of the given host virtual address.
241 * @hdev: habanalabs device structure
242 * @userptr: userptr to free
244 * This function does the following:
245 * - Unpins the physical pages
246 * - Frees the userptr structure
248 static void dma_unmap_host_va(struct hl_device *hdev,
249 struct hl_userptr *userptr)
251 hl_unpin_host_memory(hdev, userptr);
256 * dram_pg_pool_do_release - free DRAM pages pool
258 * @ref : pointer to reference object
260 * This function does the following:
261 * - Frees the idr structure of physical pages handles
262 * - Frees the generic pool of DRAM physical pages
264 static void dram_pg_pool_do_release(struct kref *ref)
266 struct hl_vm *vm = container_of(ref, struct hl_vm,
267 dram_pg_pool_refcount);
270 * free the idr here as only here we know for sure that there are no
271 * allocated physical pages and hence there are no handles in use
273 idr_destroy(&vm->phys_pg_pack_handles);
274 gen_pool_destroy(vm->dram_pg_pool);
278 * free_phys_pg_pack - free physical page pack
279 * @hdev: habanalabs device structure
280 * @phys_pg_pack: physical page pack to free
282 * This function does the following:
283 * - For DRAM memory only, iterate over the pack and free each physical block
284 * structure by returning it to the general pool
285 * - Free the hl_vm_phys_pg_pack structure
287 static void free_phys_pg_pack(struct hl_device *hdev,
288 struct hl_vm_phys_pg_pack *phys_pg_pack)
290 struct hl_vm *vm = &hdev->vm;
293 if (!phys_pg_pack->created_from_userptr) {
294 if (phys_pg_pack->contiguous) {
295 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
296 phys_pg_pack->total_size);
298 for (i = 0; i < phys_pg_pack->npages ; i++)
299 kref_put(&vm->dram_pg_pool_refcount,
300 dram_pg_pool_do_release);
302 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
303 gen_pool_free(vm->dram_pg_pool,
304 phys_pg_pack->pages[i],
305 phys_pg_pack->page_size);
306 kref_put(&vm->dram_pg_pool_refcount,
307 dram_pg_pool_do_release);
312 kvfree(phys_pg_pack->pages);
317 * free_device_memory - free device memory
319 * @ctx : current context
320 * @handle : handle of the memory chunk to free
322 * This function does the following:
323 * - Free the device memory related to the given handle
325 static int free_device_memory(struct hl_ctx *ctx, u32 handle)
327 struct hl_device *hdev = ctx->hdev;
328 struct hl_vm *vm = &hdev->vm;
329 struct hl_vm_phys_pg_pack *phys_pg_pack;
331 spin_lock(&vm->idr_lock);
332 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
334 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
335 dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
337 spin_unlock(&vm->idr_lock);
	/*
	 * must remove from idr before freeing the physical pages, as the
	 * refcount of the pool is also the trigger for the idr destroy
	 */
346 idr_remove(&vm->phys_pg_pack_handles, handle);
347 spin_unlock(&vm->idr_lock);
349 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
350 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
352 free_phys_pg_pack(hdev, phys_pg_pack);
354 spin_unlock(&vm->idr_lock);
356 "free device memory failed, no match for handle %u\n",
365 * clear_va_list_locked - free virtual addresses list
367 * @hdev : habanalabs device structure
368 * @va_list : list of virtual addresses to free
370 * This function does the following:
371 * - Iterate over the list and free each virtual addresses block
373 * This function should be called only when va_list lock is taken
375 static void clear_va_list_locked(struct hl_device *hdev,
376 struct list_head *va_list)
378 struct hl_vm_va_block *va_block, *tmp;
380 list_for_each_entry_safe(va_block, tmp, va_list, node) {
381 list_del(&va_block->node);
387 * print_va_list_locked - print virtual addresses list
389 * @hdev : habanalabs device structure
390 * @va_list : list of virtual addresses to print
392 * This function does the following:
393 * - Iterate over the list and print each virtual addresses block
395 * This function should be called only when va_list lock is taken
397 static void print_va_list_locked(struct hl_device *hdev,
398 struct list_head *va_list)
401 struct hl_vm_va_block *va_block;
403 dev_dbg(hdev->dev, "print va list:\n");
405 list_for_each_entry(va_block, va_list, node)
407 "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
408 va_block->start, va_block->end, va_block->size);
413 * merge_va_blocks_locked - merge a virtual block if possible
415 * @hdev : pointer to the habanalabs device structure
416 * @va_list : pointer to the virtual addresses block list
417 * @va_block : virtual block to merge with adjacent blocks
419 * This function does the following:
 * - Merge the given block with the adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
425 static void merge_va_blocks_locked(struct hl_device *hdev,
426 struct list_head *va_list, struct hl_vm_va_block *va_block)
428 struct hl_vm_va_block *prev, *next;
430 prev = list_prev_entry(va_block, node);
431 if (&prev->node != va_list && prev->end + 1 == va_block->start) {
432 prev->end = va_block->end;
433 prev->size = prev->end - prev->start;
434 list_del(&va_block->node);
439 next = list_next_entry(va_block, node);
440 if (&next->node != va_list && va_block->end + 1 == next->start) {
441 next->start = va_block->start;
442 next->size = next->end - next->start;
443 list_del(&va_block->node);
449 * add_va_block_locked - add a virtual block to the virtual addresses list
451 * @hdev : pointer to the habanalabs device structure
452 * @va_list : pointer to the virtual addresses block list
453 * @start : start virtual address
454 * @end : end virtual address
456 * This function does the following:
457 * - Add the given block to the virtual blocks list and merge with other
458 * blocks if a contiguous virtual block can be created
 * This function should be called only when va_list lock is taken
462 static int add_va_block_locked(struct hl_device *hdev,
463 struct list_head *va_list, u64 start, u64 end)
465 struct hl_vm_va_block *va_block, *res = NULL;
466 u64 size = end - start;
468 print_va_list_locked(hdev, va_list);
470 list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove once the code matures */
472 if (hl_mem_area_crosses_range(start, size, va_block->start,
475 "block crossing ranges at start 0x%llx, end 0x%llx\n",
476 va_block->start, va_block->end);
480 if (va_block->end < start)
484 va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
488 va_block->start = start;
490 va_block->size = size;
493 list_add(&va_block->node, va_list);
495 list_add(&va_block->node, &res->node);
497 merge_va_blocks_locked(hdev, va_list, va_block);
499 print_va_list_locked(hdev, va_list);
505 * add_va_block - wrapper for add_va_block_locked
507 * @hdev : pointer to the habanalabs device structure
 * @va_range : pointer to the virtual addresses range
509 * @start : start virtual address
510 * @end : end virtual address
512 * This function does the following:
513 * - Takes the list lock and calls add_va_block_locked
515 static inline int add_va_block(struct hl_device *hdev,
516 struct hl_va_range *va_range, u64 start, u64 end)
520 mutex_lock(&va_range->lock);
521 rc = add_va_block_locked(hdev, &va_range->list, start, end);
522 mutex_unlock(&va_range->lock);
528 * get_va_block() - get a virtual block for the given size and alignment.
529 * @hdev: pointer to the habanalabs device structure.
530 * @va_range: pointer to the virtual addresses range.
531 * @size: requested block size.
532 * @hint_addr: hint for requested address by the user.
533 * @va_block_align: required alignment of the virtual block start address.
535 * This function does the following:
 * - Iterate over the virtual block list to find a suitable virtual block for the
537 * given size and alignment.
538 * - Reserve the requested block and update the list.
539 * - Return the start address of the virtual block.
541 static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
542 u64 size, u64 hint_addr, u32 va_block_align)
544 struct hl_vm_va_block *va_block, *new_va_block = NULL;
545 u64 valid_start, valid_size, prev_start, prev_end, align_mask,
546 res_valid_start = 0, res_valid_size = 0;
547 bool add_prev = false;
549 align_mask = ~((u64)va_block_align - 1);
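	/*
	 * For example (illustrative value), va_block_align = 0x200000 gives
	 * align_mask = ~0x1fffff, which is used below to round candidate block
	 * start addresses up to the next aligned address.
	 */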
551 /* check if hint_addr is aligned */
552 if (hint_addr & (va_block_align - 1))
555 mutex_lock(&va_range->lock);
557 print_va_list_locked(hdev, &va_range->list);
559 list_for_each_entry(va_block, &va_range->list, node) {
560 /* calc the first possible aligned addr */
561 valid_start = va_block->start;
563 if (valid_start & (va_block_align - 1)) {
564 valid_start &= align_mask;
565 valid_start += va_block_align;
566 if (valid_start > va_block->end)
570 valid_size = va_block->end - valid_start;
572 if (valid_size >= size &&
573 (!new_va_block || valid_size < res_valid_size)) {
574 new_va_block = va_block;
575 res_valid_start = valid_start;
576 res_valid_size = valid_size;
579 if (hint_addr && hint_addr >= valid_start &&
580 ((hint_addr + size) <= va_block->end)) {
581 new_va_block = va_block;
582 res_valid_start = hint_addr;
583 res_valid_size = valid_size;
589 dev_err(hdev->dev, "no available va block for size %llu\n",
594 if (res_valid_start > new_va_block->start) {
595 prev_start = new_va_block->start;
596 prev_end = res_valid_start - 1;
598 new_va_block->start = res_valid_start;
599 new_va_block->size = res_valid_size;
604 if (new_va_block->size > size) {
605 new_va_block->start += size;
606 new_va_block->size = new_va_block->end - new_va_block->start;
608 list_del(&new_va_block->node);
613 add_va_block_locked(hdev, &va_range->list, prev_start,
616 print_va_list_locked(hdev, &va_range->list);
618 mutex_unlock(&va_range->lock);
620 return res_valid_start;
/*
 * get_sg_info - get number of pages and the DMA address from SG list
 *
 * @sg : the SG list
 * @dma_addr : pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG list. Take the
 * offset of the address in the first page, add the length to it and round the
 * result up to the number of pages needed.
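 *
 * For example (illustrative numbers), with 4KB pages, a segment of length
 * 0x2000 whose DMA address starts at offset 0x100 into a page spans
 * (0x100 + 0x2000 + 0xfff) >> 12 = 3 pages.
 */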
633 static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
635 *dma_addr = sg_dma_address(sg);
637 return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
638 (PAGE_SIZE - 1)) >> PAGE_SHIFT;
/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 * @ctx: current context
 * @userptr: userptr to initialize from
 * @pphys_pg_pack: result pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
653 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
654 struct hl_userptr *userptr,
655 struct hl_vm_phys_pg_pack **pphys_pg_pack)
657 struct hl_vm_phys_pg_pack *phys_pg_pack;
658 struct scatterlist *sg;
660 u64 page_mask, total_npages;
661 u32 npages, page_size = PAGE_SIZE,
662 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
663 bool first = true, is_huge_page_opt = true;
665 u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
667 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
671 phys_pg_pack->vm_type = userptr->vm_type;
672 phys_pg_pack->created_from_userptr = true;
673 phys_pg_pack->asid = ctx->asid;
674 atomic_set(&phys_pg_pack->mapping_cnt, 1);
	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * memory block.
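	 *
	 * For example (illustrative numbers), with 4KB regular pages and 2MB
	 * huge pages, pgs_in_huge_page is 512; a 4MB SG entry whose DMA
	 * address is 2MB-aligned maps cleanly to 2 huge pages, while a 3MB
	 * entry (768 regular pages, not a multiple of 512) disables the
	 * optimization.
	 */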
683 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
684 npages = get_sg_info(sg, &dma_addr);
686 total_npages += npages;
688 if ((npages % pgs_in_huge_page) ||
689 (dma_addr & (huge_page_size - 1)))
690 is_huge_page_opt = false;
693 if (is_huge_page_opt) {
694 page_size = huge_page_size;
695 do_div(total_npages, pgs_in_huge_page);
698 page_mask = ~(((u64) page_size) - 1);
700 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
702 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
704 goto page_pack_arr_mem_err;
707 phys_pg_pack->npages = total_npages;
708 phys_pg_pack->page_size = page_size;
709 phys_pg_pack->total_size = total_npages * page_size;
712 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
713 npages = get_sg_info(sg, &dma_addr);
715 /* align down to physical page size and save the offset */
718 phys_pg_pack->offset = dma_addr & (page_size - 1);
719 dma_addr &= page_mask;
723 phys_pg_pack->pages[j++] = dma_addr;
724 dma_addr += page_size;
726 if (is_huge_page_opt)
727 npages -= pgs_in_huge_page;
733 *pphys_pg_pack = phys_pg_pack;
737 page_pack_arr_mem_err:
744 * map_phys_pg_pack - maps the physical page pack.
745 * @ctx: current context
746 * @vaddr: start address of the virtual area to map from
747 * @phys_pg_pack: the pack of physical pages to map to
749 * This function does the following:
750 * - Maps each chunk of virtual memory to matching physical chunk
751 * - Stores number of successful mappings in the given argument
752 * - Returns 0 on success, error code otherwise
754 static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
755 struct hl_vm_phys_pg_pack *phys_pg_pack)
757 struct hl_device *hdev = ctx->hdev;
758 u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
759 u32 page_size = phys_pg_pack->page_size;
762 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
763 paddr = phys_pg_pack->pages[i];
765 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
766 (i + 1) == phys_pg_pack->npages);
769 "map failed for handle %u, npages: %llu, mapped: %llu",
770 phys_pg_pack->handle, phys_pg_pack->npages,
776 next_vaddr += page_size;
783 for (i = 0 ; i < mapped_pg_cnt ; i++) {
784 if (hl_mmu_unmap(ctx, next_vaddr, page_size,
785 (i + 1) == mapped_pg_cnt))
786 dev_warn_ratelimited(hdev->dev,
787 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
788 phys_pg_pack->handle, next_vaddr,
789 phys_pg_pack->pages[i], page_size);
791 next_vaddr += page_size;
798 * unmap_phys_pg_pack - unmaps the physical page pack
799 * @ctx: current context
800 * @vaddr: start address of the virtual area to unmap
801 * @phys_pg_pack: the pack of physical pages to unmap
803 static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
804 struct hl_vm_phys_pg_pack *phys_pg_pack)
806 struct hl_device *hdev = ctx->hdev;
810 page_size = phys_pg_pack->page_size;
813 for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
814 if (hl_mmu_unmap(ctx, next_vaddr, page_size,
815 (i + 1) == phys_pg_pack->npages))
816 dev_warn_ratelimited(hdev->dev,
817 "unmap failed for vaddr: 0x%llx\n", next_vaddr);
820 * unmapping on Palladium can be really long, so avoid a CPU
821 * soft lockup bug by sleeping a little between unmapping pages
824 usleep_range(500, 1000);
828 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
831 struct hl_device *hdev = ctx->hdev;
832 struct hl_vm *vm = &hdev->vm;
833 struct hl_vm_phys_pg_pack *phys_pg_pack;
836 handle = lower_32_bits(args->map_device.handle);
837 spin_lock(&vm->idr_lock);
838 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
840 spin_unlock(&vm->idr_lock);
841 dev_err(hdev->dev, "no match for handle %u\n", handle);
845 *paddr = phys_pg_pack->pages[0];
847 spin_unlock(&vm->idr_lock);
853 * map_device_va - map the given memory
855 * @ctx : current context
856 * @args : host parameters with handle/host virtual address
857 * @device_addr : pointer to result device virtual address
859 * This function does the following:
860 * - If given a physical device memory handle, map to a device virtual block
861 * and return the start address of this block
862 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
866 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
869 struct hl_device *hdev = ctx->hdev;
870 struct hl_vm *vm = &hdev->vm;
871 struct hl_vm_phys_pg_pack *phys_pg_pack;
872 struct hl_userptr *userptr = NULL;
873 struct hl_vm_hash_node *hnode;
874 struct hl_va_range *va_range;
875 enum vm_type_t *vm_type;
876 u64 ret_vaddr, hint_addr;
877 u32 handle = 0, va_block_align;
879 bool is_userptr = args->flags & HL_MEM_USERPTR;
885 u64 addr = args->map_host.host_virt_addr,
886 size = args->map_host.mem_size;
887 u32 page_size = hdev->asic_prop.pmmu.page_size,
888 huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
890 rc = dma_map_host_va(hdev, addr, size, &userptr);
892 dev_err(hdev->dev, "failed to get userptr from va\n");
896 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
900 "unable to init page pack for vaddr 0x%llx\n",
902 goto init_page_pack_err;
905 vm_type = (enum vm_type_t *) userptr;
906 hint_addr = args->map_host.hint_addr;
907 handle = phys_pg_pack->handle;
909 /* get required alignment */
910 if (phys_pg_pack->page_size == page_size) {
911 va_range = ctx->host_va_range;
914 * huge page alignment may be needed in case of regular
915 * page mapping, depending on the host VA alignment
917 if (addr & (huge_page_size - 1))
918 va_block_align = page_size;
920 va_block_align = huge_page_size;
923 * huge page alignment is needed in case of huge page
926 va_range = ctx->host_huge_va_range;
927 va_block_align = huge_page_size;
930 handle = lower_32_bits(args->map_device.handle);
932 spin_lock(&vm->idr_lock);
933 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
935 spin_unlock(&vm->idr_lock);
937 "no match for handle %u\n", handle);
941 /* increment now to avoid freeing device memory while mapping */
942 atomic_inc(&phys_pg_pack->mapping_cnt);
944 spin_unlock(&vm->idr_lock);
946 vm_type = (enum vm_type_t *) phys_pg_pack;
948 hint_addr = args->map_device.hint_addr;
950 /* DRAM VA alignment is the same as the DRAM page size */
951 va_range = ctx->dram_va_range;
952 va_block_align = hdev->asic_prop.dmmu.page_size;
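	/*
	 * To summarize the alignment choice above: host mappings use the
	 * regular or huge PMMU page size, depending on the pack's page size
	 * and the host VA alignment, while DRAM mappings always align to the
	 * DMMU page size.
	 */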
	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
959 if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
960 phys_pg_pack->asid != ctx->asid) {
962 "Failed to map memory, handle %u is not shared\n",
968 hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
974 ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
975 hint_addr, va_block_align);
977 dev_err(hdev->dev, "no available va block for handle %u\n",
983 mutex_lock(&ctx->mmu_lock);
985 rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
987 mutex_unlock(&ctx->mmu_lock);
988 dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
993 rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
995 mutex_unlock(&ctx->mmu_lock);
999 "mapping handle %u failed due to MMU cache invalidation\n",
1004 ret_vaddr += phys_pg_pack->offset;
1006 hnode->ptr = vm_type;
1007 hnode->vaddr = ret_vaddr;
1009 mutex_lock(&ctx->mem_hash_lock);
1010 hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1011 mutex_unlock(&ctx->mem_hash_lock);
1013 *device_addr = ret_vaddr;
1016 free_phys_pg_pack(hdev, phys_pg_pack);
1021 if (add_va_block(hdev, va_range, ret_vaddr,
1022 ret_vaddr + phys_pg_pack->total_size - 1))
1024 "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1031 atomic_dec(&phys_pg_pack->mapping_cnt);
1033 free_phys_pg_pack(hdev, phys_pg_pack);
1036 dma_unmap_host_va(hdev, userptr);
1042 * unmap_device_va - unmap the given device virtual address
1044 * @ctx : current context
1045 * @vaddr : device virtual address to unmap
1046 * @ctx_free : true if in context free flow, false otherwise.
1048 * This function does the following:
1049 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
1052 static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
1054 struct hl_device *hdev = ctx->hdev;
1055 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1056 struct hl_vm_hash_node *hnode = NULL;
1057 struct hl_userptr *userptr = NULL;
1058 struct hl_va_range *va_range;
1059 enum vm_type_t *vm_type;
1063 /* protect from double entrance */
1064 mutex_lock(&ctx->mem_hash_lock);
1065 hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
1066 if (vaddr == hnode->vaddr)
1070 mutex_unlock(&ctx->mem_hash_lock);
1072 "unmap failed, no mem hnode for vaddr 0x%llx\n",
1077 hash_del(&hnode->node);
1078 mutex_unlock(&ctx->mem_hash_lock);
1080 vm_type = hnode->ptr;
1082 if (*vm_type == VM_TYPE_USERPTR) {
1084 userptr = hnode->ptr;
1085 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1089 "unable to init page pack for vaddr 0x%llx\n",
1094 if (phys_pg_pack->page_size ==
1095 hdev->asic_prop.pmmu.page_size)
1096 va_range = ctx->host_va_range;
1098 va_range = ctx->host_huge_va_range;
1099 } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1101 va_range = ctx->dram_va_range;
1102 phys_pg_pack = hnode->ptr;
1105 "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1111 if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1112 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1114 goto mapping_cnt_err;
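	/*
	 * The mapped address returned to the user includes the page offset
	 * (see map_device_va), so mask it back down to the page-aligned
	 * address that was actually mapped. For example (illustrative values),
	 * with a 2MB page size, 0x1000234 becomes 0x1000000.
	 */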
1117 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1119 mutex_lock(&ctx->mmu_lock);
1121 unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1124 * During context free this function is called in a loop to clean all
1125 * the context mappings. Hence the cache invalidation can be called once
1126 * at the loop end rather than for each iteration
1129 rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
1132 mutex_unlock(&ctx->mmu_lock);
	/*
	 * If the context is closing, we don't need to check the MMU cache
	 * invalidation return code or update the VA free list, as in this flow
	 * we invalidate the MMU cache outside of this unmap function and the
	 * VA free list will be freed anyway.
	 */
1145 "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
1148 tmp_rc = add_va_block(hdev, va_range, vaddr,
1149 vaddr + phys_pg_pack->total_size - 1);
1152 "add va block failed for vaddr: 0x%llx\n",
1159 atomic_dec(&phys_pg_pack->mapping_cnt);
1163 free_phys_pg_pack(hdev, phys_pg_pack);
1164 dma_unmap_host_va(hdev, userptr);
1171 free_phys_pg_pack(hdev, phys_pg_pack);
1173 mutex_lock(&ctx->mem_hash_lock);
1174 hash_add(ctx->mem_hash, &hnode->node, vaddr);
1175 mutex_unlock(&ctx->mem_hash_lock);
1180 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
1182 struct hl_device *hdev = hpriv->hdev;
1183 struct hl_ctx *ctx = hpriv->ctx;
1184 u64 device_addr = 0;
1188 switch (args->in.op) {
1189 case HL_MEM_OP_ALLOC:
1190 if (args->in.alloc.mem_size == 0) {
1192 "alloc size must be larger than 0\n");
1197 /* Force contiguous as there are no real MMU
1198 * translations to overcome physical memory gaps
1200 args->in.flags |= HL_MEM_CONTIGUOUS;
1201 rc = alloc_device_memory(ctx, &args->in, &handle);
1203 memset(args, 0, sizeof(*args));
1204 args->out.handle = (__u64) handle;
1207 case HL_MEM_OP_FREE:
1208 rc = free_device_memory(ctx, args->in.free.handle);
1212 if (args->in.flags & HL_MEM_USERPTR) {
1213 device_addr = args->in.map_host.host_virt_addr;
1216 rc = get_paddr_from_handle(ctx, &args->in,
1220 memset(args, 0, sizeof(*args));
1221 args->out.device_virt_addr = device_addr;
1224 case HL_MEM_OP_UNMAP:
1229 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1238 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
1240 union hl_mem_args *args = data;
1241 struct hl_device *hdev = hpriv->hdev;
1242 struct hl_ctx *ctx = hpriv->ctx;
1243 u64 device_addr = 0;
1247 if (hl_device_disabled_or_in_reset(hdev)) {
1248 dev_warn_ratelimited(hdev->dev,
1249 "Device is %s. Can't execute MEMORY IOCTL\n",
1250 atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
1254 if (!hdev->mmu_enable)
1255 return mem_ioctl_no_mmu(hpriv, args);
1257 switch (args->in.op) {
1258 case HL_MEM_OP_ALLOC:
1259 if (args->in.alloc.mem_size == 0) {
1261 "alloc size must be larger than 0\n");
1266 /* If DRAM does not support virtual memory the driver won't
1267 * handle the allocation/freeing of that memory. However, for
1268 * system administration/monitoring purposes, the driver will
1269 * keep track of the amount of DRAM memory that is allocated
1270 * and freed by the user. Because this code totally relies on
1271 * the user's input, the driver can't ensure the validity
1272 * of this accounting.
1274 if (!hdev->dram_supports_virtual_memory) {
1275 atomic64_add(args->in.alloc.mem_size,
1276 &ctx->dram_phys_mem);
1277 atomic64_add(args->in.alloc.mem_size,
1278 &hdev->dram_used_mem);
1280 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
1283 memset(args, 0, sizeof(*args));
1284 args->out.handle = 0;
1288 rc = alloc_device_memory(ctx, &args->in, &handle);
1290 memset(args, 0, sizeof(*args));
1291 args->out.handle = (__u64) handle;
1294 case HL_MEM_OP_FREE:
1295 /* If DRAM does not support virtual memory the driver won't
1296 * handle the allocation/freeing of that memory. However, for
1297 * system administration/monitoring purposes, the driver will
1298 * keep track of the amount of DRAM memory that is allocated
1299 * and freed by the user. Because this code totally relies on
1300 * the user's input, the driver can't ensure the validity
1301 * of this accounting.
1303 if (!hdev->dram_supports_virtual_memory) {
1304 atomic64_sub(args->in.alloc.mem_size,
1305 &ctx->dram_phys_mem);
1306 atomic64_sub(args->in.alloc.mem_size,
1307 &hdev->dram_used_mem);
1309 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
1315 rc = free_device_memory(ctx, args->in.free.handle);
1319 rc = map_device_va(ctx, &args->in, &device_addr);
1321 memset(args, 0, sizeof(*args));
1322 args->out.device_virt_addr = device_addr;
1325 case HL_MEM_OP_UNMAP:
1326 rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
1331 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1340 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
1341 u32 npages, u64 start, u32 offset,
1342 struct hl_userptr *userptr)
1346 if (!access_ok((void __user *) (uintptr_t) addr, size)) {
1347 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
1351 userptr->vec = frame_vector_create(npages);
1352 if (!userptr->vec) {
1353 dev_err(hdev->dev, "Failed to create frame vector\n");
1357 rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
1362 "Failed to map host memory, user ptr probably wrong\n");
1364 goto destroy_framevec;
1369 if (frame_vector_to_pages(userptr->vec) < 0) {
1371 "Failed to translate frame vector to pages\n");
1376 rc = sg_alloc_table_from_pages(userptr->sgt,
1377 frame_vector_pages(userptr->vec),
1378 npages, offset, size, GFP_ATOMIC);
1380 dev_err(hdev->dev, "failed to create SG table from pages\n");
1387 put_vaddr_frames(userptr->vec);
1389 frame_vector_destroy(userptr->vec);
1394 * hl_pin_host_memory - pins a chunk of host memory.
1395 * @hdev: pointer to the habanalabs device structure
1396 * @addr: the host virtual address of the memory area
1397 * @size: the size of the memory area
1398 * @userptr: pointer to hl_userptr structure
1400 * This function does the following:
1401 * - Pins the physical pages
 * - Creates an SG list from those pages
1404 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1405 struct hl_userptr *userptr)
1412 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
1417 * If the combination of the address and size requested for this memory
1418 * region causes an integer overflow, return error.
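	 * For example (illustrative values), addr = 0xffffffffffffff00 with
	 * size = 0x1000 wraps around and must be rejected.
	 */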
1420 if (((addr + size) < addr) ||
1421 PAGE_ALIGN(addr + size) < (addr + size)) {
1423 "user pointer 0x%llx + %llu causes integer overflow\n",
	/*
	 * This function can also be called from the data path, hence always
	 * use GFP_ATOMIC as it is not a big allocation.
	 */
1432 userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
1436 start = addr & PAGE_MASK;
1437 offset = addr & ~PAGE_MASK;
1438 end = PAGE_ALIGN(addr + size);
1439 npages = (end - start) >> PAGE_SHIFT;
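	/*
	 * For example (illustrative values), with 4KB pages, addr = 0x1234 and
	 * size = 0x3000 give start = 0x1000, offset = 0x234, end = 0x5000 and
	 * npages = 4.
	 */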
1441 userptr->size = size;
1442 userptr->addr = addr;
1443 userptr->dma_mapped = false;
1444 INIT_LIST_HEAD(&userptr->job_node);
1446 rc = get_user_memory(hdev, addr, size, npages, start, offset,
1450 "failed to get user memory for address 0x%llx\n",
1455 hl_debugfs_add_userptr(hdev, userptr);
1460 kfree(userptr->sgt);
1465 * hl_unpin_host_memory - unpins a chunk of host memory.
1466 * @hdev: pointer to the habanalabs device structure
1467 * @userptr: pointer to hl_userptr structure
1469 * This function does the following:
1470 * - Unpins the physical pages related to the host memory
 * - Frees the SG list
1473 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
1475 struct page **pages;
1477 hl_debugfs_remove_userptr(hdev, userptr);
1479 if (userptr->dma_mapped)
1480 hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
1481 userptr->sgt->nents,
1484 pages = frame_vector_pages(userptr->vec);
1485 if (!IS_ERR(pages)) {
1488 for (i = 0; i < frame_vector_count(userptr->vec); i++)
1489 set_page_dirty_lock(pages[i]);
1491 put_vaddr_frames(userptr->vec);
1492 frame_vector_destroy(userptr->vec);
1494 list_del(&userptr->job_node);
1496 sg_free_table(userptr->sgt);
1497 kfree(userptr->sgt);
1501 * hl_userptr_delete_list - clear userptr list
1503 * @hdev : pointer to the habanalabs device structure
1504 * @userptr_list : pointer to the list to clear
1506 * This function does the following:
1507 * - Iterates over the list and unpins the host memory and frees the userptr
1510 void hl_userptr_delete_list(struct hl_device *hdev,
1511 struct list_head *userptr_list)
1513 struct hl_userptr *userptr, *tmp;
1515 list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
1516 hl_unpin_host_memory(hdev, userptr);
1520 INIT_LIST_HEAD(userptr_list);
1524 * hl_userptr_is_pinned - returns whether the given userptr is pinned
1526 * @hdev : pointer to the habanalabs device structure
 * @userptr_list : pointer to the list to search in
1528 * @userptr : pointer to userptr to check
1530 * This function does the following:
 * - Iterates over the list and checks if the given userptr is in it, meaning
 *   it is pinned. If so, returns true, otherwise returns false.
1534 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
1535 u32 size, struct list_head *userptr_list,
1536 struct hl_userptr **userptr)
1538 list_for_each_entry((*userptr), userptr_list, job_node) {
1539 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
1547 * va_range_init - initialize virtual addresses range
1548 * @hdev: pointer to the habanalabs device structure
1549 * @va_range: pointer to the range to initialize
1550 * @start: range start address
1551 * @end: range end address
1553 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
1557 static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
1562 INIT_LIST_HEAD(&va_range->list);
1564 /* PAGE_SIZE alignment */
1566 if (start & (PAGE_SIZE - 1)) {
1571 if (end & (PAGE_SIZE - 1))
1575 dev_err(hdev->dev, "too small vm range for va list\n");
1579 rc = add_va_block(hdev, va_range, start, end);
1582 dev_err(hdev->dev, "Failed to init host va list\n");
1586 va_range->start_addr = start;
1587 va_range->end_addr = end;
1593 * va_range_fini() - clear a virtual addresses range
1594 * @hdev: pointer to the habanalabs structure
 * @va_range: pointer to virtual addresses range
1597 * This function does the following:
1598 * - Frees the virtual addresses block list and its lock
1600 static void va_range_fini(struct hl_device *hdev,
1601 struct hl_va_range *va_range)
1603 mutex_lock(&va_range->lock);
1604 clear_va_list_locked(hdev, &va_range->list);
1605 mutex_unlock(&va_range->lock);
1607 mutex_destroy(&va_range->lock);
1612 * vm_ctx_init_with_ranges() - initialize virtual memory for context
1613 * @ctx: pointer to the habanalabs context structure
1614 * @host_range_start: host virtual addresses range start.
1615 * @host_range_end: host virtual addresses range end.
1616 * @host_huge_range_start: host virtual addresses range start for memory
1617 * allocated with huge pages.
1618 * @host_huge_range_end: host virtual addresses range end for memory allocated
1620 * @dram_range_start: dram virtual addresses range start.
1621 * @dram_range_end: dram virtual addresses range end.
1623 * This function initializes the following:
1625 * - Virtual address to area descriptor hashtable
1626 * - Virtual block list of available virtual memory
1628 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
1629 u64 host_range_start,
1631 u64 host_huge_range_start,
1632 u64 host_huge_range_end,
1633 u64 dram_range_start,
1636 struct hl_device *hdev = ctx->hdev;
1639 ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
1640 if (!ctx->host_va_range)
1643 ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
1645 if (!ctx->host_huge_va_range) {
1647 goto host_huge_va_range_err;
1650 ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
1651 if (!ctx->dram_va_range) {
1653 goto dram_va_range_err;
1656 rc = hl_mmu_ctx_init(ctx);
1658 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
1662 mutex_init(&ctx->mem_hash_lock);
1663 hash_init(ctx->mem_hash);
1665 mutex_init(&ctx->host_va_range->lock);
1667 rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
1670 dev_err(hdev->dev, "failed to init host vm range\n");
1671 goto host_page_range_err;
1674 if (hdev->pmmu_huge_range) {
1675 mutex_init(&ctx->host_huge_va_range->lock);
1677 rc = va_range_init(hdev, ctx->host_huge_va_range,
1678 host_huge_range_start,
1679 host_huge_range_end);
1682 "failed to init host huge vm range\n");
1683 goto host_hpage_range_err;
1686 ctx->host_huge_va_range = ctx->host_va_range;
1689 mutex_init(&ctx->dram_va_range->lock);
1691 rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
1694 dev_err(hdev->dev, "failed to init dram vm range\n");
1698 hl_debugfs_add_ctx_mem_hash(hdev, ctx);
1703 mutex_destroy(&ctx->dram_va_range->lock);
1705 if (hdev->pmmu_huge_range) {
1706 mutex_lock(&ctx->host_huge_va_range->lock);
1707 clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
1708 mutex_unlock(&ctx->host_huge_va_range->lock);
1710 host_hpage_range_err:
1711 if (hdev->pmmu_huge_range)
1712 mutex_destroy(&ctx->host_huge_va_range->lock);
1713 mutex_lock(&ctx->host_va_range->lock);
1714 clear_va_list_locked(hdev, &ctx->host_va_range->list);
1715 mutex_unlock(&ctx->host_va_range->lock);
1716 host_page_range_err:
1717 mutex_destroy(&ctx->host_va_range->lock);
1718 mutex_destroy(&ctx->mem_hash_lock);
1719 hl_mmu_ctx_fini(ctx);
1721 kfree(ctx->dram_va_range);
1723 kfree(ctx->host_huge_va_range);
1724 host_huge_va_range_err:
1725 kfree(ctx->host_va_range);
1730 int hl_vm_ctx_init(struct hl_ctx *ctx)
1732 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
1733 u64 host_range_start, host_range_end, host_huge_range_start,
1734 host_huge_range_end, dram_range_start, dram_range_end;
1736 atomic64_set(&ctx->dram_phys_mem, 0);
1739 * - If MMU is enabled, init the ranges as usual.
1740 * - If MMU is disabled, in case of host mapping, the returned address
1742 * In case of DRAM mapping, the returned address is the physical
1743 * address of the memory related to the given handle.
1745 if (!ctx->hdev->mmu_enable)
1748 dram_range_start = prop->dmmu.start_addr;
1749 dram_range_end = prop->dmmu.end_addr;
1750 host_range_start = prop->pmmu.start_addr;
1751 host_range_end = prop->pmmu.end_addr;
1752 host_huge_range_start = prop->pmmu_huge.start_addr;
1753 host_huge_range_end = prop->pmmu_huge.end_addr;
1755 return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
1756 host_huge_range_start, host_huge_range_end,
1757 dram_range_start, dram_range_end);
1761 * hl_vm_ctx_fini - virtual memory teardown of context
1763 * @ctx : pointer to the habanalabs context structure
 * This function performs teardown of the following:
1766 * - Virtual block list of available virtual memory
1767 * - Virtual address to area descriptor hashtable
1770 * In addition this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
1774 * - Frees any existing physical page list from the idr which relates to the
1775 * current context asid.
1776 * - This function checks the virtual block list for correctness. At this point
1777 * the list should contain one element which describes the whole virtual
1778 * memory range of the context. Otherwise, a warning is printed.
1780 void hl_vm_ctx_fini(struct hl_ctx *ctx)
1782 struct hl_device *hdev = ctx->hdev;
1783 struct hl_vm *vm = &hdev->vm;
1784 struct hl_vm_phys_pg_pack *phys_pg_list;
1785 struct hl_vm_hash_node *hnode;
1786 struct hlist_node *tmp_node;
1789 if (!ctx->hdev->mmu_enable)
1792 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
1795 * Clearly something went wrong on hard reset so no point in printing
1796 * another side effect error
1798 if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
1799 dev_notice(hdev->dev,
1800 "user released device without removing its memory mappings\n");
1802 hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
1804 "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
1805 hnode->vaddr, ctx->asid);
1806 unmap_device_va(ctx, hnode->vaddr, true);
1809 /* invalidate the cache once after the unmapping loop */
1810 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
1811 hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
1813 spin_lock(&vm->idr_lock);
1814 idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
1815 if (phys_pg_list->asid == ctx->asid) {
1817 "page list 0x%px of asid %d is still alive\n",
1818 phys_pg_list, ctx->asid);
1819 atomic64_sub(phys_pg_list->total_size,
1820 &hdev->dram_used_mem);
1821 free_phys_pg_pack(hdev, phys_pg_list);
1822 idr_remove(&vm->phys_pg_pack_handles, i);
1824 spin_unlock(&vm->idr_lock);
1826 va_range_fini(hdev, ctx->dram_va_range);
1827 if (hdev->pmmu_huge_range)
1828 va_range_fini(hdev, ctx->host_huge_va_range);
1829 va_range_fini(hdev, ctx->host_va_range);
1831 mutex_destroy(&ctx->mem_hash_lock);
1832 hl_mmu_ctx_fini(ctx);
	/* In this case we need to clear the global accounting of DRAM usage
	 * because the user notifies us of allocations. If the user is gone,
	 * all DRAM is available again.
	 */
1838 if (!ctx->hdev->dram_supports_virtual_memory)
1839 atomic64_set(&ctx->hdev->dram_used_mem, 0);
1843 * hl_vm_init - initialize virtual memory module
1845 * @hdev : pointer to the habanalabs device structure
1847 * This function initializes the following:
1849 * - DRAM physical pages pool of 2MB
1850 * - Idr for device memory allocation handles
1852 int hl_vm_init(struct hl_device *hdev)
1854 struct asic_fixed_properties *prop = &hdev->asic_prop;
1855 struct hl_vm *vm = &hdev->vm;
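	/*
	 * The pool's minimum allocation order is derived from the DRAM page
	 * size, e.g. __ffs() of a 2MB page size gives order 21 (example value;
	 * the actual size comes from the ASIC properties).
	 */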
1858 vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
1859 if (!vm->dram_pg_pool) {
1860 dev_err(hdev->dev, "Failed to create dram page pool\n");
1864 kref_init(&vm->dram_pg_pool_refcount);
1866 rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
1867 prop->dram_end_address - prop->dram_user_base_address,
1872 "Failed to add memory to dram page pool %d\n", rc);
1876 spin_lock_init(&vm->idr_lock);
1877 idr_init(&vm->phys_pg_pack_handles);
1879 atomic64_set(&hdev->dram_used_mem, 0);
1881 vm->init_done = true;
1886 gen_pool_destroy(vm->dram_pg_pool);
1892 * hl_vm_fini - virtual memory module teardown
1894 * @hdev : pointer to the habanalabs device structure
 * This function performs teardown of the following:
1897 * - Idr for device memory allocation handles
1898 * - DRAM physical pages pool of 2MB
1901 void hl_vm_fini(struct hl_device *hdev)
1903 struct hl_vm *vm = &hdev->vm;
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use, so the DRAM pool should be freed here.
1912 if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
1913 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
1916 vm->init_done = false;