// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>

#define HL_MMU_DEBUG	0

/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
#define DRAM_POOL_PAGE_SIZE	SZ_8M
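/*
 * Note: 8M is the largest power-of-2 granule that evenly divides all of the
 * DRAM physical page sizes listed above (32M / 8M = 4, 40M / 8M = 5,
 * 48M / 8M = 6), so non-pow2 pages can be carved out of the pool without
 * waste.
 */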
/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be created),
 * the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */
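/*
 * Illustrative example (made-up addresses): mapping 8MB into a free chunk that
 * covers [0x1000000, 0x3000000) splits it into a returned chunk
 * [0x1000000, 0x1800000) and a remainder [0x1800000, 0x3000000) that stays in
 * the list; unmapping that 8MB later merges the two back into one chunk.
 */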
/**
 * alloc_device_memory() - allocate device memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the requested size.
 * @ret_handle: result handle.
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 'dram_page_size' pages.
 * - Return unique handle for later map/unmap/free.
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
				u32 *ret_handle)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u64 paddr = 0, total_size, num_pgs, i;
	u32 num_curr_pgs, page_size;

	page_size = hdev->asic_prop.dram_page_size;
	num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
	total_size = num_pgs * page_size;
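	/* e.g., a 100MB request with a 40MB DRAM page size rounds up to
	 * num_pgs = 3 and total_size = 120MB
	 */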
		dev_err(hdev->dev, "Cannot allocate 0 bytes\n");

	contiguous = args->flags & HL_MEM_CONTIGUOUS;

		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
				"failed to allocate %llu contiguous pages with total size of %llu\n",

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);

	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
	phys_pg_pack->asid = ctx->asid;
	phys_pg_pack->npages = num_pgs;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_size;
	phys_pg_pack->flags = args->flags;
	phys_pg_pack->contiguous = contiguous;

	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {

	if (phys_pg_pack->contiguous) {
		for (i = 0 ; i < num_pgs ; i++)
			phys_pg_pack->pages[i] = paddr + i * page_size;
		for (i = 0 ; i < num_pgs ; i++) {
			phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
			if (!phys_pg_pack->pages[i]) {
					"Failed to allocate device memory (out of memory)\n");

	spin_lock(&vm->idr_lock);
	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
	spin_unlock(&vm->idr_lock);

		dev_err(hdev->dev, "Failed to get handle for page\n");

	for (i = 0 ; i < num_pgs ; i++)
		kref_get(&vm->dram_pg_pool_refcount);

	phys_pg_pack->handle = handle;

	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

	*ret_handle = handle;

	if (!phys_pg_pack->contiguous)
		for (i = 0 ; i < num_curr_pgs ; i++)
			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],

	kvfree(phys_pg_pack->pages);

	gen_pool_free(vm->dram_pg_pool, paddr, total_size);
/**
 * dma_map_host_va() - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure.
 * @addr: the host virtual address of the memory area.
 * @size: the size of the memory area.
 * @p_userptr: pointer to result userptr structure.
 *
 * This function does the following:
 * - Allocate userptr structure.
 * - Pin the given host memory using the userptr structure.
 * - Perform DMA mapping to have the DMA addresses of the pages.
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
				struct hl_userptr **p_userptr)
{
	struct hl_userptr *userptr;

	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);

	rc = hl_pin_host_memory(hdev, addr, size, userptr);
		dev_err(hdev->dev, "Failed to pin host memory\n");

	rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
				userptr->sgt->nents, DMA_BIDIRECTIONAL);
		dev_err(hdev->dev, "failed to map sgt with DMA region\n");

	userptr->dma_mapped = true;
	userptr->dir = DMA_BIDIRECTIONAL;
	userptr->vm_type = VM_TYPE_USERPTR;

	*p_userptr = userptr;

	hl_unpin_host_memory(hdev, userptr);
/**
 * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure.
 * @userptr: userptr to free.
 *
 * This function does the following:
 * - Unpins the physical pages.
 * - Frees the userptr structure.
 */
static void dma_unmap_host_va(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	hl_unpin_host_memory(hdev, userptr);
/**
 * dram_pg_pool_do_release() - free DRAM pages pool.
 * @ref: pointer to reference object.
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles.
 * - Frees the generic pool of DRAM physical pages.
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
	struct hl_vm *vm = container_of(ref, struct hl_vm,
					dram_pg_pool_refcount);

	/*
	 * free the idr here as only here we know for sure that there are no
	 * allocated physical pages and hence there are no handles in use
	 */
	idr_destroy(&vm->phys_pg_pack_handles);
	gen_pool_destroy(vm->dram_pg_pool);
}
/**
 * free_phys_pg_pack() - free physical page pack.
 * @hdev: habanalabs device structure.
 * @phys_pg_pack: physical page pack to free.
 *
 * This function does the following:
 * - For DRAM memory only
 *   - iterate over the pack, scrub and free each physical block structure by
 *     returning it to the general pool.
 *     In case of error during scrubbing, initiate hard reset.
 *     Once hard reset is triggered, scrubbing is bypassed while freeing the
 *     rest of the pages.
 * - Free the hl_vm_phys_pg_pack structure.
 */
static int free_phys_pg_pack(struct hl_device *hdev,
		struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_vm *vm = &hdev->vm;

	if (phys_pg_pack->created_from_userptr)

	if (phys_pg_pack->contiguous) {
		if (hdev->memory_scrub && !hdev->disabled) {
			rc = hdev->asic_funcs->scrub_device_mem(hdev,
					phys_pg_pack->pages[0],
					phys_pg_pack->total_size);
					"Failed to scrub contiguous device memory\n");

		gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
				phys_pg_pack->total_size);

		for (i = 0; i < phys_pg_pack->npages ; i++)
			kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);
		for (i = 0 ; i < phys_pg_pack->npages ; i++) {
			if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
				rc = hdev->asic_funcs->scrub_device_mem(
						phys_pg_pack->pages[i],
						phys_pg_pack->page_size);
						"Failed to scrub device memory\n");
			gen_pool_free(vm->dram_pg_pool,
					phys_pg_pack->pages[i],
					phys_pg_pack->page_size);
			kref_put(&vm->dram_pg_pool_refcount,
					dram_pg_pool_do_release);

	if (rc && !hdev->disabled)
		hl_device_reset(hdev, HL_RESET_HARD);

	kvfree(phys_pg_pack->pages);
/**
 * free_device_memory() - free device memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the memory handle to free.
 *
 * This function does the following:
 * - Free the device memory related to the given handle.
 */
static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	u32 handle = args->free.handle;

	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);

		if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
			dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
			spin_unlock(&vm->idr_lock);

		/*
		 * must remove from idr before the freeing of the physical
		 * pages as the refcount of the pool is also the trigger of the
		 * pool release
		 */
		idr_remove(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);

		atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
		atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

		return free_phys_pg_pack(hdev, phys_pg_pack);

	spin_unlock(&vm->idr_lock);
		"free device memory failed, no match for handle %u\n",
/**
 * clear_va_list_locked() - free virtual addresses list.
 * @hdev: habanalabs device structure.
 * @va_list: list of virtual addresses to free.
 *
 * This function does the following:
 * - Iterate over the list and free each virtual address block.
 *
 * This function should be called only when va_list lock is taken.
 */
static void clear_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block, *tmp;

	list_for_each_entry_safe(va_block, tmp, va_list, node) {
		list_del(&va_block->node);
		kfree(va_block);
	}
}
/**
 * print_va_list_locked() - print virtual addresses list.
 * @hdev: habanalabs device structure.
 * @va_list: list of virtual addresses to print.
 *
 * This function does the following:
 * - Iterate over the list and print each virtual address block.
 *
 * This function should be called only when va_list lock is taken.
 */
static void print_va_list_locked(struct hl_device *hdev,
		struct list_head *va_list)
{
	struct hl_vm_va_block *va_block;

	dev_dbg(hdev->dev, "print va list:\n");

	list_for_each_entry(va_block, va_list, node)
			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
			va_block->start, va_block->end, va_block->size);
/**
 * merge_va_blocks_locked() - merge a virtual block if possible.
 * @hdev: pointer to the habanalabs device structure.
 * @va_list: pointer to the virtual addresses block list.
 * @va_block: virtual block to merge with adjacent blocks.
 *
 * This function does the following:
 * - Merge the given block with its adjacent blocks if their virtual ranges
 *   create a contiguous virtual range.
 *
 * This function should be called only when va_list lock is taken.
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
		struct list_head *va_list, struct hl_vm_va_block *va_block)
{
	struct hl_vm_va_block *prev, *next;

	prev = list_prev_entry(va_block, node);
	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
		prev->end = va_block->end;
		prev->size = prev->end - prev->start;
		list_del(&va_block->node);

	next = list_next_entry(va_block, node);
	if (&next->node != va_list && va_block->end + 1 == next->start) {
		next->start = va_block->start;
		next->size = next->end - next->start;
		list_del(&va_block->node);
/**
 * add_va_block_locked() - add a virtual block to the virtual addresses list.
 * @hdev: pointer to the habanalabs device structure.
 * @va_list: pointer to the virtual addresses block list.
 * @start: start virtual address.
 * @end: end virtual address.
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other blocks
 *   if a contiguous virtual block can be created.
 *
 * This function should be called only when va_list lock is taken.
 */
static int add_va_block_locked(struct hl_device *hdev,
		struct list_head *va_list, u64 start, u64 end)
{
	struct hl_vm_va_block *va_block, *res = NULL;
	u64 size = end - start;

	print_va_list_locked(hdev, va_list);

	list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon matureness */
		if (hl_mem_area_crosses_range(start, size, va_block->start,
				"block crossing ranges at start 0x%llx, end 0x%llx\n",
				va_block->start, va_block->end);

		if (va_block->end < start)

	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);

	va_block->start = start;
	va_block->size = size;

		list_add(&va_block->node, va_list);
		list_add(&va_block->node, &res->node);

	merge_va_blocks_locked(hdev, va_list, va_block);

	print_va_list_locked(hdev, va_list);
/**
 * add_va_block() - wrapper for add_va_block_locked.
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @start: start virtual address.
 * @end: end virtual address.
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked.
 */
static inline int add_va_block(struct hl_device *hdev,
		struct hl_va_range *va_range, u64 start, u64 end)
{
	int rc;

	mutex_lock(&va_range->lock);
	rc = add_va_block_locked(hdev, &va_range->list, start, end);
	mutex_unlock(&va_range->lock);

	return rc;
}
/**
 * get_va_block() - get a virtual block for the given size and alignment.
 *
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @size: requested block size.
 * @hint_addr: hint for requested address by the user.
 * @va_block_align: required alignment of the virtual block start address.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size, hint address and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
static u64 get_va_block(struct hl_device *hdev,
			struct hl_va_range *va_range,
			u64 size, u64 hint_addr, u32 va_block_align)
{
	struct hl_vm_va_block *va_block, *new_va_block = NULL;
	u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
		align_mask, reserved_valid_start = 0, reserved_valid_size = 0;
	bool add_prev = false;
	bool is_align_pow_2 = is_power_of_2(va_range->page_size);

		align_mask = ~((u64)va_block_align - 1);
		/*
		 * with non-power-of-2 range we work only with page granularity
		 * and the start address is page aligned,
		 * so no need for alignment checking.
		 */
		size = DIV_ROUND_UP_ULL(size, va_range->page_size) *

	tmp_hint_addr = hint_addr;

	/* Check if we need to ignore hint address */
	if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
			do_div(tmp_hint_addr, va_range->page_size))) {
		dev_info(hdev->dev, "Hint address 0x%llx will be ignored\n",

	mutex_lock(&va_range->lock);

	print_va_list_locked(hdev, &va_range->list);

	list_for_each_entry(va_block, &va_range->list, node) {
		/* Calc the first possible aligned addr */
		valid_start = va_block->start;

		if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
			valid_start &= align_mask;
			valid_start += va_block_align;
			if (valid_start > va_block->end)

		valid_size = va_block->end - valid_start;
		if (valid_size < size)

		/* Pick the minimal length block which has the required size */
		if (!new_va_block || (valid_size < reserved_valid_size)) {
			new_va_block = va_block;
			reserved_valid_start = valid_start;
			reserved_valid_size = valid_size;

		if (hint_addr && hint_addr >= valid_start &&
				(hint_addr + size) <= va_block->end) {
			new_va_block = va_block;
			reserved_valid_start = hint_addr;
			reserved_valid_size = valid_size;

		dev_err(hdev->dev, "no available va block for size %llu\n",

	/*
	 * Check if there is some leftover range due to reserving the new
	 * va block, then return it to the main virtual addresses list.
	 */
	if (reserved_valid_start > new_va_block->start) {
		prev_start = new_va_block->start;
		prev_end = reserved_valid_start - 1;

		new_va_block->start = reserved_valid_start;
		new_va_block->size = reserved_valid_size;

	if (new_va_block->size > size) {
		new_va_block->start += size;
		new_va_block->size = new_va_block->end - new_va_block->start;
		list_del(&new_va_block->node);

		add_va_block_locked(hdev, &va_range->list, prev_start,

	print_va_list_locked(hdev, &va_range->list);

	mutex_unlock(&va_range->lock);

	return reserved_valid_start;
/**
 * hl_reserve_va_block() - reserve a virtual block of a given size.
 * @hdev: pointer to the habanalabs device structure.
 * @ctx: current context.
 * @type: virtual addresses range type.
 * @size: requested block size.
 * @alignment: required alignment in bytes of the virtual block start address,
 *             0 means no alignment.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		enum hl_va_range_type type, u32 size, u32 alignment)
{
	return get_va_block(hdev, ctx->va_range[type], size, 0,
			max(alignment, ctx->va_range[type]->page_size));
}
/**
 * hl_get_va_range_type() - get va_range type for the given address and size.
 * @ctx: pointer to the context structure.
 * @address: the start address of the area we want to validate.
 * @size: the size in bytes of the area we want to validate.
 * @type: returned va_range type.
 *
 * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
 */
static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
			enum hl_va_range_type *type)
{
	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
		if (hl_mem_area_inside_range(address, size,
				ctx->va_range[i]->start_addr,
				ctx->va_range[i]->end_addr)) {
/**
 * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
 * @hdev: pointer to the habanalabs device structure.
 * @ctx: pointer to the context structure.
 * @start_addr: start virtual address of the block to unreserve.
 * @size: size of the block to unreserve.
 *
 * This function does the following:
 * - Takes the list lock and calls add_va_block_locked.
 */
int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
		u64 start_addr, u64 size)
{
	enum hl_va_range_type type;

	rc = hl_get_va_range_type(ctx, start_addr, size, &type);
			"cannot find va_range for va %#llx size %llu",

	rc = add_va_block(hdev, ctx->va_range[type], start_addr,
			start_addr + size - 1);
			"add va block failed for vaddr: 0x%llx\n", start_addr);
/**
 * get_sg_info() - get number of pages and the DMA address from SG list.
 * @sg: the SG list.
 * @dma_addr: pointer to DMA address to return.
 *
 * Calculate the number of consecutive pages described by the SG list. Take the
 * offset of the address in the first page, add to it the length and round it up
 * to the number of needed pages.
 */
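/*
 * Worked example (illustrative values): with 4KB pages, an SG entry whose DMA
 * address has an in-page offset of 0x200 and a length of 0x1900 bytes spans
 * (0x200 + 0x1900 + 0xFFF) >> 12 = 2 pages.
 */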
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
	*dma_addr = sg_dma_address(sg);

	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
}
/**
 * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
 *                                    memory.
 * @ctx: pointer to the context structure.
 * @userptr: userptr to initialize from.
 * @pphys_pg_pack: result pointer.
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block.
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block.
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
				struct hl_userptr *userptr,
				struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct scatterlist *sg;
	u64 page_mask, total_npages;
	u32 npages, page_size = PAGE_SIZE,
		huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
	bool first = true, is_huge_page_opt = true;
	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);

	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);

	phys_pg_pack->vm_type = userptr->vm_type;
	phys_pg_pack->created_from_userptr = true;
	phys_pg_pack->asid = ctx->asid;
	atomic_set(&phys_pg_pack->mapping_cnt, 1);

	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
	 * contiguous block.
	 */
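	/*
	 * Illustrative example: an SG entry of 4MB whose DMA address is 2MB
	 * aligned satisfies the condition and contributes 2 huge pages, while
	 * a 4MB entry starting at a 2MB + 4KB address forces the whole pack
	 * back to regular 4KB pages.
	 */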
	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		total_npages += npages;

		if ((npages % pgs_in_huge_page) ||
				(dma_addr & (huge_page_size - 1))
			is_huge_page_opt = false;

	if (is_huge_page_opt) {
		page_size = huge_page_size;
		do_div(total_npages, pgs_in_huge_page);

	page_mask = ~(((u64) page_size) - 1);

	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
		goto page_pack_arr_mem_err;

	phys_pg_pack->npages = total_npages;
	phys_pg_pack->page_size = page_size;
	phys_pg_pack->total_size = total_npages * page_size;

	for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
		npages = get_sg_info(sg, &dma_addr);

		/* align down to physical page size and save the offset */
			phys_pg_pack->offset = dma_addr & (page_size - 1);
			dma_addr &= page_mask;

			phys_pg_pack->pages[j++] = dma_addr;
			dma_addr += page_size;

			if (is_huge_page_opt)
				npages -= pgs_in_huge_page;

	*pphys_pg_pack = phys_pg_pack;

page_pack_arr_mem_err:
/**
 * map_phys_pg_pack() - maps the physical page pack.
 * @ctx: pointer to the context structure.
 * @vaddr: start address of the virtual area to map from.
 * @phys_pg_pack: the pack of physical pages to map to.
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk.
 * - Stores number of successful mappings in the given argument.
 * - Returns 0 on success, error code otherwise.
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
		struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;
	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
	u32 page_size = phys_pg_pack->page_size;

	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
		paddr = phys_pg_pack->pages[i];

		rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
				(i + 1) == phys_pg_pack->npages);
				"map failed for handle %u, npages: %llu, mapped: %llu",
				phys_pg_pack->handle, phys_pg_pack->npages,

		next_vaddr += page_size;

	is_host_addr = !hl_is_dram_va(hdev, vaddr);

	for (i = 0 ; i < mapped_pg_cnt ; i++) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
				(i + 1) == mapped_pg_cnt))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
				phys_pg_pack->handle, next_vaddr,
				phys_pg_pack->pages[i], page_size);

		next_vaddr += page_size;

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 *
		 * In addition, on host the number of pages could be huge,
		 * because the page size could be 4KB, so when unmapping host
		 * pages sleep every 32K pages to avoid soft lockup
		 */
		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
			usleep_range(50, 200);
/**
 * unmap_phys_pg_pack() - unmaps the physical page pack.
 * @ctx: pointer to the context structure.
 * @vaddr: start address of the virtual area to unmap.
 * @phys_pg_pack: the pack of physical pages to unmap.
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
		struct hl_vm_phys_pg_pack *phys_pg_pack)
{
	struct hl_device *hdev = ctx->hdev;

	is_host_addr = !hl_is_dram_va(hdev, vaddr);
	page_size = phys_pg_pack->page_size;

	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
				(i + 1) == phys_pg_pack->npages))
			dev_warn_ratelimited(hdev->dev,
				"unmap failed for vaddr: 0x%llx\n", next_vaddr);

		/*
		 * unmapping on Palladium can be really long, so avoid a CPU
		 * soft lockup bug by sleeping a little between unmapping pages
		 *
		 * In addition, on host the number of pages could be huge,
		 * because the page size could be 4KB, so when unmapping host
		 * pages sleep every 32K pages to avoid soft lockup
		 */
		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
			usleep_range(50, 200);
static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;

	handle = lower_32_bits(args->map_device.handle);
	spin_lock(&vm->idr_lock);
	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		spin_unlock(&vm->idr_lock);
		dev_err(hdev->dev, "no match for handle %u\n", handle);

	*paddr = phys_pg_pack->pages[0];

	spin_unlock(&vm->idr_lock);
/**
 * map_device_va() - map the given memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters with handle/host virtual address.
 * @device_addr: pointer to result device virtual address.
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block.
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block.
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
			u64 *device_addr)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_userptr *userptr = NULL;
	struct hl_vm_hash_node *hnode;
	struct hl_va_range *va_range;
	enum vm_type_t *vm_type;
	u64 ret_vaddr, hint_addr;
	u32 handle = 0, va_block_align;
	bool is_userptr = args->flags & HL_MEM_USERPTR;

	/* Assume failure */

		u64 addr = args->map_host.host_virt_addr,
			size = args->map_host.mem_size;
		u32 page_size = hdev->asic_prop.pmmu.page_size,
			huge_page_size = hdev->asic_prop.pmmu_huge.page_size;

		rc = dma_map_host_va(hdev, addr, size, &userptr);
			dev_err(hdev->dev, "failed to get userptr from va\n");

		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				"unable to init page pack for vaddr 0x%llx\n",
			goto init_page_pack_err;

		vm_type = (enum vm_type_t *) userptr;
		hint_addr = args->map_host.hint_addr;
		handle = phys_pg_pack->handle;

		/* get required alignment */
		if (phys_pg_pack->page_size == page_size) {
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];

			/*
			 * huge page alignment may be needed in case of regular
			 * page mapping, depending on the host VA alignment
			 */
			if (addr & (huge_page_size - 1))
				va_block_align = page_size;
			else
				va_block_align = huge_page_size;
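			/*
			 * Illustrative example: with 4KB regular pages and 2MB
			 * huge pages, a host VA of 0x7f0000200000 is 2MB
			 * aligned, so a 2MB-aligned device VA block is
			 * requested, while 0x7f0000201000 only needs 4KB
			 * alignment.
			 */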
			/*
			 * huge page alignment is needed in case of huge page
			 * mapping
			 */
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
			va_block_align = huge_page_size;
		handle = lower_32_bits(args->map_device.handle);

		spin_lock(&vm->idr_lock);
		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
		if (!phys_pg_pack) {
			spin_unlock(&vm->idr_lock);
				"no match for handle %u\n", handle);

		/* increment now to avoid freeing device memory while mapping */
		atomic_inc(&phys_pg_pack->mapping_cnt);

		spin_unlock(&vm->idr_lock);

		vm_type = (enum vm_type_t *) phys_pg_pack;

		hint_addr = args->map_device.hint_addr;

		/* DRAM VA alignment is the same as the MMU page size */
		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
		va_block_align = hdev->asic_prop.dmmu.page_size;

	/*
	 * relevant for mapping device physical memory only, as host memory is
	 * implicitly shared
	 */
	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
			phys_pg_pack->asid != ctx->asid) {
			"Failed to map memory, handle %u is not shared\n",

	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);

	ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
					hint_addr, va_block_align);
		dev_err(hdev->dev, "no available va block for handle %u\n",

	mutex_lock(&ctx->mmu_lock);

	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
		mutex_unlock(&ctx->mmu_lock);
		dev_err(hdev->dev, "mapping page pack failed for handle %u\n",

	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);

	mutex_unlock(&ctx->mmu_lock);

			"mapping handle %u failed due to MMU cache invalidation\n",

	ret_vaddr += phys_pg_pack->offset;

	hnode->ptr = vm_type;
	hnode->vaddr = ret_vaddr;

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
	mutex_unlock(&ctx->mem_hash_lock);

	*device_addr = ret_vaddr;

		rc = free_phys_pg_pack(hdev, phys_pg_pack);

	if (add_va_block(hdev, va_range, ret_vaddr,
			ret_vaddr + phys_pg_pack->total_size - 1))
			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",

	atomic_dec(&phys_pg_pack->mapping_cnt);

		free_phys_pg_pack(hdev, phys_pg_pack);

	dma_unmap_host_va(hdev, userptr);
/**
 * unmap_device_va() - unmap the given device virtual address.
 * @ctx: pointer to the context structure.
 * @args: host parameters with device virtual address to unmap.
 * @ctx_free: true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - unmap the physical pages related to the given virtual address.
 * - return the device virtual block to the virtual block list.
 */
static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
			bool ctx_free)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_vm_hash_node *hnode = NULL;
	struct hl_userptr *userptr = NULL;
	struct hl_va_range *va_range;
	u64 vaddr = args->unmap.device_virt_addr;
	enum vm_type_t *vm_type;

	/* protect from double entrance */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
		if (vaddr == hnode->vaddr)

		mutex_unlock(&ctx->mem_hash_lock);
			"unmap failed, no mem hnode for vaddr 0x%llx\n",

	hash_del(&hnode->node);
	mutex_unlock(&ctx->mem_hash_lock);

	vm_type = hnode->ptr;

	if (*vm_type == VM_TYPE_USERPTR) {
		userptr = hnode->ptr;
		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
				"unable to init page pack for vaddr 0x%llx\n",

		if (phys_pg_pack->page_size ==
					hdev->asic_prop.pmmu.page_size)
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
		phys_pg_pack = hnode->ptr;
			"unmap failed, unknown vm desc for vaddr 0x%llx\n",

	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
		goto mapping_cnt_err;

	if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
		vaddr = prop->dram_base_address +
			DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
						phys_pg_pack->page_size) *
							phys_pg_pack->page_size;
		vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
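	/*
	 * The unmap address is brought back to the start of the page it was
	 * mapped from. Illustrative example: with a non-power-of-2 48MB DRAM
	 * page, a vaddr at dram_base_address + 100MB rounds down to
	 * dram_base_address + 96MB.
	 */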
	mutex_lock(&ctx->mmu_lock);

	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

	/*
	 * During context free this function is called in a loop to clean all
	 * the context mappings. Hence the cache invalidation can be called once
	 * at the loop end rather than for each iteration
	 */
		rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,

	mutex_unlock(&ctx->mmu_lock);

	/*
	 * If the context is closing we don't need to check for the MMU cache
	 * invalidation return code and update the VA free list as in this flow
	 * we invalidate the MMU cache outside of this unmap function and the VA
	 * free list will be freed anyway.
	 */
				"unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",

		tmp_rc = add_va_block(hdev, va_range, vaddr,
					vaddr + phys_pg_pack->total_size - 1);
				"add va block failed for vaddr: 0x%llx\n",

	atomic_dec(&phys_pg_pack->mapping_cnt);

		rc = free_phys_pg_pack(hdev, phys_pg_pack);
		dma_unmap_host_va(hdev, userptr);

		free_phys_pg_pack(hdev, phys_pg_pack);

	mutex_lock(&ctx->mem_hash_lock);
	hash_add(ctx->mem_hash, &hnode->node, vaddr);
	mutex_unlock(&ctx->mem_hash_lock);
static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
			u32 *size)
{
	rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);

	*handle = block_id | HL_MMAP_TYPE_BLOCK;
	*handle <<= PAGE_SHIFT;
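	/*
	 * The returned handle packs the HW block id together with the
	 * HL_MMAP_TYPE_BLOCK marker in the bits above PAGE_SHIFT, so that a
	 * later mmap() of this handle can recover the block id from the mmap
	 * offset (see hl_hw_block_mmap() below).
	 */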
static void hw_block_vm_close(struct vm_area_struct *vma)
{
	struct hl_vm_hw_block_list_node *lnode =
		(struct hl_vm_hw_block_list_node *) vma->vm_private_data;
	struct hl_ctx *ctx = lnode->ctx;

	mutex_lock(&ctx->hw_block_list_lock);
	list_del(&lnode->node);
	mutex_unlock(&ctx->hw_block_list_lock);

	vma->vm_private_data = NULL;

static const struct vm_operations_struct hw_block_vm_ops = {
	.close = hw_block_vm_close
};
/**
 * hl_hw_block_mmap() - mmap a hw block to user.
 * @hpriv: pointer to the private data of the fd.
 * @vma: pointer to vm_area_struct of the process.
 *
 * Driver increments context reference for every HW block mapped in order
 * to prevent user from closing FD without unmapping first
 */
int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_vm_hw_block_list_node *lnode;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u32 block_id, block_size;

	/* We use the page offset to hold the block id and thus we need to clear
	 * it before doing the mmap itself
	 */
	block_id = vma->vm_pgoff;

	/* Driver only allows mapping of a complete HW block */
	block_size = vma->vm_end - vma->vm_start;

#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
	if (!access_ok(VERIFY_WRITE,
		(void __user *) (uintptr_t) vma->vm_start, block_size)) {
#else
	if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
#endif
			"user pointer is invalid - 0x%lx\n",

	lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);

	vma->vm_ops = &hw_block_vm_ops;
	vma->vm_private_data = lnode;

	hl_ctx_get(hdev, ctx);

	rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);

	lnode->vaddr = vma->vm_start;
	lnode->size = block_size;
	lnode->id = block_id;

	mutex_lock(&ctx->hw_block_list_lock);
	list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
	mutex_unlock(&ctx->hw_block_list_lock);

	vma->vm_pgoff = block_id;
static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 block_handle, device_addr = 0;
	u32 handle = 0, block_size;

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
				"alloc size must be larger than 0\n");

		/* Force contiguous as there are no real MMU
		 * translations to overcome physical memory gaps
		 */
		args->in.flags |= HL_MEM_CONTIGUOUS;
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, &args->in);

	case HL_MEM_OP_MAP:
		if (args->in.flags & HL_MEM_USERPTR) {
			device_addr = args->in.map_host.host_virt_addr;
			rc = get_paddr_from_handle(ctx, &args->in,

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;

	case HL_MEM_OP_UNMAP:

	case HL_MEM_OP_MAP_BLOCK:
		rc = map_block(hdev, args->in.map_block.block_addr,
				&block_handle, &block_size);
		args->out.block_handle = block_handle;
		args->out.block_size = block_size;

		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	enum hl_device_status status;
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 block_handle, device_addr = 0;
	u32 handle = 0, block_size;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute MEMORY IOCTL\n",
			hdev->status[status]);

	if (!hdev->mmu_enable)
		return mem_ioctl_no_mmu(hpriv, args);

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (args->in.alloc.mem_size == 0) {
				"alloc size must be larger than 0\n");

		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->asic_prop.dram_supports_virtual_memory) {
			atomic64_add(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_add(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");

			memset(args, 0, sizeof(*args));
			args->out.handle = 0;

		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;

	case HL_MEM_OP_FREE:
		/* If DRAM does not support virtual memory the driver won't
		 * handle the allocation/freeing of that memory. However, for
		 * system administration/monitoring purposes, the driver will
		 * keep track of the amount of DRAM memory that is allocated
		 * and freed by the user. Because this code totally relies on
		 * the user's input, the driver can't ensure the validity
		 * of this accounting.
		 */
		if (!hdev->asic_prop.dram_supports_virtual_memory) {
			atomic64_sub(args->in.alloc.mem_size,
					&ctx->dram_phys_mem);
			atomic64_sub(args->in.alloc.mem_size,
					&hdev->dram_used_mem);

			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");

		rc = free_device_memory(ctx, &args->in);

	case HL_MEM_OP_MAP:
		rc = map_device_va(ctx, &args->in, &device_addr);

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;

	case HL_MEM_OP_UNMAP:
		rc = unmap_device_va(ctx, &args->in, false);

	case HL_MEM_OP_MAP_BLOCK:
		rc = map_block(hdev, args->in.map_block.block_addr,
				&block_handle, &block_size);
		args->out.block_handle = block_handle;
		args->out.block_size = block_size;

		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);

	userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
	if (!userptr->pages)

	rc = pin_user_pages_fast(start, npages,
				FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
			"Failed to map host memory, user ptr probably wrong\n");

	userptr->npages = npages;

	rc = sg_alloc_table_from_pages(userptr->sgt,
					npages, offset, size, GFP_KERNEL);
		dev_err(hdev->dev, "failed to create SG table from pages\n");

	unpin_user_pages(userptr->pages, npages);

	kvfree(userptr->pages);
/**
 * hl_pin_host_memory() - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure.
 * @addr: the host virtual address of the memory area.
 * @size: the size of the memory area.
 * @userptr: pointer to hl_userptr structure.
 *
 * This function does the following:
 * - Pins the physical pages.
 * - Create an SG list from those pages.
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr)
{
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
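	/*
	 * e.g., addr = 0xFFFFFFFFFFFFF000 with size = 0x2000 wraps around the
	 * 64-bit space and is caught by the first check, while the same addr
	 * with size = 0xFFF passes it but overflows once rounded up by
	 * PAGE_ALIGN() and is caught by the second check.
	 */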
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
			"user pointer 0x%llx + %llu causes integer overflow\n",

	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);

	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;
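	/*
	 * e.g., addr = 0x1234 and size = 0x3000 (with 4KB pages) give
	 * start = 0x1000, offset = 0x234, end = PAGE_ALIGN(0x4234) = 0x5000
	 * and npages = 4.
	 */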
	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	rc = get_user_memory(hdev, addr, size, npages, start, offset,
			"failed to get user memory for address 0x%llx\n",

	hl_debugfs_add_userptr(hdev, userptr);

	kfree(userptr->sgt);
/**
 * hl_unpin_host_memory() - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure.
 * @userptr: pointer to hl_userptr structure.
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory.
 * - Frees the SG list.
 */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
						userptr->sgt->nents,

	unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
	kvfree(userptr->pages);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);
}
/**
 * hl_userptr_delete_list() - clear userptr list.
 * @hdev: pointer to the habanalabs device structure.
 * @userptr_list: pointer to the list to clear.
 *
 * This function does the following:
 * - Iterates over the list and unpins the host memory and frees the userptr
 *   structures.
 */
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);

	INIT_LIST_HEAD(userptr_list);
/**
 * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
 * @hdev: pointer to the habanalabs device structure.
 * @addr: user address to check.
 * @size: size of the area to check.
 * @userptr_list: pointer to the list to search in.
 * @userptr: pointer to userptr to check.
 *
 * This function does the following:
 * - Iterates over the list and checks if the given userptr is in it, meaning
 *   it is pinned. If so, returns true, otherwise returns false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
/**
 * va_range_init() - initialize virtual addresses range.
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the range to initialize.
 * @start: range start address.
 * @end: range end address.
 * @page_size: page size of this va_range.
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
			u64 start, u64 end, u32 page_size)
{
	INIT_LIST_HEAD(&va_range->list);

	/*
	 * PAGE_SIZE alignment
	 * it is the caller's responsibility to align the addresses if the
	 * page size is not a power of 2
	 */
	if (is_power_of_2(page_size)) {
		if (start & (PAGE_SIZE - 1)) {

		if (end & (PAGE_SIZE - 1))

		dev_err(hdev->dev, "too small vm range for va list\n");

	rc = add_va_block(hdev, va_range, start, end);

		dev_err(hdev->dev, "Failed to init host va list\n");

	va_range->start_addr = start;
	va_range->end_addr = end;
	va_range->page_size = page_size;
/**
 * va_range_fini() - clear a virtual addresses range.
 * @hdev: pointer to the habanalabs structure.
 * @va_range: pointer to the virtual addresses range.
 *
 * This function does the following:
 * - Frees the virtual addresses block list and its lock.
 */
static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
{
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

	mutex_destroy(&va_range->lock);
/**
 * vm_ctx_init_with_ranges() - initialize virtual memory for context.
 * @ctx: pointer to the habanalabs context structure.
 * @host_range_start: host virtual addresses range start.
 * @host_range_end: host virtual addresses range end.
 * @host_page_size: host page size.
 * @host_huge_range_start: host virtual addresses range start for memory
 *                         allocated with huge pages.
 * @host_huge_range_end: host virtual addresses range end for memory allocated
 *                       with huge pages.
 * @host_huge_page_size: host huge page size.
 * @dram_range_start: dram virtual addresses range start.
 * @dram_range_end: dram virtual addresses range end.
 * @dram_page_size: dram page size.
 *
 * This function initializes the following:
 * - MMU for context.
 * - Virtual address to area descriptor hashtable.
 * - Virtual block list of available virtual memory.
 */
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
					u64 host_range_start,
					u64 host_range_end,
					u32 host_page_size,
					u64 host_huge_range_start,
					u64 host_huge_range_end,
					u32 host_huge_page_size,
					u64 dram_range_start,
					u64 dram_range_end,
					u32 dram_page_size)
{
	struct hl_device *hdev = ctx->hdev;

	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
			kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
		if (!ctx->va_range[i]) {
	rc = hl_mmu_ctx_init(ctx);
		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);

	mutex_init(&ctx->mem_hash_lock);
	hash_init(ctx->mem_hash);

	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);

	rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
			host_range_start, host_range_end, host_page_size);
		dev_err(hdev->dev, "failed to init host vm range\n");

	if (hdev->pmmu_huge_range) {
		mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);

		rc = va_range_init(hdev,
			ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
			host_huge_range_start, host_huge_range_end,
			host_huge_page_size);
				"failed to init host huge vm range\n");
			goto clear_host_va_range;
		kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
		ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
				ctx->va_range[HL_VA_RANGE_TYPE_HOST];

	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);

	rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
			dram_range_start, dram_range_end, dram_page_size);
		dev_err(hdev->dev, "failed to init dram vm range\n");
		goto clear_host_huge_va_range;

	hl_debugfs_add_ctx_mem_hash(hdev, ctx);

clear_host_huge_va_range:
	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);

	if (hdev->pmmu_huge_range) {
		mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
		clear_va_list_locked(hdev,
			&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
		mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
clear_host_va_range:
	if (hdev->pmmu_huge_range)
		mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
	mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
	clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
	mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);

	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);

	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
		kfree(ctx->va_range[i]);
int hl_vm_ctx_init(struct hl_ctx *ctx)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 host_range_start, host_range_end, host_huge_range_start,
		host_huge_range_end, dram_range_start, dram_range_end;
	u32 host_page_size, host_huge_page_size, dram_page_size;

	atomic64_set(&ctx->dram_phys_mem, 0);

	/*
	 * - If MMU is enabled, init the ranges as usual.
	 * - If MMU is disabled, in case of host mapping, the returned address
	 *   In case of DRAM mapping, the returned address is the physical
	 *   address of the memory related to the given handle.
	 */
	if (!ctx->hdev->mmu_enable)

	dram_range_start = prop->dmmu.start_addr;
	dram_range_end = prop->dmmu.end_addr;
	dram_page_size = prop->dram_page_size ?
				prop->dram_page_size : prop->dmmu.page_size;
	host_range_start = prop->pmmu.start_addr;
	host_range_end = prop->pmmu.end_addr;
	host_page_size = prop->pmmu.page_size;
	host_huge_range_start = prop->pmmu_huge.start_addr;
	host_huge_range_end = prop->pmmu_huge.end_addr;
	host_huge_page_size = prop->pmmu_huge.page_size;

	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
			host_page_size, host_huge_range_start,
			host_huge_range_end, host_huge_page_size,
			dram_range_start, dram_range_end, dram_page_size);
/**
 * hl_vm_ctx_fini() - virtual memory teardown of context.
 * @ctx: pointer to the habanalabs context structure.
 *
 * This function performs the following teardown:
 * - Virtual block list of available virtual memory.
 * - Virtual address to area descriptor hashtable.
 * - MMU for context.
 *
 * In addition this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - This function checks the virtual block list for correctness. At this point
 *   the list should contain one element which describes the whole virtual
 *   memory range of the context. Otherwise, a warning is printed.
 */
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_list;
	struct hl_vm_hash_node *hnode;
	struct hlist_node *tmp_node;
	struct hl_mem_in args;

	if (!hdev->mmu_enable)

	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);

	/*
	 * Clearly something went wrong on hard reset so no point in printing
	 * another side effect error
	 */
	if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
		dev_notice(hdev->dev,
			"user released device without removing its memory mappings\n");

	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
			hnode->vaddr, ctx->asid);
		args.unmap.device_virt_addr = hnode->vaddr;
		unmap_device_va(ctx, &args, true);

	mutex_lock(&ctx->mmu_lock);

	/* invalidate the cache once after the unmapping loop */
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);

	mutex_unlock(&ctx->mmu_lock);

	spin_lock(&vm->idr_lock);
	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
		if (phys_pg_list->asid == ctx->asid) {
				"page list 0x%px of asid %d is still alive\n",
				phys_pg_list, ctx->asid);
			atomic64_sub(phys_pg_list->total_size,
					&hdev->dram_used_mem);
			free_phys_pg_pack(hdev, phys_pg_list);
			idr_remove(&vm->phys_pg_pack_handles, i);
	spin_unlock(&vm->idr_lock);

	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);

	if (hdev->pmmu_huge_range)
		va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);

	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);

	/* In this case we need to clear the global accounting of DRAM usage
	 * because the user notifies us on allocations. If the user is no more,
	 * all DRAM is available
	 */
	if (ctx->asid != HL_KERNEL_ASID_ID &&
			!hdev->asic_prop.dram_supports_virtual_memory)
		atomic64_set(&hdev->dram_used_mem, 0);
/**
 * hl_vm_init() - initialize virtual memory module.
 * @hdev: pointer to the habanalabs device structure.
 *
 * This function initializes the following:
 * - DRAM physical pages pool (with DRAM_POOL_PAGE_SIZE granularity, or the
 *   DRAM page size if it is a power of 2).
 * - Idr for device memory allocation handles.
 */
int hl_vm_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm *vm = &hdev->vm;

	if (is_power_of_2(prop->dram_page_size))
			gen_pool_create(__ffs(prop->dram_page_size), -1);
			gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);

	if (!vm->dram_pg_pool) {
		dev_err(hdev->dev, "Failed to create dram page pool\n");

	kref_init(&vm->dram_pg_pool_refcount);

	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
			prop->dram_end_address - prop->dram_user_base_address,
			"Failed to add memory to dram page pool %d\n", rc);

	spin_lock_init(&vm->idr_lock);
	idr_init(&vm->phys_pg_pack_handles);

	atomic64_set(&hdev->dram_used_mem, 0);

	vm->init_done = true;

	gen_pool_destroy(vm->dram_pg_pool);
/**
 * hl_vm_fini() - virtual memory module teardown.
 * @hdev: pointer to the habanalabs device structure.
 *
 * This function performs the following teardown:
 * - Idr for device memory allocation handles.
 * - DRAM physical pages pool.
 */
void hl_vm_fini(struct hl_device *hdev)
{
	struct hl_vm *vm = &hdev->vm;

	/*
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use. Hence the DRAM pool should be freed here.
	 */
	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",

	vm->init_done = false;
/**
 * hl_hw_block_mem_init() - HW block memory initialization.
 * @ctx: pointer to the habanalabs context structure.
 *
 * This function initializes the HW block virtual mapped addresses list and
 * its lock.
 */
void hl_hw_block_mem_init(struct hl_ctx *ctx)
{
	mutex_init(&ctx->hw_block_list_lock);
	INIT_LIST_HEAD(&ctx->hw_block_mem_list);
}
/**
 * hl_hw_block_mem_fini() - HW block memory teardown.
 * @ctx: pointer to the habanalabs context structure.
 *
 * This function clears the HW block virtual mapped addresses list and destroys
 * its lock.
 */
void hl_hw_block_mem_fini(struct hl_ctx *ctx)
{
	struct hl_vm_hw_block_list_node *lnode, *tmp;

	if (!list_empty(&ctx->hw_block_mem_list))
		dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");

	list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
		list_del(&lnode->node);
		kfree(lnode);
	}

	mutex_destroy(&ctx->hw_block_list_lock);
}