// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10 #include "../include/hw_ip/mmu/mmu_general.h"
12 #include <linux/uaccess.h>
13 #include <linux/slab.h>
14 #include <linux/pci-p2pdma.h>
16 MODULE_IMPORT_NS(DMA_BUF);
18 #define HL_MMU_DEBUG 0
20 /* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
21 #define DRAM_POOL_PAGE_SIZE SZ_8M
/*
 * The va ranges in the context object contain a list with the available chunks
 * of device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as the result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be created),
 * the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */
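/*
 * A compile-disabled, user-space sketch (not driver code) of the
 * split-on-map / merge-on-unmap bookkeeping described above, using a sorted
 * singly-linked list of free chunks. All names below are illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chunk { uint64_t start, end; struct chunk *next; };

/* take 'size' bytes from the first chunk that fits, splitting it if needed */
static uint64_t toy_alloc(struct chunk **head, uint64_t size)
{
	struct chunk **pp, *c;

	for (pp = head; (c = *pp); pp = &c->next) {
		uint64_t va = c->start;

		if (c->end - c->start + 1 < size)
			continue;
		if (c->end - c->start + 1 == size) {
			*pp = c->next;	/* exact fit - remove the chunk */
			free(c);
		} else {
			c->start += size;	/* split - keep the remainder */
		}
		return va;
	}
	return 0;
}

/* return [start, end] to the list, merging with the next chunk if adjacent */
static void toy_free(struct chunk **head, uint64_t start, uint64_t end)
{
	struct chunk **pp, *c, *n;

	for (pp = head; (c = *pp) && c->end < start; pp = &c->next)
		;
	n = malloc(sizeof(*n));
	n->start = start;
	n->end = end;
	n->next = c;
	*pp = n;
	if (c && n->end + 1 == c->start) {	/* merge with the next chunk */
		n->end = c->end;
		n->next = c->next;
		free(c);
	}
	/* (a full model would also merge with the previous chunk) */
}

int main(void)
{
	struct chunk *head = malloc(sizeof(*head));

	*head = (struct chunk){ .start = 0x1000, .end = 0x8fff, .next = NULL };
	uint64_t va = toy_alloc(&head, 0x2000);	/* splits the single chunk */
	toy_free(&head, va, va + 0x2000 - 1);	/* merges it back */
	printf("head: 0x%llx-0x%llx\n",
	       (unsigned long long)head->start, (unsigned long long)head->end);
	return 0;
}
#endif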
/**
 * alloc_device_memory() - allocate device memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters containing the requested size.
 * @ret_handle: result handle.
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 'dram_page_size' pages.
 * - Return unique handle for later map/unmap/free.
 */
56 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
59 struct hl_device *hdev = ctx->hdev;
60 struct hl_vm *vm = &hdev->vm;
61 struct hl_vm_phys_pg_pack *phys_pg_pack;
62 u64 paddr = 0, total_size, num_pgs, i;
63 u32 num_curr_pgs, page_size;
68 page_size = hdev->asic_prop.dram_page_size;
69 num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
70 total_size = num_pgs * page_size;
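	/*
	 * e.g. with a 48MB dram_page_size, a 100MB request rounds up to
	 * num_pgs = 3 and total_size = 144MB (values illustrative only).
	 */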
73 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
77 contiguous = args->flags & HL_MEM_CONTIGUOUS;
80 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
83 "failed to allocate %llu contiguous pages with total size of %llu\n",
89 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
95 phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
96 phys_pg_pack->asid = ctx->asid;
97 phys_pg_pack->npages = num_pgs;
98 phys_pg_pack->page_size = page_size;
99 phys_pg_pack->total_size = total_size;
100 phys_pg_pack->flags = args->flags;
101 phys_pg_pack->contiguous = contiguous;
103 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
104 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
109 if (phys_pg_pack->contiguous) {
110 for (i = 0 ; i < num_pgs ; i++)
111 phys_pg_pack->pages[i] = paddr + i * page_size;
113 for (i = 0 ; i < num_pgs ; i++) {
114 phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
117 if (!phys_pg_pack->pages[i]) {
119 "Failed to allocate device memory (out of memory)\n");
128 spin_lock(&vm->idr_lock);
129 handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
131 spin_unlock(&vm->idr_lock);
134 dev_err(hdev->dev, "Failed to get handle for page\n");
139 for (i = 0 ; i < num_pgs ; i++)
140 kref_get(&vm->dram_pg_pool_refcount);
142 phys_pg_pack->handle = handle;
144 atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
145 atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
147 *ret_handle = handle;
153 if (!phys_pg_pack->contiguous)
154 for (i = 0 ; i < num_curr_pgs ; i++)
155 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
158 kvfree(phys_pg_pack->pages);
163 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
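/*
 * A compile-disabled sketch of how user space reaches the allocation path
 * above through the memory ioctl. The ioctl number and field names are
 * assumed from uapi/misc/habanalabs.h; "/dev/hl0", the size and the error
 * handling are illustrative only, not a definitive usage recipe.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <misc/habanalabs.h>

static int alloc_and_free_dram(int fd, __u64 size)
{
	union hl_mem_args args;
	__u64 handle;

	/* HL_MEM_OP_ALLOC: returns a handle for later map/unmap/free */
	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_ALLOC;
	args.in.alloc.mem_size = size;
	args.in.flags = HL_MEM_CONTIGUOUS;
	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;
	handle = args.out.handle;

	/* HL_MEM_OP_FREE: releases the allocation behind the handle */
	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_FREE;
	args.in.free.handle = handle;
	return ioctl(fd, HL_IOCTL_MEMORY, &args);
}
#endif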
/**
 * dma_map_host_va() - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure.
 * @addr: the host virtual address of the memory area.
 * @size: the size of the memory area.
 * @p_userptr: pointer to result userptr structure.
 *
 * This function does the following:
 * - Allocate a userptr structure.
 * - Pin the given host memory using the userptr structure.
 * - Perform DMA mapping to have the DMA addresses of the pages.
 */
180 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
181 struct hl_userptr **p_userptr)
183 struct hl_userptr *userptr;
186 userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
192 rc = hl_pin_host_memory(hdev, addr, size, userptr);
194 dev_err(hdev->dev, "Failed to pin host memory\n");
198 rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
199 userptr->sgt->nents, DMA_BIDIRECTIONAL);
201 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
205 userptr->dma_mapped = true;
206 userptr->dir = DMA_BIDIRECTIONAL;
207 userptr->vm_type = VM_TYPE_USERPTR;
209 *p_userptr = userptr;
214 hl_unpin_host_memory(hdev, userptr);
223 * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
224 * @hdev: habanalabs device structure.
225 * @userptr: userptr to free.
227 * This function does the following:
228 * - Unpins the physical pages.
229 * - Frees the userptr structure.
231 static void dma_unmap_host_va(struct hl_device *hdev,
232 struct hl_userptr *userptr)
234 hl_unpin_host_memory(hdev, userptr);
239 * dram_pg_pool_do_release() - free DRAM pages pool
240 * @ref: pointer to reference object.
242 * This function does the following:
243 * - Frees the idr structure of physical pages handles.
244 * - Frees the generic pool of DRAM physical pages.
246 static void dram_pg_pool_do_release(struct kref *ref)
248 struct hl_vm *vm = container_of(ref, struct hl_vm,
249 dram_pg_pool_refcount);
252 * free the idr here as only here we know for sure that there are no
253 * allocated physical pages and hence there are no handles in use
255 idr_destroy(&vm->phys_pg_pack_handles);
256 gen_pool_destroy(vm->dram_pg_pool);
260 * free_phys_pg_pack() - free physical page pack.
261 * @hdev: habanalabs device structure.
262 * @phys_pg_pack: physical page pack to free.
264 * This function does the following:
265 * - For DRAM memory only
266 * - iterate over the pack, scrub and free each physical block structure by
267 * returning it to the general pool.
268 * In case of error during scrubbing, initiate hard reset.
269 * Once hard reset is triggered, scrubbing is bypassed while freeing the
271 * - Free the hl_vm_phys_pg_pack structure.
273 static int free_phys_pg_pack(struct hl_device *hdev,
274 struct hl_vm_phys_pg_pack *phys_pg_pack)
276 struct hl_vm *vm = &hdev->vm;
280 if (phys_pg_pack->created_from_userptr)
283 if (phys_pg_pack->contiguous) {
284 if (hdev->memory_scrub && !hdev->disabled) {
285 rc = hdev->asic_funcs->scrub_device_mem(hdev,
286 phys_pg_pack->pages[0],
287 phys_pg_pack->total_size);
290 "Failed to scrub contiguous device memory\n");
293 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
294 phys_pg_pack->total_size);
296 for (i = 0; i < phys_pg_pack->npages ; i++)
297 kref_put(&vm->dram_pg_pool_refcount,
298 dram_pg_pool_do_release);
300 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
301 if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
302 rc = hdev->asic_funcs->scrub_device_mem(
304 phys_pg_pack->pages[i],
305 phys_pg_pack->page_size);
308 "Failed to scrub device memory\n");
310 gen_pool_free(vm->dram_pg_pool,
311 phys_pg_pack->pages[i],
312 phys_pg_pack->page_size);
313 kref_put(&vm->dram_pg_pool_refcount,
314 dram_pg_pool_do_release);
318 if (rc && !hdev->disabled)
319 hl_device_reset(hdev, HL_DRV_RESET_HARD);
322 kvfree(phys_pg_pack->pages);
329 * free_device_memory() - free device memory.
330 * @ctx: pointer to the context structure.
331 * @args: host parameters containing the requested size.
333 * This function does the following:
334 * - Free the device memory related to the given handle.
336 static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
338 struct hl_device *hdev = ctx->hdev;
339 struct hl_vm *vm = &hdev->vm;
340 struct hl_vm_phys_pg_pack *phys_pg_pack;
341 u32 handle = args->free.handle;
343 spin_lock(&vm->idr_lock);
344 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
346 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
347 dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
349 spin_unlock(&vm->idr_lock);
353 if (phys_pg_pack->exporting_cnt) {
354 dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
355 spin_unlock(&vm->idr_lock);
360 * must remove from idr before the freeing of the physical
361 * pages as the refcount of the pool is also the trigger of the
364 idr_remove(&vm->phys_pg_pack_handles, handle);
365 spin_unlock(&vm->idr_lock);
367 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
368 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
370 return free_phys_pg_pack(hdev, phys_pg_pack);
372 spin_unlock(&vm->idr_lock);
374 "free device memory failed, no match for handle %u\n",
383 * clear_va_list_locked() - free virtual addresses list.
384 * @hdev: habanalabs device structure.
385 * @va_list: list of virtual addresses to free.
387 * This function does the following:
388 * - Iterate over the list and free each virtual addresses block.
390 * This function should be called only when va_list lock is taken.
392 static void clear_va_list_locked(struct hl_device *hdev,
393 struct list_head *va_list)
395 struct hl_vm_va_block *va_block, *tmp;
397 list_for_each_entry_safe(va_block, tmp, va_list, node) {
398 list_del(&va_block->node);
404 * print_va_list_locked() - print virtual addresses list.
405 * @hdev: habanalabs device structure.
406 * @va_list: list of virtual addresses to print.
408 * This function does the following:
409 * - Iterate over the list and print each virtual addresses block.
411 * This function should be called only when va_list lock is taken.
413 static void print_va_list_locked(struct hl_device *hdev,
414 struct list_head *va_list)
417 struct hl_vm_va_block *va_block;
419 dev_dbg(hdev->dev, "print va list:\n");
421 list_for_each_entry(va_block, va_list, node)
423 "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
424 va_block->start, va_block->end, va_block->size);
429 * merge_va_blocks_locked() - merge a virtual block if possible.
430 * @hdev: pointer to the habanalabs device structure.
431 * @va_list: pointer to the virtual addresses block list.
432 * @va_block: virtual block to merge with adjacent blocks.
434 * This function does the following:
435 * - Merge the given blocks with the adjacent blocks if their virtual ranges
436 * create a contiguous virtual range.
 * This function should be called only when va_list lock is taken.
440 static void merge_va_blocks_locked(struct hl_device *hdev,
441 struct list_head *va_list, struct hl_vm_va_block *va_block)
443 struct hl_vm_va_block *prev, *next;
445 prev = list_prev_entry(va_block, node);
446 if (&prev->node != va_list && prev->end + 1 == va_block->start) {
447 prev->end = va_block->end;
448 prev->size = prev->end - prev->start;
449 list_del(&va_block->node);
454 next = list_next_entry(va_block, node);
455 if (&next->node != va_list && va_block->end + 1 == next->start) {
456 next->start = va_block->start;
457 next->size = next->end - next->start;
458 list_del(&va_block->node);
464 * add_va_block_locked() - add a virtual block to the virtual addresses list.
465 * @hdev: pointer to the habanalabs device structure.
466 * @va_list: pointer to the virtual addresses block list.
467 * @start: start virtual address.
468 * @end: end virtual address.
470 * This function does the following:
471 * - Add the given block to the virtual blocks list and merge with other blocks
472 * if a contiguous virtual block can be created.
 * This function should be called only when va_list lock is taken.
476 static int add_va_block_locked(struct hl_device *hdev,
477 struct list_head *va_list, u64 start, u64 end)
479 struct hl_vm_va_block *va_block, *res = NULL;
480 u64 size = end - start + 1;
482 print_va_list_locked(hdev, va_list);
484 list_for_each_entry(va_block, va_list, node) {
		/* TODO: remove upon maturity */
486 if (hl_mem_area_crosses_range(start, size, va_block->start,
489 "block crossing ranges at start 0x%llx, end 0x%llx\n",
490 va_block->start, va_block->end);
494 if (va_block->end < start)
498 va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
502 va_block->start = start;
504 va_block->size = size;
507 list_add(&va_block->node, va_list);
509 list_add(&va_block->node, &res->node);
511 merge_va_blocks_locked(hdev, va_list, va_block);
513 print_va_list_locked(hdev, va_list);
519 * add_va_block() - wrapper for add_va_block_locked.
520 * @hdev: pointer to the habanalabs device structure.
521 * @va_range: pointer to the virtual addresses range object.
522 * @start: start virtual address.
523 * @end: end virtual address.
525 * This function does the following:
526 * - Takes the list lock and calls add_va_block_locked.
528 static inline int add_va_block(struct hl_device *hdev,
529 struct hl_va_range *va_range, u64 start, u64 end)
533 mutex_lock(&va_range->lock);
534 rc = add_va_block_locked(hdev, &va_range->list, start, end);
535 mutex_unlock(&va_range->lock);
/**
 * is_hint_crossing_range() - check if a hint address crosses the specified
 *                            reserved range.
 * @range_type: virtual space range type.
 * @start_addr: start virtual address.
 * @size: block size.
 * @prop: asic properties structure to retrieve reserved ranges from.
 */
547 static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
548 u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
551 if (range_type == HL_VA_RANGE_TYPE_DRAM)
553 hl_mem_area_crosses_range(start_addr, size,
554 prop->hints_dram_reserved_va_range.start_addr,
555 prop->hints_dram_reserved_va_range.end_addr);
556 else if (range_type == HL_VA_RANGE_TYPE_HOST)
558 hl_mem_area_crosses_range(start_addr, size,
559 prop->hints_host_reserved_va_range.start_addr,
560 prop->hints_host_reserved_va_range.end_addr);
563 hl_mem_area_crosses_range(start_addr, size,
564 prop->hints_host_hpage_reserved_va_range.start_addr,
565 prop->hints_host_hpage_reserved_va_range.end_addr);
571 * get_va_block() - get a virtual block for the given size and alignment.
573 * @hdev: pointer to the habanalabs device structure.
574 * @va_range: pointer to the virtual addresses range.
575 * @size: requested block size.
576 * @hint_addr: hint for requested address by the user.
577 * @va_block_align: required alignment of the virtual block start address.
578 * @range_type: va range type (host, dram)
579 * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
581 * This function does the following:
582 * - Iterate on the virtual block list to find a suitable virtual block for the
583 * given size, hint address and alignment.
584 * - Reserve the requested block and update the list.
585 * - Return the start address of the virtual block.
587 static u64 get_va_block(struct hl_device *hdev,
588 struct hl_va_range *va_range,
589 u64 size, u64 hint_addr, u32 va_block_align,
590 enum hl_va_range_type range_type,
593 struct hl_vm_va_block *va_block, *new_va_block = NULL;
594 struct asic_fixed_properties *prop = &hdev->asic_prop;
595 u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
596 align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
597 dram_hint_mask = prop->dram_hints_align_mask;
598 bool add_prev = false;
599 bool is_align_pow_2 = is_power_of_2(va_range->page_size);
600 bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
601 bool force_hint = flags & HL_MEM_FORCE_HINT;
604 align_mask = ~((u64)va_block_align - 1);
607 * with non-power-of-2 range we work only with page granularity
608 * and the start address is page aligned,
609 * so no need for alignment checking.
611 size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
614 tmp_hint_addr = hint_addr & ~dram_hint_mask;
616 /* Check if we need to ignore hint address */
617 if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
618 (!is_align_pow_2 && is_hint_dram_addr &&
619 do_div(tmp_hint_addr, va_range->page_size))) {
622 /* Hint must be respected, so here we just fail */
624 "Hint address 0x%llx is not page aligned - cannot be respected\n",
630 "Hint address 0x%llx will be ignored because it is not aligned\n",
635 mutex_lock(&va_range->lock);
637 print_va_list_locked(hdev, &va_range->list);
639 list_for_each_entry(va_block, &va_range->list, node) {
640 /* Calc the first possible aligned addr */
641 valid_start = va_block->start;
643 if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
644 valid_start &= align_mask;
645 valid_start += va_block_align;
646 if (valid_start > va_block->end)
650 valid_size = va_block->end - valid_start + 1;
651 if (valid_size < size)
655 * In case hint address is 0, and arc_hints_range_reservation
656 * property enabled, then avoid allocating va blocks from the
657 * range reserved for hint addresses
659 if (prop->hints_range_reservation && !hint_addr)
660 if (is_hint_crossing_range(range_type, valid_start,
664 /* Pick the minimal length block which has the required size */
665 if (!new_va_block || (valid_size < reserved_valid_size)) {
666 new_va_block = va_block;
667 reserved_valid_start = valid_start;
668 reserved_valid_size = valid_size;
671 if (hint_addr && hint_addr >= valid_start &&
672 (hint_addr + size) <= va_block->end) {
673 new_va_block = va_block;
674 reserved_valid_start = hint_addr;
675 reserved_valid_size = valid_size;
681 dev_err(hdev->dev, "no available va block for size %llu\n",
686 if (force_hint && reserved_valid_start != hint_addr) {
687 /* Hint address must be respected. If we are here - this means
688 * we could not respect it.
691 "Hint address 0x%llx could not be respected\n",
693 reserved_valid_start = 0;
698 * Check if there is some leftover range due to reserving the new
699 * va block, then return it to the main virtual addresses list.
701 if (reserved_valid_start > new_va_block->start) {
702 prev_start = new_va_block->start;
703 prev_end = reserved_valid_start - 1;
705 new_va_block->start = reserved_valid_start;
706 new_va_block->size = reserved_valid_size;
711 if (new_va_block->size > size) {
712 new_va_block->start += size;
713 new_va_block->size = new_va_block->end - new_va_block->start + 1;
715 list_del(&new_va_block->node);
720 add_va_block_locked(hdev, &va_range->list, prev_start,
723 print_va_list_locked(hdev, &va_range->list);
725 mutex_unlock(&va_range->lock);
727 return reserved_valid_start;
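/*
 * A compile-disabled, user-space sketch of the round-up-to-alignment step used
 * in get_va_block() above ("valid_start &= align_mask; valid_start +=
 * va_block_align;" when the start is not already aligned). Names and values
 * are illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_to_align(uint64_t start, uint64_t align)
{
	if (start & (align - 1))
		start = (start & ~(align - 1)) + align;
	return start;
}

int main(void)
{
	/* e.g. a block starting at 0x10001000 with 2MB alignment */
	printf("0x%llx\n",
	       (unsigned long long)round_up_to_align(0x10001000ULL, 0x200000ULL));
	return 0;	/* prints 0x10200000 */
}
#endif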
731 * hl_reserve_va_block() - reserve a virtual block of a given size.
732 * @hdev: pointer to the habanalabs device structure.
733 * @ctx: current context
734 * @type: virtual addresses range type.
735 * @size: requested block size.
736 * @alignment: required alignment in bytes of the virtual block start address,
737 * 0 means no alignment.
739 * This function does the following:
740 * - Iterate on the virtual block list to find a suitable virtual block for the
741 * given size and alignment.
742 * - Reserve the requested block and update the list.
743 * - Return the start address of the virtual block.
745 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
746 enum hl_va_range_type type, u32 size, u32 alignment)
748 return get_va_block(hdev, ctx->va_range[type], size, 0,
749 max(alignment, ctx->va_range[type]->page_size),
754 * hl_get_va_range_type() - get va_range type for the given address and size.
755 * @ctx: context to fetch va_range from.
756 * @address: the start address of the area we want to validate.
757 * @size: the size in bytes of the area we want to validate.
758 * @type: returned va_range type.
760 * Return: true if the area is inside a valid range, false otherwise.
762 static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
763 enum hl_va_range_type *type)
767 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
768 if (hl_mem_area_inside_range(address, size,
769 ctx->va_range[i]->start_addr,
770 ctx->va_range[i]->end_addr)) {
780 * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
781 * @hdev: pointer to the habanalabs device structure
782 * @ctx: pointer to the context structure.
783 * @start_addr: start virtual address.
784 * @size: number of bytes to unreserve.
786 * This function does the following:
787 * - Takes the list lock and calls add_va_block_locked.
789 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
790 u64 start_addr, u64 size)
792 enum hl_va_range_type type;
795 rc = hl_get_va_range_type(ctx, start_addr, size, &type);
798 "cannot find va_range for va %#llx size %llu",
803 rc = add_va_block(hdev, ctx->va_range[type], start_addr,
804 start_addr + size - 1);
807 "add va block failed for vaddr: 0x%llx\n", start_addr);
/**
 * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
 *                                    memory.
 * @ctx: pointer to the context structure.
 * @userptr: userptr to initialize from.
 * @pphys_pg_pack: result pointer.
 * @force_regular_page: tell the function to ignore huge page optimization,
 *                      even if possible. Needed for cases where the device VA
 *                      is allocated before we know the composition of the
 *                      pages.
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block.
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block.
 */
828 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
829 struct hl_userptr *userptr,
830 struct hl_vm_phys_pg_pack **pphys_pg_pack,
831 bool force_regular_page)
833 u32 npages, page_size = PAGE_SIZE,
834 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
835 u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
836 struct hl_vm_phys_pg_pack *phys_pg_pack;
837 bool first = true, is_huge_page_opt;
838 u64 page_mask, total_npages;
839 struct scatterlist *sg;
843 phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
847 phys_pg_pack->vm_type = userptr->vm_type;
848 phys_pg_pack->created_from_userptr = true;
849 phys_pg_pack->asid = ctx->asid;
850 atomic_set(&phys_pg_pack->mapping_cnt, 1);
852 is_huge_page_opt = (force_regular_page ? false : true);
	/* Only if all dma_addrs are aligned to 2MB and their
	 * sizes are at least 2MB, we can use huge page mapping.
	 * We limit the 2MB optimization to this condition,
	 * since later on we acquire the related VA range as one
861 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
862 npages = hl_get_sg_info(sg, &dma_addr);
864 total_npages += npages;
866 if ((npages % pgs_in_huge_page) ||
867 (dma_addr & (huge_page_size - 1)))
868 is_huge_page_opt = false;
871 if (is_huge_page_opt) {
872 page_size = huge_page_size;
873 do_div(total_npages, pgs_in_huge_page);
876 page_mask = ~(((u64) page_size) - 1);
878 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
880 if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
882 goto page_pack_arr_mem_err;
885 phys_pg_pack->npages = total_npages;
886 phys_pg_pack->page_size = page_size;
887 phys_pg_pack->total_size = total_npages * page_size;
890 for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
891 npages = hl_get_sg_info(sg, &dma_addr);
893 /* align down to physical page size and save the offset */
896 phys_pg_pack->offset = dma_addr & (page_size - 1);
897 dma_addr &= page_mask;
901 phys_pg_pack->pages[j++] = dma_addr;
902 dma_addr += page_size;
904 if (is_huge_page_opt)
905 npages -= pgs_in_huge_page;
911 *pphys_pg_pack = phys_pg_pack;
915 page_pack_arr_mem_err:
/**
 * map_phys_pg_pack() - maps the physical page pack.
 * @ctx: pointer to the context structure.
 * @vaddr: start address of the virtual area to map from.
 * @phys_pg_pack: the pack of physical pages to map to.
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to the matching physical chunk.
 * - Stores the number of successful mappings in the given argument.
 * - Returns 0 on success, error code otherwise.
 */
932 static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
933 struct hl_vm_phys_pg_pack *phys_pg_pack)
935 struct hl_device *hdev = ctx->hdev;
936 u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
937 u32 page_size = phys_pg_pack->page_size;
941 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
942 paddr = phys_pg_pack->pages[i];
944 rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
945 (i + 1) == phys_pg_pack->npages);
948 "map failed for handle %u, npages: %llu, mapped: %llu",
949 phys_pg_pack->handle, phys_pg_pack->npages,
955 next_vaddr += page_size;
961 is_host_addr = !hl_is_dram_va(hdev, vaddr);
964 for (i = 0 ; i < mapped_pg_cnt ; i++) {
965 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
966 (i + 1) == mapped_pg_cnt))
967 dev_warn_ratelimited(hdev->dev,
968 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
969 phys_pg_pack->handle, next_vaddr,
970 phys_pg_pack->pages[i], page_size);
972 next_vaddr += page_size;
975 * unmapping on Palladium can be really long, so avoid a CPU
976 * soft lockup bug by sleeping a little between unmapping pages
978 * In addition, on host num of pages could be huge,
979 * because page size could be 4KB, so when unmapping host
980 * pages sleep every 32K pages to avoid soft lockup
982 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
983 usleep_range(50, 200);
990 * unmap_phys_pg_pack() - unmaps the physical page pack.
991 * @ctx: pointer to the context structure.
992 * @vaddr: start address of the virtual area to unmap.
993 * @phys_pg_pack: the pack of physical pages to unmap.
995 static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
996 struct hl_vm_phys_pg_pack *phys_pg_pack)
998 struct hl_device *hdev = ctx->hdev;
1003 is_host_addr = !hl_is_dram_va(hdev, vaddr);
1004 page_size = phys_pg_pack->page_size;
1007 for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
1008 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
1009 (i + 1) == phys_pg_pack->npages))
1010 dev_warn_ratelimited(hdev->dev,
1011 "unmap failed for vaddr: 0x%llx\n", next_vaddr);
1014 * unmapping on Palladium can be really long, so avoid a CPU
1015 * soft lockup bug by sleeping a little between unmapping pages
1017 * In addition, on host num of pages could be huge,
1018 * because page size could be 4KB, so when unmapping host
1019 * pages sleep every 32K pages to avoid soft lockup
1021 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
1022 usleep_range(50, 200);
1026 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
1029 struct hl_device *hdev = ctx->hdev;
1030 struct hl_vm *vm = &hdev->vm;
1031 struct hl_vm_phys_pg_pack *phys_pg_pack;
1034 handle = lower_32_bits(args->map_device.handle);
1035 spin_lock(&vm->idr_lock);
1036 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1037 if (!phys_pg_pack) {
1038 spin_unlock(&vm->idr_lock);
1039 dev_err(hdev->dev, "no match for handle %u\n", handle);
1043 *paddr = phys_pg_pack->pages[0];
1045 spin_unlock(&vm->idr_lock);
/**
 * map_device_va() - map the given memory.
 * @ctx: pointer to the context structure.
 * @args: host parameters with handle/host virtual address.
 * @device_addr: pointer to result device virtual address.
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block.
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block.
 */
1063 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
1066 struct hl_device *hdev = ctx->hdev;
1067 struct hl_vm *vm = &hdev->vm;
1068 struct hl_vm_phys_pg_pack *phys_pg_pack;
1069 struct hl_userptr *userptr = NULL;
1070 struct hl_vm_hash_node *hnode;
1071 struct hl_va_range *va_range;
1072 enum vm_type *vm_type;
1073 u64 ret_vaddr, hint_addr;
1074 u32 handle = 0, va_block_align;
1076 bool is_userptr = args->flags & HL_MEM_USERPTR;
1077 enum hl_va_range_type va_range_type = 0;
1079 /* Assume failure */
1083 u64 addr = args->map_host.host_virt_addr,
1084 size = args->map_host.mem_size;
1085 u32 page_size = hdev->asic_prop.pmmu.page_size,
1086 huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
1088 rc = dma_map_host_va(hdev, addr, size, &userptr);
1090 dev_err(hdev->dev, "failed to get userptr from va\n");
1094 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1095 &phys_pg_pack, false);
1098 "unable to init page pack for vaddr 0x%llx\n",
1100 goto init_page_pack_err;
1103 vm_type = (enum vm_type *) userptr;
1104 hint_addr = args->map_host.hint_addr;
1105 handle = phys_pg_pack->handle;
1107 /* get required alignment */
1108 if (phys_pg_pack->page_size == page_size) {
1109 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1110 va_range_type = HL_VA_RANGE_TYPE_HOST;
1112 * huge page alignment may be needed in case of regular
1113 * page mapping, depending on the host VA alignment
1115 if (addr & (huge_page_size - 1))
1116 va_block_align = page_size;
1118 va_block_align = huge_page_size;
1121 * huge page alignment is needed in case of huge page
1124 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1125 va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
1126 va_block_align = huge_page_size;
1129 handle = lower_32_bits(args->map_device.handle);
1131 spin_lock(&vm->idr_lock);
1132 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1133 if (!phys_pg_pack) {
1134 spin_unlock(&vm->idr_lock);
1136 "no match for handle %u\n", handle);
1140 /* increment now to avoid freeing device memory while mapping */
1141 atomic_inc(&phys_pg_pack->mapping_cnt);
1143 spin_unlock(&vm->idr_lock);
1145 vm_type = (enum vm_type *) phys_pg_pack;
1147 hint_addr = args->map_device.hint_addr;
1149 /* DRAM VA alignment is the same as the MMU page size */
1150 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1151 va_range_type = HL_VA_RANGE_TYPE_DRAM;
1152 va_block_align = hdev->asic_prop.dmmu.page_size;
1156 * relevant for mapping device physical memory only, as host memory is
1159 if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
1160 phys_pg_pack->asid != ctx->asid) {
1162 "Failed to map memory, handle %u is not shared\n",
1168 hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
1174 if (hint_addr && phys_pg_pack->offset) {
1175 if (args->flags & HL_MEM_FORCE_HINT) {
1176 /* Fail if hint must be respected but it can't be */
1178 "Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
1179 hint_addr, phys_pg_pack->offset);
1184 "Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
1185 hint_addr, phys_pg_pack->offset);
1188 ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
1189 hint_addr, va_block_align,
1190 va_range_type, args->flags);
1192 dev_err(hdev->dev, "no available va block for handle %u\n",
1198 mutex_lock(&ctx->mmu_lock);
1200 rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
1202 mutex_unlock(&ctx->mmu_lock);
1203 dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
1208 rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
1209 ctx->asid, ret_vaddr, phys_pg_pack->total_size);
1211 mutex_unlock(&ctx->mmu_lock);
1216 ret_vaddr += phys_pg_pack->offset;
1218 hnode->ptr = vm_type;
1219 hnode->vaddr = ret_vaddr;
1221 mutex_lock(&ctx->mem_hash_lock);
1222 hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1223 mutex_unlock(&ctx->mem_hash_lock);
1225 *device_addr = ret_vaddr;
1228 rc = free_phys_pg_pack(hdev, phys_pg_pack);
1233 if (add_va_block(hdev, va_range, ret_vaddr,
1234 ret_vaddr + phys_pg_pack->total_size - 1))
1236 "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1243 atomic_dec(&phys_pg_pack->mapping_cnt);
1245 free_phys_pg_pack(hdev, phys_pg_pack);
1248 dma_unmap_host_va(hdev, userptr);
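/*
 * A compile-disabled sketch of the map/unmap flow above from user space: map a
 * previously allocated device memory handle to a device VA, then unmap it.
 * The ioctl number and field names are assumed from uapi/misc/habanalabs.h;
 * the error handling is illustrative only.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <misc/habanalabs.h>

static int map_then_unmap(int fd, __u64 handle)
{
	union hl_mem_args args;
	__u64 device_va;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_MAP;
	args.in.map_device.handle = handle;
	args.in.map_device.hint_addr = 0;	/* no hint - driver picks a VA */
	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;
	device_va = args.out.device_virt_addr;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_UNMAP;
	args.in.unmap.device_virt_addr = device_va;
	return ioctl(fd, HL_IOCTL_MEMORY, &args);
}
#endif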
/**
 * unmap_device_va() - unmap the given device virtual address.
 * @ctx: pointer to the context structure.
 * @args: host parameters with device virtual address to unmap.
 * @ctx_free: true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address.
 * - Return the device virtual block to the virtual block list.
 */
1263 static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
1266 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1267 u64 vaddr = args->unmap.device_virt_addr;
1268 struct hl_vm_hash_node *hnode = NULL;
1269 struct asic_fixed_properties *prop;
1270 struct hl_device *hdev = ctx->hdev;
1271 struct hl_userptr *userptr = NULL;
1272 struct hl_va_range *va_range;
1273 enum vm_type *vm_type;
1277 prop = &hdev->asic_prop;
1279 /* protect from double entrance */
1280 mutex_lock(&ctx->mem_hash_lock);
1281 hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
1282 if (vaddr == hnode->vaddr)
1286 mutex_unlock(&ctx->mem_hash_lock);
1288 "unmap failed, no mem hnode for vaddr 0x%llx\n",
1293 hash_del(&hnode->node);
1294 mutex_unlock(&ctx->mem_hash_lock);
1296 vm_type = hnode->ptr;
1298 if (*vm_type == VM_TYPE_USERPTR) {
1300 userptr = hnode->ptr;
1302 rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
1306 "unable to init page pack for vaddr 0x%llx\n",
1311 if (phys_pg_pack->page_size ==
1312 hdev->asic_prop.pmmu.page_size)
1313 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1315 va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1316 } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1318 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1319 phys_pg_pack = hnode->ptr;
1322 "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1328 if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1329 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1331 goto mapping_cnt_err;
1334 if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
1335 vaddr = prop->dram_base_address +
1336 DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
1337 phys_pg_pack->page_size) *
1338 phys_pg_pack->page_size;
1340 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1342 mutex_lock(&ctx->mmu_lock);
1344 unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1347 * During context free this function is called in a loop to clean all
1348 * the context mappings. Hence the cache invalidation can be called once
1349 * at the loop end rather than for each iteration
1352 rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
1353 phys_pg_pack->total_size);
1355 mutex_unlock(&ctx->mmu_lock);
1358 * If the context is closing we don't need to check for the MMU cache
1359 * invalidation return code and update the VA free list as in this flow
1360 * we invalidate the MMU cache outside of this unmap function and the VA
1361 * free list will be freed anyway.
1366 tmp_rc = add_va_block(hdev, va_range, vaddr,
1367 vaddr + phys_pg_pack->total_size - 1);
1370 "add va block failed for vaddr: 0x%llx\n",
1377 atomic_dec(&phys_pg_pack->mapping_cnt);
1381 free_phys_pg_pack(hdev, phys_pg_pack);
1382 dma_unmap_host_va(hdev, userptr);
1389 free_phys_pg_pack(hdev, phys_pg_pack);
1391 mutex_lock(&ctx->mem_hash_lock);
1392 hash_add(ctx->mem_hash, &hnode->node, vaddr);
1393 mutex_unlock(&ctx->mem_hash_lock);
1398 static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
1404 rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
1406 *handle = block_id | HL_MMAP_TYPE_BLOCK;
1407 *handle <<= PAGE_SHIFT;
1412 static void hw_block_vm_close(struct vm_area_struct *vma)
1414 struct hl_vm_hw_block_list_node *lnode =
1415 (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
1416 struct hl_ctx *ctx = lnode->ctx;
1418 mutex_lock(&ctx->hw_block_list_lock);
1419 list_del(&lnode->node);
1420 mutex_unlock(&ctx->hw_block_list_lock);
1423 vma->vm_private_data = NULL;
1426 static const struct vm_operations_struct hw_block_vm_ops = {
1427 .close = hw_block_vm_close
/**
 * hl_hw_block_mmap() - mmap a hw block to user.
 * @hpriv: pointer to the private data of the fd
 * @vma: pointer to vm_area_struct of the process
 *
 * The driver increments the context reference for every HW block mapped in
 * order to prevent the user from closing the FD without unmapping first.
 */
1438 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
1440 struct hl_vm_hw_block_list_node *lnode;
1441 struct hl_device *hdev = hpriv->hdev;
1442 struct hl_ctx *ctx = hpriv->ctx;
1443 u32 block_id, block_size;
1446 /* We use the page offset to hold the block id and thus we need to clear
1447 * it before doing the mmap itself
1449 block_id = vma->vm_pgoff;
1452 /* Driver only allows mapping of a complete HW block */
1453 block_size = vma->vm_end - vma->vm_start;
1455 if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
1457 "user pointer is invalid - 0x%lx\n",
1463 lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
1467 vma->vm_ops = &hw_block_vm_ops;
1468 vma->vm_private_data = lnode;
1470 hl_ctx_get(hdev, ctx);
1472 rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
1480 lnode->vaddr = vma->vm_start;
1481 lnode->size = block_size;
1482 lnode->id = block_id;
1484 mutex_lock(&ctx->hw_block_list_lock);
1485 list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
1486 mutex_unlock(&ctx->hw_block_list_lock);
1488 vma->vm_pgoff = block_id;
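/*
 * A compile-disabled sketch of the HW block flow: HL_MEM_OP_MAP_BLOCK returns
 * a block handle and size, and the handle is then passed as the mmap() offset
 * so that hl_hw_block_mmap() above can recover the block id from vm_pgoff.
 * The ioctl number and field names are assumed from uapi/misc/habanalabs.h;
 * protection flags and error handling are illustrative only.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/types.h>
#include <misc/habanalabs.h>

static void *map_hw_block(int fd, __u64 block_addr)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_MAP_BLOCK;
	args.in.map_block.block_addr = block_addr;
	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return MAP_FAILED;

	/* the whole block must be mapped; the handle encodes the block id */
	return mmap(NULL, args.out.block_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, args.out.block_handle);
}
#endif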
1493 static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
1494 struct device *dev, enum dma_data_direction dir)
1499 addr = dma_map_resource(dev, bar_address, chunk_size, dir,
1500 DMA_ATTR_SKIP_CPU_SYNC);
1501 rc = dma_mapping_error(dev, addr);
1505 sg_set_page(sg, NULL, chunk_size, 0);
1506 sg_dma_address(sg) = addr;
1507 sg_dma_len(sg) = chunk_size;
1512 static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
1513 u64 page_size, struct device *dev,
1514 enum dma_data_direction dir)
1516 u64 chunk_size, bar_address, dma_max_seg_size;
1517 struct asic_fixed_properties *prop;
1518 int rc, i, j, nents, cur_page;
1519 struct scatterlist *sg;
1520 struct sg_table *sgt;
1522 prop = &hdev->asic_prop;
1524 dma_max_seg_size = dma_get_max_seg_size(dev);
1526 /* We would like to align the max segment size to PAGE_SIZE, so the
1527 * SGL will contain aligned addresses that can be easily mapped to
1530 dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
1531 if (dma_max_seg_size < PAGE_SIZE) {
1532 dev_err_ratelimited(hdev->dev,
1533 "dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
1535 return ERR_PTR(-EINVAL);
1538 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
1540 return ERR_PTR(-ENOMEM);
1542 /* If the size of each page is larger than the dma max segment size,
1543 * then we can't combine pages and the number of entries in the SGL
1545 * <number of pages> * <chunks of max segment size in each page>
1547 if (page_size > dma_max_seg_size)
1548 nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
1550 /* Get number of non-contiguous chunks */
1551 for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
1552 if (pages[i - 1] + page_size != pages[i] ||
1553 chunk_size + page_size > dma_max_seg_size) {
1555 chunk_size = page_size;
1559 chunk_size += page_size;
1562 rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
1568 if (page_size > dma_max_seg_size) {
1569 u64 size_left, cur_device_address = 0;
1571 size_left = page_size;
1573 /* Need to split each page into the number of chunks of
1576 for_each_sgtable_dma_sg(sgt, sg, i) {
1577 if (size_left == page_size)
1578 cur_device_address =
1579 pages[cur_page] - prop->dram_base_address;
1581 cur_device_address += dma_max_seg_size;
1583 chunk_size = min(size_left, dma_max_seg_size);
1585 bar_address = hdev->dram_pci_bar_start + cur_device_address;
1587 rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1591 if (size_left > dma_max_seg_size) {
1592 size_left -= dma_max_seg_size;
1595 size_left = page_size;
1599 /* Merge pages and put them into the scatterlist */
1600 for_each_sgtable_dma_sg(sgt, sg, i) {
1601 chunk_size = page_size;
1602 for (j = cur_page + 1 ; j < npages ; j++) {
1603 if (pages[j - 1] + page_size != pages[j] ||
1604 chunk_size + page_size > dma_max_seg_size)
1607 chunk_size += page_size;
1610 bar_address = hdev->dram_pci_bar_start +
1611 (pages[cur_page] - prop->dram_base_address);
1613 rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1621 /* Because we are not going to include a CPU list we want to have some
1622 * chance that other users will detect this by setting the orig_nents
1623 * to 0 and using only nents (length of DMA list) when going over the
1626 sgt->orig_nents = 0;
1631 for_each_sgtable_dma_sg(sgt, sg, i) {
1632 if (!sg_dma_len(sg))
1635 dma_unmap_resource(dev, sg_dma_address(sg),
1636 sg_dma_len(sg), dir,
1637 DMA_ATTR_SKIP_CPU_SYNC);
1647 static int hl_dmabuf_attach(struct dma_buf *dmabuf,
1648 struct dma_buf_attachment *attachment)
1650 struct hl_dmabuf_priv *hl_dmabuf;
1651 struct hl_device *hdev;
1654 hl_dmabuf = dmabuf->priv;
1655 hdev = hl_dmabuf->ctx->hdev;
1657 rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true);
1660 attachment->peer2peer = false;
1664 static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
1665 enum dma_data_direction dir)
1667 struct dma_buf *dma_buf = attachment->dmabuf;
1668 struct hl_vm_phys_pg_pack *phys_pg_pack;
1669 struct hl_dmabuf_priv *hl_dmabuf;
1670 struct hl_device *hdev;
1671 struct sg_table *sgt;
1673 hl_dmabuf = dma_buf->priv;
1674 hdev = hl_dmabuf->ctx->hdev;
1675 phys_pg_pack = hl_dmabuf->phys_pg_pack;
1677 if (!attachment->peer2peer) {
1678 dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
1679 return ERR_PTR(-EPERM);
1683 sgt = alloc_sgt_from_device_pages(hdev,
1684 phys_pg_pack->pages,
1685 phys_pg_pack->npages,
1686 phys_pg_pack->page_size,
1690 sgt = alloc_sgt_from_device_pages(hdev,
1691 &hl_dmabuf->device_address,
1693 hl_dmabuf->dmabuf->size,
1698 dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
1703 static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
1704 struct sg_table *sgt,
1705 enum dma_data_direction dir)
1707 struct scatterlist *sg;
1710 /* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
1711 * only in the 'device' domain (after all, it maps a PCI bar address which points to the
1714 * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
1715 * a sync of the memory to the CPU's cache, as it never resided inside that cache.
1717 for_each_sgtable_dma_sg(sgt, sg, i)
1718 dma_unmap_resource(attachment->dev, sg_dma_address(sg),
1719 sg_dma_len(sg), dir,
1720 DMA_ATTR_SKIP_CPU_SYNC);
	/* Need to restore orig_nents because sg_free_table() uses that field */
1723 sgt->orig_nents = sgt->nents;
1728 static void hl_release_dmabuf(struct dma_buf *dmabuf)
1730 struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
1731 struct hl_ctx *ctx = hl_dmabuf->ctx;
1732 struct hl_device *hdev = ctx->hdev;
1733 struct hl_vm *vm = &hdev->vm;
1735 if (hl_dmabuf->phys_pg_pack) {
1736 spin_lock(&vm->idr_lock);
1737 hl_dmabuf->phys_pg_pack->exporting_cnt--;
1738 spin_unlock(&vm->idr_lock);
1741 hl_ctx_put(hl_dmabuf->ctx);
1746 static const struct dma_buf_ops habanalabs_dmabuf_ops = {
1747 .attach = hl_dmabuf_attach,
1748 .map_dma_buf = hl_map_dmabuf,
1749 .unmap_dma_buf = hl_unmap_dmabuf,
1750 .release = hl_release_dmabuf,
1753 static int export_dmabuf_common(struct hl_ctx *ctx,
1754 struct hl_dmabuf_priv *hl_dmabuf,
1755 u64 total_size, int flags, int *dmabuf_fd)
1757 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1758 struct hl_device *hdev = ctx->hdev;
1761 exp_info.ops = &habanalabs_dmabuf_ops;
1762 exp_info.size = total_size;
1763 exp_info.flags = flags;
1764 exp_info.priv = hl_dmabuf;
1766 hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
1767 if (IS_ERR(hl_dmabuf->dmabuf)) {
1768 dev_err(hdev->dev, "failed to export dma-buf\n");
1769 return PTR_ERR(hl_dmabuf->dmabuf);
1772 fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
1774 dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
1776 goto err_dma_buf_put;
1779 hl_dmabuf->ctx = ctx;
1780 hl_ctx_get(hdev, hl_dmabuf->ctx);
1787 dma_buf_put(hl_dmabuf->dmabuf);
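/*
 * A compile-disabled sketch of how user space requests a dma-buf FD for device
 * memory via HL_MEM_OP_EXPORT_DMABUF_FD. Whether 'handle_or_addr' is an
 * allocation handle or a physical address depends on
 * dram_supports_virtual_memory (see the ioctl handlers below). The ioctl
 * number, field names and flag value are assumptions; illustrative only.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <misc/habanalabs.h>

static int export_dmabuf(int fd, __u64 handle_or_addr, __u64 size)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_EXPORT_DMABUF_FD;
	args.in.export_dmabuf_fd.handle = handle_or_addr;
	args.in.export_dmabuf_fd.mem_size = size;	/* used in the address case */
	args.in.flags = O_CLOEXEC;			/* dma-buf file/FD flags */
	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;
	return args.out.fd;	/* dma-buf file descriptor */
}
#endif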
/**
 * export_dmabuf_from_addr() - export a dma-buf object for the given memory
 *                             address.
 * @ctx: pointer to the context structure.
 * @device_addr: device memory physical address.
 * @size: size of device memory.
 * @flags: DMA-BUF file/FD flags.
 * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
 *
 * Create and export a dma-buf object for an existing memory allocation inside
 * the device memory, and return a FD which is associated with the dma-buf
 * object.
 *
 * Return: 0 on success, non-zero for failure.
 */
1806 static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
1807 u64 size, int flags, int *dmabuf_fd)
1809 struct hl_dmabuf_priv *hl_dmabuf;
1810 struct hl_device *hdev = ctx->hdev;
1811 struct asic_fixed_properties *prop;
1815 prop = &hdev->asic_prop;
1817 if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
1819 "exported device memory address 0x%llx should be aligned to 0x%lx\n",
1820 device_addr, PAGE_SIZE);
1824 if (size < PAGE_SIZE) {
1826 "exported device memory size %llu should be equal to or greater than %lu\n",
1831 if (device_addr < prop->dram_user_base_address ||
1832 device_addr + size > prop->dram_end_address ||
1833 device_addr + size < device_addr) {
1835 "DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
1840 bar_address = hdev->dram_pci_bar_start +
1841 (device_addr - prop->dram_base_address);
1843 if (bar_address + size >
1844 hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
1845 bar_address + size < bar_address) {
1847 "DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
1852 hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
1856 hl_dmabuf->device_address = device_addr;
1858 rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
1860 goto err_free_dmabuf_wrapper;
1864 err_free_dmabuf_wrapper:
/**
 * export_dmabuf_from_handle() - export a dma-buf object for the given memory
 *                               handle.
 * @ctx: pointer to the context structure.
 * @handle: device memory allocation handle.
 * @flags: DMA-BUF file/FD flags.
 * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
 *
 * Create and export a dma-buf object for an existing memory allocation inside
 * the device memory, and return a FD which is associated with the dma-buf
 * object.
 *
 * Return: 0 on success, non-zero for failure.
 */
1883 static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
1886 struct hl_vm_phys_pg_pack *phys_pg_pack;
1887 struct hl_dmabuf_priv *hl_dmabuf;
1888 struct hl_device *hdev = ctx->hdev;
1889 struct asic_fixed_properties *prop;
1890 struct hl_vm *vm = &hdev->vm;
1894 prop = &hdev->asic_prop;
1896 if (upper_32_bits(handle)) {
1897 dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
1901 spin_lock(&vm->idr_lock);
1903 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
1904 if (!phys_pg_pack) {
1905 spin_unlock(&vm->idr_lock);
1906 dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
1910 /* increment now to avoid freeing device memory while exporting */
1911 phys_pg_pack->exporting_cnt++;
1913 spin_unlock(&vm->idr_lock);
1915 if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
1916 dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
1918 goto err_dec_exporting_cnt;
1921 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
1923 bar_address = hdev->dram_pci_bar_start +
1924 (phys_pg_pack->pages[i] -
1925 prop->dram_base_address);
1927 if (bar_address + phys_pg_pack->page_size >
1928 hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
1929 bar_address + phys_pg_pack->page_size < bar_address) {
1932 "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
1933 phys_pg_pack->pages[i],
1934 phys_pg_pack->page_size);
1937 goto err_dec_exporting_cnt;
1941 hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
1944 goto err_dec_exporting_cnt;
1947 hl_dmabuf->phys_pg_pack = phys_pg_pack;
1949 rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
1952 goto err_free_dmabuf_wrapper;
1956 err_free_dmabuf_wrapper:
1959 err_dec_exporting_cnt:
1960 spin_lock(&vm->idr_lock);
1961 phys_pg_pack->exporting_cnt--;
1962 spin_unlock(&vm->idr_lock);
1967 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
1969 struct hl_device *hdev = hpriv->hdev;
1970 struct hl_ctx *ctx = hpriv->ctx;
1971 u64 block_handle, device_addr = 0;
1972 u32 handle = 0, block_size;
1973 int rc, dmabuf_fd = -EBADF;
1975 switch (args->in.op) {
1976 case HL_MEM_OP_ALLOC:
1977 if (args->in.alloc.mem_size == 0) {
1979 "alloc size must be larger than 0\n");
1984 /* Force contiguous as there are no real MMU
1985 * translations to overcome physical memory gaps
1987 args->in.flags |= HL_MEM_CONTIGUOUS;
1988 rc = alloc_device_memory(ctx, &args->in, &handle);
1990 memset(args, 0, sizeof(*args));
1991 args->out.handle = (__u64) handle;
1994 case HL_MEM_OP_FREE:
1995 rc = free_device_memory(ctx, &args->in);
1999 if (args->in.flags & HL_MEM_USERPTR) {
2000 device_addr = args->in.map_host.host_virt_addr;
2003 rc = get_paddr_from_handle(ctx, &args->in,
2007 memset(args, 0, sizeof(*args));
2008 args->out.device_virt_addr = device_addr;
2011 case HL_MEM_OP_UNMAP:
2015 case HL_MEM_OP_MAP_BLOCK:
2016 rc = map_block(hdev, args->in.map_block.block_addr,
2017 &block_handle, &block_size);
2018 args->out.block_handle = block_handle;
2019 args->out.block_size = block_size;
2022 case HL_MEM_OP_EXPORT_DMABUF_FD:
2023 rc = export_dmabuf_from_addr(ctx,
2024 args->in.export_dmabuf_fd.handle,
2025 args->in.export_dmabuf_fd.mem_size,
2028 memset(args, 0, sizeof(*args));
2029 args->out.fd = dmabuf_fd;
2033 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2042 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
2044 enum hl_device_status status;
2045 union hl_mem_args *args = data;
2046 struct hl_device *hdev = hpriv->hdev;
2047 struct hl_ctx *ctx = hpriv->ctx;
2048 u64 block_handle, device_addr = 0;
2049 u32 handle = 0, block_size;
2050 int rc, dmabuf_fd = -EBADF;
2052 if (!hl_device_operational(hdev, &status)) {
2053 dev_warn_ratelimited(hdev->dev,
2054 "Device is %s. Can't execute MEMORY IOCTL\n",
2055 hdev->status[status]);
2059 if (!hdev->mmu_enable)
2060 return mem_ioctl_no_mmu(hpriv, args);
2062 switch (args->in.op) {
2063 case HL_MEM_OP_ALLOC:
2064 if (args->in.alloc.mem_size == 0) {
2066 "alloc size must be larger than 0\n");
2071 /* If DRAM does not support virtual memory the driver won't
2072 * handle the allocation/freeing of that memory. However, for
2073 * system administration/monitoring purposes, the driver will
2074 * keep track of the amount of DRAM memory that is allocated
2075 * and freed by the user. Because this code totally relies on
2076 * the user's input, the driver can't ensure the validity
2077 * of this accounting.
2079 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2080 atomic64_add(args->in.alloc.mem_size,
2081 &ctx->dram_phys_mem);
2082 atomic64_add(args->in.alloc.mem_size,
2083 &hdev->dram_used_mem);
2085 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2088 memset(args, 0, sizeof(*args));
2089 args->out.handle = 0;
2093 rc = alloc_device_memory(ctx, &args->in, &handle);
2095 memset(args, 0, sizeof(*args));
2096 args->out.handle = (__u64) handle;
2099 case HL_MEM_OP_FREE:
2100 /* If DRAM does not support virtual memory the driver won't
2101 * handle the allocation/freeing of that memory. However, for
2102 * system administration/monitoring purposes, the driver will
2103 * keep track of the amount of DRAM memory that is allocated
2104 * and freed by the user. Because this code totally relies on
2105 * the user's input, the driver can't ensure the validity
2106 * of this accounting.
2108 if (!hdev->asic_prop.dram_supports_virtual_memory) {
2109 atomic64_sub(args->in.alloc.mem_size,
2110 &ctx->dram_phys_mem);
2111 atomic64_sub(args->in.alloc.mem_size,
2112 &hdev->dram_used_mem);
2114 dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2120 rc = free_device_memory(ctx, &args->in);
2124 rc = map_device_va(ctx, &args->in, &device_addr);
2126 memset(args, 0, sizeof(*args));
2127 args->out.device_virt_addr = device_addr;
2130 case HL_MEM_OP_UNMAP:
2131 rc = unmap_device_va(ctx, &args->in, false);
2134 case HL_MEM_OP_MAP_BLOCK:
2135 rc = map_block(hdev, args->in.map_block.block_addr,
2136 &block_handle, &block_size);
2137 args->out.block_handle = block_handle;
2138 args->out.block_size = block_size;
2141 case HL_MEM_OP_EXPORT_DMABUF_FD:
2142 if (hdev->asic_prop.dram_supports_virtual_memory)
2143 rc = export_dmabuf_from_handle(ctx,
2144 args->in.export_dmabuf_fd.handle,
2148 rc = export_dmabuf_from_addr(ctx,
2149 args->in.export_dmabuf_fd.handle,
2150 args->in.export_dmabuf_fd.mem_size,
2153 memset(args, 0, sizeof(*args));
2154 args->out.fd = dmabuf_fd;
2158 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2167 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
2168 u32 npages, u64 start, u32 offset,
2169 struct hl_userptr *userptr)
2173 if (!access_ok((void __user *) (uintptr_t) addr, size)) {
2174 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
2178 userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
2180 if (!userptr->pages)
2183 rc = pin_user_pages_fast(start, npages,
2184 FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
2189 "Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
2190 rc, addr, size, npages);
2197 userptr->npages = npages;
2199 rc = sg_alloc_table_from_pages(userptr->sgt,
2201 npages, offset, size, GFP_KERNEL);
2203 dev_err(hdev->dev, "failed to create SG table from pages\n");
2210 unpin_user_pages(userptr->pages, npages);
2212 kvfree(userptr->pages);
/**
 * hl_pin_host_memory() - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure.
 * @addr: the host virtual address of the memory area.
 * @size: the size of the memory area.
 * @userptr: pointer to hl_userptr structure.
 *
 * This function does the following:
 * - Pins the physical pages.
 * - Creates an SG list from those pages.
 */
2227 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2228 struct hl_userptr *userptr)
2235 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
2240 * If the combination of the address and size requested for this memory
2241 * region causes an integer overflow, return error.
2243 if (((addr + size) < addr) ||
2244 PAGE_ALIGN(addr + size) < (addr + size)) {
2246 "user pointer 0x%llx + %llu causes integer overflow\n",
2251 userptr->pid = current->pid;
2252 userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
2256 start = addr & PAGE_MASK;
2257 offset = addr & ~PAGE_MASK;
2258 end = PAGE_ALIGN(addr + size);
2259 npages = (end - start) >> PAGE_SHIFT;
2261 userptr->size = size;
2262 userptr->addr = addr;
2263 userptr->dma_mapped = false;
2264 INIT_LIST_HEAD(&userptr->job_node);
2266 rc = get_user_memory(hdev, addr, size, npages, start, offset,
2270 "failed to get user memory for address 0x%llx\n",
2275 hl_debugfs_add_userptr(hdev, userptr);
2280 kfree(userptr->sgt);
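/*
 * A compile-disabled, user-space sketch of the page-count arithmetic used in
 * hl_pin_host_memory() above: with 4KB pages, pinning 0x2000 bytes at the
 * unaligned address 0x1234 spans three pages (0x1000-0x3fff), with the data
 * starting at offset 0x234 inside the first page. Values illustrative only.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_size = 0x1000, addr = 0x1234, size = 0x2000;
	uint64_t start  = addr & ~(page_size - 1);			/* 0x1000 */
	uint64_t offset = addr & (page_size - 1);			/* 0x234  */
	uint64_t end    = (addr + size + page_size - 1) & ~(page_size - 1);
	uint64_t npages = (end - start) / page_size;			/* 3      */

	printf("start=0x%llx offset=0x%llx npages=%llu\n",
	       (unsigned long long)start, (unsigned long long)offset,
	       (unsigned long long)npages);
	return 0;
}
#endif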
/**
 * hl_unpin_host_memory() - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure.
 * @userptr: pointer to hl_userptr structure.
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory.
 * - Frees the SG list.
 */
2293 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
2295 hl_debugfs_remove_userptr(hdev, userptr);
2297 if (userptr->dma_mapped)
2298 hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
2299 userptr->sgt->nents,
2302 unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
2303 kvfree(userptr->pages);
2305 list_del(&userptr->job_node);
2307 sg_free_table(userptr->sgt);
2308 kfree(userptr->sgt);
2312 * hl_userptr_delete_list() - clear userptr list.
2313 * @hdev: pointer to the habanalabs device structure.
2314 * @userptr_list: pointer to the list to clear.
2316 * This function does the following:
2317 * - Iterates over the list and unpins the host memory and frees the userptr
2320 void hl_userptr_delete_list(struct hl_device *hdev,
2321 struct list_head *userptr_list)
2323 struct hl_userptr *userptr, *tmp;
2325 list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
2326 hl_unpin_host_memory(hdev, userptr);
2330 INIT_LIST_HEAD(userptr_list);
2334 * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
2335 * @hdev: pointer to the habanalabs device structure.
2336 * @addr: user address to check.
2337 * @size: user block size to check.
2338 * @userptr_list: pointer to the list to search in.
2339 * @userptr: pointer to userptr to check.
2341 * This function does the following:
2342 * - Iterates over the list and checks whether the given userptr is in it, which
2343 *   means it is pinned. If so, returns true; otherwise returns false.
2345 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
2346 u32 size, struct list_head *userptr_list,
2347 struct hl_userptr **userptr)
2349 list_for_each_entry((*userptr), userptr_list, job_node) {
2350 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
2358 * va_range_init() - initialize virtual addresses range.
2359 * @hdev: pointer to the habanalabs device structure.
2360 * @va_range: pointer to the virtual addresses range to initialize.
2361 * @start: range start address.
2362 * @end: range end address.
2363 * @page_size: page size for this va_range.
2365 * This function does the following:
2366 * - Initializes the virtual addresses list of the given range with the given parameters.
2369 static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
2370 u64 start, u64 end, u32 page_size)
2374 INIT_LIST_HEAD(&va_range->list);
2377 * PAGE_SIZE alignment is handled here only for power-of-2 page sizes;
2378 * it is the caller's responsibility to align the addresses if the
2379 * page size is not a power of 2
2382 if (is_power_of_2(page_size)) {
2383 if (start & (PAGE_SIZE - 1)) {
2389 * The end of the range is inclusive, hence we need to align it
2390 * to the end of the last full page in the range. For example if
2391 * end = 0x3ff5 with page size 0x1000, we need to align it to
2392 * 0x2fff. The remaining 0xff6 bytes do not form a full page.
2394 if ((end + 1) & (PAGE_SIZE - 1))
2395 end = ((end + 1) & PAGE_MASK) - 1;
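	/* After alignment the range must still contain at least one full page */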
2399 dev_err(hdev->dev, "too small vm range for va list\n");
2403 rc = add_va_block(hdev, va_range, start, end);
2406 dev_err(hdev->dev, "Failed to init host va list\n");
2410 va_range->start_addr = start;
2411 va_range->end_addr = end;
2412 va_range->page_size = page_size;
2418 * va_range_fini() - clear a virtual addresses range.
2419 * @hdev: pointer to the habanalabs structure.
2420 * @va_range: pointer to virtual addresses range.
2422 * This function does the following:
2423 * - Frees the virtual addresses block list and its lock.
2425 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
2427 mutex_lock(&va_range->lock);
2428 clear_va_list_locked(hdev, &va_range->list);
2429 mutex_unlock(&va_range->lock);
2431 mutex_destroy(&va_range->lock);
2436 * vm_ctx_init_with_ranges() - initialize virtual memory for context.
2437 * @ctx: pointer to the habanalabs context structure.
2438 * @host_range_start: host virtual addresses range start.
2439 * @host_range_end: host virtual addresses range end.
2440 * @host_page_size: host page size.
2441 * @host_huge_range_start: host virtual addresses range start for memory
2442 * allocated with huge pages.
2443 * @host_huge_range_end: host virtual addresses range end for memory allocated with huge pages.
2445 * @host_huge_page_size: host huge page size.
2446 * @dram_range_start: dram virtual addresses range start.
2447 * @dram_range_end: dram virtual addresses range end.
2448 * @dram_page_size: dram page size.
2450 * This function initializes the following:
2451 * - MMU for context.
2452 * - Virtual address to area descriptor hashtable.
2453 * - Virtual block list of available virtual memory.
2455 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
2456 u64 host_range_start,
2459 u64 host_huge_range_start,
2460 u64 host_huge_range_end,
2461 u32 host_huge_page_size,
2462 u64 dram_range_start,
2466 struct hl_device *hdev = ctx->hdev;
2469 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
2471 kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
2472 if (!ctx->va_range[i]) {
2478 rc = hl_mmu_ctx_init(ctx);
2480 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
2484 mutex_init(&ctx->mem_hash_lock);
2485 hash_init(ctx->mem_hash);
2487 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2489 rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
2490 host_range_start, host_range_end, host_page_size);
2492 dev_err(hdev->dev, "failed to init host vm range\n");
2496 if (hdev->pmmu_huge_range) {
2497 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2499 rc = va_range_init(hdev,
2500 ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
2501 host_huge_range_start, host_huge_range_end,
2502 host_huge_page_size);
2505 "failed to init host huge vm range\n");
2506 goto clear_host_va_range;
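	/* No dedicated huge-page range: reuse the regular host range for huge mappings */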
2509 kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2510 ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
2511 ctx->va_range[HL_VA_RANGE_TYPE_HOST];
2514 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2516 rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
2517 dram_range_start, dram_range_end, dram_page_size);
2519 dev_err(hdev->dev, "failed to init dram vm range\n");
2520 goto clear_host_huge_va_range;
2523 hl_debugfs_add_ctx_mem_hash(hdev, ctx);
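	/* Error unwind: tear down whatever was initialized, in reverse order */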
2527 clear_host_huge_va_range:
2528 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2530 if (hdev->pmmu_huge_range) {
2531 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2532 clear_va_list_locked(hdev,
2533 &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
2534 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2536 clear_host_va_range:
2537 if (hdev->pmmu_huge_range)
2538 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2539 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2540 clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
2541 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2543 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2544 mutex_destroy(&ctx->mem_hash_lock);
2545 hl_mmu_ctx_fini(ctx);
2547 for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
2548 kfree(ctx->va_range[i]);
2553 int hl_vm_ctx_init(struct hl_ctx *ctx)
2555 struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
2556 u64 host_range_start, host_range_end, host_huge_range_start,
2557 host_huge_range_end, dram_range_start, dram_range_end;
2558 u32 host_page_size, host_huge_page_size, dram_page_size;
2560 atomic64_set(&ctx->dram_phys_mem, 0);
2563 * - If MMU is enabled, init the ranges as usual.
2564 * - If MMU is disabled, in case of host mapping, the returned address is the given one.
2566 * In case of DRAM mapping, the returned address is the physical
2567 * address of the memory related to the given handle.
2569 if (!ctx->hdev->mmu_enable)
2572 dram_range_start = prop->dmmu.start_addr;
2573 dram_range_end = prop->dmmu.end_addr - 1;
2574 dram_page_size = prop->dram_page_size ?
2575 prop->dram_page_size : prop->dmmu.page_size;
2576 host_range_start = prop->pmmu.start_addr;
2577 host_range_end = prop->pmmu.end_addr - 1;
2578 host_page_size = prop->pmmu.page_size;
2579 host_huge_range_start = prop->pmmu_huge.start_addr;
2580 host_huge_range_end = prop->pmmu_huge.end_addr - 1;
2581 host_huge_page_size = prop->pmmu_huge.page_size;
2583 return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
2584 host_page_size, host_huge_range_start,
2585 host_huge_range_end, host_huge_page_size,
2586 dram_range_start, dram_range_end, dram_page_size);
2590 * hl_vm_ctx_fini() - virtual memory teardown of context.
2591 * @ctx: pointer to the habanalabs context structure.
2593 * This function performs teardown of the following:
2594 * - Virtual block list of available virtual memory.
2595 * - Virtual address to area descriptor hashtable.
2596 * - MMU for context.
2598 * In addition this function does the following:
2599 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
2600 * hashtable should be empty as no valid mappings should exist at this point.
2602 * - Frees any existing physical page list from the idr which relates to the
2603 * current context asid.
2604 * - This function checks the virtual block list for correctness. At this point
2605 * the list should contain one element which describes the whole virtual
2606 * memory range of the context. Otherwise, a warning is printed.
2608 void hl_vm_ctx_fini(struct hl_ctx *ctx)
2610 struct hl_vm_phys_pg_pack *phys_pg_list;
2611 struct hl_device *hdev = ctx->hdev;
2612 struct hl_vm_hash_node *hnode;
2613 struct hl_vm *vm = &hdev->vm;
2614 struct hlist_node *tmp_node;
2615 struct list_head free_list;
2616 struct hl_mem_in args;
2619 if (!hdev->mmu_enable)
2622 hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
2625 * Clearly something went wrong on hard reset, so there is no point in
2626 * printing another side-effect error
2628 if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
2630 "user released device without removing its memory mappings\n");
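	/* Unmap any virtual addresses the user left mapped */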
2632 hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
2634 "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
2635 hnode->vaddr, ctx->asid);
2636 args.unmap.device_virt_addr = hnode->vaddr;
2637 unmap_device_va(ctx, &args, true);
2640 mutex_lock(&ctx->mmu_lock);
2642 /* invalidate the cache once after the unmapping loop */
2643 hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
2644 hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
2646 mutex_unlock(&ctx->mmu_lock);
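	/*
	 * Detach any physical page packs that still belong to this ASID from
	 * the IDR under the lock, then free them outside of it.
	 */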
2648 INIT_LIST_HEAD(&free_list);
2650 spin_lock(&vm->idr_lock);
2651 idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
2652 if (phys_pg_list->asid == ctx->asid) {
2654 "page list 0x%px of asid %d is still alive\n",
2655 phys_pg_list, ctx->asid);
2657 atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
2658 idr_remove(&vm->phys_pg_pack_handles, i);
2659 list_add(&phys_pg_list->node, &free_list);
2661 spin_unlock(&vm->idr_lock);
2663 list_for_each_entry(phys_pg_list, &free_list, node)
2664 free_phys_pg_pack(hdev, phys_pg_list);
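	/* Release the per-context virtual address range allocators */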
2666 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
2667 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
2669 if (hdev->pmmu_huge_range)
2670 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2672 mutex_destroy(&ctx->mem_hash_lock);
2673 hl_mmu_ctx_fini(ctx);
2675 /* In this case we need to clear the global accounting of DRAM usage
2676 * because the user notifies us on allocations. If the user is gone,
2677 * all of its DRAM is available again.
2679 if (ctx->asid != HL_KERNEL_ASID_ID &&
2680 !hdev->asic_prop.dram_supports_virtual_memory)
2681 atomic64_set(&hdev->dram_used_mem, 0);
2685 * hl_vm_init() - initialize virtual memory module.
2686 * @hdev: pointer to the habanalabs device structure.
2688 * This function initializes the following:
2690 * - DRAM physical pages pool (DRAM page size granularity, or DRAM_POOL_PAGE_SIZE for non-power-of-2 page sizes).
2691 * - Idr for device memory allocation handles.
2693 int hl_vm_init(struct hl_device *hdev)
2695 struct asic_fixed_properties *prop = &hdev->asic_prop;
2696 struct hl_vm *vm = &hdev->vm;
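	/*
	 * The pool's minimum allocation granularity must be a power of 2, so
	 * use the DRAM page size directly only when it is one and fall back to
	 * DRAM_POOL_PAGE_SIZE otherwise.
	 */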
2699 if (is_power_of_2(prop->dram_page_size))
2701 gen_pool_create(__ffs(prop->dram_page_size), -1);
2704 gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
2706 if (!vm->dram_pg_pool) {
2707 dev_err(hdev->dev, "Failed to create dram page pool\n");
2711 kref_init(&vm->dram_pg_pool_refcount);
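	/* Hand the entire user-accessible DRAM region to the pool */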
2713 rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
2714 prop->dram_end_address - prop->dram_user_base_address,
2719 "Failed to add memory to dram page pool %d\n", rc);
2723 spin_lock_init(&vm->idr_lock);
2724 idr_init(&vm->phys_pg_pack_handles);
2726 atomic64_set(&hdev->dram_used_mem, 0);
2728 vm->init_done = true;
2733 gen_pool_destroy(vm->dram_pg_pool);
2739 * hl_vm_fini() - virtual memory module teardown.
2740 * @hdev: pointer to the habanalabs device structure.
2742 * This function performs teardown of the following:
2743 * - Idr for device memory allocation handles.
2744 * - DRAM physical pages pool.
2747 void hl_vm_fini(struct hl_device *hdev)
2749 struct hl_vm *vm = &hdev->vm;
2755 * At this point all the contexts should be freed, and hence no DRAM
2756 * memory should be in use, so the DRAM pool can be freed here.
2758 if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
2759 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
2762 vm->init_done = false;
2766 * hl_hw_block_mem_init() - HW block memory initialization.
2767 * @ctx: pointer to the habanalabs context structure.
2769 * This function initializes the HW block virtual mapped addresses list and its lock.
2772 void hl_hw_block_mem_init(struct hl_ctx *ctx)
2774 mutex_init(&ctx->hw_block_list_lock);
2775 INIT_LIST_HEAD(&ctx->hw_block_mem_list);
2779 * hl_hw_block_mem_fini() - HW block memory teardown.
2780 * @ctx: pointer to the habanalabs context structure.
2782 * This function clears the HW block virtual mapped addresses list and destroys its lock.
2785 void hl_hw_block_mem_fini(struct hl_ctx *ctx)
2787 struct hl_vm_hw_block_list_node *lnode, *tmp;
2789 if (!list_empty(&ctx->hw_block_mem_list))
2790 dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
2792 list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
2793 list_del(&lnode->node);
2797 mutex_destroy(&ctx->hw_block_list_lock);