1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2019 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10 #include "../include/hw_ip/mmu/mmu_general.h"
11
12 #include <linux/uaccess.h>
13 #include <linux/slab.h>
14
15 #define HL_MMU_DEBUG    0
16
17 /* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
18 #define DRAM_POOL_PAGE_SIZE SZ_8M
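/*
 * Presumably the reason SZ_8M works here: 8MB is a power of two that evenly
 * divides each of the non-pow2 DRAM physical page sizes listed above
 * (32MB = 4 * 8MB, 40MB = 5 * 8MB, 48MB = 6 * 8MB), so whole DRAM pages can
 * still be carved out of the pool in 8MB chunks.
 */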
19
20 /*
21  * The va ranges in the context object contain a list of the available chunks
22  * of device virtual memory.
23  * There is one range for host allocations and one for DRAM allocations.
24  *
25  * On initialization, each range contains one chunk covering all of its
26  * available virtual range, which is half of the total device virtual range.
27  *
28  * On each mapping of physical pages, a suitable virtual range chunk (with a
29  * minimum size) is selected from the list. If the chunk size equals the
30  * requested size, the chunk is returned. Otherwise, the chunk is split into
31  * two chunks - one to return as result and a remainder to stay in the list.
32  *
33  * On each unmapping of a virtual address, the relevant virtual chunk is
34  * returned to the list. The chunk is added to the list and, if its edges
35  * match the edges of the adjacent chunks (meaning a contiguous chunk can be
36  * created), the chunks are merged.
37  *
38  * On finish, the list is checked to contain only one chunk covering the
39  * relevant virtual range (which is half of the device total virtual range).
40  * If not (meaning not all mappings were unmapped), a warning is printed.
41  */
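/*
 * Illustrative example of the split/merge behavior (addresses are made up):
 * starting from a single free chunk [0x1000000, 0x1FFFFFF], a 4MB mapping
 * splits it into a reserved chunk [0x1000000, 0x13FFFFF] and a remainder
 * [0x1400000, 0x1FFFFFF] that stays in the list. When the mapping is removed,
 * [0x1000000, 0x13FFFFF] is re-added; since its end + 1 equals the start of
 * the remainder, the two are merged back into the original chunk.
 */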
42
43 /**
44  * alloc_device_memory() - allocate device memory.
45  * @ctx: pointer to the context structure.
46  * @args: host parameters containing the requested size.
47  * @ret_handle: result handle.
48  *
49  * This function does the following:
50  * - Allocate the requested size rounded up to 'dram_page_size' pages.
51  * - Return unique handle for later map/unmap/free.
52  */
53 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
54                                 u32 *ret_handle)
55 {
56         struct hl_device *hdev = ctx->hdev;
57         struct hl_vm *vm = &hdev->vm;
58         struct hl_vm_phys_pg_pack *phys_pg_pack;
59         u64 paddr = 0, total_size, num_pgs, i;
60         u32 num_curr_pgs, page_size;
61         int handle, rc;
62         bool contiguous;
63
64         num_curr_pgs = 0;
65         page_size = hdev->asic_prop.dram_page_size;
66         num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
67         total_size = num_pgs * page_size;
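        /*
         * Example of the rounding above (illustrative, assuming a 2MB DRAM
         * page size): a 5MB request becomes num_pgs = 3 and total_size = 6MB,
         * which is what is actually reserved from the pool and accounted.
         */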
68
69         if (!total_size) {
70                 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
71                 return -EINVAL;
72         }
73
74         contiguous = args->flags & HL_MEM_CONTIGUOUS;
75
76         if (contiguous) {
77                 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
78                 if (!paddr) {
79                         dev_err(hdev->dev,
80                                 "failed to allocate %llu contiguous pages with total size of %llu\n",
81                                 num_pgs, total_size);
82                         return -ENOMEM;
83                 }
84         }
85
86         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
87         if (!phys_pg_pack) {
88                 rc = -ENOMEM;
89                 goto pages_pack_err;
90         }
91
92         phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
93         phys_pg_pack->asid = ctx->asid;
94         phys_pg_pack->npages = num_pgs;
95         phys_pg_pack->page_size = page_size;
96         phys_pg_pack->total_size = total_size;
97         phys_pg_pack->flags = args->flags;
98         phys_pg_pack->contiguous = contiguous;
99
100         phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
101         if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
102                 rc = -ENOMEM;
103                 goto pages_arr_err;
104         }
105
106         if (phys_pg_pack->contiguous) {
107                 for (i = 0 ; i < num_pgs ; i++)
108                         phys_pg_pack->pages[i] = paddr + i * page_size;
109         } else {
110                 for (i = 0 ; i < num_pgs ; i++) {
111                         phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
112                                                         vm->dram_pg_pool,
113                                                         page_size);
114                         if (!phys_pg_pack->pages[i]) {
115                                 dev_err(hdev->dev,
116                                         "Failed to allocate device memory (out of memory)\n");
117                                 rc = -ENOMEM;
118                                 goto page_err;
119                         }
120
121                         num_curr_pgs++;
122                 }
123         }
124
125         spin_lock(&vm->idr_lock);
126         handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
127                                 GFP_KERNEL);
128         spin_unlock(&vm->idr_lock);
129
130         if (handle < 0) {
131                 dev_err(hdev->dev, "Failed to get handle for page\n");
132                 rc = -EFAULT;
133                 goto idr_err;
134         }
135
136         for (i = 0 ; i < num_pgs ; i++)
137                 kref_get(&vm->dram_pg_pool_refcount);
138
139         phys_pg_pack->handle = handle;
140
141         atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
142         atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
143
144         *ret_handle = handle;
145
146         return 0;
147
148 idr_err:
149 page_err:
150         if (!phys_pg_pack->contiguous)
151                 for (i = 0 ; i < num_curr_pgs ; i++)
152                         gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
153                                         page_size);
154
155         kvfree(phys_pg_pack->pages);
156 pages_arr_err:
157         kfree(phys_pg_pack);
158 pages_pack_err:
159         if (contiguous)
160                 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
161
162         return rc;
163 }
164
165 /**
166  * dma_map_host_va() - DMA mapping of the given host virtual address.
167  * @hdev: habanalabs device structure.
168  * @addr: the host virtual address of the memory area.
169  * @size: the size of the memory area.
170  * @p_userptr: pointer to result userptr structure.
171  *
172  * This function does the following:
173  * - Allocate userptr structure.
174  * - Pin the given host memory using the userptr structure.
175  * - Perform DMA mapping to have the DMA addresses of the pages.
176  */
177 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
178                                 struct hl_userptr **p_userptr)
179 {
180         struct hl_userptr *userptr;
181         int rc;
182
183         userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
184         if (!userptr) {
185                 rc = -ENOMEM;
186                 goto userptr_err;
187         }
188
189         rc = hl_pin_host_memory(hdev, addr, size, userptr);
190         if (rc) {
191                 dev_err(hdev->dev, "Failed to pin host memory\n");
192                 goto pin_err;
193         }
194
195         rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
196                                         userptr->sgt->nents, DMA_BIDIRECTIONAL);
197         if (rc) {
198                 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
199                 goto dma_map_err;
200         }
201
202         userptr->dma_mapped = true;
203         userptr->dir = DMA_BIDIRECTIONAL;
204         userptr->vm_type = VM_TYPE_USERPTR;
205
206         *p_userptr = userptr;
207
208         return 0;
209
210 dma_map_err:
211         hl_unpin_host_memory(hdev, userptr);
212 pin_err:
213         kfree(userptr);
214 userptr_err:
215
216         return rc;
217 }
218
219 /**
220  * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
221  * @hdev: habanalabs device structure.
222  * @userptr: userptr to free.
223  *
224  * This function does the following:
225  * - Unpins the physical pages.
226  * - Frees the userptr structure.
227  */
228 static void dma_unmap_host_va(struct hl_device *hdev,
229                                 struct hl_userptr *userptr)
230 {
231         hl_unpin_host_memory(hdev, userptr);
232         kfree(userptr);
233 }
234
235 /**
236  * dram_pg_pool_do_release() - free DRAM pages pool
237  * @ref: pointer to reference object.
238  *
239  * This function does the following:
240  * - Frees the idr structure of physical pages handles.
241  * - Frees the generic pool of DRAM physical pages.
242  */
243 static void dram_pg_pool_do_release(struct kref *ref)
244 {
245         struct hl_vm *vm = container_of(ref, struct hl_vm,
246                         dram_pg_pool_refcount);
247
248         /*
249          * free the idr here as only here we know for sure that there are no
250          * allocated physical pages and hence there are no handles in use
251          */
252         idr_destroy(&vm->phys_pg_pack_handles);
253         gen_pool_destroy(vm->dram_pg_pool);
254 }
255
256 /**
257  * free_phys_pg_pack() - free physical page pack.
258  * @hdev: habanalabs device structure.
259  * @phys_pg_pack: physical page pack to free.
260  *
261  * This function does the following:
262  * - For DRAM memory only
263  *   - iterate over the pack, scrub and free each physical block structure by
264  *     returning it to the general pool.
265  *     In case of error during scrubbing, initiate hard reset.
266  *     Once hard reset is triggered, scrubbing is bypassed while freeing the
267  *     memory continues.
268  * - Free the hl_vm_phys_pg_pack structure.
269  */
270 static int free_phys_pg_pack(struct hl_device *hdev,
271                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
272 {
273         struct hl_vm *vm = &hdev->vm;
274         u64 i;
275         int rc = 0;
276
277         if (phys_pg_pack->created_from_userptr)
278                 goto end;
279
280         if (phys_pg_pack->contiguous) {
281                 if (hdev->memory_scrub && !hdev->disabled) {
282                         rc = hdev->asic_funcs->scrub_device_mem(hdev,
283                                         phys_pg_pack->pages[0],
284                                         phys_pg_pack->total_size);
285                         if (rc)
286                                 dev_err(hdev->dev,
287                                         "Failed to scrub contiguous device memory\n");
288                 }
289
290                 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
291                         phys_pg_pack->total_size);
292
293                 for (i = 0; i < phys_pg_pack->npages ; i++)
294                         kref_put(&vm->dram_pg_pool_refcount,
295                                 dram_pg_pool_do_release);
296         } else {
297                 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
298                         if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
299                                 rc = hdev->asic_funcs->scrub_device_mem(
300                                                 hdev,
301                                                 phys_pg_pack->pages[i],
302                                                 phys_pg_pack->page_size);
303                                 if (rc)
304                                         dev_err(hdev->dev,
305                                                 "Failed to scrub device memory\n");
306                         }
307                         gen_pool_free(vm->dram_pg_pool,
308                                 phys_pg_pack->pages[i],
309                                 phys_pg_pack->page_size);
310                         kref_put(&vm->dram_pg_pool_refcount,
311                                 dram_pg_pool_do_release);
312                 }
313         }
314
315         if (rc && !hdev->disabled)
316                 hl_device_reset(hdev, HL_RESET_HARD);
317
318 end:
319         kvfree(phys_pg_pack->pages);
320         kfree(phys_pg_pack);
321
322         return rc;
323 }
324
325 /**
326  * free_device_memory() - free device memory.
327  * @ctx: pointer to the context structure.
328  * @args: host parameters containing the handle of the allocation to free.
329  *
330  * This function does the following:
331  * - Free the device memory related to the given handle.
332  */
333 static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
334 {
335         struct hl_device *hdev = ctx->hdev;
336         struct hl_vm *vm = &hdev->vm;
337         struct hl_vm_phys_pg_pack *phys_pg_pack;
338         u32 handle = args->free.handle;
339
340         spin_lock(&vm->idr_lock);
341         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
342         if (phys_pg_pack) {
343                 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
344                         dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
345                                 handle);
346                         spin_unlock(&vm->idr_lock);
347                         return -EINVAL;
348                 }
349
350                 /*
351                  * must remove from idr before the freeing of the physical
352                  * pages as the refcount of the pool is also the trigger of the
353                  * idr destroy
354                  */
355                 idr_remove(&vm->phys_pg_pack_handles, handle);
356                 spin_unlock(&vm->idr_lock);
357
358                 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
359                 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
360
361                 return free_phys_pg_pack(hdev, phys_pg_pack);
362         } else {
363                 spin_unlock(&vm->idr_lock);
364                 dev_err(hdev->dev,
365                         "free device memory failed, no match for handle %u\n",
366                         handle);
367                 return -EINVAL;
368         }
369
370         return 0;
371 }
372
373 /**
374  * clear_va_list_locked() - free virtual addresses list.
375  * @hdev: habanalabs device structure.
376  * @va_list: list of virtual addresses to free.
377  *
378  * This function does the following:
379  * - Iterate over the list and free each virtual address block.
380  *
381  * This function should be called only when va_list lock is taken.
382  */
383 static void clear_va_list_locked(struct hl_device *hdev,
384                 struct list_head *va_list)
385 {
386         struct hl_vm_va_block *va_block, *tmp;
387
388         list_for_each_entry_safe(va_block, tmp, va_list, node) {
389                 list_del(&va_block->node);
390                 kfree(va_block);
391         }
392 }
393
394 /**
395  * print_va_list_locked() - print virtual addresses list.
396  * @hdev: habanalabs device structure.
397  * @va_list: list of virtual addresses to print.
398  *
399  * This function does the following:
400  * - Iterate over the list and print each virtual address block.
401  *
402  * This function should be called only when va_list lock is taken.
403  */
404 static void print_va_list_locked(struct hl_device *hdev,
405                 struct list_head *va_list)
406 {
407 #if HL_MMU_DEBUG
408         struct hl_vm_va_block *va_block;
409
410         dev_dbg(hdev->dev, "print va list:\n");
411
412         list_for_each_entry(va_block, va_list, node)
413                 dev_dbg(hdev->dev,
414                         "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
415                         va_block->start, va_block->end, va_block->size);
416 #endif
417 }
418
419 /**
420  * merge_va_blocks_locked() - merge a virtual block if possible.
421  * @hdev: pointer to the habanalabs device structure.
422  * @va_list: pointer to the virtual addresses block list.
423  * @va_block: virtual block to merge with adjacent blocks.
424  *
425  * This function does the following:
426  * - Merge the given block with its adjacent blocks if their virtual ranges
427  *   create a contiguous virtual range.
428  *
429  * This function should be called only when va_list lock is taken.
430  */
431 static void merge_va_blocks_locked(struct hl_device *hdev,
432                 struct list_head *va_list, struct hl_vm_va_block *va_block)
433 {
434         struct hl_vm_va_block *prev, *next;
435
436         prev = list_prev_entry(va_block, node);
437         if (&prev->node != va_list && prev->end + 1 == va_block->start) {
438                 prev->end = va_block->end;
439                 prev->size = prev->end - prev->start;
440                 list_del(&va_block->node);
441                 kfree(va_block);
442                 va_block = prev;
443         }
444
445         next = list_next_entry(va_block, node);
446         if (&next->node != va_list && va_block->end + 1 == next->start) {
447                 next->start = va_block->start;
448                 next->size = next->end - next->start;
449                 list_del(&va_block->node);
450                 kfree(va_block);
451         }
452 }
453
454 /**
455  * add_va_block_locked() - add a virtual block to the virtual addresses list.
456  * @hdev: pointer to the habanalabs device structure.
457  * @va_list: pointer to the virtual addresses block list.
458  * @start: start virtual address.
459  * @end: end virtual address.
460  *
461  * This function does the following:
462  * - Add the given block to the virtual blocks list and merge with other blocks
463  *   if a contiguous virtual block can be created.
464  *
465  * This function should be called only when va_list lock is taken.
466  */
467 static int add_va_block_locked(struct hl_device *hdev,
468                 struct list_head *va_list, u64 start, u64 end)
469 {
470         struct hl_vm_va_block *va_block, *res = NULL;
471         u64 size = end - start;
472
473         print_va_list_locked(hdev, va_list);
474
475         list_for_each_entry(va_block, va_list, node) {
476                 /* TODO: remove once the code is mature */
477                 if (hl_mem_area_crosses_range(start, size, va_block->start,
478                                 va_block->end)) {
479                         dev_err(hdev->dev,
480                                 "block crossing ranges at start 0x%llx, end 0x%llx\n",
481                                 va_block->start, va_block->end);
482                         return -EINVAL;
483                 }
484
485                 if (va_block->end < start)
486                         res = va_block;
487         }
488
489         va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
490         if (!va_block)
491                 return -ENOMEM;
492
493         va_block->start = start;
494         va_block->end = end;
495         va_block->size = size;
496
497         if (!res)
498                 list_add(&va_block->node, va_list);
499         else
500                 list_add(&va_block->node, &res->node);
501
502         merge_va_blocks_locked(hdev, va_list, va_block);
503
504         print_va_list_locked(hdev, va_list);
505
506         return 0;
507 }
508
509 /**
510  * add_va_block() - wrapper for add_va_block_locked.
511  * @hdev: pointer to the habanalabs device structure.
512  * @va_range: pointer to the virtual addresses range structure.
513  * @start: start virtual address.
514  * @end: end virtual address.
515  *
516  * This function does the following:
517  * - Takes the list lock and calls add_va_block_locked.
518  */
519 static inline int add_va_block(struct hl_device *hdev,
520                 struct hl_va_range *va_range, u64 start, u64 end)
521 {
522         int rc;
523
524         mutex_lock(&va_range->lock);
525         rc = add_va_block_locked(hdev, &va_range->list, start, end);
526         mutex_unlock(&va_range->lock);
527
528         return rc;
529 }
530
531 /**
532  * get_va_block() - get a virtual block for the given size and alignment.
533  *
534  * @hdev: pointer to the habanalabs device structure.
535  * @va_range: pointer to the virtual addresses range.
536  * @size: requested block size.
537  * @hint_addr: hint for requested address by the user.
538  * @va_block_align: required alignment of the virtual block start address.
539  *
540  * This function does the following:
541  * - Iterate on the virtual block list to find a suitable virtual block for the
542  *   given size, hint address and alignment.
543  * - Reserve the requested block and update the list.
544  * - Return the start address of the virtual block.
545  */
546 static u64 get_va_block(struct hl_device *hdev,
547                                 struct hl_va_range *va_range,
548                                 u64 size, u64 hint_addr, u32 va_block_align)
549 {
550         struct hl_vm_va_block *va_block, *new_va_block = NULL;
551         u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
552                 align_mask, reserved_valid_start = 0, reserved_valid_size = 0;
553         bool add_prev = false;
554         bool is_align_pow_2 = is_power_of_2(va_range->page_size);
555
556         if (is_align_pow_2)
557                 align_mask = ~((u64)va_block_align - 1);
558         else
559                 /*
560                  * with non-power-of-2 range we work only with page granularity
561                  * and the start address is page aligned,
562                  * so no need for alignment checking.
563                  */
564                 size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
565                                                         va_range->page_size;
566
567         tmp_hint_addr = hint_addr;
568
569         /* Check if we need to ignore hint address */
570         if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
571                         (!is_align_pow_2 &&
572                                 do_div(tmp_hint_addr, va_range->page_size))) {
573                 dev_info(hdev->dev, "Hint address 0x%llx will be ignored\n",
574                                         hint_addr);
575                 hint_addr = 0;
576         }
577
578         mutex_lock(&va_range->lock);
579
580         print_va_list_locked(hdev, &va_range->list);
581
582         list_for_each_entry(va_block, &va_range->list, node) {
583                 /* Calc the first possible aligned addr */
584                 valid_start = va_block->start;
585
586                 if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
587                         valid_start &= align_mask;
588                         valid_start += va_block_align;
589                         if (valid_start > va_block->end)
590                                 continue;
591                 }
592
593                 valid_size = va_block->end - valid_start;
594                 if (valid_size < size)
595                         continue;
596
597                 /* Pick the minimal length block which has the required size */
598                 if (!new_va_block || (valid_size < reserved_valid_size)) {
599                         new_va_block = va_block;
600                         reserved_valid_start = valid_start;
601                         reserved_valid_size = valid_size;
602                 }
603
604                 if (hint_addr && hint_addr >= valid_start &&
605                                         (hint_addr + size) <= va_block->end) {
606                         new_va_block = va_block;
607                         reserved_valid_start = hint_addr;
608                         reserved_valid_size = valid_size;
609                         break;
610                 }
611         }
612
613         if (!new_va_block) {
614                 dev_err(hdev->dev, "no available va block for size %llu\n",
615                                                                 size);
616                 goto out;
617         }
618
619         /*
620          * Check if there is some leftover range due to reserving the new
621          * va block, then return it to the main virtual addresses list.
622          */
623         if (reserved_valid_start > new_va_block->start) {
624                 prev_start = new_va_block->start;
625                 prev_end = reserved_valid_start - 1;
626
627                 new_va_block->start = reserved_valid_start;
628                 new_va_block->size = reserved_valid_size;
629
630                 add_prev = true;
631         }
632
633         if (new_va_block->size > size) {
634                 new_va_block->start += size;
635                 new_va_block->size = new_va_block->end - new_va_block->start;
636         } else {
637                 list_del(&new_va_block->node);
638                 kfree(new_va_block);
639         }
640
641         if (add_prev)
642                 add_va_block_locked(hdev, &va_range->list, prev_start,
643                                 prev_end);
644
645         print_va_list_locked(hdev, &va_range->list);
646 out:
647         mutex_unlock(&va_range->lock);
648
649         return reserved_valid_start;
650 }
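/*
 * Example of the selection policy above (free-list contents are made up):
 * given free blocks of 3MB, 8MB and 5MB and an aligned 4MB request, the 5MB
 * block is chosen as the smallest one that still fits, and the 1MB leftover
 * is returned to the list. A usable hint address overrides this choice and
 * the block containing the hint is reserved starting at the hint itself.
 */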
651
652 /**
653  * hl_reserve_va_block() - reserve a virtual block of a given size.
654  * @hdev: pointer to the habanalabs device structure.
655  * @ctx: current context
656  * @type: virtual addresses range type.
657  * @size: requested block size.
658  * @alignment: required alignment in bytes of the virtual block start address,
659  *             0 means no alignment.
660  *
661  * This function does the following:
662  * - Iterate on the virtual block list to find a suitable virtual block for the
663  *   given size and alignment.
664  * - Reserve the requested block and update the list.
665  * - Return the start address of the virtual block.
666  */
667 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
668                 enum hl_va_range_type type, u32 size, u32 alignment)
669 {
670         return get_va_block(hdev, ctx->va_range[type], size, 0,
671                         max(alignment, ctx->va_range[type]->page_size));
672 }
673
674 /**
675  * hl_get_va_range_type() - get va_range type for the given address and size.
676  * @ctx: pointer to the context structure.
 * @address: the start address of the area we want to validate.
677  * @size: the size in bytes of the area we want to validate.
678  * @type: returned va_range type.
679  *
680  * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
681  */
682 static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
683                         enum hl_va_range_type *type)
684 {
685         int i;
686
687         for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
688                 if (hl_mem_area_inside_range(address, size,
689                                 ctx->va_range[i]->start_addr,
690                                 ctx->va_range[i]->end_addr)) {
691                         *type = i;
692                         return 0;
693                 }
694         }
695
696         return -EINVAL;
697 }
698
699 /**
700  * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
701  * @hdev: pointer to the habanalabs device structure
702  * @ctx: pointer to the context structure.
703  * @start_addr: start virtual address of the block to unreserve.
704  * @size: size in bytes of the block to unreserve.
705  *
706  * This function does the following:
707  * - Takes the list lock and calls add_va_block_locked.
708  */
709 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
710                 u64 start_addr, u64 size)
711 {
712         enum hl_va_range_type type;
713         int rc;
714
715         rc = hl_get_va_range_type(ctx, start_addr, size, &type);
716         if (rc) {
717                 dev_err(hdev->dev,
718                         "cannot find va_range for va %#llx size %llu",
719                         start_addr, size);
720                 return rc;
721         }
722
723         rc = add_va_block(hdev, ctx->va_range[type], start_addr,
724                                                 start_addr + size - 1);
725         if (rc)
726                 dev_warn(hdev->dev,
727                         "add va block failed for vaddr: 0x%llx\n", start_addr);
728
729         return rc;
730 }
731
732 /**
733  * get_sg_info() - get the number of pages and the DMA address of an SG entry.
734  * @sg: the SG entry.
735  * @dma_addr: pointer to DMA address to return.
736  *
737  * Calculate the number of consecutive pages described by the SG entry. Take
738  * the offset of the address in the first page, add the length to it and round
739  * up to the number of needed pages.
740  */
741 static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
742 {
743         *dma_addr = sg_dma_address(sg);
744
745         return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
746                         (PAGE_SIZE - 1)) >> PAGE_SHIFT;
747 }
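/*
 * Worked example of the calculation above (illustrative values only): with
 * PAGE_SIZE = 4KB, sg_dma_address(sg) = 0x10000800 and sg_dma_len(sg) =
 * 0x2000, the in-page offset is 0x800, so (0x800 + 0x2000 + 0xFFF) >> 12 = 3
 * pages are needed to cover the entry.
 */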
748
749 /**
750  * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
751  *                                    memory
752  * @ctx: pointer to the context structure.
753  * @userptr: userptr to initialize from.
754  * @pphys_pg_pack: result pointer.
755  *
756  * This function does the following:
757  * - Pin the physical pages related to the given virtual block.
758  * - Create a physical page pack from the physical pages related to the given
759  *   virtual block.
760  */
761 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
762                                 struct hl_userptr *userptr,
763                                 struct hl_vm_phys_pg_pack **pphys_pg_pack)
764 {
765         struct hl_vm_phys_pg_pack *phys_pg_pack;
766         struct scatterlist *sg;
767         dma_addr_t dma_addr;
768         u64 page_mask, total_npages;
769         u32 npages, page_size = PAGE_SIZE,
770                 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
771         bool first = true, is_huge_page_opt = true;
772         int rc, i, j;
773         u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
774
775         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
776         if (!phys_pg_pack)
777                 return -ENOMEM;
778
779         phys_pg_pack->vm_type = userptr->vm_type;
780         phys_pg_pack->created_from_userptr = true;
781         phys_pg_pack->asid = ctx->asid;
782         atomic_set(&phys_pg_pack->mapping_cnt, 1);
783
784         /* Only if all dma_addrs are aligned to 2MB and their
785          * sizes are a multiple of 2MB, we can use huge page mapping.
786          * We limit the 2MB optimization to this condition,
787          * since later on we acquire the related VA range as one
788          * consecutive block.
789          */
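        /*
         * Illustrative example (hypothetical values): with 2MB huge pages and
         * 4KB regular pages, pgs_in_huge_page = 512. An SG entry of 1024
         * pages starting on a 2MB boundary keeps the optimization enabled,
         * while an entry of 768 pages (768 % 512 != 0), or one whose DMA
         * address is only 4KB aligned, disables it for the whole pack.
         */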
790         total_npages = 0;
791         for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
792                 npages = get_sg_info(sg, &dma_addr);
793
794                 total_npages += npages;
795
796                 if ((npages % pgs_in_huge_page) ||
797                                         (dma_addr & (huge_page_size - 1)))
798                         is_huge_page_opt = false;
799         }
800
801         if (is_huge_page_opt) {
802                 page_size = huge_page_size;
803                 do_div(total_npages, pgs_in_huge_page);
804         }
805
806         page_mask = ~(((u64) page_size) - 1);
807
808         phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
809                                                 GFP_KERNEL);
810         if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
811                 rc = -ENOMEM;
812                 goto page_pack_arr_mem_err;
813         }
814
815         phys_pg_pack->npages = total_npages;
816         phys_pg_pack->page_size = page_size;
817         phys_pg_pack->total_size = total_npages * page_size;
818
819         j = 0;
820         for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
821                 npages = get_sg_info(sg, &dma_addr);
822
823                 /* align down to physical page size and save the offset */
824                 if (first) {
825                         first = false;
826                         phys_pg_pack->offset = dma_addr & (page_size - 1);
827                         dma_addr &= page_mask;
828                 }
829
830                 while (npages) {
831                         phys_pg_pack->pages[j++] = dma_addr;
832                         dma_addr += page_size;
833
834                         if (is_huge_page_opt)
835                                 npages -= pgs_in_huge_page;
836                         else
837                                 npages--;
838                 }
839         }
840
841         *pphys_pg_pack = phys_pg_pack;
842
843         return 0;
844
845 page_pack_arr_mem_err:
846         kfree(phys_pg_pack);
847
848         return rc;
849 }
850
851 /**
852  * map_phys_pg_pack() - maps the physical page pack.
853  * @ctx: pointer to the context structure.
854  * @vaddr: start address of the virtual area to map the pages to.
855  * @phys_pg_pack: the pack of physical pages to map.
856  *
857  * This function does the following:
858  * - Maps each chunk of virtual memory to a matching physical chunk.
859  * - On failure, unmaps whatever was already mapped.
860  * - Returns 0 on success, error code otherwise.
861  */
862 static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
863                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
864 {
865         struct hl_device *hdev = ctx->hdev;
866         u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
867         u32 page_size = phys_pg_pack->page_size;
868         int rc = 0;
869         bool is_host_addr;
870
871         for (i = 0 ; i < phys_pg_pack->npages ; i++) {
872                 paddr = phys_pg_pack->pages[i];
873
874                 rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
875                                 (i + 1) == phys_pg_pack->npages);
876                 if (rc) {
877                         dev_err(hdev->dev,
878                                 "map failed for handle %u, npages: %llu, mapped: %llu",
879                                 phys_pg_pack->handle, phys_pg_pack->npages,
880                                 mapped_pg_cnt);
881                         goto err;
882                 }
883
884                 mapped_pg_cnt++;
885                 next_vaddr += page_size;
886         }
887
888         return 0;
889
890 err:
891         is_host_addr = !hl_is_dram_va(hdev, vaddr);
892
893         next_vaddr = vaddr;
894         for (i = 0 ; i < mapped_pg_cnt ; i++) {
895                 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
896                                         (i + 1) == mapped_pg_cnt))
897                         dev_warn_ratelimited(hdev->dev,
898                                 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
899                                         phys_pg_pack->handle, next_vaddr,
900                                         phys_pg_pack->pages[i], page_size);
901
902                 next_vaddr += page_size;
903
904                 /*
905                  * unmapping on Palladium can be really long, so avoid a CPU
906                  * soft lockup bug by sleeping a little between unmapping pages
907                  *
908                  * In addition, on host num of pages could be huge,
909                  * because page size could be 4KB, so when unmapping host
910                  * pages sleep every 32K pages to avoid soft lockup
911                  */
912                 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
913                         usleep_range(50, 200);
914         }
915
916         return rc;
917 }
918
919 /**
920  * unmap_phys_pg_pack() - unmaps the physical page pack.
921  * @ctx: pointer to the context structure.
922  * @vaddr: start address of the virtual area to unmap.
923  * @phys_pg_pack: the pack of physical pages to unmap.
924  */
925 static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
926                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
927 {
928         struct hl_device *hdev = ctx->hdev;
929         u64 next_vaddr, i;
930         bool is_host_addr;
931         u32 page_size;
932
933         is_host_addr = !hl_is_dram_va(hdev, vaddr);
934         page_size = phys_pg_pack->page_size;
935         next_vaddr = vaddr;
936
937         for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
938                 if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
939                                        (i + 1) == phys_pg_pack->npages))
940                         dev_warn_ratelimited(hdev->dev,
941                         "unmap failed for vaddr: 0x%llx\n", next_vaddr);
942
943                 /*
944                  * unmapping on Palladium can be really long, so avoid a CPU
945                  * soft lockup bug by sleeping a little between unmapping pages
946                  *
947                  * In addition, on host num of pages could be huge,
948                  * because page size could be 4KB, so when unmapping host
949                  * pages sleep every 32K pages to avoid soft lockup
950                  */
951                 if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
952                         usleep_range(50, 200);
953         }
954 }
955
956 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
957                                         u64 *paddr)
958 {
959         struct hl_device *hdev = ctx->hdev;
960         struct hl_vm *vm = &hdev->vm;
961         struct hl_vm_phys_pg_pack *phys_pg_pack;
962         u32 handle;
963
964         handle = lower_32_bits(args->map_device.handle);
965         spin_lock(&vm->idr_lock);
966         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
967         if (!phys_pg_pack) {
968                 spin_unlock(&vm->idr_lock);
969                 dev_err(hdev->dev, "no match for handle %u\n", handle);
970                 return -EINVAL;
971         }
972
973         *paddr = phys_pg_pack->pages[0];
974
975         spin_unlock(&vm->idr_lock);
976
977         return 0;
978 }
979
980 /**
981  * map_device_va() - map the given memory.
982  * @ctx: pointer to the context structure.
983  * @args: host parameters with handle/host virtual address.
984  * @device_addr: pointer to result device virtual address.
985  *
986  * This function does the following:
987  * - If given a physical device memory handle, map to a device virtual block
988  *   and return the start address of this block.
989  * - If given a host virtual address and size, find the related physical pages,
990  *   map a device virtual block to these pages and return the start address of
991  *   this block.
992  */
993 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
994                 u64 *device_addr)
995 {
996         struct hl_device *hdev = ctx->hdev;
997         struct hl_vm *vm = &hdev->vm;
998         struct hl_vm_phys_pg_pack *phys_pg_pack;
999         struct hl_userptr *userptr = NULL;
1000         struct hl_vm_hash_node *hnode;
1001         struct hl_va_range *va_range;
1002         enum vm_type_t *vm_type;
1003         u64 ret_vaddr, hint_addr;
1004         u32 handle = 0, va_block_align;
1005         int rc;
1006         bool is_userptr = args->flags & HL_MEM_USERPTR;
1007
1008         /* Assume failure */
1009         *device_addr = 0;
1010
1011         if (is_userptr) {
1012                 u64 addr = args->map_host.host_virt_addr,
1013                         size = args->map_host.mem_size;
1014                 u32 page_size = hdev->asic_prop.pmmu.page_size,
1015                         huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
1016
1017                 rc = dma_map_host_va(hdev, addr, size, &userptr);
1018                 if (rc) {
1019                         dev_err(hdev->dev, "failed to get userptr from va\n");
1020                         return rc;
1021                 }
1022
1023                 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1024                                 &phys_pg_pack);
1025                 if (rc) {
1026                         dev_err(hdev->dev,
1027                                 "unable to init page pack for vaddr 0x%llx\n",
1028                                 addr);
1029                         goto init_page_pack_err;
1030                 }
1031
1032                 vm_type = (enum vm_type_t *) userptr;
1033                 hint_addr = args->map_host.hint_addr;
1034                 handle = phys_pg_pack->handle;
1035
1036                 /* get required alignment */
1037                 if (phys_pg_pack->page_size == page_size) {
1038                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1039
1040                         /*
1041                          * huge page alignment may be needed in case of regular
1042                          * page mapping, depending on the host VA alignment
1043                          */
1044                         if (addr & (huge_page_size - 1))
1045                                 va_block_align = page_size;
1046                         else
1047                                 va_block_align = huge_page_size;
1048                 } else {
1049                         /*
1050                          * huge page alignment is needed in case of huge page
1051                          * mapping
1052                          */
1053                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1054                         va_block_align = huge_page_size;
1055                 }
1056         } else {
1057                 handle = lower_32_bits(args->map_device.handle);
1058
1059                 spin_lock(&vm->idr_lock);
1060                 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1061                 if (!phys_pg_pack) {
1062                         spin_unlock(&vm->idr_lock);
1063                         dev_err(hdev->dev,
1064                                 "no match for handle %u\n", handle);
1065                         return -EINVAL;
1066                 }
1067
1068                 /* increment now to avoid freeing device memory while mapping */
1069                 atomic_inc(&phys_pg_pack->mapping_cnt);
1070
1071                 spin_unlock(&vm->idr_lock);
1072
1073                 vm_type = (enum vm_type_t *) phys_pg_pack;
1074
1075                 hint_addr = args->map_device.hint_addr;
1076
1077                 /* DRAM VA alignment is the same as the MMU page size */
1078                 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1079                 va_block_align = hdev->asic_prop.dmmu.page_size;
1080         }
1081
1082         /*
1083          * relevant for mapping device physical memory only, as host memory is
1084          * implicitly shared
1085          */
1086         if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
1087                         phys_pg_pack->asid != ctx->asid) {
1088                 dev_err(hdev->dev,
1089                         "Failed to map memory, handle %u is not shared\n",
1090                         handle);
1091                 rc = -EPERM;
1092                 goto shared_err;
1093         }
1094
1095         hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
1096         if (!hnode) {
1097                 rc = -ENOMEM;
1098                 goto hnode_err;
1099         }
1100
1101         ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
1102                                         hint_addr, va_block_align);
1103         if (!ret_vaddr) {
1104                 dev_err(hdev->dev, "no available va block for handle %u\n",
1105                                 handle);
1106                 rc = -ENOMEM;
1107                 goto va_block_err;
1108         }
1109
1110         mutex_lock(&ctx->mmu_lock);
1111
1112         rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
1113         if (rc) {
1114                 mutex_unlock(&ctx->mmu_lock);
1115                 dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
1116                                 handle);
1117                 goto map_err;
1118         }
1119
1120         rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
1121
1122         mutex_unlock(&ctx->mmu_lock);
1123
1124         if (rc) {
1125                 dev_err(hdev->dev,
1126                         "mapping handle %u failed due to MMU cache invalidation\n",
1127                         handle);
1128                 goto map_err;
1129         }
1130
1131         ret_vaddr += phys_pg_pack->offset;
1132
1133         hnode->ptr = vm_type;
1134         hnode->vaddr = ret_vaddr;
1135
1136         mutex_lock(&ctx->mem_hash_lock);
1137         hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1138         mutex_unlock(&ctx->mem_hash_lock);
1139
1140         *device_addr = ret_vaddr;
1141
1142         if (is_userptr)
1143                 rc = free_phys_pg_pack(hdev, phys_pg_pack);
1144
1145         return rc;
1146
1147 map_err:
1148         if (add_va_block(hdev, va_range, ret_vaddr,
1149                                 ret_vaddr + phys_pg_pack->total_size - 1))
1150                 dev_warn(hdev->dev,
1151                         "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1152                                 handle, ret_vaddr);
1153
1154 va_block_err:
1155         kfree(hnode);
1156 hnode_err:
1157 shared_err:
1158         atomic_dec(&phys_pg_pack->mapping_cnt);
1159         if (is_userptr)
1160                 free_phys_pg_pack(hdev, phys_pg_pack);
1161 init_page_pack_err:
1162         if (is_userptr)
1163                 dma_unmap_host_va(hdev, userptr);
1164
1165         return rc;
1166 }
1167
1168 /**
1169  * unmap_device_va() - unmap the given device virtual address.
1170  * @ctx: pointer to the context structure.
1171  * @args: host parameters with device virtual address to unmap.
1172  * @ctx_free: true if in context free flow, false otherwise.
1173  *
1174  * This function does the following:
1175  * - unmap the physical pages related to the given virtual address.
1176  * - return the device virtual block to the virtual block list.
1177  */
1178 static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
1179                                 bool ctx_free)
1180 {
1181         struct hl_device *hdev = ctx->hdev;
1182         struct asic_fixed_properties *prop = &hdev->asic_prop;
1183         struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1184         struct hl_vm_hash_node *hnode = NULL;
1185         struct hl_userptr *userptr = NULL;
1186         struct hl_va_range *va_range;
1187         u64 vaddr = args->unmap.device_virt_addr;
1188         enum vm_type_t *vm_type;
1189         bool is_userptr;
1190         int rc = 0;
1191
1192         /* protect from double entrance */
1193         mutex_lock(&ctx->mem_hash_lock);
1194         hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
1195                 if (vaddr == hnode->vaddr)
1196                         break;
1197
1198         if (!hnode) {
1199                 mutex_unlock(&ctx->mem_hash_lock);
1200                 dev_err(hdev->dev,
1201                         "unmap failed, no mem hnode for vaddr 0x%llx\n",
1202                         vaddr);
1203                 return -EINVAL;
1204         }
1205
1206         hash_del(&hnode->node);
1207         mutex_unlock(&ctx->mem_hash_lock);
1208
1209         vm_type = hnode->ptr;
1210
1211         if (*vm_type == VM_TYPE_USERPTR) {
1212                 is_userptr = true;
1213                 userptr = hnode->ptr;
1214                 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1215                                                         &phys_pg_pack);
1216                 if (rc) {
1217                         dev_err(hdev->dev,
1218                                 "unable to init page pack for vaddr 0x%llx\n",
1219                                 vaddr);
1220                         goto vm_type_err;
1221                 }
1222
1223                 if (phys_pg_pack->page_size ==
1224                                         hdev->asic_prop.pmmu.page_size)
1225                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1226                 else
1227                         va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1228         } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1229                 is_userptr = false;
1230                 va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1231                 phys_pg_pack = hnode->ptr;
1232         } else {
1233                 dev_warn(hdev->dev,
1234                         "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1235                                 vaddr);
1236                 rc = -EFAULT;
1237                 goto vm_type_err;
1238         }
1239
1240         if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1241                 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1242                 rc = -EINVAL;
1243                 goto mapping_cnt_err;
1244         }
1245
1246         if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
1247                 vaddr = prop->dram_base_address +
1248                         DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
1249                                                 phys_pg_pack->page_size) *
1250                                                         phys_pg_pack->page_size;
1251         else
1252                 vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1253
1254         mutex_lock(&ctx->mmu_lock);
1255
1256         unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1257
1258         /*
1259          * During context free this function is called in a loop to clean all
1260          * the context mappings. Hence the cache invalidation can be called once
1261          * at the loop end rather than for each iteration
1262          */
1263         if (!ctx_free)
1264                 rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
1265                                                                 *vm_type);
1266
1267         mutex_unlock(&ctx->mmu_lock);
1268
1269         /*
1270          * If the context is closing we don't need to check for the MMU cache
1271          * invalidation return code and update the VA free list as in this flow
1272          * we invalidate the MMU cache outside of this unmap function and the VA
1273          * free list will be freed anyway.
1274          */
1275         if (!ctx_free) {
1276                 int tmp_rc;
1277
1278                 if (rc)
1279                         dev_err(hdev->dev,
1280                                 "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
1281                                 vaddr);
1282
1283                 tmp_rc = add_va_block(hdev, va_range, vaddr,
1284                                         vaddr + phys_pg_pack->total_size - 1);
1285                 if (tmp_rc) {
1286                         dev_warn(hdev->dev,
1287                                         "add va block failed for vaddr: 0x%llx\n",
1288                                         vaddr);
1289                         if (!rc)
1290                                 rc = tmp_rc;
1291                 }
1292         }
1293
1294         atomic_dec(&phys_pg_pack->mapping_cnt);
1295         kfree(hnode);
1296
1297         if (is_userptr) {
1298                 rc = free_phys_pg_pack(hdev, phys_pg_pack);
1299                 dma_unmap_host_va(hdev, userptr);
1300         }
1301
1302         return rc;
1303
1304 mapping_cnt_err:
1305         if (is_userptr)
1306                 free_phys_pg_pack(hdev, phys_pg_pack);
1307 vm_type_err:
1308         mutex_lock(&ctx->mem_hash_lock);
1309         hash_add(ctx->mem_hash, &hnode->node, vaddr);
1310         mutex_unlock(&ctx->mem_hash_lock);
1311
1312         return rc;
1313 }
1314
1315 static int map_block(struct hl_device *hdev, u64 address, u64 *handle,
1316                         u32 *size)
1317 {
1318         u32 block_id = 0;
1319         int rc;
1320
1321         rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
1322
1323         *handle = block_id | HL_MMAP_TYPE_BLOCK;
1324         *handle <<= PAGE_SHIFT;
1325
1326         return rc;
1327 }
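/*
 * A note on the handle layout above: the block id is OR'ed with the
 * HL_MMAP_TYPE_BLOCK marker and the result is shifted left by PAGE_SHIFT,
 * so the handle can be passed back unchanged as the mmap() offset. The
 * kernel stores offset >> PAGE_SHIFT in vma->vm_pgoff, which is what
 * hl_hw_block_mmap() below reads (the type marker is presumably stripped by
 * the common mmap dispatch path before it gets there).
 */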
1328
1329 static void hw_block_vm_close(struct vm_area_struct *vma)
1330 {
1331         struct hl_vm_hw_block_list_node *lnode =
1332                 (struct hl_vm_hw_block_list_node *) vma->vm_private_data;
1333         struct hl_ctx *ctx = lnode->ctx;
1334
1335         mutex_lock(&ctx->hw_block_list_lock);
1336         list_del(&lnode->node);
1337         mutex_unlock(&ctx->hw_block_list_lock);
1338         hl_ctx_put(ctx);
1339         kfree(lnode);
1340         vma->vm_private_data = NULL;
1341 }
1342
1343 static const struct vm_operations_struct hw_block_vm_ops = {
1344         .close = hw_block_vm_close
1345 };
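/*
 * hw_block_vm_close() above is the counterpart of the hl_ctx_get() taken in
 * hl_hw_block_mmap() below: when the mapping goes away, the list node is
 * removed from the context and the context reference is dropped.
 */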
1346
1347 /**
1348  * hl_hw_block_mmap() - mmap a hw block to user.
1349  * @hpriv: pointer to the private data of the fd.
1350  * @vma: pointer to the vm_area_struct of the process.
1351  *
1352  * The driver increments the context reference for every HW block mapped, in
1353  * order to prevent the user from closing the FD without unmapping first.
1354  */
1355 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
1356 {
1357         struct hl_vm_hw_block_list_node *lnode;
1358         struct hl_device *hdev = hpriv->hdev;
1359         struct hl_ctx *ctx = hpriv->ctx;
1360         u32 block_id, block_size;
1361         int rc;
1362
1363         /* We use the page offset to hold the block id and thus we need to clear
1364          * it before doing the mmap itself
1365          */
1366         block_id = vma->vm_pgoff;
1367         vma->vm_pgoff = 0;
1368
1369         /* Driver only allows mapping of a complete HW block */
1370         block_size = vma->vm_end - vma->vm_start;
1371
1372 #ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
1373         if (!access_ok(VERIFY_WRITE,
1374                 (void __user *) (uintptr_t) vma->vm_start, block_size)) {
1375 #else
1376         if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
1377 #endif
1378                 dev_err(hdev->dev,
1379                         "user pointer is invalid - 0x%lx\n",
1380                         vma->vm_start);
1381
1382                 return -EINVAL;
1383         }
1384
1385         lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
1386         if (!lnode)
1387                 return -ENOMEM;
1388
1389         vma->vm_ops = &hw_block_vm_ops;
1390         vma->vm_private_data = lnode;
1391
1392         hl_ctx_get(hdev, ctx);
1393
1394         rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
1395         if (rc) {
1396                 hl_ctx_put(ctx);
1397                 kfree(lnode);
1398                 return rc;
1399         }
1400
1401         lnode->ctx = ctx;
1402         lnode->vaddr = vma->vm_start;
1403         lnode->size = block_size;
1404         lnode->id = block_id;
1405
1406         mutex_lock(&ctx->hw_block_list_lock);
1407         list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
1408         mutex_unlock(&ctx->hw_block_list_lock);
1409
1410         vma->vm_pgoff = block_id;
1411
1412         return 0;
1413 }
1414
1415 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
1416 {
1417         struct hl_device *hdev = hpriv->hdev;
1418         struct hl_ctx *ctx = hpriv->ctx;
1419         u64 block_handle, device_addr = 0;
1420         u32 handle = 0, block_size;
1421         int rc;
1422
1423         switch (args->in.op) {
1424         case HL_MEM_OP_ALLOC:
1425                 if (args->in.alloc.mem_size == 0) {
1426                         dev_err(hdev->dev,
1427                                 "alloc size must be larger than 0\n");
1428                         rc = -EINVAL;
1429                         goto out;
1430                 }
1431
1432                 /* Force contiguous as there are no real MMU
1433                  * translations to overcome physical memory gaps
1434                  */
1435                 args->in.flags |= HL_MEM_CONTIGUOUS;
1436                 rc = alloc_device_memory(ctx, &args->in, &handle);
1437
1438                 memset(args, 0, sizeof(*args));
1439                 args->out.handle = (__u64) handle;
1440                 break;
1441
1442         case HL_MEM_OP_FREE:
1443                 rc = free_device_memory(ctx, &args->in);
1444                 break;
1445
1446         case HL_MEM_OP_MAP:
1447                 if (args->in.flags & HL_MEM_USERPTR) {
1448                         device_addr = args->in.map_host.host_virt_addr;
1449                         rc = 0;
1450                 } else {
1451                         rc = get_paddr_from_handle(ctx, &args->in,
1452                                                         &device_addr);
1453                 }
1454
1455                 memset(args, 0, sizeof(*args));
1456                 args->out.device_virt_addr = device_addr;
1457                 break;
1458
1459         case HL_MEM_OP_UNMAP:
1460                 rc = 0;
1461                 break;
1462
1463         case HL_MEM_OP_MAP_BLOCK:
1464                 rc = map_block(hdev, args->in.map_block.block_addr,
1465                                 &block_handle, &block_size);
1466                 args->out.block_handle = block_handle;
1467                 args->out.block_size = block_size;
1468                 break;
1469
1470         default:
1471                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1472                 rc = -ENOTTY;
1473                 break;
1474         }
1475
1476 out:
1477         return rc;
1478 }
1479
1480 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
1481 {
1482         enum hl_device_status status;
1483         union hl_mem_args *args = data;
1484         struct hl_device *hdev = hpriv->hdev;
1485         struct hl_ctx *ctx = hpriv->ctx;
1486         u64 block_handle, device_addr = 0;
1487         u32 handle = 0, block_size;
1488         int rc;
1489
1490         if (!hl_device_operational(hdev, &status)) {
1491                 dev_warn_ratelimited(hdev->dev,
1492                         "Device is %s. Can't execute MEMORY IOCTL\n",
1493                         hdev->status[status]);
1494                 return -EBUSY;
1495         }
1496
1497         if (!hdev->mmu_enable)
1498                 return mem_ioctl_no_mmu(hpriv, args);
1499
1500         switch (args->in.op) {
1501         case HL_MEM_OP_ALLOC:
1502                 if (args->in.alloc.mem_size == 0) {
1503                         dev_err(hdev->dev,
1504                                 "alloc size must be larger than 0\n");
1505                         rc = -EINVAL;
1506                         goto out;
1507                 }
1508
1509                 /* If DRAM does not support virtual memory the driver won't
1510                  * handle the allocation/freeing of that memory. However, for
1511                  * system administration/monitoring purposes, the driver will
1512                  * keep track of the amount of DRAM memory that is allocated
1513                  * and freed by the user. Because this code totally relies on
1514                  * the user's input, the driver can't ensure the validity
1515                  * of this accounting.
1516                  */
1517                 if (!hdev->asic_prop.dram_supports_virtual_memory) {
1518                         atomic64_add(args->in.alloc.mem_size,
1519                                         &ctx->dram_phys_mem);
1520                         atomic64_add(args->in.alloc.mem_size,
1521                                         &hdev->dram_used_mem);
1522
1523                         dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
1524                         rc = 0;
1525
1526                         memset(args, 0, sizeof(*args));
1527                         args->out.handle = 0;
1528                         goto out;
1529                 }
1530
1531                 rc = alloc_device_memory(ctx, &args->in, &handle);
1532
1533                 memset(args, 0, sizeof(*args));
1534                 args->out.handle = (__u64) handle;
1535                 break;
1536
1537         case HL_MEM_OP_FREE:
1538                 /* If DRAM does not support virtual memory the driver won't
1539                  * handle the allocation/freeing of that memory. However, for
1540                  * system administration/monitoring purposes, the driver will
1541                  * keep track of the amount of DRAM memory that is allocated
1542                  * and freed by the user. Because this code totally relies on
1543                  * the user's input, the driver can't ensure the validity
1544                  * of this accounting.
1545                  */
1546                 if (!hdev->asic_prop.dram_supports_virtual_memory) {
1547                         atomic64_sub(args->in.alloc.mem_size,
1548                                         &ctx->dram_phys_mem);
1549                         atomic64_sub(args->in.alloc.mem_size,
1550                                         &hdev->dram_used_mem);
1551
1552                         dev_dbg(hdev->dev, "DRAM free is not supported\n");
1553                         rc = 0;
1554
1555                         goto out;
1556                 }
1557
1558                 rc = free_device_memory(ctx, &args->in);
1559                 break;
1560
1561         case HL_MEM_OP_MAP:
1562                 rc = map_device_va(ctx, &args->in, &device_addr);
1563
1564                 memset(args, 0, sizeof(*args));
1565                 args->out.device_virt_addr = device_addr;
1566                 break;
1567
1568         case HL_MEM_OP_UNMAP:
1569                 rc = unmap_device_va(ctx, &args->in, false);
1570                 break;
1571
1572         case HL_MEM_OP_MAP_BLOCK:
1573                 rc = map_block(hdev, args->in.map_block.block_addr,
1574                                 &block_handle, &block_size);
1575                 args->out.block_handle = block_handle;
1576                 args->out.block_size = block_size;
1577                 break;
1578
1579         default:
1580                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1581                 rc = -ENOTTY;
1582                 break;
1583         }
1584
1585 out:
1586         return rc;
1587 }
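
/*
 * Typical user-space sequence against this ioctl (a hedged sketch, not part
 * of the driver): allocate device memory, then map the returned handle to get
 * a device virtual address. The ioctl name and the map_device field are
 * assumed from the habanalabs uapi header; error handling is omitted.
 *
 *	union hl_mem_args args = {0};
 *	__u64 handle, device_va;
 *
 *	args.in.op = HL_MEM_OP_ALLOC;
 *	args.in.alloc.mem_size = size;
 *	ioctl(fd, HL_IOCTL_MEMORY, &args);
 *	handle = args.out.handle;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = HL_MEM_OP_MAP;
 *	args.in.map_device.handle = handle;
 *	ioctl(fd, HL_IOCTL_MEMORY, &args);
 *	device_va = args.out.device_virt_addr;
 */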
1588
1589 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
1590                                 u32 npages, u64 start, u32 offset,
1591                                 struct hl_userptr *userptr)
1592 {
1593         int rc;
1594
1595         if (!access_ok((void __user *) (uintptr_t) addr, size)) {
1596                 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
1597                 return -EFAULT;
1598         }
1599
1600         userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
1601                                         GFP_KERNEL);
1602         if (!userptr->pages)
1603                 return -ENOMEM;
1604
1605         rc = pin_user_pages_fast(start, npages,
1606                                  FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
1607                                  userptr->pages);
1608
1609         if (rc != npages) {
1610                 dev_err(hdev->dev,
1611                         "Failed to map host memory, user ptr probably wrong\n");
1612                 if (rc < 0)
1613                         goto destroy_pages;
1614                 npages = rc;
1615                 rc = -EFAULT;
1616                 goto put_pages;
1617         }
1618         userptr->npages = npages;
1619
1620         rc = sg_alloc_table_from_pages(userptr->sgt,
1621                                        userptr->pages,
1622                                        npages, offset, size, GFP_KERNEL);
1623         if (rc < 0) {
1624                 dev_err(hdev->dev, "failed to create SG table from pages\n");
1625                 goto put_pages;
1626         }
1627
1628         return 0;
1629
1630 put_pages:
1631         unpin_user_pages(userptr->pages, npages);
1632 destroy_pages:
1633         kvfree(userptr->pages);
1634         return rc;
1635 }
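
/*
 * Worked example of the error path in get_user_memory() above: if npages is 8
 * and pin_user_pages_fast() returns 5, the 5 pages that were pinned are
 * released via unpin_user_pages(), the pages array is freed and -EFAULT is
 * returned. If pin_user_pages_fast() returns a negative errno, nothing was
 * pinned, so only the pages array is freed and that errno is propagated.
 */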
1636
1637 /**
1638  * hl_pin_host_memory() - pins a chunk of host memory.
1639  * @hdev: pointer to the habanalabs device structure.
1640  * @addr: the host virtual address of the memory area.
1641  * @size: the size of the memory area.
1642  * @userptr: pointer to hl_userptr structure.
1643  *
1644  * This function does the following:
1645  * - Pins the physical pages.
1646  * - Creates an SG list from those pages.
1647  */
1648 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1649                                         struct hl_userptr *userptr)
1650 {
1651         u64 start, end;
1652         u32 npages, offset;
1653         int rc;
1654
1655         if (!size) {
1656                 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
1657                 return -EINVAL;
1658         }
1659
1660         /*
1661          * If the combination of the address and size requested for this memory
1662           * region causes an integer overflow, return an error.
1663          */
1664         if (((addr + size) < addr) ||
1665                         PAGE_ALIGN(addr + size) < (addr + size)) {
1666                 dev_err(hdev->dev,
1667                         "user pointer 0x%llx + %llu causes integer overflow\n",
1668                         addr, size);
1669                 return -EINVAL;
1670         }
1671
1672         userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
1673         if (!userptr->sgt)
1674                 return -ENOMEM;
1675
1676         start = addr & PAGE_MASK;
1677         offset = addr & ~PAGE_MASK;
1678         end = PAGE_ALIGN(addr + size);
1679         npages = (end - start) >> PAGE_SHIFT;
1680
1681         userptr->size = size;
1682         userptr->addr = addr;
1683         userptr->dma_mapped = false;
1684         INIT_LIST_HEAD(&userptr->job_node);
1685
1686         rc = get_user_memory(hdev, addr, size, npages, start, offset,
1687                                 userptr);
1688         if (rc) {
1689                 dev_err(hdev->dev,
1690                         "failed to get user memory for address 0x%llx\n",
1691                         addr);
1692                 goto free_sgt;
1693         }
1694
1695         hl_debugfs_add_userptr(hdev, userptr);
1696
1697         return 0;
1698
1699 free_sgt:
1700         kfree(userptr->sgt);
1701         return rc;
1702 }
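
/*
 * Worked example for the page arithmetic above, assuming a 4KB PAGE_SIZE:
 * for addr = 0x1234 and size = 0x2000, start = 0x1000, offset = 0x234 and
 * end = PAGE_ALIGN(0x3234) = 0x4000, so npages = (0x4000 - 0x1000) >> 12 = 3
 * pages are pinned even though the requested size is only two pages, because
 * the range is not page aligned.
 */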
1703
1704 /**
1705  * hl_unpin_host_memory() - unpins a chunk of host memory.
1706  * @hdev: pointer to the habanalabs device structure.
1707  * @userptr: pointer to hl_userptr structure.
1708  *
1709  * This function does the following:
1710  * - Unpins the physical pages related to the host memory.
1711  * - Frees the SG list.
1712  */
1713 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
1714 {
1715         hl_debugfs_remove_userptr(hdev, userptr);
1716
1717         if (userptr->dma_mapped)
1718                 hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
1719                                                         userptr->sgt->nents,
1720                                                         userptr->dir);
1721
1722         unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
1723         kvfree(userptr->pages);
1724
1725         list_del(&userptr->job_node);
1726
1727         sg_free_table(userptr->sgt);
1728         kfree(userptr->sgt);
1729 }
1730
1731 /**
1732  * hl_userptr_delete_list() - clear userptr list.
1733  * @hdev: pointer to the habanalabs device structure.
1734  * @userptr_list: pointer to the list to clear.
1735  *
1736  * This function does the following:
1737  * - Iterates over the list, unpins the host memory and frees each userptr
1738  *   structure.
1739  */
1740 void hl_userptr_delete_list(struct hl_device *hdev,
1741                                 struct list_head *userptr_list)
1742 {
1743         struct hl_userptr *userptr, *tmp;
1744
1745         list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
1746                 hl_unpin_host_memory(hdev, userptr);
1747                 kfree(userptr);
1748         }
1749
1750         INIT_LIST_HEAD(userptr_list);
1751 }
1752
1753 /**
1754  * hl_userptr_is_pinned() - returns whether the given host address is pinned.
1755  * @hdev: pointer to the habanalabs device structure.
1756  * @userptr_list: pointer to the list to search in.
1757  * @userptr: on a match, set to point to the found userptr structure.
1758  *
1759  * This function does the following:
1760  * - Iterates over the list and checks if a userptr with the given address
1761  *   and size is in it, which means it is pinned. Returns true if so, else false.
1762  */
1763 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
1764                                 u32 size, struct list_head *userptr_list,
1765                                 struct hl_userptr **userptr)
1766 {
1767         list_for_each_entry((*userptr), userptr_list, job_node) {
1768                 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
1769                         return true;
1770         }
1771
1772         return false;
1773 }
1774
1775 /**
1776  * va_range_init() - initialize virtual addresses range.
1777  * @hdev: pointer to the habanalabs device structure.
1778  * @va_range: pointer to the range to initialize.
1779  * @start: range start address.
1780  * @end: range end address.
1781  *
1782  * This function does the following:
1783  * - Initializes the virtual addresses list of the given range with the given
1784  *   addresses.
1785  */
1786 static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
1787                                 u64 start, u64 end, u32 page_size)
1788 {
1789         int rc;
1790
1791         INIT_LIST_HEAD(&va_range->list);
1792
1793         /*
1794          * PAGE_SIZE alignment:
1795          * it is the caller's responsibility to align the addresses if the
1796          * page size is not a power of 2.
1797          */
1798
1799         if (is_power_of_2(page_size)) {
1800                 if (start & (PAGE_SIZE - 1)) {
1801                         start &= PAGE_MASK;
1802                         start += PAGE_SIZE;
1803                 }
1804
1805                 if (end & (PAGE_SIZE - 1))
1806                         end &= PAGE_MASK;
1807         }
1808
1809         if (start >= end) {
1810                 dev_err(hdev->dev, "too small vm range for va list\n");
1811                 return -EFAULT;
1812         }
1813
1814         rc = add_va_block(hdev, va_range, start, end);
1815
1816         if (rc) {
1817                 dev_err(hdev->dev, "Failed to init host va list\n");
1818                 return rc;
1819         }
1820
1821         va_range->start_addr = start;
1822         va_range->end_addr = end;
1823         va_range->page_size = page_size;
1824
1825         return 0;
1826 }
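
/*
 * Worked example for the power-of-2 alignment above, assuming a 4KB
 * PAGE_SIZE: a requested range of [0x10000100, 0x20000f00) is trimmed to
 * [0x10001000, 0x20000000), i.e. the start is rounded up and the end is
 * rounded down to a page boundary, before add_va_block() seeds the list with
 * the whole remaining range as a single chunk.
 */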
1827
1828 /**
1829  * va_range_fini() - clear a virtual addresses range.
1830  * @hdev: pointer to the habanalabs device structure.
1831  * @va_range: pointer to the virtual addresses range.
1832  *
1833  * This function does the following:
1834  * - Frees the virtual addresses block list and its lock.
1835  */
1836 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
1837 {
1838         mutex_lock(&va_range->lock);
1839         clear_va_list_locked(hdev, &va_range->list);
1840         mutex_unlock(&va_range->lock);
1841
1842         mutex_destroy(&va_range->lock);
1843         kfree(va_range);
1844 }
1845
1846 /**
1847  * vm_ctx_init_with_ranges() - initialize virtual memory for context.
1848  * @ctx: pointer to the habanalabs context structure.
1849  * @host_range_start: host virtual addresses range start.
1850  * @host_range_end: host virtual addresses range end.
1851  * @host_huge_range_start: host virtual addresses range start for memory
1852  *                         allocated with huge pages.
1853  * @host_huge_range_end: host virtual addresses range end for memory allocated
1854  *                        with huge pages.
1855  * @dram_range_start: dram virtual addresses range start.
1856  * @dram_range_end: dram virtual addresses range end.
1857  *
1858  * This function initializes the following:
1859  * - MMU for context.
1860  * - Virtual address to area descriptor hashtable.
1861  * - Virtual block list of available virtual memory.
1862  */
1863 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
1864                                         u64 host_range_start,
1865                                         u64 host_range_end,
1866                                         u32 host_page_size,
1867                                         u64 host_huge_range_start,
1868                                         u64 host_huge_range_end,
1869                                         u32 host_huge_page_size,
1870                                         u64 dram_range_start,
1871                                         u64 dram_range_end,
1872                                         u32 dram_page_size)
1873 {
1874         struct hl_device *hdev = ctx->hdev;
1875         int i, rc;
1876
1877         for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
1878                 ctx->va_range[i] =
1879                         kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
1880                 if (!ctx->va_range[i]) {
1881                         rc = -ENOMEM;
1882                         goto free_va_range;
1883                 }
1884         }
1885
1886         rc = hl_mmu_ctx_init(ctx);
1887         if (rc) {
1888                 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
1889                 goto free_va_range;
1890         }
1891
1892         mutex_init(&ctx->mem_hash_lock);
1893         hash_init(ctx->mem_hash);
1894
1895         mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
1896
1897         rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
1898                         host_range_start, host_range_end, host_page_size);
1899         if (rc) {
1900                 dev_err(hdev->dev, "failed to init host vm range\n");
1901                 goto mmu_ctx_fini;
1902         }
1903
1904         if (hdev->pmmu_huge_range) {
1905                 mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
1906
1907                 rc = va_range_init(hdev,
1908                         ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
1909                         host_huge_range_start, host_huge_range_end,
1910                         host_huge_page_size);
1911                 if (rc) {
1912                         dev_err(hdev->dev,
1913                                 "failed to init host huge vm range\n");
1914                         goto clear_host_va_range;
1915                 }
1916         } else {
1917                 kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
1918                 ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
1919                                 ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1920         }
1921
1922         mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
1923
1924         rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
1925                         dram_range_start, dram_range_end, dram_page_size);
1926         if (rc) {
1927                 dev_err(hdev->dev, "failed to init dram vm range\n");
1928                 goto clear_host_huge_va_range;
1929         }
1930
1931         hl_debugfs_add_ctx_mem_hash(hdev, ctx);
1932
1933         return 0;
1934
1935 clear_host_huge_va_range:
1936         mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
1937
1938         if (hdev->pmmu_huge_range) {
1939                 mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
1940                 clear_va_list_locked(hdev,
1941                         &ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
1942                 mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
1943         }
1944 clear_host_va_range:
1945         if (hdev->pmmu_huge_range)
1946                 mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
1947         mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
1948         clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
1949         mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
1950 mmu_ctx_fini:
1951         mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
1952         mutex_destroy(&ctx->mem_hash_lock);
1953         hl_mmu_ctx_fini(ctx);
1954 free_va_range:
1955         for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
1956                 kfree(ctx->va_range[i]);
1957
1958         return rc;
1959 }
1960
1961 int hl_vm_ctx_init(struct hl_ctx *ctx)
1962 {
1963         struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
1964         u64 host_range_start, host_range_end, host_huge_range_start,
1965                 host_huge_range_end, dram_range_start, dram_range_end;
1966         u32 host_page_size, host_huge_page_size, dram_page_size;
1967
1968         atomic64_set(&ctx->dram_phys_mem, 0);
1969
1970         /*
1971          * - If MMU is enabled, init the ranges as usual.
1972          * - If MMU is disabled, in case of host mapping, the returned address
1973          *   is the given one.
1974          *   In case of DRAM mapping, the returned address is the physical
1975          *   address of the memory related to the given handle.
1976          */
1977         if (!ctx->hdev->mmu_enable)
1978                 return 0;
1979
1980         dram_range_start = prop->dmmu.start_addr;
1981         dram_range_end = prop->dmmu.end_addr;
1982         dram_page_size = prop->dram_page_size ?
1983                                 prop->dram_page_size : prop->dmmu.page_size;
1984         host_range_start = prop->pmmu.start_addr;
1985         host_range_end = prop->pmmu.end_addr;
1986         host_page_size = prop->pmmu.page_size;
1987         host_huge_range_start = prop->pmmu_huge.start_addr;
1988         host_huge_range_end = prop->pmmu_huge.end_addr;
1989         host_huge_page_size = prop->pmmu_huge.page_size;
1990
1991         return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
1992                         host_page_size, host_huge_range_start,
1993                         host_huge_range_end, host_huge_page_size,
1994                         dram_range_start, dram_range_end, dram_page_size);
1995 }
1996
1997 /**
1998  * hl_vm_ctx_fini() - virtual memory teardown of context.
1999  * @ctx: pointer to the habanalabs context structure.
2000  *
2001  * This function tears down the following:
2002  * - Virtual block list of available virtual memory.
2003  * - Virtual address to area descriptor hashtable.
2004  * - MMU for context.
2005  *
2006  * In addition this function does the following:
2007  * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
2008  *   hashtable should be empty as no valid mappings should exist at this
2009  *   point.
2010  * - Frees any existing physical page list from the idr which relates to the
2011  *   current context asid.
2012  * - This function checks the virtual block list for correctness. At this point
2013  *   the list should contain one element which describes the whole virtual
2014  *   memory range of the context. Otherwise, a warning is printed.
2015  */
2016 void hl_vm_ctx_fini(struct hl_ctx *ctx)
2017 {
2018         struct hl_device *hdev = ctx->hdev;
2019         struct hl_vm *vm = &hdev->vm;
2020         struct hl_vm_phys_pg_pack *phys_pg_list;
2021         struct hl_vm_hash_node *hnode;
2022         struct hlist_node *tmp_node;
2023         struct hl_mem_in args;
2024         int i;
2025
2026         if (!hdev->mmu_enable)
2027                 return;
2028
2029         hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
2030
2031         /*
2032          * Clearly something went wrong on hard reset, so there is no point in
2033          * printing another side-effect error.
2034          */
2035         if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
2036                 dev_notice(hdev->dev,
2037                         "user released device without removing its memory mappings\n");
2038
2039         hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
2040                 dev_dbg(hdev->dev,
2041                         "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
2042                         hnode->vaddr, ctx->asid);
2043                 args.unmap.device_virt_addr = hnode->vaddr;
2044                 unmap_device_va(ctx, &args, true);
2045         }
2046
2047         mutex_lock(&ctx->mmu_lock);
2048
2049         /* invalidate the cache once after the unmapping loop */
2050         hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
2051         hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
2052
2053         mutex_unlock(&ctx->mmu_lock);
2054
2055         spin_lock(&vm->idr_lock);
2056         idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
2057                 if (phys_pg_list->asid == ctx->asid) {
2058                         dev_dbg(hdev->dev,
2059                                 "page list 0x%px of asid %d is still alive\n",
2060                                 phys_pg_list, ctx->asid);
2061                         atomic64_sub(phys_pg_list->total_size,
2062                                         &hdev->dram_used_mem);
2063                         free_phys_pg_pack(hdev, phys_pg_list);
2064                         idr_remove(&vm->phys_pg_pack_handles, i);
2065                 }
2066         spin_unlock(&vm->idr_lock);
2067
2068         va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
2069         va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
2070
2071         if (hdev->pmmu_huge_range)
2072                 va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2073
2074         mutex_destroy(&ctx->mem_hash_lock);
2075         hl_mmu_ctx_fini(ctx);
2076
2077         /* In this case we need to clear the global accounting of DRAM usage
2078          * because the accounting relies solely on the user's notifications. Once
2079          * the user is gone, all DRAM is available again.
2080          */
2081         if (ctx->asid != HL_KERNEL_ASID_ID &&
2082                         !hdev->asic_prop.dram_supports_virtual_memory)
2083                 atomic64_set(&hdev->dram_used_mem, 0);
2084 }
2085
2086 /**
2087  * hl_vm_init() - initialize virtual memory module.
2088  * @hdev: pointer to the habanalabs device structure.
2089  *
2090  * This function initializes the following:
2091  * - MMU module.
2092  * - DRAM physical pages pool.
2093  * - Idr for device memory allocation handles.
2094  */
2095 int hl_vm_init(struct hl_device *hdev)
2096 {
2097         struct asic_fixed_properties *prop = &hdev->asic_prop;
2098         struct hl_vm *vm = &hdev->vm;
2099         int rc;
2100
2101         if (is_power_of_2(prop->dram_page_size))
2102                 vm->dram_pg_pool =
2103                         gen_pool_create(__ffs(prop->dram_page_size), -1);
2104         else
2105                 vm->dram_pg_pool =
2106                         gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
2107
2108         if (!vm->dram_pg_pool) {
2109                 dev_err(hdev->dev, "Failed to create dram page pool\n");
2110                 return -ENOMEM;
2111         }
2112
2113         kref_init(&vm->dram_pg_pool_refcount);
2114
2115         rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
2116                         prop->dram_end_address - prop->dram_user_base_address,
2117                         -1);
2118
2119         if (rc) {
2120                 dev_err(hdev->dev,
2121                         "Failed to add memory to dram page pool %d\n", rc);
2122                 goto pool_add_err;
2123         }
2124
2125         spin_lock_init(&vm->idr_lock);
2126         idr_init(&vm->phys_pg_pack_handles);
2127
2128         atomic64_set(&hdev->dram_used_mem, 0);
2129
2130         vm->init_done = true;
2131
2132         return 0;
2133
2134 pool_add_err:
2135         gen_pool_destroy(vm->dram_pg_pool);
2136
2137         return rc;
2138 }
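
/*
 * Granularity note for hl_vm_init() above (illustrative, assuming a
 * power-of-2 DRAM page size of 2MB): __ffs(SZ_2M) is 21, so the pool is
 * created with a minimum allocation order of 21 and gen_pool_alloc() hands
 * out DRAM in 2MB chunks. For a non-power-of-2 DRAM page size the pool is
 * created with DRAM_POOL_PAGE_SIZE granularity instead, since a gen_pool's
 * granularity must itself be a power of 2.
 */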
2139
2140 /**
2141  * hl_vm_fini() - virtual memory module teardown.
2142  * @hdev: pointer to the habanalabs device structure.
2143  *
2144  * This function tears down the following:
2145  * - Idr for device memory allocation handles.
2146  * - DRAM physical pages pool.
2147  * - MMU module.
2148  */
2149 void hl_vm_fini(struct hl_device *hdev)
2150 {
2151         struct hl_vm *vm = &hdev->vm;
2152
2153         if (!vm->init_done)
2154                 return;
2155
2156         /*
2157          * At this point all the contexts should be freed and hence no DRAM
2158          * memory should be in use, so the DRAM pool should be freed here.
2159          */
2160         if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
2161                 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
2162                                 __func__);
2163
2164         vm->init_done = false;
2165 }
2166
2167 /**
2168  * hl_hw_block_mem_init() - HW block memory initialization.
2169  * @ctx: pointer to the habanalabs context structure.
2170  *
2171  * This function initializes the HW block virtual mapped addresses list and
2172  * its lock.
2173  */
2174 void hl_hw_block_mem_init(struct hl_ctx *ctx)
2175 {
2176         mutex_init(&ctx->hw_block_list_lock);
2177         INIT_LIST_HEAD(&ctx->hw_block_mem_list);
2178 }
2179
2180 /**
2181  * hl_hw_block_mem_fini() - HW block memory teardown.
2182  * @ctx: pointer to the habanalabs context structure.
2183  *
2184  * This function clears the HW block virtual mapped addresses list and destroys
2185  * its lock.
2186  */
2187 void hl_hw_block_mem_fini(struct hl_ctx *ctx)
2188 {
2189         struct hl_vm_hw_block_list_node *lnode, *tmp;
2190
2191         if (!list_empty(&ctx->hw_block_mem_list))
2192                 dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
2193
2194         list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
2195                 list_del(&lnode->node);
2196                 kfree(lnode);
2197         }
2198
2199         mutex_destroy(&ctx->hw_block_list_lock);
2200 }