1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2019 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #include <uapi/misc/habanalabs.h>
9 #include "habanalabs.h"
10 #include "../include/hw_ip/mmu/mmu_general.h"
11
12 #include <linux/uaccess.h>
13 #include <linux/slab.h>
14
15 #define HL_MMU_DEBUG    0
16
17 /*
18  * The va ranges in the context object contain a list of the available chunks
19  * of device virtual memory.
20  * There is one range for host allocations and one for DRAM allocations.
21  *
22  * On initialization, each range contains one chunk covering its entire
23  * available virtual range, which is half of the total device virtual range.
24  *
25  * On each mapping of physical pages, a suitable virtual range chunk (with a
26  * minimum size) is selected from the list. If the chunk size equals the
27  * requested size, the chunk is returned. Otherwise, the chunk is split into
28  * two chunks - one to return as result and a remainder to stay in the list.
29  *
30  * On each unmapping of a virtual address, the relevant virtual chunk is
31  * returned to the list. The chunk is added to the list and if its edges match
32  * the edges of the adjacent chunks (meaning a contiguous chunk can be created),
33  * the chunks are merged.
34  *
35  * On finish, the list is checked to contain only one chunk covering the
36  * relevant virtual range (which is half of the device's total virtual range).
37  * If not (meaning not all mappings were unmapped), a warning is printed.
38  */
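
/*
 * Worked example of the scheme above (illustrative numbers only, not taken
 * from a specific ASIC): assume a DRAM VA range of [0x100000, 0x4fffff] held
 * as a single free chunk.
 *
 *   map 1MB  -> the chunk is split; [0x100000, 0x1fffff] is handed out and
 *               [0x200000, 0x4fffff] stays in the list.
 *   map 2MB  -> [0x200000, 0x3fffff] is handed out, the list now holds only
 *               [0x400000, 0x4fffff].
 *   unmap the 1MB block -> [0x100000, 0x1fffff] is added back; its end does
 *               not touch the remaining chunk, so no merge happens yet.
 *   unmap the 2MB block -> [0x200000, 0x3fffff] is added back and its edges
 *               touch both neighbors, so all three merge into the original
 *               single chunk [0x100000, 0x4fffff].
 */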
39
40 /*
41  * alloc_device_memory - allocate device memory
42  *
43  * @ctx                 : current context
44  * @args                : host parameters containing the requested size
45  * @ret_handle          : result handle
46  *
47  * This function does the following:
48  * - Allocate the requested size rounded up to 'dram_page_size' pages
49  * - Return unique handle
50  */
51 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
52                                 u32 *ret_handle)
53 {
54         struct hl_device *hdev = ctx->hdev;
55         struct hl_vm *vm = &hdev->vm;
56         struct hl_vm_phys_pg_pack *phys_pg_pack;
57         u64 paddr = 0, total_size, num_pgs, i;
58         u32 num_curr_pgs, page_size, page_shift;
59         int handle, rc;
60         bool contiguous;
61
62         num_curr_pgs = 0;
63         page_size = hdev->asic_prop.dram_page_size;
64         page_shift = __ffs(page_size);
65         num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
66         total_size = num_pgs << page_shift;
67
68         if (!total_size) {
69                 dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
70                 return -EINVAL;
71         }
72
73         contiguous = args->flags & HL_MEM_CONTIGUOUS;
74
75         if (contiguous) {
76                 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
77                 if (!paddr) {
78                         dev_err(hdev->dev,
79                                 "failed to allocate %llu contiguous pages with total size of %llu\n",
80                                 num_pgs, total_size);
81                         return -ENOMEM;
82                 }
83
84                 if (hdev->memory_scrub) {
85                         rc = hdev->asic_funcs->scrub_device_mem(hdev, paddr,
86                                         total_size);
87                         if (rc) {
88                                 dev_err(hdev->dev,
89                                         "Failed to scrub contiguous device memory\n");
90                                 goto pages_pack_err;
91                         }
92                 }
93         }
94
95         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
96         if (!phys_pg_pack) {
97                 rc = -ENOMEM;
98                 goto pages_pack_err;
99         }
100
101         phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
102         phys_pg_pack->asid = ctx->asid;
103         phys_pg_pack->npages = num_pgs;
104         phys_pg_pack->page_size = page_size;
105         phys_pg_pack->total_size = total_size;
106         phys_pg_pack->flags = args->flags;
107         phys_pg_pack->contiguous = contiguous;
108
109         phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
110         if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
111                 rc = -ENOMEM;
112                 goto pages_arr_err;
113         }
114
115         if (phys_pg_pack->contiguous) {
116                 for (i = 0 ; i < num_pgs ; i++)
117                         phys_pg_pack->pages[i] = paddr + i * page_size;
118         } else {
119                 for (i = 0 ; i < num_pgs ; i++) {
120                         phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
121                                                         vm->dram_pg_pool,
122                                                         page_size);
123                         if (!phys_pg_pack->pages[i]) {
124                                 dev_err(hdev->dev,
125                                         "Failed to allocate device memory (out of memory)\n");
126                                 rc = -ENOMEM;
127                                 goto page_err;
128                         }
129
130                         if (hdev->memory_scrub) {
131                                 rc = hdev->asic_funcs->scrub_device_mem(hdev,
132                                                 phys_pg_pack->pages[i],
133                                                 page_size);
134                                 if (rc) {
135                                         dev_err(hdev->dev,
136                                                 "Failed to scrub device memory\n");
137                                         goto page_err;
138                                 }
139                         }
140
141                         num_curr_pgs++;
142                 }
143         }
144
145         spin_lock(&vm->idr_lock);
146         handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
147                                 GFP_ATOMIC);
148         spin_unlock(&vm->idr_lock);
149
150         if (handle < 0) {
151                 dev_err(hdev->dev, "Failed to get handle for page\n");
152                 rc = -EFAULT;
153                 goto idr_err;
154         }
155
156         for (i = 0 ; i < num_pgs ; i++)
157                 kref_get(&vm->dram_pg_pool_refcount);
158
159         phys_pg_pack->handle = handle;
160
161         atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
162         atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
163
164         *ret_handle = handle;
165
166         return 0;
167
168 idr_err:
169 page_err:
170         if (!phys_pg_pack->contiguous)
171                 for (i = 0 ; i < num_curr_pgs ; i++)
172                         gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
173                                         page_size);
174
175         kvfree(phys_pg_pack->pages);
176 pages_arr_err:
177         kfree(phys_pg_pack);
178 pages_pack_err:
179         if (contiguous)
180                 gen_pool_free(vm->dram_pg_pool, paddr, total_size);
181
182         return rc;
183 }
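
/*
 * Illustrative sketch (not part of the driver): the size rounding done at the
 * top of alloc_device_memory(), shown stand-alone. The helper name and the
 * 2MB page size are hypothetical; e.g. a 5MB request would yield 3 pages and
 * a total_size of 6MB.
 */
#if 0
static u64 toy_round_up_to_dram_pages(u64 mem_size, u32 page_size)
{
        u32 page_shift = __ffs(page_size);      /* page_size is a power of two */
        u64 num_pgs = (mem_size + (page_size - 1)) >> page_shift;

        return num_pgs << page_shift;           /* bytes actually reserved */
}
#endif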
184
185 /*
186  * dma_map_host_va - DMA mapping of the given host virtual address.
187  * @hdev: habanalabs device structure
188  * @addr: the host virtual address of the memory area
189  * @size: the size of the memory area
190  * @p_userptr: pointer to result userptr structure
191  *
192  * This function does the following:
193  * - Allocate userptr structure
194  * - Pin the given host memory using the userptr structure
195  * - Perform DMA mapping to have the DMA addresses of the pages
196  */
197 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
198                                 struct hl_userptr **p_userptr)
199 {
200         struct hl_userptr *userptr;
201         int rc;
202
203         userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
204         if (!userptr) {
205                 rc = -ENOMEM;
206                 goto userptr_err;
207         }
208
209         rc = hl_pin_host_memory(hdev, addr, size, userptr);
210         if (rc) {
211                 dev_err(hdev->dev, "Failed to pin host memory\n");
212                 goto pin_err;
213         }
214
215         rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
216                                         userptr->sgt->nents, DMA_BIDIRECTIONAL);
217         if (rc) {
218                 dev_err(hdev->dev, "failed to map sgt with DMA region\n");
219                 goto dma_map_err;
220         }
221
222         userptr->dma_mapped = true;
223         userptr->dir = DMA_BIDIRECTIONAL;
224         userptr->vm_type = VM_TYPE_USERPTR;
225
226         *p_userptr = userptr;
227
228         return 0;
229
230 dma_map_err:
231         hl_unpin_host_memory(hdev, userptr);
232 pin_err:
233         kfree(userptr);
234 userptr_err:
235
236         return rc;
237 }
238
239 /*
240  * dma_unmap_host_va - DMA unmapping of the given host virtual address.
241  * @hdev: habanalabs device structure
242  * @userptr: userptr to free
243  *
244  * This function does the following:
245  * - Unpins the physical pages
246  * - Frees the userptr structure
247  */
248 static void dma_unmap_host_va(struct hl_device *hdev,
249                                 struct hl_userptr *userptr)
250 {
251         hl_unpin_host_memory(hdev, userptr);
252         kfree(userptr);
253 }
254
255 /*
256  * dram_pg_pool_do_release - free DRAM pages pool
257  *
258  * @ref                 : pointer to reference object
259  *
260  * This function does the following:
261  * - Frees the idr structure of physical page handles
262  * - Frees the generic pool of DRAM physical pages
263  */
264 static void dram_pg_pool_do_release(struct kref *ref)
265 {
266         struct hl_vm *vm = container_of(ref, struct hl_vm,
267                         dram_pg_pool_refcount);
268
269         /*
270          * free the idr here as only here we know for sure that there are no
271          * allocated physical pages and hence there are no handles in use
272          */
273         idr_destroy(&vm->phys_pg_pack_handles);
274         gen_pool_destroy(vm->dram_pg_pool);
275 }
276
277 /*
278  * free_phys_pg_pack - free physical page pack
279  * @hdev: habanalabs device structure
280  * @phys_pg_pack: physical page pack to free
281  *
282  * This function does the following:
283  * - For DRAM memory only, iterate over the pack and free each physical block
284  *   structure by returning it to the general pool
285  * - Free the hl_vm_phys_pg_pack structure
286  */
287 static void free_phys_pg_pack(struct hl_device *hdev,
288                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
289 {
290         struct hl_vm *vm = &hdev->vm;
291         u64 i;
292
293         if (!phys_pg_pack->created_from_userptr) {
294                 if (phys_pg_pack->contiguous) {
295                         gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
296                                         phys_pg_pack->total_size);
297
298                         for (i = 0; i < phys_pg_pack->npages ; i++)
299                                 kref_put(&vm->dram_pg_pool_refcount,
300                                         dram_pg_pool_do_release);
301                 } else {
302                         for (i = 0 ; i < phys_pg_pack->npages ; i++) {
303                                 gen_pool_free(vm->dram_pg_pool,
304                                                 phys_pg_pack->pages[i],
305                                                 phys_pg_pack->page_size);
306                                 kref_put(&vm->dram_pg_pool_refcount,
307                                         dram_pg_pool_do_release);
308                         }
309                 }
310         }
311
312         kvfree(phys_pg_pack->pages);
313         kfree(phys_pg_pack);
314 }
315
316 /*
317  * free_device_memory - free device memory
318  *
319  * @ctx                  : current context
320  * @handle              : handle of the memory chunk to free
321  *
322  * This function does the following:
323  * - Free the device memory related to the given handle
324  */
325 static int free_device_memory(struct hl_ctx *ctx, u32 handle)
326 {
327         struct hl_device *hdev = ctx->hdev;
328         struct hl_vm *vm = &hdev->vm;
329         struct hl_vm_phys_pg_pack *phys_pg_pack;
330
331         spin_lock(&vm->idr_lock);
332         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
333         if (phys_pg_pack) {
334                 if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
335                         dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
336                                 handle);
337                         spin_unlock(&vm->idr_lock);
338                         return -EINVAL;
339                 }
340
341                 /*
342                  * must remove from idr before the freeing of the physical
343                  * pages as the refcount of the pool is also the trigger of the
344                  * idr destroy
345                  */
346                 idr_remove(&vm->phys_pg_pack_handles, handle);
347                 spin_unlock(&vm->idr_lock);
348
349                 atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
350                 atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
351
352                 free_phys_pg_pack(hdev, phys_pg_pack);
353         } else {
354                 spin_unlock(&vm->idr_lock);
355                 dev_err(hdev->dev,
356                         "free device memory failed, no match for handle %u\n",
357                         handle);
358                 return -EINVAL;
359         }
360
361         return 0;
362 }
363
364 /*
365  * clear_va_list_locked - free virtual addresses list
366  *
367  * @hdev                : habanalabs device structure
368  * @va_list             : list of virtual addresses to free
369  *
370  * This function does the following:
371  * - Iterate over the list and free each virtual address block
372  *
373  * This function should be called only when va_list lock is taken
374  */
375 static void clear_va_list_locked(struct hl_device *hdev,
376                 struct list_head *va_list)
377 {
378         struct hl_vm_va_block *va_block, *tmp;
379
380         list_for_each_entry_safe(va_block, tmp, va_list, node) {
381                 list_del(&va_block->node);
382                 kfree(va_block);
383         }
384 }
385
386 /*
387  * print_va_list_locked    - print virtual addresses list
388  *
389  * @hdev                : habanalabs device structure
390  * @va_list             : list of virtual addresses to print
391  *
392  * This function does the following:
393  * - Iterate over the list and print each virtual address block
394  *
395  * This function should be called only when va_list lock is taken
396  */
397 static void print_va_list_locked(struct hl_device *hdev,
398                 struct list_head *va_list)
399 {
400 #if HL_MMU_DEBUG
401         struct hl_vm_va_block *va_block;
402
403         dev_dbg(hdev->dev, "print va list:\n");
404
405         list_for_each_entry(va_block, va_list, node)
406                 dev_dbg(hdev->dev,
407                         "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
408                         va_block->start, va_block->end, va_block->size);
409 #endif
410 }
411
412 /*
413  * merge_va_blocks_locked - merge a virtual block if possible
414  *
415  * @hdev                : pointer to the habanalabs device structure
416  * @va_list             : pointer to the virtual addresses block list
417  * @va_block            : virtual block to merge with adjacent blocks
418  *
419  * This function does the following:
420  * - Merge the given block with its adjacent blocks if their virtual ranges
421  *   form a contiguous virtual range
422  *
423  * This function should be called only when va_list lock is taken
424  */
425 static void merge_va_blocks_locked(struct hl_device *hdev,
426                 struct list_head *va_list, struct hl_vm_va_block *va_block)
427 {
428         struct hl_vm_va_block *prev, *next;
429
430         prev = list_prev_entry(va_block, node);
431         if (&prev->node != va_list && prev->end + 1 == va_block->start) {
432                 prev->end = va_block->end;
433                 prev->size = prev->end - prev->start;
434                 list_del(&va_block->node);
435                 kfree(va_block);
436                 va_block = prev;
437         }
438
439         next = list_next_entry(va_block, node);
440         if (&next->node != va_list && va_block->end + 1 == next->start) {
441                 next->start = va_block->start;
442                 next->size = next->end - next->start;
443                 list_del(&va_block->node);
444                 kfree(va_block);
445         }
446 }
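
/*
 * Merge example (illustrative addresses): if the list holds [0x1000, 0x1fff]
 * and [0x3000, 0x3fff], adding back [0x2000, 0x2fff] satisfies
 * prev->end + 1 == start on one side and end + 1 == next->start on the other,
 * so all three blocks collapse into a single block [0x1000, 0x3fff].
 */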
447
448 /*
449  * add_va_block_locked - add a virtual block to the virtual addresses list
450  *
451  * @hdev                : pointer to the habanalabs device structure
452  * @va_list             : pointer to the virtual addresses block list
453  * @start               : start virtual address
454  * @end                 : end virtual address
455  *
456  * This function does the following:
457  * - Add the given block to the virtual blocks list and merge with other
458  * blocks if a contiguous virtual block can be created
459  *
460  * This function should be called only when va_list lock is taken
461  */
462 static int add_va_block_locked(struct hl_device *hdev,
463                 struct list_head *va_list, u64 start, u64 end)
464 {
465         struct hl_vm_va_block *va_block, *res = NULL;
466         u64 size = end - start;
467
468         print_va_list_locked(hdev, va_list);
469
470         list_for_each_entry(va_block, va_list, node) {
471                 /* TODO: remove once the code is mature */
472                 if (hl_mem_area_crosses_range(start, size, va_block->start,
473                                 va_block->end)) {
474                         dev_err(hdev->dev,
475                                 "block crossing ranges at start 0x%llx, end 0x%llx\n",
476                                 va_block->start, va_block->end);
477                         return -EINVAL;
478                 }
479
480                 if (va_block->end < start)
481                         res = va_block;
482         }
483
484         va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
485         if (!va_block)
486                 return -ENOMEM;
487
488         va_block->start = start;
489         va_block->end = end;
490         va_block->size = size;
491
492         if (!res)
493                 list_add(&va_block->node, va_list);
494         else
495                 list_add(&va_block->node, &res->node);
496
497         merge_va_blocks_locked(hdev, va_list, va_block);
498
499         print_va_list_locked(hdev, va_list);
500
501         return 0;
502 }
503
504 /*
505  * add_va_block - wrapper for add_va_block_locked
506  *
507  * @hdev                : pointer to the habanalabs device structure
508  * @va_range            : pointer to the virtual addresses range structure
509  * @start               : start virtual address
510  * @end                 : end virtual address
511  *
512  * This function does the following:
513  * - Takes the list lock and calls add_va_block_locked
514  */
515 static inline int add_va_block(struct hl_device *hdev,
516                 struct hl_va_range *va_range, u64 start, u64 end)
517 {
518         int rc;
519
520         mutex_lock(&va_range->lock);
521         rc = add_va_block_locked(hdev, &va_range->list, start, end);
522         mutex_unlock(&va_range->lock);
523
524         return rc;
525 }
526
527 /*
528  * get_va_block() - get a virtual block for the given size and alignment.
529  * @hdev: pointer to the habanalabs device structure.
530  * @va_range: pointer to the virtual addresses range.
531  * @size: requested block size.
532  * @hint_addr: hint for requested address by the user.
533  * @va_block_align: required alignment of the virtual block start address.
534  *
535  * This function does the following:
536  * - Iterate on the virtual block list to find a suitable virtual block for the
537  *   given size and alignment.
538  * - Reserve the requested block and update the list.
539  * - Return the start address of the virtual block.
540  */
541 static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
542                         u64 size, u64 hint_addr, u32 va_block_align)
543 {
544         struct hl_vm_va_block *va_block, *new_va_block = NULL;
545         u64 valid_start, valid_size, prev_start, prev_end, align_mask,
546                 res_valid_start = 0, res_valid_size = 0;
547         bool add_prev = false;
548
549         align_mask = ~((u64)va_block_align - 1);
550
551         /* check if hint_addr is aligned */
552         if (hint_addr & (va_block_align - 1))
553                 hint_addr = 0;
554
555         mutex_lock(&va_range->lock);
556
557         print_va_list_locked(hdev, &va_range->list);
558
559         list_for_each_entry(va_block, &va_range->list, node) {
560                 /* calc the first possible aligned addr */
561                 valid_start = va_block->start;
562
563                 if (valid_start & (va_block_align - 1)) {
564                         valid_start &= align_mask;
565                         valid_start += va_block_align;
566                         if (valid_start > va_block->end)
567                                 continue;
568                 }
569
570                 valid_size = va_block->end - valid_start;
571
572                 if (valid_size >= size &&
573                         (!new_va_block || valid_size < res_valid_size)) {
574                         new_va_block = va_block;
575                         res_valid_start = valid_start;
576                         res_valid_size = valid_size;
577                 }
578
579                 if (hint_addr && hint_addr >= valid_start &&
580                                 ((hint_addr + size) <= va_block->end)) {
581                         new_va_block = va_block;
582                         res_valid_start = hint_addr;
583                         res_valid_size = valid_size;
584                         break;
585                 }
586         }
587
588         if (!new_va_block) {
589                 dev_err(hdev->dev, "no available va block for size %llu\n",
590                                 size);
591                 goto out;
592         }
593
594         if (res_valid_start > new_va_block->start) {
595                 prev_start = new_va_block->start;
596                 prev_end = res_valid_start - 1;
597
598                 new_va_block->start = res_valid_start;
599                 new_va_block->size = res_valid_size;
600
601                 add_prev = true;
602         }
603
604         if (new_va_block->size > size) {
605                 new_va_block->start += size;
606                 new_va_block->size = new_va_block->end - new_va_block->start;
607         } else {
608                 list_del(&new_va_block->node);
609                 kfree(new_va_block);
610         }
611
612         if (add_prev)
613                 add_va_block_locked(hdev, &va_range->list, prev_start,
614                                 prev_end);
615
616         print_va_list_locked(hdev, &va_range->list);
617 out:
618         mutex_unlock(&va_range->lock);
619
620         return res_valid_start;
621 }
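
/*
 * Illustrative sketch (not part of the driver): how get_va_block() promotes a
 * candidate start address to the required alignment. The helper name and the
 * example values are hypothetical; e.g. with va_block_align = 2MB, a block
 * starting at 0x10001000 yields a first valid start of 0x10200000.
 */
#if 0
static u64 toy_align_block_start(u64 block_start, u64 va_block_align)
{
        u64 align_mask = ~(va_block_align - 1);

        if (block_start & (va_block_align - 1)) {
                block_start &= align_mask;
                block_start += va_block_align;
        }

        return block_start;
}
#endif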
622
623 /*
624  * get_sg_info - get number of pages and the DMA address from SG list
625  *
626  * @sg                 : the SG list entry
627  * @dma_addr           : pointer to DMA address to return
628  *
629  * Calculate the number of consecutive pages described by the SG entry: take
630  * the offset of the DMA address within its first page, add the entry length
631  * and round the result up to whole pages.
632  */
633 static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
634 {
635         *dma_addr = sg_dma_address(sg);
636
637         return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
638                         (PAGE_SIZE - 1)) >> PAGE_SHIFT;
639 }
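
/*
 * Worked example for get_sg_info() (illustrative numbers): with 4KB pages, an
 * SG entry whose DMA address is 0x10000f00 and whose DMA length is 0x1200
 * bytes covers 0xf00 + 0x1200 = 0x2100 bytes of page space, i.e.
 * (0x2100 + 0xfff) >> 12 = 3 pages.
 */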
640
641 /*
642  * init_phys_pg_pack_from_userptr - initialize physical page pack from host
643  *                                  memory
644  * @ctx: current context
645  * @userptr: userptr to initialize from
646  * @pphys_pg_pack: result pointer
647  *
648  * This function does the following:
649  * - Use the pages that were already pinned into the userptr's SG table (the
650  *   pinning itself is done earlier, in hl_pin_host_memory())
651  * - Create a physical page pack from those physical pages
652  */
653 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
654                                 struct hl_userptr *userptr,
655                                 struct hl_vm_phys_pg_pack **pphys_pg_pack)
656 {
657         struct hl_vm_phys_pg_pack *phys_pg_pack;
658         struct scatterlist *sg;
659         dma_addr_t dma_addr;
660         u64 page_mask, total_npages;
661         u32 npages, page_size = PAGE_SIZE,
662                 huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
663         bool first = true, is_huge_page_opt = true;
664         int rc, i, j;
665         u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
666
667         phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
668         if (!phys_pg_pack)
669                 return -ENOMEM;
670
671         phys_pg_pack->vm_type = userptr->vm_type;
672         phys_pg_pack->created_from_userptr = true;
673         phys_pg_pack->asid = ctx->asid;
674         atomic_set(&phys_pg_pack->mapping_cnt, 1);
675
676         /* We can use huge-page mapping only if all dma_addrs are aligned
677          * to 2MB and their sizes are at least 2MB.
678          * We limit the 2MB optimization to this condition,
679          * since later on we acquire the related VA range as one
680          * consecutive block.
681          */
682         total_npages = 0;
683         for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
684                 npages = get_sg_info(sg, &dma_addr);
685
686                 total_npages += npages;
687
688                 if ((npages % pgs_in_huge_page) ||
689                                         (dma_addr & (huge_page_size - 1)))
690                         is_huge_page_opt = false;
691         }
692
693         if (is_huge_page_opt) {
694                 page_size = huge_page_size;
695                 do_div(total_npages, pgs_in_huge_page);
696         }
697
698         page_mask = ~(((u64) page_size) - 1);
699
700         phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
701                                                 GFP_KERNEL);
702         if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
703                 rc = -ENOMEM;
704                 goto page_pack_arr_mem_err;
705         }
706
707         phys_pg_pack->npages = total_npages;
708         phys_pg_pack->page_size = page_size;
709         phys_pg_pack->total_size = total_npages * page_size;
710
711         j = 0;
712         for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
713                 npages = get_sg_info(sg, &dma_addr);
714
715                 /* align down to physical page size and save the offset */
716                 if (first) {
717                         first = false;
718                         phys_pg_pack->offset = dma_addr & (page_size - 1);
719                         dma_addr &= page_mask;
720                 }
721
722                 while (npages) {
723                         phys_pg_pack->pages[j++] = dma_addr;
724                         dma_addr += page_size;
725
726                         if (is_huge_page_opt)
727                                 npages -= pgs_in_huge_page;
728                         else
729                                 npages--;
730                 }
731         }
732
733         *pphys_pg_pack = phys_pg_pack;
734
735         return 0;
736
737 page_pack_arr_mem_err:
738         kfree(phys_pg_pack);
739
740         return rc;
741 }
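
/*
 * Illustrative sketch (not part of the driver): the per-SG-entry eligibility
 * test for the huge-page optimization above, shown stand-alone. The helper
 * name is hypothetical; with a 2MB huge page and 4KB regular pages,
 * pgs_in_huge_page is 512, so an entry qualifies only if its DMA address is
 * 2MB-aligned and it covers a whole multiple of 512 regular pages.
 */
#if 0
static bool toy_sg_is_huge_page_compatible(dma_addr_t dma_addr, u32 npages,
                                        u32 huge_page_size, u32 page_size)
{
        u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);

        return !(npages % pgs_in_huge_page) &&
                !(dma_addr & (huge_page_size - 1));
}
#endif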
742
743 /*
744  * map_phys_pg_pack - maps the physical page pack.
745  * @ctx: current context
746  * @vaddr: start address of the virtual area to map from
747  * @phys_pg_pack: the pack of physical pages to map to
748  *
749  * This function does the following:
750  * - Maps each chunk of virtual memory to the matching physical chunk
751  * - On failure, unmaps whatever was already mapped before returning
752  * - Returns 0 on success, error code otherwise
753  */
754 static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
755                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
756 {
757         struct hl_device *hdev = ctx->hdev;
758         u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
759         u32 page_size = phys_pg_pack->page_size;
760         int rc = 0;
761
762         for (i = 0 ; i < phys_pg_pack->npages ; i++) {
763                 paddr = phys_pg_pack->pages[i];
764
765                 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
766                                 (i + 1) == phys_pg_pack->npages);
767                 if (rc) {
768                         dev_err(hdev->dev,
769                                 "map failed for handle %u, npages: %llu, mapped: %llu",
770                                 phys_pg_pack->handle, phys_pg_pack->npages,
771                                 mapped_pg_cnt);
772                         goto err;
773                 }
774
775                 mapped_pg_cnt++;
776                 next_vaddr += page_size;
777         }
778
779         return 0;
780
781 err:
782         next_vaddr = vaddr;
783         for (i = 0 ; i < mapped_pg_cnt ; i++) {
784                 if (hl_mmu_unmap(ctx, next_vaddr, page_size,
785                                         (i + 1) == mapped_pg_cnt))
786                         dev_warn_ratelimited(hdev->dev,
787                                 "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
788                                         phys_pg_pack->handle, next_vaddr,
789                                         phys_pg_pack->pages[i], page_size);
790
791                 next_vaddr += page_size;
792         }
793
794         return rc;
795 }
796
797 /*
798  * unmap_phys_pg_pack - unmaps the physical page pack
799  * @ctx: current context
800  * @vaddr: start address of the virtual area to unmap
801  * @phys_pg_pack: the pack of physical pages to unmap
802  */
803 static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
804                                 struct hl_vm_phys_pg_pack *phys_pg_pack)
805 {
806         struct hl_device *hdev = ctx->hdev;
807         u64 next_vaddr, i;
808         u32 page_size;
809
810         page_size = phys_pg_pack->page_size;
811         next_vaddr = vaddr;
812
813         for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
814                 if (hl_mmu_unmap(ctx, next_vaddr, page_size,
815                                        (i + 1) == phys_pg_pack->npages))
816                         dev_warn_ratelimited(hdev->dev,
817                         "unmap failed for vaddr: 0x%llx\n", next_vaddr);
818
819                 /*
820                  * unmapping on Palladium can be really long, so avoid a CPU
821                  * soft lockup bug by sleeping a little between unmapping pages
822                  */
823                 if (hdev->pldm)
824                         usleep_range(500, 1000);
825         }
826 }
827
828 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
829                                 u64 *paddr)
830 {
831         struct hl_device *hdev = ctx->hdev;
832         struct hl_vm *vm = &hdev->vm;
833         struct hl_vm_phys_pg_pack *phys_pg_pack;
834         u32 handle;
835
836         handle = lower_32_bits(args->map_device.handle);
837         spin_lock(&vm->idr_lock);
838         phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
839         if (!phys_pg_pack) {
840                 spin_unlock(&vm->idr_lock);
841                 dev_err(hdev->dev, "no match for handle %u\n", handle);
842                 return -EINVAL;
843         }
844
845         *paddr = phys_pg_pack->pages[0];
846
847         spin_unlock(&vm->idr_lock);
848
849         return 0;
850 }
851
852 /*
853  * map_device_va - map the given memory
854  *
855  * @ctx          : current context
856  * @args         : host parameters with handle/host virtual address
857  * @device_addr  : pointer to result device virtual address
858  *
859  * This function does the following:
860  * - If given a physical device memory handle, map to a device virtual block
861  *   and return the start address of this block
862  * - If given a host virtual address and size, find the related physical pages,
863  *   map a device virtual block to these pages and return the start address of
864  *   this block
865  */
866 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
867                 u64 *device_addr)
868 {
869         struct hl_device *hdev = ctx->hdev;
870         struct hl_vm *vm = &hdev->vm;
871         struct hl_vm_phys_pg_pack *phys_pg_pack;
872         struct hl_userptr *userptr = NULL;
873         struct hl_vm_hash_node *hnode;
874         struct hl_va_range *va_range;
875         enum vm_type_t *vm_type;
876         u64 ret_vaddr, hint_addr;
877         u32 handle = 0, va_block_align;
878         int rc;
879         bool is_userptr = args->flags & HL_MEM_USERPTR;
880
881         /* Assume failure */
882         *device_addr = 0;
883
884         if (is_userptr) {
885                 u64 addr = args->map_host.host_virt_addr,
886                         size = args->map_host.mem_size;
887                 u32 page_size = hdev->asic_prop.pmmu.page_size,
888                         huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
889
890                 rc = dma_map_host_va(hdev, addr, size, &userptr);
891                 if (rc) {
892                         dev_err(hdev->dev, "failed to get userptr from va\n");
893                         return rc;
894                 }
895
896                 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
897                                 &phys_pg_pack);
898                 if (rc) {
899                         dev_err(hdev->dev,
900                                 "unable to init page pack for vaddr 0x%llx\n",
901                                 addr);
902                         goto init_page_pack_err;
903                 }
904
905                 vm_type = (enum vm_type_t *) userptr;
906                 hint_addr = args->map_host.hint_addr;
907                 handle = phys_pg_pack->handle;
908
909                 /* get required alignment */
910                 if (phys_pg_pack->page_size == page_size) {
911                         va_range = ctx->host_va_range;
912
913                         /*
914                          * huge page alignment may be needed in case of regular
915                          * page mapping, depending on the host VA alignment
916                          */
917                         if (addr & (huge_page_size - 1))
918                                 va_block_align = page_size;
919                         else
920                                 va_block_align = huge_page_size;
921                 } else {
922                         /*
923                          * huge page alignment is needed in case of huge page
924                          * mapping
925                          */
926                         va_range = ctx->host_huge_va_range;
927                         va_block_align = huge_page_size;
928                 }
929         } else {
930                 handle = lower_32_bits(args->map_device.handle);
931
932                 spin_lock(&vm->idr_lock);
933                 phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
934                 if (!phys_pg_pack) {
935                         spin_unlock(&vm->idr_lock);
936                         dev_err(hdev->dev,
937                                 "no match for handle %u\n", handle);
938                         return -EINVAL;
939                 }
940
941                 /* increment now to avoid freeing device memory while mapping */
942                 atomic_inc(&phys_pg_pack->mapping_cnt);
943
944                 spin_unlock(&vm->idr_lock);
945
946                 vm_type = (enum vm_type_t *) phys_pg_pack;
947
948                 hint_addr = args->map_device.hint_addr;
949
950                 /* DRAM VA alignment is the same as the DRAM page size */
951                 va_range = ctx->dram_va_range;
952                 va_block_align = hdev->asic_prop.dmmu.page_size;
953         }
954
955         /*
956          * relevant for mapping device physical memory only, as host memory is
957          * implicitly shared
958          */
959         if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
960                         phys_pg_pack->asid != ctx->asid) {
961                 dev_err(hdev->dev,
962                         "Failed to map memory, handle %u is not shared\n",
963                         handle);
964                 rc = -EPERM;
965                 goto shared_err;
966         }
967
968         hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
969         if (!hnode) {
970                 rc = -ENOMEM;
971                 goto hnode_err;
972         }
973
974         ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
975                                         hint_addr, va_block_align);
976         if (!ret_vaddr) {
977                 dev_err(hdev->dev, "no available va block for handle %u\n",
978                                 handle);
979                 rc = -ENOMEM;
980                 goto va_block_err;
981         }
982
983         mutex_lock(&ctx->mmu_lock);
984
985         rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
986         if (rc) {
987                 mutex_unlock(&ctx->mmu_lock);
988                 dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
989                                 handle);
990                 goto map_err;
991         }
992
993         rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);
994
995         mutex_unlock(&ctx->mmu_lock);
996
997         if (rc) {
998                 dev_err(hdev->dev,
999                         "mapping handle %u failed due to MMU cache invalidation\n",
1000                         handle);
1001                 goto map_err;
1002         }
1003
1004         ret_vaddr += phys_pg_pack->offset;
1005
1006         hnode->ptr = vm_type;
1007         hnode->vaddr = ret_vaddr;
1008
1009         mutex_lock(&ctx->mem_hash_lock);
1010         hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1011         mutex_unlock(&ctx->mem_hash_lock);
1012
1013         *device_addr = ret_vaddr;
1014
1015         if (is_userptr)
1016                 free_phys_pg_pack(hdev, phys_pg_pack);
1017
1018         return 0;
1019
1020 map_err:
1021         if (add_va_block(hdev, va_range, ret_vaddr,
1022                                 ret_vaddr + phys_pg_pack->total_size - 1))
1023                 dev_warn(hdev->dev,
1024                         "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1025                                 handle, ret_vaddr);
1026
1027 va_block_err:
1028         kfree(hnode);
1029 hnode_err:
1030 shared_err:
1031         atomic_dec(&phys_pg_pack->mapping_cnt);
1032         if (is_userptr)
1033                 free_phys_pg_pack(hdev, phys_pg_pack);
1034 init_page_pack_err:
1035         if (is_userptr)
1036                 dma_unmap_host_va(hdev, userptr);
1037
1038         return rc;
1039 }
1040
1041 /*
1042  * unmap_device_va      - unmap the given device virtual address
1043  *
1044  * @ctx                 : current context
1045  * @vaddr               : device virtual address to unmap
1046  * @ctx_free            : true if in context free flow, false otherwise.
1047  *
1048  * This function does the following:
1049  * - Unmap the physical pages related to the given virtual address
1050  * - Return the device virtual block to the virtual block list
1051  */
1052 static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
1053 {
1054         struct hl_device *hdev = ctx->hdev;
1055         struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1056         struct hl_vm_hash_node *hnode = NULL;
1057         struct hl_userptr *userptr = NULL;
1058         struct hl_va_range *va_range;
1059         enum vm_type_t *vm_type;
1060         bool is_userptr;
1061         int rc = 0;
1062
1063         /* protect against a double unmap of the same address */
1064         mutex_lock(&ctx->mem_hash_lock);
1065         hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
1066                 if (vaddr == hnode->vaddr)
1067                         break;
1068
1069         if (!hnode) {
1070                 mutex_unlock(&ctx->mem_hash_lock);
1071                 dev_err(hdev->dev,
1072                         "unmap failed, no mem hnode for vaddr 0x%llx\n",
1073                         vaddr);
1074                 return -EINVAL;
1075         }
1076
1077         hash_del(&hnode->node);
1078         mutex_unlock(&ctx->mem_hash_lock);
1079
1080         vm_type = hnode->ptr;
1081
1082         if (*vm_type == VM_TYPE_USERPTR) {
1083                 is_userptr = true;
1084                 userptr = hnode->ptr;
1085                 rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1086                                                         &phys_pg_pack);
1087                 if (rc) {
1088                         dev_err(hdev->dev,
1089                                 "unable to init page pack for vaddr 0x%llx\n",
1090                                 vaddr);
1091                         goto vm_type_err;
1092                 }
1093
1094                 if (phys_pg_pack->page_size ==
1095                                         hdev->asic_prop.pmmu.page_size)
1096                         va_range = ctx->host_va_range;
1097                 else
1098                         va_range = ctx->host_huge_va_range;
1099         } else if (*vm_type == VM_TYPE_PHYS_PACK) {
1100                 is_userptr = false;
1101                 va_range = ctx->dram_va_range;
1102                 phys_pg_pack = hnode->ptr;
1103         } else {
1104                 dev_warn(hdev->dev,
1105                         "unmap failed, unknown vm desc for vaddr 0x%llx\n",
1106                                 vaddr);
1107                 rc = -EFAULT;
1108                 goto vm_type_err;
1109         }
1110
1111         if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1112                 dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1113                 rc = -EINVAL;
1114                 goto mapping_cnt_err;
1115         }
1116
1117         vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1118
1119         mutex_lock(&ctx->mmu_lock);
1120
1121         unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1122
1123         /*
1124          * During context free this function is called in a loop to clean all
1125          * the context mappings. Hence the cache invalidation can be called once
1126          * at the loop end rather than for each iteration
1127          */
1128         if (!ctx_free)
1129                 rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
1130                                                                 *vm_type);
1131
1132         mutex_unlock(&ctx->mmu_lock);
1133
1134         /*
1135          * If the context is closing we don't need to check for the MMU cache
1136          * invalidation return code and update the VA free list as in this flow
1137          * we invalidate the MMU cache outside of this unmap function and the VA
1138          * free list will be freed anyway.
1139          */
1140         if (!ctx_free) {
1141                 int tmp_rc;
1142
1143                 if (rc)
1144                         dev_err(hdev->dev,
1145                                 "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
1146                                 vaddr);
1147
1148                 tmp_rc = add_va_block(hdev, va_range, vaddr,
1149                                         vaddr + phys_pg_pack->total_size - 1);
1150                 if (tmp_rc) {
1151                         dev_warn(hdev->dev,
1152                                         "add va block failed for vaddr: 0x%llx\n",
1153                                         vaddr);
1154                         if (!rc)
1155                                 rc = tmp_rc;
1156                 }
1157         }
1158
1159         atomic_dec(&phys_pg_pack->mapping_cnt);
1160         kfree(hnode);
1161
1162         if (is_userptr) {
1163                 free_phys_pg_pack(hdev, phys_pg_pack);
1164                 dma_unmap_host_va(hdev, userptr);
1165         }
1166
1167         return rc;
1168
1169 mapping_cnt_err:
1170         if (is_userptr)
1171                 free_phys_pg_pack(hdev, phys_pg_pack);
1172 vm_type_err:
1173         mutex_lock(&ctx->mem_hash_lock);
1174         hash_add(ctx->mem_hash, &hnode->node, vaddr);
1175         mutex_unlock(&ctx->mem_hash_lock);
1176
1177         return rc;
1178 }
1179
1180 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
1181 {
1182         struct hl_device *hdev = hpriv->hdev;
1183         struct hl_ctx *ctx = hpriv->ctx;
1184         u64 device_addr = 0;
1185         u32 handle = 0;
1186         int rc;
1187
1188         switch (args->in.op) {
1189         case HL_MEM_OP_ALLOC:
1190                 if (args->in.alloc.mem_size == 0) {
1191                         dev_err(hdev->dev,
1192                                 "alloc size must be larger than 0\n");
1193                         rc = -EINVAL;
1194                         goto out;
1195                 }
1196
1197                 /* Force contiguous as there are no real MMU
1198                  * translations to overcome physical memory gaps
1199                  */
1200                 args->in.flags |= HL_MEM_CONTIGUOUS;
1201                 rc = alloc_device_memory(ctx, &args->in, &handle);
1202
1203                 memset(args, 0, sizeof(*args));
1204                 args->out.handle = (__u64) handle;
1205                 break;
1206
1207         case HL_MEM_OP_FREE:
1208                 rc = free_device_memory(ctx, args->in.free.handle);
1209                 break;
1210
1211         case HL_MEM_OP_MAP:
1212                 if (args->in.flags & HL_MEM_USERPTR) {
1213                         device_addr = args->in.map_host.host_virt_addr;
1214                         rc = 0;
1215                 } else {
1216                         rc = get_paddr_from_handle(ctx, &args->in,
1217                                         &device_addr);
1218                 }
1219
1220                 memset(args, 0, sizeof(*args));
1221                 args->out.device_virt_addr = device_addr;
1222                 break;
1223
1224         case HL_MEM_OP_UNMAP:
1225                 rc = 0;
1226                 break;
1227
1228         default:
1229                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1230                 rc = -ENOTTY;
1231                 break;
1232         }
1233
1234 out:
1235         return rc;
1236 }
1237
1238 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
1239 {
1240         union hl_mem_args *args = data;
1241         struct hl_device *hdev = hpriv->hdev;
1242         struct hl_ctx *ctx = hpriv->ctx;
1243         u64 device_addr = 0;
1244         u32 handle = 0;
1245         int rc;
1246
1247         if (hl_device_disabled_or_in_reset(hdev)) {
1248                 dev_warn_ratelimited(hdev->dev,
1249                         "Device is %s. Can't execute MEMORY IOCTL\n",
1250                         atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
1251                 return -EBUSY;
1252         }
1253
1254         if (!hdev->mmu_enable)
1255                 return mem_ioctl_no_mmu(hpriv, args);
1256
1257         switch (args->in.op) {
1258         case HL_MEM_OP_ALLOC:
1259                 if (args->in.alloc.mem_size == 0) {
1260                         dev_err(hdev->dev,
1261                                 "alloc size must be larger than 0\n");
1262                         rc = -EINVAL;
1263                         goto out;
1264                 }
1265
1266                 /* If DRAM does not support virtual memory the driver won't
1267                  * handle the allocation/freeing of that memory. However, for
1268                  * system administration/monitoring purposes, the driver will
1269                  * keep track of the amount of DRAM memory that is allocated
1270                  * and freed by the user. Because this code totally relies on
1271                  * the user's input, the driver can't ensure the validity
1272                  * of this accounting.
1273                  */
1274                 if (!hdev->dram_supports_virtual_memory) {
1275                         atomic64_add(args->in.alloc.mem_size,
1276                                         &ctx->dram_phys_mem);
1277                         atomic64_add(args->in.alloc.mem_size,
1278                                         &hdev->dram_used_mem);
1279
1280                         dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
1281                         rc = 0;
1282
1283                         memset(args, 0, sizeof(*args));
1284                         args->out.handle = 0;
1285                         goto out;
1286                 }
1287
1288                 rc = alloc_device_memory(ctx, &args->in, &handle);
1289
1290                 memset(args, 0, sizeof(*args));
1291                 args->out.handle = (__u64) handle;
1292                 break;
1293
1294         case HL_MEM_OP_FREE:
1295                 /* If DRAM does not support virtual memory the driver won't
1296                  * handle the allocation/freeing of that memory. However, for
1297                  * system administration/monitoring purposes, the driver will
1298                  * keep track of the amount of DRAM memory that is allocated
1299                  * and freed by the user. Because this code totally relies on
1300                  * the user's input, the driver can't ensure the validity
1301                  * of this accounting.
1302                  */
1303                 if (!hdev->dram_supports_virtual_memory) {
1304                         atomic64_sub(args->in.alloc.mem_size,
1305                                         &ctx->dram_phys_mem);
1306                         atomic64_sub(args->in.alloc.mem_size,
1307                                         &hdev->dram_used_mem);
1308
1309                         dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
1310                         rc = 0;
1311
1312                         goto out;
1313                 }
1314
1315                 rc = free_device_memory(ctx, args->in.free.handle);
1316                 break;
1317
1318         case HL_MEM_OP_MAP:
1319                 rc = map_device_va(ctx, &args->in, &device_addr);
1320
1321                 memset(args, 0, sizeof(*args));
1322                 args->out.device_virt_addr = device_addr;
1323                 break;
1324
1325         case HL_MEM_OP_UNMAP:
1326                 rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
1327                                         false);
1328                 break;
1329
1330         default:
1331                 dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
1332                 rc = -ENOTTY;
1333                 break;
1334         }
1335
1336 out:
1337         return rc;
1338 }
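
/*
 * Illustrative user-space sketch (not part of the driver): mapping a
 * previously allocated DRAM handle through this IOCTL. The request macro
 * HL_IOCTL_MEMORY and the <misc/habanalabs.h> include path are assumptions
 * based on the installed uAPI header; 'fd' is an open file descriptor of the
 * device and 'handle' came from an earlier HL_MEM_OP_ALLOC call.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int toy_map_dram_handle(int fd, __u64 handle, __u64 *device_va)
{
        union hl_mem_args args;

        memset(&args, 0, sizeof(args));
        args.in.op = HL_MEM_OP_MAP;
        args.in.map_device.handle = handle;
        args.in.map_device.hint_addr = 0;       /* let the driver pick the VA */

        if (ioctl(fd, HL_IOCTL_MEMORY, &args))
                return -1;

        *device_va = args.out.device_virt_addr;
        return 0;
}
#endif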
1339
1340 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
1341                                 u32 npages, u64 start, u32 offset,
1342                                 struct hl_userptr *userptr)
1343 {
1344         int rc;
1345
1346         if (!access_ok((void __user *) (uintptr_t) addr, size)) {
1347                 dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
1348                 return -EFAULT;
1349         }
1350
1351         userptr->vec = frame_vector_create(npages);
1352         if (!userptr->vec) {
1353                 dev_err(hdev->dev, "Failed to create frame vector\n");
1354                 return -ENOMEM;
1355         }
1356
1357         rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
1358                                 userptr->vec);
1359
1360         if (rc != npages) {
1361                 dev_err(hdev->dev,
1362                         "Failed to map host memory, user ptr probably wrong\n");
1363                 if (rc < 0)
1364                         goto destroy_framevec;
1365                 rc = -EFAULT;
1366                 goto put_framevec;
1367         }
1368
1369         if (frame_vector_to_pages(userptr->vec) < 0) {
1370                 dev_err(hdev->dev,
1371                         "Failed to translate frame vector to pages\n");
1372                 rc = -EFAULT;
1373                 goto put_framevec;
1374         }
1375
1376         rc = sg_alloc_table_from_pages(userptr->sgt,
1377                                         frame_vector_pages(userptr->vec),
1378                                         npages, offset, size, GFP_ATOMIC);
1379         if (rc < 0) {
1380                 dev_err(hdev->dev, "failed to create SG table from pages\n");
1381                 goto put_framevec;
1382         }
1383
1384         return 0;
1385
1386 put_framevec:
1387         put_vaddr_frames(userptr->vec);
1388 destroy_framevec:
1389         frame_vector_destroy(userptr->vec);
1390         return rc;
1391 }
1392
1393 /*
1394  * hl_pin_host_memory - pins a chunk of host memory.
1395  * @hdev: pointer to the habanalabs device structure
1396  * @addr: the host virtual address of the memory area
1397  * @size: the size of the memory area
1398  * @userptr: pointer to hl_userptr structure
1399  *
1400  * This function does the following:
1401  * - Pins the physical pages
1402  * - Create an SG list from those pages
1403  */
1404 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
1405                                         struct hl_userptr *userptr)
1406 {
1407         u64 start, end;
1408         u32 npages, offset;
1409         int rc;
1410
1411         if (!size) {
1412                 dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
1413                 return -EINVAL;
1414         }
1415
1416         /*
1417          * If the combination of the address and size requested for this memory
1418          * region causes an integer overflow, return error.
1419          */
1420         if (((addr + size) < addr) ||
1421                         PAGE_ALIGN(addr + size) < (addr + size)) {
1422                 dev_err(hdev->dev,
1423                         "user pointer 0x%llx + %llu causes integer overflow\n",
1424                         addr, size);
1425                 return -EINVAL;
1426         }
1427
1428         /*
1429          * This function can also be called from the data path, hence always use
1430          * GFP_ATOMIC; it is not a big allocation anyway.
1431          */
1432         userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
1433         if (!userptr->sgt)
1434                 return -ENOMEM;
1435
1436         start = addr & PAGE_MASK;
1437         offset = addr & ~PAGE_MASK;
1438         end = PAGE_ALIGN(addr + size);
1439         npages = (end - start) >> PAGE_SHIFT;
1440
1441         userptr->size = size;
1442         userptr->addr = addr;
1443         userptr->dma_mapped = false;
1444         INIT_LIST_HEAD(&userptr->job_node);
1445
1446         rc = get_user_memory(hdev, addr, size, npages, start, offset,
1447                                 userptr);
1448         if (rc) {
1449                 dev_err(hdev->dev,
1450                         "failed to get user memory for address 0x%llx\n",
1451                         addr);
1452                 goto free_sgt;
1453         }
1454
1455         hl_debugfs_add_userptr(hdev, userptr);
1456
1457         return 0;
1458
1459 free_sgt:
1460         kfree(userptr->sgt);
1461         return rc;
1462 }
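
/*
 * Worked example for the pinning math above (illustrative numbers): with 4KB
 * pages, pinning addr = 0x7f0000001800 with size = 0x2000 gives
 * start = 0x7f0000001000, offset = 0x800, end = PAGE_ALIGN(0x7f0000003800) =
 * 0x7f0000004000, and therefore npages = (end - start) >> PAGE_SHIFT = 3.
 */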
1463
1464 /*
1465  * hl_unpin_host_memory - unpins a chunk of host memory.
1466  * @hdev: pointer to the habanalabs device structure
1467  * @userptr: pointer to hl_userptr structure
1468  *
1469  * This function does the following:
1470  * - Unpins the physical pages related to the host memory
1471  * - Frees the SG list
1472  */
1473 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
1474 {
1475         struct page **pages;
1476
1477         hl_debugfs_remove_userptr(hdev, userptr);
1478
1479         if (userptr->dma_mapped)
1480                 hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
1481                                                         userptr->sgt->nents,
1482                                                         userptr->dir);
1483
1484         pages = frame_vector_pages(userptr->vec);
1485         if (!IS_ERR(pages)) {
1486                 int i;
1487
1488                 for (i = 0; i < frame_vector_count(userptr->vec); i++)
1489                         set_page_dirty_lock(pages[i]);
1490         }
1491         put_vaddr_frames(userptr->vec);
1492         frame_vector_destroy(userptr->vec);
1493
1494         list_del(&userptr->job_node);
1495
1496         sg_free_table(userptr->sgt);
1497         kfree(userptr->sgt);
1498 }
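
/*
 * Illustrative sketch, not part of the driver: a hypothetical caller is
 * expected to pair hl_pin_host_memory() with hl_unpin_host_memory() and to
 * free the hl_userptr object it allocated itself, roughly:
 *
 *      struct hl_userptr *userptr;
 *      int rc;
 *
 *      userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
 *      if (!userptr)
 *              return -ENOMEM;
 *      rc = hl_pin_host_memory(hdev, user_addr, user_size, userptr);
 *      if (rc) {
 *              kfree(userptr);
 *              return rc;
 *      }
 *      ... use userptr->sgt, optionally DMA-map it ...
 *      hl_unpin_host_memory(hdev, userptr);
 *      kfree(userptr);
 *
 * user_addr and user_size are placeholders here. Note that unpinning also
 * does list_del() on job_node and frees the SG table, so the hl_userptr
 * must not be reused afterwards without pinning again.
 */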
1499
1500 /*
1501  * hl_userptr_delete_list - clear userptr list
1502  *
1503  * @hdev                : pointer to the habanalabs device structure
1504  * @userptr_list        : pointer to the list to clear
1505  *
1506  * This function does the following:
1507  * - Iterates over the list and unpins the host memory and frees the userptr
1508  * - Iterates over the list, unpins the host memory and frees each userptr
1509  *   structure.
1510 void hl_userptr_delete_list(struct hl_device *hdev,
1511                                 struct list_head *userptr_list)
1512 {
1513         struct hl_userptr *userptr, *tmp;
1514
1515         list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
1516                 hl_unpin_host_memory(hdev, userptr);
1517                 kfree(userptr);
1518         }
1519
1520         INIT_LIST_HEAD(userptr_list);
1521 }
1522
1523 /*
1524  * hl_userptr_is_pinned - returns whether the given userptr is pinned
1525  *
1526  * @hdev                : pointer to the habanalabs device structure
1527  * @userptr_list        : pointer to the list to search
1528  * @userptr             : pointer to userptr to check
1529  *
1530  * This function does the following:
1531  * - Iterates over the list and checks whether an entry with the given address
1532  *   and size is in it, meaning it is pinned. Returns true if so, false otherwise.
1533  */
1534 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
1535                                 u32 size, struct list_head *userptr_list,
1536                                 struct hl_userptr **userptr)
1537 {
1538         list_for_each_entry((*userptr), userptr_list, job_node) {
1539                 if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
1540                         return true;
1541         }
1542
1543         return false;
1544 }
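
/*
 * Illustrative sketch, not part of the driver: hl_userptr_is_pinned() lets a
 * hypothetical caller reuse an already-pinned region from a job's userptr
 * list instead of pinning it twice (the list name below is a placeholder):
 *
 *      struct hl_userptr *userptr;
 *
 *      if (hl_userptr_is_pinned(hdev, addr, size, &job_userptr_list,
 *                                      &userptr)) {
 *              reuse userptr->sgt
 *      } else {
 *              pin with hl_pin_host_memory() and then
 *              list_add_tail(&userptr->job_node, &job_userptr_list);
 *      }
 *
 * The match is exact on both address and size; an overlapping but
 * non-identical region is treated as not pinned.
 */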
1545
1546 /*
1547  * va_range_init - initialize virtual addresses range
1548  * @hdev: pointer to the habanalabs device structure
1549  * @va_range: pointer to the range to initialize
1550  * @start: range start address
1551  * @end: range end address
1552  *
1553  * This function does the following:
1554  * - Initializes the virtual addresses list of the given range with the given
1555  *   addresses.
1556  */
1557 static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
1558                                 u64 start, u64 end)
1559 {
1560         int rc;
1561
1562         INIT_LIST_HEAD(&va_range->list);
1563
1564         /* Page-align the range inwards: round start up and end down */
1565
1566         if (start & (PAGE_SIZE - 1)) {
1567                 start &= PAGE_MASK;
1568                 start += PAGE_SIZE;
1569         }
1570
1571         if (end & (PAGE_SIZE - 1))
1572                 end &= PAGE_MASK;
1573
1574         if (start >= end) {
1575                 dev_err(hdev->dev, "too small vm range for va list\n");
1576                 return -EFAULT;
1577         }
1578
1579         rc = add_va_block(hdev, va_range, start, end);
1580
1581         if (rc) {
1582                 dev_err(hdev->dev, "Failed to init va list\n");
1583                 return rc;
1584         }
1585
1586         va_range->start_addr = start;
1587         va_range->end_addr = end;
1588
1589         return 0;
1590 }
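
/*
 * Illustrative sketch, not part of the driver: the alignment in
 * va_range_init() shrinks the range inwards so only whole pages are handed
 * out. Assuming a 4KB PAGE_SIZE and a hypothetical range of
 * start = 0x20000800, end = 0x30000800:
 *
 *      start is rounded up   to 0x20001000
 *      end   is rounded down to 0x30000000
 *
 * and the single initial block added by add_va_block() spans the aligned
 * start to the aligned end. A range smaller than one page collapses to
 * start >= end and is rejected.
 */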
1591
1592 /*
1593  * va_range_fini() - clear a virtual addresses range
1594  * @hdev: pointer to the habanalabs device structure
1595  * @va_range: pointer to virtual addresses range
1596  *
1597  * This function does the following:
1598  * - Frees the virtual addresses block list and its lock
1599  */
1600 static void va_range_fini(struct hl_device *hdev,
1601                 struct hl_va_range *va_range)
1602 {
1603         mutex_lock(&va_range->lock);
1604         clear_va_list_locked(hdev, &va_range->list);
1605         mutex_unlock(&va_range->lock);
1606
1607         mutex_destroy(&va_range->lock);
1608         kfree(va_range);
1609 }
1610
1611 /*
1612  * vm_ctx_init_with_ranges() - initialize virtual memory for context
1613  * @ctx: pointer to the habanalabs context structure
1614  * @host_range_start: host virtual addresses range start.
1615  * @host_range_end: host virtual addresses range end.
1616  * @host_huge_range_start: host virtual addresses range start for memory
1617  *                          allocated with huge pages.
1618  * @host_huge_range_end: host virtual addresses range end for memory allocated
1619  *                        with huge pages.
1620  * @dram_range_start: dram virtual addresses range start.
1621  * @dram_range_end: dram virtual addresses range end.
1622  *
1623  * This function initializes the following:
1624  * - MMU for context
1625  * - Virtual address to area descriptor hashtable
1626  * - Virtual block list of available virtual memory
1627  */
1628 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
1629                                         u64 host_range_start,
1630                                         u64 host_range_end,
1631                                         u64 host_huge_range_start,
1632                                         u64 host_huge_range_end,
1633                                         u64 dram_range_start,
1634                                         u64 dram_range_end)
1635 {
1636         struct hl_device *hdev = ctx->hdev;
1637         int rc;
1638
1639         ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
1640         if (!ctx->host_va_range)
1641                 return -ENOMEM;
1642
1643         ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
1644                                                 GFP_KERNEL);
1645         if (!ctx->host_huge_va_range) {
1646                 rc = -ENOMEM;
1647                 goto host_huge_va_range_err;
1648         }
1649
1650         ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
1651         if (!ctx->dram_va_range) {
1652                 rc = -ENOMEM;
1653                 goto dram_va_range_err;
1654         }
1655
1656         rc = hl_mmu_ctx_init(ctx);
1657         if (rc) {
1658                 dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
1659                 goto mmu_ctx_err;
1660         }
1661
1662         mutex_init(&ctx->mem_hash_lock);
1663         hash_init(ctx->mem_hash);
1664
1665         mutex_init(&ctx->host_va_range->lock);
1666
1667         rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
1668                                 host_range_end);
1669         if (rc) {
1670                 dev_err(hdev->dev, "failed to init host vm range\n");
1671                 goto host_page_range_err;
1672         }
1673
1674         if (hdev->pmmu_huge_range) {
1675                 mutex_init(&ctx->host_huge_va_range->lock);
1676
1677                 rc = va_range_init(hdev, ctx->host_huge_va_range,
1678                                         host_huge_range_start,
1679                                         host_huge_range_end);
1680                 if (rc) {
1681                         dev_err(hdev->dev,
1682                                 "failed to init host huge vm range\n");
1683                         goto host_hpage_range_err;
1684                 }
1685         } else {
1686                 ctx->host_huge_va_range = ctx->host_va_range;
1687         }
1688
1689         mutex_init(&ctx->dram_va_range->lock);
1690
1691         rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
1692                         dram_range_end);
1693         if (rc) {
1694                 dev_err(hdev->dev, "failed to init dram vm range\n");
1695                 goto dram_vm_err;
1696         }
1697
1698         hl_debugfs_add_ctx_mem_hash(hdev, ctx);
1699
1700         return 0;
1701
1702 dram_vm_err:
1703         mutex_destroy(&ctx->dram_va_range->lock);
1704
1705         if (hdev->pmmu_huge_range) {
1706                 mutex_lock(&ctx->host_huge_va_range->lock);
1707                 clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
1708                 mutex_unlock(&ctx->host_huge_va_range->lock);
1709         }
1710 host_hpage_range_err:
1711         if (hdev->pmmu_huge_range)
1712                 mutex_destroy(&ctx->host_huge_va_range->lock);
1713         mutex_lock(&ctx->host_va_range->lock);
1714         clear_va_list_locked(hdev, &ctx->host_va_range->list);
1715         mutex_unlock(&ctx->host_va_range->lock);
1716 host_page_range_err:
1717         mutex_destroy(&ctx->host_va_range->lock);
1718         mutex_destroy(&ctx->mem_hash_lock);
1719         hl_mmu_ctx_fini(ctx);
1720 mmu_ctx_err:
1721         kfree(ctx->dram_va_range);
1722 dram_va_range_err:
1723         kfree(ctx->host_huge_va_range);
1724 host_huge_va_range_err:
1725         kfree(ctx->host_va_range);
1726
1727         return rc;
1728 }
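
/*
 * Note on the error path of vm_ctx_init_with_ranges(): the goto labels fall
 * through into one another, so jumping to a label undoes everything that was
 * initialized before the failing step and nothing after it - first the va
 * list contents and their locks, then mem_hash_lock and the MMU context, and
 * finally the three kzalloc'ed va_range structures, in reverse order of
 * allocation.
 */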
1729
1730 int hl_vm_ctx_init(struct hl_ctx *ctx)
1731 {
1732         struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
1733         u64 host_range_start, host_range_end, host_huge_range_start,
1734                 host_huge_range_end, dram_range_start, dram_range_end;
1735
1736         atomic64_set(&ctx->dram_phys_mem, 0);
1737
1738         /*
1739          * - If MMU is enabled, init the ranges as usual.
1740          * - If MMU is disabled, in case of host mapping, the returned address
1741          *   is the given one.
1742          *   In case of DRAM mapping, the returned address is the physical
1743          *   address of the memory related to the given handle.
1744          */
1745         if (!ctx->hdev->mmu_enable)
1746                 return 0;
1747
1748         dram_range_start = prop->dmmu.start_addr;
1749         dram_range_end = prop->dmmu.end_addr;
1750         host_range_start = prop->pmmu.start_addr;
1751         host_range_end = prop->pmmu.end_addr;
1752         host_huge_range_start = prop->pmmu_huge.start_addr;
1753         host_huge_range_end = prop->pmmu_huge.end_addr;
1754
1755         return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
1756                                 host_huge_range_start, host_huge_range_end,
1757                                 dram_range_start, dram_range_end);
1758 }
1759
1760 /*
1761  * hl_vm_ctx_fini       - virtual memory teardown of context
1762  *
1763  * @ctx                 : pointer to the habanalabs context structure
1764  *
1765  * This function performs teardown of the following:
1766  * - Virtual block list of available virtual memory
1767  * - Virtual address to area descriptor hashtable
1768  * - MMU for context
1769  *
1770  * In addition this function does the following:
1771  * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
1772  *   hashtable should be empty as no valid mappings should exist at this
1773  *   point.
1774  * - Frees any existing physical page list from the idr which relates to the
1775  *   current context asid.
1776  * - This function checks the virtual block list for correctness. At this point
1777  *   the list should contain one element which describes the whole virtual
1778  *   memory range of the context. Otherwise, a warning is printed.
1779  */
1780 void hl_vm_ctx_fini(struct hl_ctx *ctx)
1781 {
1782         struct hl_device *hdev = ctx->hdev;
1783         struct hl_vm *vm = &hdev->vm;
1784         struct hl_vm_phys_pg_pack *phys_pg_list;
1785         struct hl_vm_hash_node *hnode;
1786         struct hlist_node *tmp_node;
1787         int i;
1788
1789         if (!ctx->hdev->mmu_enable)
1790                 return;
1791
1792         hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
1793
1794         /*
1795          * If a hard reset is pending, something clearly went wrong already, so
1796          * there is no point in printing another error about its side effects
1797          */
1798         if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
1799                 dev_notice(hdev->dev,
1800                         "user released device without removing its memory mappings\n");
1801
1802         hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
1803                 dev_dbg(hdev->dev,
1804                         "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
1805                         hnode->vaddr, ctx->asid);
1806                 unmap_device_va(ctx, hnode->vaddr, true);
1807         }
1808
1809         /* invalidate the cache once after the unmapping loop */
1810         hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
1811         hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
1812
1813         spin_lock(&vm->idr_lock);
1814         idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
1815                 if (phys_pg_list->asid == ctx->asid) {
1816                         dev_dbg(hdev->dev,
1817                                 "page list 0x%px of asid %d is still alive\n",
1818                                 phys_pg_list, ctx->asid);
1819                         atomic64_sub(phys_pg_list->total_size,
1820                                         &hdev->dram_used_mem);
1821                         free_phys_pg_pack(hdev, phys_pg_list);
1822                         idr_remove(&vm->phys_pg_pack_handles, i);
1823                 }
1824         spin_unlock(&vm->idr_lock);
1825
1826         va_range_fini(hdev, ctx->dram_va_range);
1827         if (hdev->pmmu_huge_range)
1828                 va_range_fini(hdev, ctx->host_huge_va_range);
1829         va_range_fini(hdev, ctx->host_va_range);
1830
1831         mutex_destroy(&ctx->mem_hash_lock);
1832         hl_mmu_ctx_fini(ctx);
1833
1834         /* In this case we need to clear the global accounting of DRAM usage
1835          * because the user notifies us on allocations. When the user is gone,
1836          * all DRAM becomes available again
1837          */
1838         if (!ctx->hdev->dram_supports_virtual_memory)
1839                 atomic64_set(&ctx->hdev->dram_used_mem, 0);
1840 }
1841
1842 /*
1843  * hl_vm_init           - initialize virtual memory module
1844  *
1845  * @hdev                : pointer to the habanalabs device structure
1846  *
1847  * This function initializes the following:
1848  * - MMU module
1849  * - DRAM physical pages pool of 2MB
1850  * - Idr for device memory allocation handles
1851  */
1852 int hl_vm_init(struct hl_device *hdev)
1853 {
1854         struct asic_fixed_properties *prop = &hdev->asic_prop;
1855         struct hl_vm *vm = &hdev->vm;
1856         int rc;
1857
1858         vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
1859         if (!vm->dram_pg_pool) {
1860                 dev_err(hdev->dev, "Failed to create dram page pool\n");
1861                 return -ENOMEM;
1862         }
1863
1864         kref_init(&vm->dram_pg_pool_refcount);
1865
1866         rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
1867                         prop->dram_end_address - prop->dram_user_base_address,
1868                         -1);
1869
1870         if (rc) {
1871                 dev_err(hdev->dev,
1872                         "Failed to add memory to dram page pool %d\n", rc);
1873                 goto pool_add_err;
1874         }
1875
1876         spin_lock_init(&vm->idr_lock);
1877         idr_init(&vm->phys_pg_pack_handles);
1878
1879         atomic64_set(&hdev->dram_used_mem, 0);
1880
1881         vm->init_done = true;
1882
1883         return 0;
1884
1885 pool_add_err:
1886         gen_pool_destroy(vm->dram_pg_pool);
1887
1888         return rc;
1889 }
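
/*
 * Illustrative sketch, not part of the driver: gen_pool_create() takes the
 * minimum allocation order, so using __ffs(dram_page_size) makes the DRAM
 * pool hand out memory in page-size granules. For a hypothetical 2MB DRAM
 * page size:
 *
 *      __ffs(0x200000) == 21
 *      pool  = gen_pool_create(21, -1);
 *      gen_pool_add(pool, dram_user_base, dram_size, -1);
 *      paddr = gen_pool_alloc(pool, total_size);
 *
 * (dram_user_base, dram_size and total_size are placeholders.) Allocations
 * from the pool are then rounded up to a multiple of 2MB, matching the 2MB
 * pool mentioned in the kernel-doc above.
 */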
1890
1891 /*
1892  * hl_vm_fini           - virtual memory module teardown
1893  *
1894  * @hdev                : pointer to the habanalabs device structure
1895  *
1896  * This function performs teardown of the following:
1897  * - Idr for device memory allocation handles
1898  * - DRAM physical pages pool of 2MB
1899  * - MMU module
1900  */
1901 void hl_vm_fini(struct hl_device *hdev)
1902 {
1903         struct hl_vm *vm = &hdev->vm;
1904
1905         if (!vm->init_done)
1906                 return;
1907
1908         /*
1909          * At this point all the contexts should be freed and hence no DRAM memory
1910          * should be in use, so the DRAM pool should be freed here.
1911          */
1912         if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
1913                 dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
1914                                 __func__);
1915
1916         vm->init_done = false;
1917 }