// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

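/*
 * cb_map_mem() - map a CB to the device MMU of the given context.
 *
 * Allocates device virtual addresses from the context's CB VA pool, one MMU
 * page at a time, maps each VA block to the corresponding portion of the
 * CB's bus address range under the context MMU lock and invalidates the MMU
 * cache. On failure, any mappings and VA blocks created so far are rolled
 * back.
 */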
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        struct hl_vm_va_block *va_block, *tmp;
        dma_addr_t bus_addr;
        u64 virt_addr;
        u32 page_size = prop->pmmu.page_size;
        s32 offset;
        int rc;

        if (!hdev->supports_cb_mapping) {
                dev_err_ratelimited(hdev->dev,
                                "Cannot map CB because no VA range is allocated for CB mapping\n");
                return -EINVAL;
        }

        if (!hdev->mmu_enable) {
                dev_err_ratelimited(hdev->dev,
                                "Cannot map CB because MMU is disabled\n");
                return -EINVAL;
        }

        INIT_LIST_HEAD(&cb->va_block_list);

        for (bus_addr = cb->bus_address;
                        bus_addr < cb->bus_address + cb->size;
                        bus_addr += page_size) {

                virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
                if (!virt_addr) {
                        dev_err(hdev->dev,
                                "Failed to allocate device virtual address for CB\n");
                        rc = -ENOMEM;
                        goto err_va_pool_free;
                }

                va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
                if (!va_block) {
                        rc = -ENOMEM;
                        gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
                        goto err_va_pool_free;
                }

                va_block->start = virt_addr;
                va_block->end = virt_addr + page_size;
                va_block->size = page_size;
                list_add_tail(&va_block->node, &cb->va_block_list);
        }

        mutex_lock(&ctx->mmu_lock);

        bus_addr = cb->bus_address;
        offset = 0;
        list_for_each_entry(va_block, &cb->va_block_list, node) {
                rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
                                va_block->size, list_is_last(&va_block->node,
                                                        &cb->va_block_list));
                if (rc) {
                        dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
                                va_block->start);
                        goto err_va_umap;
                }

                bus_addr += va_block->size;
                offset += va_block->size;
        }

        hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

        mutex_unlock(&ctx->mmu_lock);

        cb->is_mmu_mapped = true;

        return 0;

err_va_umap:
        list_for_each_entry(va_block, &cb->va_block_list, node) {
                if (offset <= 0)
                        break;
                hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
                                offset <= va_block->size);
                offset -= va_block->size;
        }

        hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

        mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
        list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
                gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
                list_del(&va_block->node);
                kfree(va_block);
        }

        return rc;
}

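/*
 * cb_unmap_mem() - undo cb_map_mem(): unmap all of the CB's VA blocks from
 * the device MMU, invalidate the MMU cache and return the virtual addresses
 * to the context's CB VA pool.
 */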
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm_va_block *va_block, *tmp;

        mutex_lock(&ctx->mmu_lock);

        list_for_each_entry(va_block, &cb->va_block_list, node)
                if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
                                list_is_last(&va_block->node,
                                                &cb->va_block_list)))
                        dev_warn_ratelimited(hdev->dev,
                                        "Failed to unmap CB's va 0x%llx\n",
                                        va_block->start);

        hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

        mutex_unlock(&ctx->mmu_lock);

        list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
                gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
                list_del(&va_block->node);
                kfree(va_block);
        }
}

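/*
 * cb_fini() - free the CB's backing memory (internal gen pool allocation or
 * coherent DMA allocation) and the CB structure itself.
 */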
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
        if (cb->is_internal)
                gen_pool_free(hdev->internal_cb_pool,
                                (uintptr_t)cb->kernel_address, cb->size);
        else
                hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
                                cb->kernel_address, cb->bus_address);

        kfree(cb);
}

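/*
 * cb_do_release() - return pool CBs to the device-wide CB pool for reuse;
 * non-pool CBs are freed immediately via cb_fini().
 */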
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
        if (cb->is_pool) {
                spin_lock(&hdev->cb_pool_lock);
                list_add(&cb->pool_list, &hdev->cb_pool);
                spin_unlock(&hdev->cb_pool_lock);
        } else {
                cb_fini(hdev, cb);
        }
}

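/*
 * cb_release() - kref release callback, called when the last reference to a
 * CB is dropped. Removes the CB from debugfs, unmaps it from the device MMU
 * if needed, drops the context reference and finally releases the CB.
 */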
static void cb_release(struct kref *ref)
{
        struct hl_device *hdev;
        struct hl_cb *cb;

        cb = container_of(ref, struct hl_cb, refcount);
        hdev = cb->hdev;

        hl_debugfs_remove_cb(cb);

        if (cb->is_mmu_mapped)
                cb_unmap_mem(cb->ctx, cb);

        hl_ctx_put(cb->ctx);

        cb_do_release(hdev, cb);
}

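/*
 * hl_cb_alloc() - allocate a CB descriptor and its backing memory. Internal
 * CBs come from the device's internal CB pool, kernel-context CBs from
 * coherent DMA memory (GFP_ATOMIC with a GFP_KERNEL fallback) and user CBs
 * from zeroed coherent DMA memory. Returns NULL on any failure.
 */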
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
                                        int ctx_id, bool internal_cb)
{
        struct hl_cb *cb = NULL;
        u32 cb_offset;
        void *p;

        /*
         * We use GFP_ATOMIC here because this function can be called from
         * the latency-sensitive code path for command submission. Due to H/W
         * limitations in some of the ASICs, the kernel must copy the user CB
         * that is designated for an external queue and actually enqueue
         * the kernel's copy. Hence, we must never sleep in this code section
         * and must use GFP_ATOMIC for all memory allocations.
         */
        if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
                cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

        if (!cb)
                cb = kzalloc(sizeof(*cb), GFP_KERNEL);

        if (!cb)
                return NULL;

        if (internal_cb) {
                p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
                if (!p) {
                        kfree(cb);
                        return NULL;
                }

                cb_offset = p - hdev->internal_cb_pool_virt_addr;
                cb->is_internal = true;
                cb->bus_address = hdev->internal_cb_va_base + cb_offset;
        } else if (ctx_id == HL_KERNEL_ASID_ID) {
                p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
                                                &cb->bus_address, GFP_ATOMIC);
                if (!p)
                        p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
                                        cb_size, &cb->bus_address, GFP_KERNEL);
        } else {
                p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
                                                &cb->bus_address,
                                                GFP_USER | __GFP_ZERO);
        }

        if (!p) {
                dev_err(hdev->dev,
                        "failed to allocate %d bytes of DMA memory for CB\n",
                        cb_size);
                kfree(cb);
                return NULL;
        }

        cb->kernel_address = p;
        cb->size = cb_size;

        return cb;
}

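/**
 * hl_cb_create() - create a new command buffer.
 * @hdev: pointer to habanalabs device structure.
 * @mgr: command buffer manager that will track the new CB.
 * @ctx: context the CB belongs to.
 * @cb_size: requested size, in bytes.
 * @internal_cb: true to allocate from the device's internal CB pool.
 * @map_cb: true to also map the CB to the device MMU (user contexts only).
 * @handle: result, an opaque handle for the new CB.
 *
 * For kernel-context CBs that fit, a pre-allocated CB is reused from the
 * device CB pool; otherwise a new CB is allocated. The CB is then published
 * in the manager's IDR, and the returned handle encodes the IDR id together
 * with HL_MMAP_TYPE_CB, shifted left by PAGE_SHIFT.
 *
 * Return: 0 on success, negative errno on failure.
 */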
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
                        struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
                        bool map_cb, u64 *handle)
{
        struct hl_cb *cb;
        bool alloc_new_cb = true;
        int rc, ctx_id = ctx->asid;

        /*
         * Can't use the generic function to check this because of the special
         * case where we create a CB as part of the reset process
         */
        if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
                                        (ctx_id != HL_KERNEL_ASID_ID))) {
                dev_warn_ratelimited(hdev->dev,
                        "Device is disabled or in reset. Can't create new CBs\n");
                rc = -EBUSY;
                goto out_err;
        }

        if (cb_size > SZ_2M) {
                dev_err(hdev->dev, "CB size %d must be less than %d\n",
                        cb_size, SZ_2M);
                rc = -EINVAL;
                goto out_err;
        }

        if (!internal_cb) {
                /* Minimum allocation must be PAGE_SIZE */
                if (cb_size < PAGE_SIZE)
                        cb_size = PAGE_SIZE;

                if (ctx_id == HL_KERNEL_ASID_ID &&
                                cb_size <= hdev->asic_prop.cb_pool_cb_size) {

                        spin_lock(&hdev->cb_pool_lock);
                        if (!list_empty(&hdev->cb_pool)) {
                                cb = list_first_entry(&hdev->cb_pool,
                                                typeof(*cb), pool_list);
                                list_del(&cb->pool_list);
                                spin_unlock(&hdev->cb_pool_lock);
                                alloc_new_cb = false;
                        } else {
                                spin_unlock(&hdev->cb_pool_lock);
                                dev_dbg(hdev->dev, "CB pool is empty\n");
                        }
                }
        }

        if (alloc_new_cb) {
                cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
                if (!cb) {
                        rc = -ENOMEM;
                        goto out_err;
                }
        }

        cb->hdev = hdev;
        cb->ctx = ctx;
        hl_ctx_get(hdev, cb->ctx);

        if (map_cb) {
                if (ctx_id == HL_KERNEL_ASID_ID) {
                        dev_err(hdev->dev,
                                "CB mapping is not supported for kernel context\n");
                        rc = -EINVAL;
                        goto release_cb;
                }

                rc = cb_map_mem(ctx, cb);
                if (rc)
                        goto release_cb;
        }

        spin_lock(&mgr->cb_lock);
        rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
        if (rc < 0)
                rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
        spin_unlock(&mgr->cb_lock);

        if (rc < 0) {
                dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
                goto unmap_mem;
        }

        cb->id = (u64) rc;

        kref_init(&cb->refcount);
        spin_lock_init(&cb->lock);

        /*
         * The IDR handle is 32-bit, so we can safely OR it with a mask that
         * is above 32 bits
         */
        *handle = cb->id | HL_MMAP_TYPE_CB;
        *handle <<= PAGE_SHIFT;

        hl_debugfs_add_cb(cb);

        return 0;

unmap_mem:
        if (cb->is_mmu_mapped)
                cb_unmap_mem(cb->ctx, cb);
release_cb:
        hl_ctx_put(cb->ctx);
        cb_do_release(hdev, cb);
out_err:
        *handle = 0;

        return rc;
}

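/**
 * hl_cb_destroy() - destroy a command buffer by handle.
 * @hdev: pointer to habanalabs device structure.
 * @mgr: command buffer manager that tracks the CB.
 * @cb_handle: handle as returned by hl_cb_create().
 *
 * The handle is shifted back to the raw IDR id, the CB is removed from the
 * IDR and the creation reference is dropped. The CB memory itself is freed
 * only when the last reference (e.g. an active mmap) is gone.
 *
 * Return: 0 on success, -EINVAL if no CB matches the handle.
 */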
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
        struct hl_cb *cb;
        u32 handle;
        int rc = 0;

        /*
         * The handle was given to the user for mmap, so shift it back to
         * the value that was allocated by the IDR module
         */
        cb_handle >>= PAGE_SHIFT;
        handle = (u32) cb_handle;

        spin_lock(&mgr->cb_lock);

        cb = idr_find(&mgr->cb_handles, handle);
        if (cb) {
                idr_remove(&mgr->cb_handles, handle);
                spin_unlock(&mgr->cb_lock);
                kref_put(&cb->refcount, cb_release);
        } else {
                spin_unlock(&mgr->cb_lock);
                dev_err(hdev->dev,
                        "CB destroy failed, no match to handle 0x%x\n", handle);
                rc = -EINVAL;
        }

        return rc;
}

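/*
 * hl_cb_info() - retrieve the usage count of a CB (cs_cnt), i.e. the number
 * of command submissions that currently use it, for the given handle.
 */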
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
                        u64 cb_handle, u32 *usage_cnt)
{
        struct hl_cb *cb;
        u32 handle;
        int rc = 0;

        /* The CB handle was given to the user for mmap, so shift it back
         * to the value which was allocated by the IDR module.
         */
        cb_handle >>= PAGE_SHIFT;
        handle = (u32) cb_handle;

        spin_lock(&mgr->cb_lock);

        cb = idr_find(&mgr->cb_handles, handle);
        if (!cb) {
                dev_err(hdev->dev,
                        "CB info failed, no match to handle 0x%x\n", handle);
                rc = -EINVAL;
                goto out;
        }

        *usage_cnt = atomic_read(&cb->cs_cnt);

out:
        spin_unlock(&mgr->cb_lock);
        return rc;
}

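/**
 * hl_cb_ioctl() - handler for the command buffer IOCTL.
 * @hpriv: pointer to the file's private data.
 * @data: pointer to the IOCTL arguments (union hl_cb_args).
 *
 * Dispatches HL_CB_OP_CREATE, HL_CB_OP_DESTROY and HL_CB_OP_INFO requests
 * coming from user-space. Requests are rejected while the device is not
 * operational.
 *
 * Return: 0 on success, negative errno on failure.
 */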
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
        union hl_cb_args *args = data;
        struct hl_device *hdev = hpriv->hdev;
        enum hl_device_status status;
        u64 handle = 0;
        u32 usage_cnt = 0;
        int rc;

        if (!hl_device_operational(hdev, &status)) {
                dev_warn_ratelimited(hdev->dev,
                        "Device is %s. Can't execute CB IOCTL\n",
                        hdev->status[status]);
                return -EBUSY;
        }

        switch (args->in.op) {
        case HL_CB_OP_CREATE:
                if (args->in.cb_size > HL_MAX_CB_SIZE) {
                        dev_err(hdev->dev,
                                "User requested CB size %d must be less than %d\n",
                                args->in.cb_size, HL_MAX_CB_SIZE);
                        rc = -EINVAL;
                } else {
                        rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
                                        args->in.cb_size, false,
                                        !!(args->in.flags & HL_CB_FLAGS_MAP),
                                        &handle);
                }

                memset(args, 0, sizeof(*args));
                args->out.cb_handle = handle;
                break;

        case HL_CB_OP_DESTROY:
                rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
                                        args->in.cb_handle);
                break;

        case HL_CB_OP_INFO:
                rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
                                &usage_cnt);
                memset(args, 0, sizeof(*args));
                args->out.usage_cnt = usage_cnt;
                break;

        default:
                rc = -ENOTTY;
                break;
        }

        return rc;
}

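/*
 * cb_vm_close() - VMA close callback for a mmapped CB. Partial unmaps only
 * shrink the tracked mmap size; once the entire mapping is gone, the CB is
 * marked as not mmapped and the reference taken at mmap time is dropped.
 */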
static void cb_vm_close(struct vm_area_struct *vma)
{
        struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
        long new_mmap_size;

        new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

        if (new_mmap_size > 0) {
                cb->mmap_size = new_mmap_size;
                return;
        }

        spin_lock(&cb->lock);
        cb->mmap = false;
        spin_unlock(&cb->lock);

        hl_cb_put(cb);
        vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
        .close = cb_vm_close
};

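/**
 * hl_cb_mmap() - map a command buffer to user-space.
 * @hpriv: pointer to the file's private data.
 * @vma: VMA describing the requested user mapping.
 *
 * The CB handle is passed via the VMA page offset. The requested size must
 * match the page-aligned CB size, and a CB can be mmapped only once at a
 * time. The ASIC-specific cb_mmap callback performs the actual mapping, and
 * the reference taken here is transferred to vma->vm_private_data and
 * released in cb_vm_close().
 *
 * Return: 0 on success, negative errno on failure.
 */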
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
        struct hl_device *hdev = hpriv->hdev;
        struct hl_cb *cb;
        u32 handle, user_cb_size;
        int rc;

        /* We use the page offset to hold the IDR handle, so we need to clear
         * it before doing the mmap itself
         */
        handle = vma->vm_pgoff;
        vma->vm_pgoff = 0;

        /* reference was taken here */
        cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
        if (!cb) {
                dev_err(hdev->dev,
                        "CB mmap failed, no match to handle 0x%x\n", handle);
                return -EINVAL;
        }

        /* Validation check */
        user_cb_size = vma->vm_end - vma->vm_start;
        if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
                dev_err(hdev->dev,
                        "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
                        vma->vm_end - vma->vm_start, cb->size);
                rc = -EINVAL;
                goto put_cb;
        }

        if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
                                                        user_cb_size)) {
                dev_err(hdev->dev,
                        "user pointer is invalid - 0x%lx\n",
                        vma->vm_start);

                rc = -EINVAL;
                goto put_cb;
        }

        spin_lock(&cb->lock);

        if (cb->mmap) {
                dev_err(hdev->dev,
                        "CB mmap failed, CB is already mmapped to user\n");
                rc = -EINVAL;
                goto release_lock;
        }

        cb->mmap = true;

        spin_unlock(&cb->lock);

        vma->vm_ops = &cb_vm_ops;

        /*
         * Note: We're transferring the cb reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = cb;

        rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
                                        cb->bus_address, cb->size);
        if (rc) {
                spin_lock(&cb->lock);
                cb->mmap = false;
                goto release_lock;
        }

        cb->mmap_size = cb->size;
        vma->vm_pgoff = handle;

        return 0;

release_lock:
        spin_unlock(&cb->lock);
put_cb:
        hl_cb_put(cb);
        return rc;
}

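/**
 * hl_cb_get() - look up a CB by its raw IDR handle and take a reference.
 * @hdev: pointer to habanalabs device structure.
 * @mgr: command buffer manager that tracks the CB.
 * @handle: raw IDR id of the CB (i.e. without the mmap encoding).
 *
 * Return: pointer to the CB with its refcount incremented, or NULL if no CB
 * matches the handle. The caller must release the reference with hl_cb_put().
 */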
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
                        u32 handle)
{
        struct hl_cb *cb;

        spin_lock(&mgr->cb_lock);
        cb = idr_find(&mgr->cb_handles, handle);

        if (!cb) {
                spin_unlock(&mgr->cb_lock);
                dev_warn(hdev->dev,
                        "CB get failed, no match to handle 0x%x\n", handle);
                return NULL;
        }

        kref_get(&cb->refcount);

        spin_unlock(&mgr->cb_lock);

        return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
        kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
        spin_lock_init(&mgr->cb_lock);
        idr_init(&mgr->cb_handles);
}

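/**
 * hl_cb_mgr_fini() - release all CBs still tracked by a CB manager.
 * @hdev: pointer to habanalabs device structure.
 * @mgr: command buffer manager to tear down.
 *
 * Drops the creation reference of every CB left in the IDR, warning about
 * CBs that are still referenced elsewhere, and then destroys the IDR.
 */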
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
        struct hl_cb *cb;
        struct idr *idp;
        u32 id;

        idp = &mgr->cb_handles;

        idr_for_each_entry(idp, cb, id) {
                if (kref_put(&cb->refcount, cb_release) != 1)
                        dev_err(hdev->dev,
                                "CB %d for CTX ID %d is still alive\n",
                                id, cb->ctx->asid);
        }

        idr_destroy(&mgr->cb_handles);
}

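/**
 * hl_cb_kernel_create() - allocate a CB for the kernel driver's own use.
 * @hdev: pointer to habanalabs device structure.
 * @cb_size: requested size, in bytes.
 * @internal_cb: true to allocate from the device's internal CB pool.
 *
 * Convenience wrapper around hl_cb_create() for the kernel context that
 * returns a referenced CB pointer instead of a user handle.
 *
 * Return: pointer to the new CB, or NULL on failure.
 */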
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
                                        bool internal_cb)
{
        u64 cb_handle;
        struct hl_cb *cb;
        int rc;

        rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
                                internal_cb, false, &cb_handle);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to allocate CB for the kernel driver %d\n", rc);
                return NULL;
        }

        cb_handle >>= PAGE_SHIFT;
        cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
        /* hl_cb_get should never fail here */
        if (!cb) {
                dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
                                (u32) cb_handle);
                goto destroy_cb;
        }

        return cb;

destroy_cb:
        hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

        return NULL;
}

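/**
 * hl_cb_pool_init() - pre-allocate the device-wide pool of kernel CBs.
 * @hdev: pointer to habanalabs device structure.
 *
 * Allocates cb_pool_cb_cnt CBs of cb_pool_cb_size bytes each, so that small
 * kernel-context CB requests can be served without a new DMA allocation.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */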
int hl_cb_pool_init(struct hl_device *hdev)
{
        struct hl_cb *cb;
        int i;

        INIT_LIST_HEAD(&hdev->cb_pool);
        spin_lock_init(&hdev->cb_pool_lock);

        for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
                cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
                                HL_KERNEL_ASID_ID, false);
                if (cb) {
                        cb->is_pool = true;
                        list_add(&cb->pool_list, &hdev->cb_pool);
                } else {
                        hl_cb_pool_fini(hdev);
                        return -ENOMEM;
                }
        }

        return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
        struct hl_cb *cb, *tmp;

        list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
                list_del(&cb->pool_list);
                cb_fini(hdev, cb);
        }

        return 0;
}

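/**
 * hl_cb_va_pool_init() - create the per-context VA pool for CB mapping.
 * @ctx: pointer to the context structure.
 *
 * Creates a gen pool covering the device virtual address range reserved for
 * CB mappings, with the MMU page size as allocation granularity. Does
 * nothing on ASICs that do not support CB mapping. The pool is destroyed by
 * hl_cb_va_pool_fini().
 *
 * Return: 0 on success, negative errno on failure.
 */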
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;
        struct asic_fixed_properties *prop = &hdev->asic_prop;
        int rc;

        if (!hdev->supports_cb_mapping)
                return 0;

        ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
        if (!ctx->cb_va_pool) {
                dev_err(hdev->dev,
                        "Failed to create VA gen pool for CB mapping\n");
                return -ENOMEM;
        }

        rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
                        prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to add memory to VA gen pool for CB mapping\n");
                goto err_pool_destroy;
        }

        return 0;

err_pool_destroy:
        gen_pool_destroy(ctx->cb_va_pool);

        return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
        struct hl_device *hdev = ctx->hdev;

        if (!hdev->supports_cb_mapping)
                return;

        gen_pool_destroy(ctx->cb_va_pool);
}