// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/genalloc.h>

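/*
 * cb_map_mem() maps a command buffer (CB) into the device MMU so it can be
 * referenced through a device virtual address. The CB is split into
 * page-size chunks; each chunk gets a device VA from the per-context
 * cb_va_pool and is mapped under the context's MMU lock.
 */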
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
			"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
			"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	/* Allocate a device virtual address block for every page of the CB */
	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {
		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	/* Map each VA block, flushing the PTEs only on the last one */
	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	/* Unmap only the blocks that were actually mapped (offset bytes) */
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
}

static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
				&cb->bus_address, GFP_ATOMIC);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
				&cb->bus_address, GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d of dma memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use the generic "disabled or in reset" check here because of
	 * the special case where a CB is created as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		/* Try to recycle a CB from the pool for kernel allocations */
		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {
			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);
	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The IDR value is 32-bit, so we can safely OR it with a type mask
	 * that sits above bit 32
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

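/*
 * Handle encoding round-trip (illustrative): hl_cb_create() returns
 * (id | HL_MMAP_TYPE_CB) << PAGE_SHIFT, so user-space can pass the handle
 * directly as an mmap() offset, and hl_cb_destroy() below recovers the raw
 * IDR id by shifting right by PAGE_SHIFT and truncating to 32 bits, which
 * drops the type bits that sit above bit 32.
 */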
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was shifted by PAGE_SHIFT when it was given to the user
	 * for mmap, so shift it back to the raw value the IDR allocated
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

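/*
 * Minimal user-space sketch of driving this IOCTL (illustrative only; it
 * assumes an already-open device file descriptor 'fd' and the uapi
 * definitions from <uapi/misc/habanalabs.h>, e.g. HL_IOCTL_CB):
 *
 *	union hl_cb_args args = {0};
 *
 *	args.in.op = HL_CB_OP_CREATE;
 *	args.in.cb_size = 0x1000;
 *	if (ioctl(fd, HL_IOCTL_CB, &args))
 *		return -1;
 *
 *	// The returned handle is already shifted by PAGE_SHIFT, so it can be
 *	// used directly as the mmap() offset.
 *	void *cb_va = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, args.out.cb_handle);
 */
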
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);
	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the IDR handle, so clear it before
	 * doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* hl_cb_get() takes a reference here; it is dropped in cb_vm_close()
	 * or on the error paths below
	 */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n", vma->vm_start);
		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmapped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);
	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get() should never fail here, so use a kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}