// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/genalloc.h>
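/*
 * Command buffer (CB) management: CBs are allocated either from the
 * internal gen_pool, from DMA-coherent memory, or recycled from the
 * device-wide CB pool. Each CB is tracked by a per-FD (or kernel) IDR
 * handle, reference-counted with a kref, and may be mmap'ed to user
 * space exactly once at a time.
 */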
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				(void *) (uintptr_t) cb->kernel_address,
				cb->bus_address);

	kfree(cb);
}
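/*
 * cb_do_release() - return a pool CB to the device-wide free list, or
 * free its backing memory if it was allocated on demand.
 */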
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}
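/*
 * cb_release() - kref release callback, invoked when the last reference
 * to the CB is dropped. Removes the CB from debugfs before releasing it.
 */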
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	cb_do_release(hdev, cb);
}
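/*
 * hl_cb_alloc() - allocate a new CB object and its backing memory: from
 * the internal gen_pool for internal CBs, from DMA-coherent memory
 * otherwise.
 */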
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = (u64) (uintptr_t) p;
	cb->size = cb_size;

	return cb;
}
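/**
 * hl_cb_create() - create a new command buffer and return its handle.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager (per-FD, or the kernel's own manager).
 * @cb_size: requested size, in bytes.
 * @handle: result pointer for the mmap-able handle.
 * @ctx_id: context ID, HL_KERNEL_ASID_ID for driver-internal CBs.
 * @internal_cb: true to carve the CB out of the internal CB pool.
 *
 * Return: 0 on success, negative errno otherwise.
 */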
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 cb_size, u64 *handle, int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc;

	/*
	 * Can't use the generic function to check this because of the
	 * special case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx_id = ctx_id;

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto release_cb;
	}

	cb->id = rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * idr is 32-bit so we can safely OR it with a mask that is above
	 * 32 bit
	 */
	*handle = cb->id | HL_MMAP_CB_MASK;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

release_cb:
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}
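/*
 * hl_cb_destroy() - remove a CB handle from its manager and drop the
 * handle's reference. The CB itself is freed only when the last user
 * (e.g. an outstanding mmap) releases it.
 */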
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so we need to shift
	 * it back to the form the idr module gave it to us in
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr,
					args->in.cb_size, &handle,
					hpriv->ctx->asid, false);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}
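/*
 * A minimal user-space sketch of the create/mmap/destroy flow handled
 * above. It assumes the uapi definitions from uapi/misc/habanalabs.h
 * (HL_IOCTL_CB, union hl_cb_args) and is illustrative only, not part of
 * the driver:
 *
 *	union hl_cb_args args = {0};
 *	__u64 handle;
 *	void *cb_va;
 *
 *	args.in.op = HL_CB_OP_CREATE;
 *	args.in.cb_size = 0x1000;
 *	if (ioctl(fd, HL_IOCTL_CB, &args))
 *		return -1;
 *	handle = args.out.cb_handle;
 *
 *	// The returned handle doubles as the mmap offset; hl_cb_mmap()
 *	// below recovers the IDR handle from vma->vm_pgoff.
 *	cb_va = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, handle);
 *
 *	// ... fill the CB with packets and submit it ...
 *
 *	munmap(cb_va, 0x1000);
 *	memset(&args, 0, sizeof(args));
 *	args.in.op = HL_CB_OP_DESTROY;
 *	args.in.cb_handle = handle;
 *	ioctl(fd, HL_IOCTL_CB, &args);
 */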
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		/* Only part of the mapping was closed, keep the CB mapped */
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};
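/*
 * hl_cb_mmap() - map a CB's kernel memory into the calling process.
 * The CB reference taken by hl_cb_get() here is handed over to
 * vma->vm_private_data and dropped in cb_vm_close() when the last part
 * of the mapping goes away.
 */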
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	phys_addr_t address;
	u32 handle, user_cb_size;
	int rc;

	/* vm_pgoff carries the IDR handle (the mmap dispatcher has already
	 * stripped HL_MMAP_CB_MASK)
	 */
	handle = vma->vm_pgoff;

	/* A reference to the CB is taken here; it is released in
	 * cb_vm_close() or on the error paths below
	 */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n", vma->vm_start);
		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmaped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */
	vma->vm_private_data = cb;

	/* Calculate address for CB */
	address = virt_to_phys((void *) (uintptr_t) cb->kernel_address);

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}
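/*
 * hl_cb_get() - look up a CB by handle and take a reference on it.
 * The caller must drop the reference with hl_cb_put() when done.
 */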
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}
void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}
void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}
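/*
 * hl_cb_mgr_fini() - force-release all CBs still tracked by a manager,
 * typically when the user closes the FD. A CB that is still mmap'ed or
 * otherwise referenced at this point is reported as still alive.
 */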
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx_id);
	}

	idr_destroy(&mgr->cb_handles);
}
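/*
 * hl_cb_kernel_create() - convenience wrapper for driver-internal CBs:
 * creates a CB in the kernel's CB manager and returns a referenced
 * pointer to it instead of a handle, or NULL on failure.
 */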
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle,
				HL_KERNEL_ASID_ID, internal_cb);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}
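/*
 * A sketch of the typical in-kernel consumer pattern for the helper
 * above (based on how the ASIC-specific code uses it; everything here
 * is illustrative):
 *
 *	cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
 *	if (!cb)
 *		return -EFAULT;
 *
 *	// ... write packets to (void *) (uintptr_t) cb->kernel_address
 *	// and submit the CB ...
 *
 *	// hl_cb_put() drops the reference taken by hl_cb_get() inside
 *	// hl_cb_kernel_create(); the IDR handle still holds one, so the
 *	// CB stays valid until hl_cb_destroy() removes it.
 *	hl_cb_put(cb);
 *	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
 */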
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}
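/*
 * hl_cb_pool_fini() - free every CB that is still sitting in the
 * device-wide pool. CBs currently handed out are returned to the pool
 * by cb_do_release() and are not touched here.
 */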
int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}