// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
12 static void hl_ctx_fini(struct hl_ctx *ctx)
14 struct hl_device *hdev = ctx->hdev;
17 /* Release all allocated pending cb's, those cb's were never
18 * scheduled so it is safe to release them here
20 hl_pending_cb_list_flush(ctx);
22 /* Release all allocated HW block mapped list entries and destroy
25 hl_hw_block_mem_fini(ctx);
28 * If we arrived here, there are no jobs waiting for this context
29 * on its queues so we can safely remove it.
30 * This is because for each CS, we increment the ref count and for
31 * every CS that was finished we decrement it and we won't arrive
32 * to this function unless the ref count is 0
35 for (i = 0 ; i < hdev->asic_prop.max_pending_cs ; i++)
36 hl_fence_put(ctx->cs_pending[i]);
38 kfree(ctx->cs_pending);
40 if (ctx->asid != HL_KERNEL_ASID_ID) {
41 dev_dbg(hdev->dev, "closing user context %d\n", ctx->asid);
43 /* The engines are stopped as there is no executing CS, but the
44 * Coresight might be still working by accessing addresses
45 * related to the stopped engines. Hence stop it explicitly.
46 * Stop only if this is the compute context, as there can be
47 * only one compute context
49 if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
50 hl_device_set_debug_mode(hdev, false);
52 hdev->asic_funcs->ctx_fini(ctx);
53 hl_cb_va_pool_fini(ctx);
55 hl_asid_free(hdev, ctx->asid);
57 /* Scrub both SRAM and DRAM */
58 hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
60 dev_dbg(hdev->dev, "closing kernel context\n");
61 hdev->asic_funcs->ctx_fini(ctx);
67 void hl_ctx_do_release(struct kref *ref)
71 ctx = container_of(ref, struct hl_ctx, refcount);
76 hl_hpriv_put(ctx->hpriv);
81 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
83 struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
87 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
93 mutex_lock(&mgr->ctx_lock);
94 rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
95 mutex_unlock(&mgr->ctx_lock);
98 dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
104 rc = hl_ctx_init(hdev, ctx, false);
106 goto remove_from_idr;
111 /* TODO: remove for multiple contexts per process */
114 /* TODO: remove the following line for multiple process support */
115 hdev->compute_ctx = ctx;
120 mutex_lock(&mgr->ctx_lock);
121 idr_remove(&mgr->ctx_handles, ctx->handle);
122 mutex_unlock(&mgr->ctx_lock);
129 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
131 if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
135 "user process released device but its command submissions are still executing\n");
138 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
144 kref_init(&ctx->refcount);
146 ctx->cs_sequence = 1;
147 INIT_LIST_HEAD(&ctx->pending_cb_list);
148 spin_lock_init(&ctx->pending_cb_lock);
149 spin_lock_init(&ctx->cs_lock);
150 atomic_set(&ctx->thread_ctx_switch_token, 1);
151 atomic_set(&ctx->thread_pending_cb_token, 1);
152 ctx->thread_ctx_switch_wait_token = 0;
153 ctx->cs_pending = kcalloc(hdev->asic_prop.max_pending_cs,
154 sizeof(struct hl_fence *),
156 if (!ctx->cs_pending)
159 hl_hw_block_mem_init(ctx);
162 ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
163 rc = hl_vm_ctx_init(ctx);
165 dev_err(hdev->dev, "Failed to init mem ctx module\n");
167 goto err_hw_block_mem_fini;
170 rc = hdev->asic_funcs->ctx_init(ctx);
172 dev_err(hdev->dev, "ctx_init failed\n");
173 goto err_vm_ctx_fini;
176 ctx->asid = hl_asid_alloc(hdev);
178 dev_err(hdev->dev, "No free ASID, failed to create context\n");
180 goto err_hw_block_mem_fini;
183 rc = hl_vm_ctx_init(ctx);
185 dev_err(hdev->dev, "Failed to init mem ctx module\n");
190 rc = hl_cb_va_pool_init(ctx);
193 "Failed to init VA pool for mapped CB\n");
194 goto err_vm_ctx_fini;
197 rc = hdev->asic_funcs->ctx_init(ctx);
199 dev_err(hdev->dev, "ctx_init failed\n");
200 goto err_cb_va_pool_fini;
203 dev_dbg(hdev->dev, "create user context %d\n", ctx->asid);
209 hl_cb_va_pool_fini(ctx);
213 if (ctx->asid != HL_KERNEL_ASID_ID)
214 hl_asid_free(hdev, ctx->asid);
215 err_hw_block_mem_fini:
216 hl_hw_block_mem_fini(ctx);
217 kfree(ctx->cs_pending);
222 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
224 kref_get(&ctx->refcount);
227 int hl_ctx_put(struct hl_ctx *ctx)
229 return kref_put(&ctx->refcount, hl_ctx_do_release);
232 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
234 struct asic_fixed_properties *asic_prop = &ctx->hdev->asic_prop;
235 struct hl_fence *fence;
237 spin_lock(&ctx->cs_lock);
239 if (seq >= ctx->cs_sequence) {
240 spin_unlock(&ctx->cs_lock);
241 return ERR_PTR(-EINVAL);
244 if (seq + asic_prop->max_pending_cs < ctx->cs_sequence) {
245 spin_unlock(&ctx->cs_lock);
249 fence = ctx->cs_pending[seq & (asic_prop->max_pending_cs - 1)];
252 spin_unlock(&ctx->cs_lock);
258 * hl_ctx_mgr_init - initialize the context manager
260 * @mgr: pointer to context manager structure
262 * This manager is an object inside the hpriv object of the user process.
263 * The function is called when a user process opens the FD.
265 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
267 mutex_init(&mgr->ctx_lock);
268 idr_init(&mgr->ctx_handles);
272 * hl_ctx_mgr_fini - finalize the context manager
274 * @hdev: pointer to device structure
275 * @mgr: pointer to context manager structure
277 * This function goes over all the contexts in the manager and frees them.
278 * It is called when a process closes the FD.
280 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
286 idp = &mgr->ctx_handles;
288 idr_for_each_entry(idp, ctx, id)
289 hl_ctx_free(hdev, ctx);
291 idr_destroy(&mgr->ctx_handles);
292 mutex_destroy(&mgr->ctx_lock);