habanalabs: add an option to delay a device reset
[linux-2.6-microblaze.git] / drivers / misc / habanalabs / common / device.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7
8 #define pr_fmt(fmt)                     "habanalabs: " fmt
9
10 #include <uapi/misc/habanalabs.h>
11 #include "habanalabs.h"
12
13 #include <linux/pci.h>
14 #include <linux/hwmon.h>
15
16 #define HL_RESET_DELAY_USEC             10000   /* 10ms */
17
18 enum hl_device_status hl_device_status(struct hl_device *hdev)
19 {
20         enum hl_device_status status;
21
22         if (hdev->reset_info.in_reset)
23                 status = HL_DEVICE_STATUS_IN_RESET;
24         else if (hdev->reset_info.needs_reset)
25                 status = HL_DEVICE_STATUS_NEEDS_RESET;
26         else if (hdev->disabled)
27                 status = HL_DEVICE_STATUS_MALFUNCTION;
28         else if (!hdev->init_done)
29                 status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
30         else
31                 status = HL_DEVICE_STATUS_OPERATIONAL;
32
33         return status;
34 }
35
36 bool hl_device_operational(struct hl_device *hdev,
37                 enum hl_device_status *status)
38 {
39         enum hl_device_status current_status;
40
41         current_status = hl_device_status(hdev);
42         if (status)
43                 *status = current_status;
44
45         switch (current_status) {
46         case HL_DEVICE_STATUS_IN_RESET:
47         case HL_DEVICE_STATUS_MALFUNCTION:
48         case HL_DEVICE_STATUS_NEEDS_RESET:
49                 return false;
50         case HL_DEVICE_STATUS_OPERATIONAL:
51         case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
52         default:
53                 return true;
54         }
55 }
56
/*
 * hpriv_release() - kref release callback for a user file-private structure.
 * @ref: embedded refcount of the hl_fpriv being released.
 *
 * Runs when the last reference to the FD private data is dropped. Checks
 * device idleness now that the user context is gone, removes the user from
 * the device's FD list, optionally triggers a device reset, and frees the
 * private structure.
 */
static void hpriv_release(struct kref *ref)
{
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	bool device_is_idle = true;
	struct hl_fpriv *hpriv;
	struct hl_device *hdev;

	hpriv = container_of(ref, struct hl_fpriv, refcount);

	hdev = hpriv->hdev;

	put_pid(hpriv->taskpid);

	hl_debugfs_remove_file(hpriv);

	mutex_destroy(&hpriv->restore_phase_mutex);

	/* Idle check is skipped on the simulator (pldm) and when there is
	 * no PCI device (device already removed).
	 */
	if ((!hdev->pldm) && (hdev->pdev) &&
			(!hdev->asic_funcs->is_device_idle(hdev,
				idle_mask,
				HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) {
		dev_err(hdev->dev,
			"device not idle after user context is closed (0x%llx_%llx)\n",
			idle_mask[1], idle_mask[0]);

		device_is_idle = false;
	}

	/* We need to remove the user from the list to make sure the reset process won't
	 * try to kill the user process. Because, if we got here, it means there are no
	 * more driver/device resources that the user process is occupying so there is
	 * no need to kill it
	 *
	 * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
	 * a race between the release and opening the device again. We don't want to let
	 * a user open the device while there a reset is about to happen.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	list_del(&hpriv->dev_node);
	mutex_unlock(&hdev->fpriv_list_lock);

	/* Reset if the device was left busy by this user, or unconditionally
	 * when the reset-upon-device-release policy is enabled.
	 */
	if ((hdev->reset_if_device_not_idle && !device_is_idle)
			|| hdev->reset_upon_device_release)
		hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);

	/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
	 * thread, we don't care because the in_reset is marked so if a user will try to open
	 * the device it will fail on that, even if compute_ctx is false.
	 */
	mutex_lock(&hdev->fpriv_list_lock);
	hdev->is_compute_ctx_active = false;
	mutex_unlock(&hdev->fpriv_list_lock);

	kfree(hpriv);
}
112
/* Take an additional reference on a user file-private structure */
void hl_hpriv_get(struct hl_fpriv *hpriv)
{
	kref_get(&hpriv->refcount);
}
117
/*
 * Drop a reference on a user file-private structure.
 *
 * Return: 1 if this was the last reference and hpriv_release() ran,
 * 0 otherwise (standard kref_put() semantics).
 */
int hl_hpriv_put(struct hl_fpriv *hpriv)
{
	return kref_put(&hpriv->refcount, hpriv_release);
}
122
123 /*
124  * hl_device_release - release function for habanalabs device
125  *
126  * @inode: pointer to inode structure
127  * @filp: pointer to file structure
128  *
129  * Called when process closes an habanalabs device
130  */
131 static int hl_device_release(struct inode *inode, struct file *filp)
132 {
133         struct hl_fpriv *hpriv = filp->private_data;
134         struct hl_device *hdev = hpriv->hdev;
135
136         filp->private_data = NULL;
137
138         if (!hdev) {
139                 pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
140                 put_pid(hpriv->taskpid);
141                 return 0;
142         }
143
144         /* Each pending user interrupt holds the user's context, hence we
145          * must release them all before calling hl_ctx_mgr_fini().
146          */
147         hl_release_pending_user_interrupts(hpriv->hdev);
148
149         hl_cb_mgr_fini(hdev, &hpriv->cb_mgr);
150         hl_ts_mgr_fini(hpriv->hdev, &hpriv->ts_mem_mgr);
151         hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
152
153         if (!hl_hpriv_put(hpriv))
154                 dev_notice(hdev->dev,
155                         "User process closed FD but device still in use\n");
156
157         hdev->last_open_session_duration_jif =
158                 jiffies - hdev->last_successful_open_jif;
159
160         return 0;
161 }
162
163 static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
164 {
165         struct hl_fpriv *hpriv = filp->private_data;
166         struct hl_device *hdev = hpriv->hdev;
167
168         filp->private_data = NULL;
169
170         if (!hdev) {
171                 pr_err("Closing FD after device was removed\n");
172                 goto out;
173         }
174
175         mutex_lock(&hdev->fpriv_ctrl_list_lock);
176         list_del(&hpriv->dev_node);
177         mutex_unlock(&hdev->fpriv_ctrl_list_lock);
178 out:
179         put_pid(hpriv->taskpid);
180
181         kfree(hpriv);
182
183         return 0;
184 }
185
186 /*
187  * hl_mmap - mmap function for habanalabs device
188  *
189  * @*filp: pointer to file structure
190  * @*vma: pointer to vm_area_struct of the process
191  *
192  * Called when process does an mmap on habanalabs device. Call the device's mmap
193  * function at the end of the common code.
194  */
195 static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
196 {
197         struct hl_fpriv *hpriv = filp->private_data;
198         struct hl_device *hdev = hpriv->hdev;
199         unsigned long vm_pgoff;
200
201         if (!hdev) {
202                 pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
203                 return -ENODEV;
204         }
205
206         vm_pgoff = vma->vm_pgoff;
207         vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
208
209         switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
210         case HL_MMAP_TYPE_CB:
211                 return hl_cb_mmap(hpriv, vma);
212
213         case HL_MMAP_TYPE_BLOCK:
214                 return hl_hw_block_mmap(hpriv, vma);
215
216         case HL_MMAP_TYPE_TS_BUFF:
217                 return hl_ts_mmap(hpriv, vma);
218         }
219
220         return -EINVAL;
221 }
222
/* File operations for the main (compute) device node */
static const struct file_operations hl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open,
	.release = hl_device_release,
	.mmap = hl_mmap,
	.unlocked_ioctl = hl_ioctl,
	.compat_ioctl = hl_ioctl
};
231
/* File operations for the control device node (no mmap support) */
static const struct file_operations hl_ctrl_ops = {
	.owner = THIS_MODULE,
	.open = hl_device_open_ctrl,
	.release = hl_device_release_ctrl,
	.unlocked_ioctl = hl_ioctl_control,
	.compat_ioctl = hl_ioctl_control
};
239
/* Release callback for the device object allocated in device_init_cdev() */
static void device_release_func(struct device *dev)
{
	kfree(dev);
}
244
/*
 * device_init_cdev - Initialize cdev and device for habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 * @hclass: pointer to the class object of the device
 * @minor: minor number of the specific device
 * @fops: file operations to install for this device
 * @name: name of the device as it will appear in the filesystem
 * @cdev: pointer to the char device object that will be initialized
 * @dev: pointer to the device object that will be initialized
 *
 * Initialize a cdev and a Linux device for habanalabs's device.
 *
 * Return: 0 on success, -ENOMEM if allocating the device object fails.
 */
static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
				int minor, const struct file_operations *fops,
				char *name, struct cdev *cdev,
				struct device **dev)
{
	cdev_init(cdev, fops);
	cdev->owner = THIS_MODULE;

	/* Freed by device_release_func() when the last reference is put */
	*dev = kzalloc(sizeof(**dev), GFP_KERNEL);
	if (!*dev)
		return -ENOMEM;

	device_initialize(*dev);
	(*dev)->devt = MKDEV(hdev->major, minor);
	(*dev)->class = hclass;
	(*dev)->release = device_release_func;
	dev_set_drvdata(*dev, hdev);
	dev_set_name(*dev, "%s", name);

	return 0;
}
279
/*
 * device_cdev_sysfs_add() - register char devices and sysfs for the device.
 * @hdev: pointer to habanalabs device structure.
 *
 * Adds the main and control char devices to the system and then initializes
 * sysfs. On failure, everything added so far is removed in reverse order.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int device_cdev_sysfs_add(struct hl_device *hdev)
{
	int rc;

	rc = cdev_device_add(&hdev->cdev, hdev->dev);
	if (rc) {
		dev_err(hdev->dev,
			"failed to add a char device to the system\n");
		return rc;
	}

	rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
	if (rc) {
		dev_err(hdev->dev,
			"failed to add a control char device to the system\n");
		goto delete_cdev_device;
	}

	/* hl_sysfs_init() must be done after adding the device to the system */
	rc = hl_sysfs_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize sysfs\n");
		goto delete_ctrl_cdev_device;
	}

	/* Flag consulted by device_cdev_sysfs_del() during teardown */
	hdev->cdev_sysfs_created = true;

	return 0;

delete_ctrl_cdev_device:
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
delete_cdev_device:
	cdev_device_del(&hdev->cdev, hdev->dev);
	return rc;
}
315
/*
 * device_cdev_sysfs_del() - teardown counterpart of device_cdev_sysfs_add().
 * @hdev: pointer to habanalabs device structure.
 *
 * If registration never completed, only the device references are dropped;
 * otherwise sysfs and both char devices are removed first.
 */
static void device_cdev_sysfs_del(struct hl_device *hdev)
{
	if (!hdev->cdev_sysfs_created)
		goto put_devices;

	hl_sysfs_fini(hdev);
	cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
	cdev_device_del(&hdev->cdev, hdev->dev);

put_devices:
	put_device(hdev->dev);
	put_device(hdev->dev_ctrl);
}
329
/*
 * device_hard_reset_pending() - delayed-work handler performing a pending
 * device reset from the dedicated reset workqueue.
 * @work: embedded delayed work of the hl_device_reset_work wrapper.
 *
 * If the reset cannot be performed yet (-EBUSY) and the device is not being
 * torn down, the work re-queues itself to retry after a fixed interval.
 */
static void device_hard_reset_pending(struct work_struct *work)
{
	struct hl_device_reset_work *device_reset_work =
		container_of(work, struct hl_device_reset_work, reset_work.work);
	struct hl_device *hdev = device_reset_work->hdev;
	u32 flags;
	int rc;

	/* Mark the reset as originating from the reset thread */
	flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;

	rc = hl_device_reset(hdev, flags);
	if ((rc == -EBUSY) && !hdev->device_fini_pending) {
		dev_info(hdev->dev,
			"Could not reset device. will try again in %u seconds",
			HL_PENDING_RESET_PER_SEC);

		queue_delayed_work(device_reset_work->wq,
			&device_reset_work->reset_work,
			msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
	}
}
351
352 /*
353  * device_early_init - do some early initialization for the habanalabs device
354  *
355  * @hdev: pointer to habanalabs device structure
356  *
357  * Install the relevant function pointers and call the early_init function,
358  * if such a function exists
359  */
360 static int device_early_init(struct hl_device *hdev)
361 {
362         int i, rc;
363         char workq_name[32];
364
365         switch (hdev->asic_type) {
366         case ASIC_GOYA:
367                 goya_set_asic_funcs(hdev);
368                 strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
369                 break;
370         case ASIC_GAUDI:
371                 gaudi_set_asic_funcs(hdev);
372                 strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
373                 break;
374         case ASIC_GAUDI_SEC:
375                 gaudi_set_asic_funcs(hdev);
376                 strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
377                 break;
378         default:
379                 dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
380                         hdev->asic_type);
381                 return -EINVAL;
382         }
383
384         rc = hdev->asic_funcs->early_init(hdev);
385         if (rc)
386                 return rc;
387
388         rc = hl_asid_init(hdev);
389         if (rc)
390                 goto early_fini;
391
392         if (hdev->asic_prop.completion_queues_count) {
393                 hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
394                                 sizeof(*hdev->cq_wq),
395                                 GFP_KERNEL);
396                 if (!hdev->cq_wq) {
397                         rc = -ENOMEM;
398                         goto asid_fini;
399                 }
400         }
401
402         for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
403                 snprintf(workq_name, 32, "hl-free-jobs-%u", (u32) i);
404                 hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
405                 if (hdev->cq_wq[i] == NULL) {
406                         dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
407                         rc = -ENOMEM;
408                         goto free_cq_wq;
409                 }
410         }
411
412         hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
413         if (hdev->eq_wq == NULL) {
414                 dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
415                 rc = -ENOMEM;
416                 goto free_cq_wq;
417         }
418
419         hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
420         if (!hdev->ts_free_obj_wq) {
421                 dev_err(hdev->dev,
422                         "Failed to allocate Timestamp registration free workqueue\n");
423                 rc = -ENOMEM;
424                 goto free_eq_wq;
425         }
426
427         hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
428                                         GFP_KERNEL);
429         if (!hdev->hl_chip_info) {
430                 rc = -ENOMEM;
431                 goto free_ts_free_wq;
432         }
433
434         rc = hl_mmu_if_set_funcs(hdev);
435         if (rc)
436                 goto free_chip_info;
437
438         hl_cb_mgr_init(&hdev->kernel_cb_mgr);
439
440         hdev->device_reset_work.wq =
441                         create_singlethread_workqueue("hl_device_reset");
442         if (!hdev->device_reset_work.wq) {
443                 rc = -ENOMEM;
444                 dev_err(hdev->dev, "Failed to create device reset WQ\n");
445                 goto free_cb_mgr;
446         }
447
448         INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work,
449                         device_hard_reset_pending);
450         hdev->device_reset_work.hdev = hdev;
451         hdev->device_fini_pending = 0;
452
453         mutex_init(&hdev->send_cpu_message_lock);
454         mutex_init(&hdev->debug_lock);
455         INIT_LIST_HEAD(&hdev->cs_mirror_list);
456         spin_lock_init(&hdev->cs_mirror_lock);
457         spin_lock_init(&hdev->reset_info.lock);
458         INIT_LIST_HEAD(&hdev->fpriv_list);
459         INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
460         mutex_init(&hdev->fpriv_list_lock);
461         mutex_init(&hdev->fpriv_ctrl_list_lock);
462         mutex_init(&hdev->clk_throttling.lock);
463
464         return 0;
465
466 free_cb_mgr:
467         hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
468 free_chip_info:
469         kfree(hdev->hl_chip_info);
470 free_ts_free_wq:
471         destroy_workqueue(hdev->ts_free_obj_wq);
472 free_eq_wq:
473         destroy_workqueue(hdev->eq_wq);
474 free_cq_wq:
475         for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
476                 if (hdev->cq_wq[i])
477                         destroy_workqueue(hdev->cq_wq[i]);
478         kfree(hdev->cq_wq);
479 asid_fini:
480         hl_asid_fini(hdev);
481 early_fini:
482         if (hdev->asic_funcs->early_fini)
483                 hdev->asic_funcs->early_fini(hdev);
484
485         return rc;
486 }
487
/*
 * device_early_fini - finalize all that was done in device_early_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Resources are released roughly in the reverse order of their creation.
 */
static void device_early_fini(struct hl_device *hdev)
{
	int i;

	mutex_destroy(&hdev->debug_lock);
	mutex_destroy(&hdev->send_cpu_message_lock);

	mutex_destroy(&hdev->fpriv_list_lock);
	mutex_destroy(&hdev->fpriv_ctrl_list_lock);

	mutex_destroy(&hdev->clk_throttling.lock);

	hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);

	kfree(hdev->hl_chip_info);

	destroy_workqueue(hdev->ts_free_obj_wq);
	destroy_workqueue(hdev->eq_wq);
	destroy_workqueue(hdev->device_reset_work.wq);

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		destroy_workqueue(hdev->cq_wq[i]);
	kfree(hdev->cq_wq);

	hl_asid_fini(hdev);

	if (hdev->asic_funcs->early_fini)
		hdev->asic_funcs->early_fini(hdev);
}
523
/*
 * hl_device_heartbeat() - periodic delayed work that pings the device CPU.
 * @work: embedded delayed work of the hl_device.
 *
 * Sends a heartbeat message to the device F/W. On failure the device is
 * hard-reset with the heartbeat cause; on success (or while the device is
 * not operational) the work re-schedules itself.
 */
static void hl_device_heartbeat(struct work_struct *work)
{
	struct hl_device *hdev = container_of(work, struct hl_device,
						work_heartbeat.work);

	if (!hl_device_operational(hdev, NULL))
		goto reschedule;

	if (!hdev->asic_funcs->send_heartbeat(hdev))
		goto reschedule;

	/* Heartbeat failed; log only while the device is still considered
	 * operational (presumably to avoid noise once it is already
	 * disabled/resetting — NOTE(review): confirm intent).
	 */
	if (hl_device_operational(hdev, NULL))
		dev_err(hdev->dev, "Device heartbeat failed!\n");

	hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);

	return;

reschedule:
	/*
	 * prev_reset_trigger tracks consecutive fatal h/w errors until first
	 * heartbeat immediately post reset.
	 * If control reached here, then at least one heartbeat work has been
	 * scheduled since last reset/init cycle.
	 * So if the device is not already in reset cycle, reset the flag
	 * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
	 * status for at least one heartbeat. From this point driver restarts
	 * tracking future consecutive fatal errors.
	 */
	if (!hdev->reset_info.in_reset)
		hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;

	schedule_delayed_work(&hdev->work_heartbeat,
			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}
559
/*
 * device_late_init - do late stuff initialization for the habanalabs device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Do stuff that either needs the device H/W queues to be active or needs
 * to happen after all the rest of the initialization is finished.
 *
 * Return: 0 on success, negative error code from the ASIC late_init
 * callback otherwise.
 */
static int device_late_init(struct hl_device *hdev)
{
	int rc;

	if (hdev->asic_funcs->late_init) {
		rc = hdev->asic_funcs->late_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"failed late initialization for the H/W\n");
			return rc;
		}
	}

	hdev->high_pll = hdev->asic_prop.high_pll;

	/* Start the periodic heartbeat work if heartbeat is enabled */
	if (hdev->heartbeat) {
		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
		schedule_delayed_work(&hdev->work_heartbeat,
				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
	}

	/* Consulted by device_late_fini() so teardown is a no-op if we
	 * never got here
	 */
	hdev->late_init_done = true;

	return 0;
}
593
/*
 * device_late_fini - finalize all that was done in device_late_init
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Safe to call even if device_late_init() never completed (no-op then).
 */
static void device_late_fini(struct hl_device *hdev)
{
	if (!hdev->late_init_done)
		return;

	/* Stop the heartbeat work and wait for an in-flight execution */
	if (hdev->heartbeat)
		cancel_delayed_work_sync(&hdev->work_heartbeat);

	if (hdev->asic_funcs->late_fini)
		hdev->asic_funcs->late_fini(hdev);

	hdev->late_init_done = false;
}
613
614 int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
615 {
616         u64 max_power, curr_power, dc_power, dividend;
617         int rc;
618
619         max_power = hdev->max_power;
620         dc_power = hdev->asic_prop.dc_power_default;
621         rc = hl_fw_cpucp_power_get(hdev, &curr_power);
622
623         if (rc)
624                 return rc;
625
626         curr_power = clamp(curr_power, dc_power, max_power);
627
628         dividend = (curr_power - dc_power) * 100;
629         *utilization = (u32) div_u64(dividend, (max_power - dc_power));
630
631         return 0;
632 }
633
/*
 * hl_device_set_debug_mode() - enable or disable the device debug mode.
 * @hdev: pointer to habanalabs device structure.
 * @ctx: user context on whose behalf the mode is changed.
 * @enable: true to enter debug mode, false to leave it.
 *
 * Return: 0 on success, -EFAULT when the requested transition does not
 * match the current state (enable while enabled / disable while disabled).
 */
int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
{
	int rc = 0;

	mutex_lock(&hdev->debug_lock);

	if (!enable) {
		if (!hdev->in_debug) {
			dev_err(hdev->dev,
				"Failed to disable debug mode because device was not in debug mode\n");
			rc = -EFAULT;
			goto out;
		}

		/* Skip halting coresight when a hard reset is pending —
		 * presumably the reset will take care of the H/W anyway
		 * (NOTE(review): confirm).
		 */
		if (!hdev->reset_info.hard_reset_pending)
			hdev->asic_funcs->halt_coresight(hdev, ctx);

		hdev->in_debug = 0;

		goto out;
	}

	if (hdev->in_debug) {
		dev_err(hdev->dev,
			"Failed to enable debug mode because device is already in debug mode\n");
		rc = -EFAULT;
		goto out;
	}

	hdev->in_debug = 1;

out:
	mutex_unlock(&hdev->debug_lock);

	return rc;
}
670
/*
 * take_release_locks() - flush all users of the device's critical sections.
 * @hdev: pointer to habanalabs device structure.
 *
 * Each lock is taken and immediately released: this does not hold anything,
 * it only guarantees that any thread currently inside one of these critical
 * sections has finished before we proceed (used around reset/suspend).
 */
static void take_release_locks(struct hl_device *hdev)
{
	/* Flush anyone that is inside the critical section of enqueue
	 * jobs to the H/W
	 */
	hdev->asic_funcs->hw_queues_lock(hdev);
	hdev->asic_funcs->hw_queues_unlock(hdev);

	/* Flush processes that are sending message to CPU */
	mutex_lock(&hdev->send_cpu_message_lock);
	mutex_unlock(&hdev->send_cpu_message_lock);

	/* Flush anyone that is inside device open */
	mutex_lock(&hdev->fpriv_list_lock);
	mutex_unlock(&hdev->fpriv_list_lock);
	mutex_lock(&hdev->fpriv_ctrl_list_lock);
	mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
689
/*
 * cleanup_resources() - quiesce the device before a reset.
 * @hdev: pointer to habanalabs device structure.
 * @hard_reset: true when this is part of a hard reset (also tears down
 *              late-init state).
 * @fw_reset: true when the F/W performs the reset (passed to halt_engines).
 * @skip_wq_flush: true to skip flushing workqueues during CS rollback.
 */
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
				bool skip_wq_flush)
{
	if (hard_reset)
		device_late_fini(hdev);

	/*
	 * Halt the engines and disable interrupts so we won't get any more
	 * completions from H/W and we won't have any accesses from the
	 * H/W to the host machine
	 */
	hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);

	/* Go over all the queues, release all CS and their jobs */
	hl_cs_rollback_all(hdev, skip_wq_flush);

	/* Release all pending user interrupts, each pending user interrupt
	 * holds a reference to user context
	 */
	hl_release_pending_user_interrupts(hdev);
}
711
/*
 * hl_device_suspend - initiate device suspend
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int hl_device_suspend(struct hl_device *hdev)
{
	int rc;

	pci_save_state(hdev->pdev);

	/* Block future CS/VM/JOB completion operations */
	spin_lock(&hdev->reset_info.lock);
	if (hdev->reset_info.in_reset) {
		spin_unlock(&hdev->reset_info.lock);
		dev_err(hdev->dev, "Can't suspend while in reset\n");
		return -EIO;
	}
	/* Marked in_reset here; cleared by hl_device_resume() */
	hdev->reset_info.in_reset = 1;
	spin_unlock(&hdev->reset_info.lock);

	/* This blocks all other stuff that is not blocked by in_reset */
	hdev->disabled = true;

	/* Wait for threads currently inside the flushed critical sections */
	take_release_locks(hdev);

	rc = hdev->asic_funcs->suspend(hdev);
	if (rc)
		dev_err(hdev->dev,
			"Failed to disable PCI access of device CPU\n");

	/* Shut down the device */
	pci_disable_device(hdev->pdev);
	pci_set_power_state(hdev->pdev, PCI_D3hot);

	return 0;
}
753
/*
 * hl_device_resume - initiate device resume
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int hl_device_resume(struct hl_device *hdev)
{
	int rc;

	pci_set_power_state(hdev->pdev, PCI_D0);
	pci_restore_state(hdev->pdev);
	rc = pci_enable_device_mem(hdev->pdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to enable PCI device in resume\n");
		return rc;
	}

	pci_set_master(hdev->pdev);

	rc = hdev->asic_funcs->resume(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to resume device after suspend\n");
		goto disable_device;
	}


	/* 'in_reset' was set to true during suspend, now we must clear it in order
	 * for hard reset to be performed
	 */
	hdev->reset_info.in_reset = 0;

	/* A full hard reset re-initializes the device after power-down */
	rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
	if (rc) {
		dev_err(hdev->dev, "Failed to reset device during resume\n");
		goto disable_device;
	}

	return 0;

disable_device:
	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);

	return rc;
}
804
/*
 * device_kill_open_processes() - kill user processes that hold the device
 * open, then wait for them to actually close their FDs.
 * @hdev: pointer to habanalabs device structure.
 * @timeout: number of 1-second wait iterations to use; 0 selects the
 *           default policy (kill then wait, tracked across trials).
 * @control_dev: true to operate on the control-device FD list, false for
 *               the compute-device FD list.
 *
 * Return: 0 when all processes exited, -ETIME when a task struct could not
 * be obtained or the maximum number of trials was exhausted, -EBUSY when
 * processes are still open and the caller should retry.
 */
static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
	struct task_struct *task = NULL;
	struct list_head *fd_list;
	struct hl_fpriv *hpriv;
	struct mutex *fd_lock;
	u32 pending_cnt;

	fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
	fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;

	/* Giving time for user to close FD, and for processes that are inside
	 * hl_device_open to finish
	 */
	if (!list_empty(fd_list))
		ssleep(1);

	if (timeout) {
		pending_cnt = timeout;
	} else {
		if (hdev->process_kill_trial_cnt) {
			/* Processes have been already killed */
			pending_cnt = 1;
			goto wait_for_processes;
		} else {
			/* Wait a small period after process kill */
			pending_cnt = HL_PENDING_RESET_PER_SEC;
		}
	}

	mutex_lock(fd_lock);

	/* This section must be protected because we are dereferencing
	 * pointers that are freed if the process exits
	 */
	list_for_each_entry(hpriv, fd_list, dev_node) {
		task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
		if (task) {
			dev_info(hdev->dev, "Killing user process pid=%d\n",
				task_pid_nr(task));
			send_sig(SIGKILL, task, 1);
			usleep_range(1000, 10000);

			put_task_struct(task);
		} else {
			/* Process already gone but its FD is still open;
			 * nothing more we can do here
			 */
			dev_warn(hdev->dev,
				"Can't get task struct for PID so giving up on killing process\n");
			mutex_unlock(fd_lock);
			return -ETIME;
		}
	}

	mutex_unlock(fd_lock);

	/*
	 * We killed the open users, but that doesn't mean they are closed.
	 * It could be that they are running a long cleanup phase in the driver
	 * e.g. MMU unmappings, or running other long teardown flow even before
	 * our cleanup.
	 * Therefore we need to wait again to make sure they are closed before
	 * continuing with the reset.
	 */

wait_for_processes:
	while ((!list_empty(fd_list)) && (pending_cnt)) {
		dev_dbg(hdev->dev,
			"Waiting for all unmap operations to finish before hard reset\n");

		pending_cnt--;

		ssleep(1);
	}

	/* All processes exited successfully */
	if (list_empty(fd_list))
		return 0;

	/* Give up waiting for processes to exit */
	if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
		return -ETIME;

	hdev->process_kill_trial_cnt++;

	return -EBUSY;
}
890
891 static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
892 {
893         struct list_head *fd_list;
894         struct hl_fpriv *hpriv;
895         struct mutex *fd_lock;
896
897         fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
898         fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
899
900         mutex_lock(fd_lock);
901         list_for_each_entry(hpriv, fd_list, dev_node)
902                 hpriv->hdev = NULL;
903         mutex_unlock(fd_lock);
904 }
905
906 static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
907 {
908         u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
909
910         /*
911          * 'reset cause' is being updated here, because getting here
912          * means that it's the 1st time and the last time we're here
913          * ('in_reset' makes sure of it). This makes sure that
914          * 'reset_cause' will continue holding its 1st recorded reason!
915          */
916         if (flags & HL_DRV_RESET_HEARTBEAT) {
917                 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
918                 cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
919         } else if (flags & HL_DRV_RESET_TDR) {
920                 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
921                 cur_reset_trigger = HL_DRV_RESET_TDR;
922         } else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
923                 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
924                 cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
925         } else {
926                 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
927         }
928
929         /*
930          * If reset cause is same twice, then reset_trigger_repeated
931          * is set and if this reset is due to a fatal FW error
932          * device is set to an unstable state.
933          */
934         if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
935                 hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
936                 hdev->reset_info.reset_trigger_repeated = 0;
937         } else {
938                 hdev->reset_info.reset_trigger_repeated = 1;
939         }
940
941         /* If reset is due to heartbeat, device CPU is no responsive in
942          * which case no point sending PCI disable message to it.
943          *
944          * If F/W is performing the reset, no need to send it a message to disable
945          * PCI access
946          */
947         if ((flags & HL_DRV_RESET_HARD) &&
948                         !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
949                 /* Disable PCI access from device F/W so he won't send
950                  * us additional interrupts. We disable MSI/MSI-X at
951                  * the halt_engines function and we can't have the F/W
952                  * sending us interrupts after that. We need to disable
953                  * the access here because if the device is marked
954                  * disable, the message won't be send. Also, in case
955                  * of heartbeat, the device CPU is marked as disable
956                  * so this message won't be sent
957                  */
958                 if (hl_fw_send_pci_access_msg(hdev,
959                                 CPUCP_PACKET_DISABLE_PCI_ACCESS))
960                         dev_warn(hdev->dev,
961                                 "Failed to disable PCI access by F/W\n");
962         }
963 }
964
965 /*
966  * hl_device_reset - reset the device
967  *
968  * @hdev: pointer to habanalabs device structure
969  * @flags: reset flags.
970  *
971  * Block future CS and wait for pending CS to be enqueued
972  * Call ASIC H/W fini
973  * Flush all completions
974  * Re-initialize all internal data structures
975  * Call ASIC H/W init, late_init
976  * Test queues
977  * Enable device
978  *
979  * Returns 0 for success or an error on failure.
980  */
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
	bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
			reset_upon_device_release = false, schedule_hard_reset = false,
			skip_wq_flush, delay_reset;
	u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
	struct hl_ctx *ctx;
	int i, rc;

	if (!hdev->init_done) {
		dev_err(hdev->dev, "Can't reset before initialization is done\n");
		return 0;
	}

	/* Decode the caller-supplied reset flags into local booleans */
	hard_reset = !!(flags & HL_DRV_RESET_HARD);
	from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
	fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
	skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE);
	delay_reset = !!(flags & HL_DRV_RESET_DELAY);

	/* ASICs that don't support soft-reset are escalated to hard reset */
	if (!hard_reset && !hdev->asic_prop.supports_soft_reset) {
		hard_instead_soft = true;
		hard_reset = true;
	}

	if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) {
		if (hard_reset) {
			dev_crit(hdev->dev,
				"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
			return -EINVAL;
		}

		reset_upon_device_release = true;

		goto do_reset;
	}

	/* Inference ASICs that forbid soft-reset are escalated as well */
	if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
		hard_instead_soft = true;
		hard_reset = true;
	}

	if (hard_instead_soft)
		dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");

do_reset:
	/* Re-entry of reset thread */
	if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
		goto kill_processes;

	/*
	 * Prevent concurrency in this function - only one reset should be
	 * done at any given time. Only need to perform this if we didn't
	 * get from the dedicated hard reset thread
	 */
	if (!from_hard_reset_thread) {
		/* Block future CS/VM/JOB completion operations */
		spin_lock(&hdev->reset_info.lock);
		if (hdev->reset_info.in_reset) {
			/* We only allow scheduling of a hard reset during soft reset */
			if (hard_reset && hdev->reset_info.is_in_soft_reset)
				hdev->reset_info.hard_reset_schedule_flags = flags;
			spin_unlock(&hdev->reset_info.lock);
			return 0;
		}
		hdev->reset_info.in_reset = 1;
		spin_unlock(&hdev->reset_info.lock);

		/* Caller asked for a short grace period before resetting */
		if (delay_reset)
			usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);

		handle_reset_trigger(hdev, flags);

		/* This still allows the completion of some KDMA ops */
		hdev->reset_info.is_in_soft_reset = !hard_reset;

		/* This also blocks future CS/VM/JOB completion operations */
		hdev->disabled = true;

		take_release_locks(hdev);

		if (hard_reset)
			dev_info(hdev->dev, "Going to reset device\n");
		else if (reset_upon_device_release)
			dev_info(hdev->dev, "Going to reset device after release by user\n");
		else
			dev_info(hdev->dev, "Going to reset engines of inference device\n");
	}

again:
	/* Hard reset must run from the dedicated reset work; when requested
	 * from any other context, queue the work and return immediately
	 */
	if ((hard_reset) && (!from_hard_reset_thread)) {
		hdev->reset_info.hard_reset_pending = true;

		hdev->process_kill_trial_cnt = 0;

		hdev->device_reset_work.flags = flags;

		/*
		 * Because the reset function can't run from heartbeat work,
		 * we need to call the reset function from a dedicated work.
		 */
		queue_delayed_work(hdev->device_reset_work.wq,
			&hdev->device_reset_work.reset_work, 0);

		return 0;
	}

	cleanup_resources(hdev, hard_reset, fw_reset, skip_wq_flush);

kill_processes:
	if (hard_reset) {
		/* Kill processes here after CS rollback. This is because the
		 * process can't really exit until all its CSs are done, which
		 * is what we do in cs rollback
		 */
		rc = device_kill_open_processes(hdev, 0, false);

		if (rc == -EBUSY) {
			if (hdev->device_fini_pending) {
				dev_crit(hdev->dev,
					"Failed to kill all open processes, stopping hard reset\n");
				goto out_err;
			}

			/* signal reset thread to reschedule */
			return rc;
		}

		if (rc) {
			dev_crit(hdev->dev,
				"Failed to kill all open processes, stopping hard reset\n");
			goto out_err;
		}

		/* Flush the Event queue workers to make sure no other thread is
		 * reading or writing to registers during the reset
		 */
		flush_workqueue(hdev->eq_wq);
	}

	/* Reset the H/W. It will be in idle state after this returns */
	hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);

	if (hard_reset) {
		hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;

		/* Release kernel context */
		if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
			hdev->kernel_ctx = NULL;

		hl_vm_fini(hdev);
		hl_mmu_fini(hdev);
		hl_eq_reset(hdev, &hdev->event_queue);
	}

	/* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
	hl_hw_queue_reset(hdev, hard_reset);
	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_cq_reset(hdev, &hdev->completion_queue[i]);

	/* Make sure the context switch phase will run again */
	ctx = hl_get_compute_ctx(hdev);
	if (ctx) {
		atomic_set(&ctx->thread_ctx_switch_token, 1);
		ctx->thread_ctx_switch_wait_token = 0;
		hl_ctx_put(ctx);
	}

	/* Finished tear-down, starting to re-initialize */

	if (hard_reset) {
		hdev->device_cpu_disabled = false;
		hdev->reset_info.hard_reset_pending = false;

		if (hdev->reset_info.reset_trigger_repeated &&
				(hdev->reset_info.prev_reset_trigger ==
						HL_DRV_RESET_FW_FATAL_ERR)) {
			/* if there are 2 back to back resets from FW,
			 * ensure driver puts the device in an unusable state
			 */
			dev_crit(hdev->dev,
				"Consecutive FW fatal errors received, stopping hard reset\n");
			rc = -EIO;
			goto out_err;
		}

		if (hdev->kernel_ctx) {
			/* hw_fini path above should have released it */
			dev_crit(hdev->dev,
				"kernel ctx was alive during hard reset, something is terribly wrong\n");
			rc = -EBUSY;
			goto out_err;
		}

		rc = hl_mmu_init(hdev);
		if (rc) {
			dev_err(hdev->dev,
				"Failed to initialize MMU S/W after hard reset\n");
			goto out_err;
		}

		/* Allocate the kernel context */
		hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
						GFP_KERNEL);
		if (!hdev->kernel_ctx) {
			rc = -ENOMEM;
			hl_mmu_fini(hdev);
			goto out_err;
		}

		hdev->is_compute_ctx_active = false;

		rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init kernel ctx in hard reset\n");
			kfree(hdev->kernel_ctx);
			hdev->kernel_ctx = NULL;
			hl_mmu_fini(hdev);
			goto out_err;
		}
	}

	/* Device is now enabled as part of the initialization requires
	 * communication with the device firmware to get information that
	 * is required for the initialization itself
	 */
	hdev->disabled = false;

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
		goto out_err;
	}

	/* If device is not idle fail the reset process */
	if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
			HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
		dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n",
			idle_mask[1], idle_mask[0]);
		rc = -EIO;
		goto out_err;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
		goto out_err;
	}

	if (hard_reset) {
		rc = device_late_init(hdev);
		if (rc) {
			dev_err(hdev->dev, "Failed late init after hard reset\n");
			goto out_err;
		}

		rc = hl_vm_init(hdev);
		if (rc) {
			dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
			goto out_err;
		}

		hl_fw_set_max_power(hdev);
	} else {
		rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
		if (rc) {
			if (reset_upon_device_release)
				dev_err(hdev->dev,
					"Failed late init in reset after device release\n");
			else
				dev_err(hdev->dev, "Failed late init after soft reset\n");
			goto out_err;
		}
	}

	spin_lock(&hdev->reset_info.lock);
	hdev->reset_info.is_in_soft_reset = false;

	/* Schedule hard reset only if requested and if not already in hard reset.
	 * We keep 'in_reset' enabled, so no other reset can go in during the hard
	 * reset schedule
	 */
	if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
		schedule_hard_reset = true;
	else
		hdev->reset_info.in_reset = 0;

	spin_unlock(&hdev->reset_info.lock);

	hdev->reset_info.needs_reset = false;

	dev_notice(hdev->dev, "Successfully finished resetting the device\n");

	if (hard_reset) {
		hdev->reset_info.hard_reset_cnt++;

		/* After reset is done, we are ready to receive events from
		 * the F/W. We can't do it before because we will ignore events
		 * and if those events are fatal, we won't know about it and
		 * the device will be operational although it shouldn't be
		 */
		hdev->asic_funcs->enable_events_from_fw(hdev);
	} else if (!reset_upon_device_release) {
		hdev->reset_info.soft_reset_cnt++;
	}

	if (schedule_hard_reset) {
		dev_info(hdev->dev, "Performing hard reset scheduled during soft reset\n");
		flags = hdev->reset_info.hard_reset_schedule_flags;
		hdev->reset_info.hard_reset_schedule_flags = 0;
		hdev->disabled = true;
		hard_reset = true;
		handle_reset_trigger(hdev, flags);
		goto again;
	}

	return 0;

out_err:
	hdev->disabled = true;
	hdev->reset_info.is_in_soft_reset = false;

	/* A failed soft-reset (incl. reset-on-release) escalates to a hard
	 * reset by looping back to 'again' with the HARD flag set; a failed
	 * hard reset leaves the device unusable
	 */
	if (hard_reset) {
		dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
		hdev->reset_info.hard_reset_cnt++;
	} else if (reset_upon_device_release) {
		dev_err(hdev->dev, "Failed to reset device after user release\n");
		flags |= HL_DRV_RESET_HARD;
		flags &= ~HL_DRV_RESET_DEV_RELEASE;
		hard_reset = true;
		goto again;
	} else {
		dev_err(hdev->dev, "Failed to do soft-reset\n");
		hdev->reset_info.soft_reset_cnt++;
		flags |= HL_DRV_RESET_HARD;
		hard_reset = true;
		goto again;
	}

	hdev->reset_info.in_reset = 0;

	return rc;
}
1325
1326 /*
1327  * hl_device_init - main initialization function for habanalabs device
1328  *
1329  * @hdev: pointer to habanalabs device structure
1330  *
1331  * Allocate an id for the device, do early initialization and then call the
1332  * ASIC specific initialization functions. Finally, create the cdev and the
1333  * Linux device to expose it to the user
1334  */
int hl_device_init(struct hl_device *hdev, struct class *hclass)
{
	int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
	char *name;
	bool add_cdev_sysfs_on_err = false;

	/* id / 2 for the user-visible name - presumably compute/control
	 * devices are allocated ids in pairs; confirm against id allocation
	 */
	name = kasprintf(GFP_KERNEL, "hl%d", hdev->id / 2);
	if (!name) {
		rc = -ENOMEM;
		goto out_disabled;
	}

	/* Initialize cdev and device structures */
	rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
				&hdev->cdev, &hdev->dev);

	kfree(name);

	if (rc)
		goto out_disabled;

	name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->id / 2);
	if (!name) {
		rc = -ENOMEM;
		goto free_dev;
	}

	/* Initialize cdev and device structures for control device */
	rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
				name, &hdev->cdev_ctrl, &hdev->dev_ctrl);

	kfree(name);

	if (rc)
		goto free_dev;

	/* Initialize ASIC function pointers and perform early init */
	rc = device_early_init(hdev);
	if (rc)
		goto free_dev_ctrl;

	user_interrupt_cnt = hdev->asic_prop.user_interrupt_count;

	if (user_interrupt_cnt) {
		hdev->user_interrupt = kcalloc(user_interrupt_cnt,
				sizeof(*hdev->user_interrupt),
				GFP_KERNEL);

		if (!hdev->user_interrupt) {
			rc = -ENOMEM;
			goto early_fini;
		}
	}

	/*
	 * Start calling ASIC initialization. First S/W then H/W and finally
	 * late init
	 */
	rc = hdev->asic_funcs->sw_init(hdev);
	if (rc)
		goto user_interrupts_fini;


	/* initialize completion structure for multi CS wait */
	hl_multi_cs_completion_init(hdev);

	/*
	 * Initialize the H/W queues. Must be done before hw_init, because
	 * there the addresses of the kernel queue are being written to the
	 * registers of the device
	 */
	rc = hl_hw_queues_create(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize kernel queues\n");
		goto sw_fini;
	}

	cq_cnt = hdev->asic_prop.completion_queues_count;

	/*
	 * Initialize the completion queues. Must be done before hw_init,
	 * because there the addresses of the completion queues are being
	 * passed as arguments to request_irq
	 */
	if (cq_cnt) {
		hdev->completion_queue = kcalloc(cq_cnt,
				sizeof(*hdev->completion_queue),
				GFP_KERNEL);

		if (!hdev->completion_queue) {
			dev_err(hdev->dev,
				"failed to allocate completion queues\n");
			rc = -ENOMEM;
			goto hw_queues_destroy;
		}
	}

	/* cq_ready_cnt tracks how many CQs were initialized, so the cq_fini
	 * error path only tears down the ones that actually succeeded
	 */
	for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
		rc = hl_cq_init(hdev, &hdev->completion_queue[i],
				hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
		if (rc) {
			dev_err(hdev->dev,
				"failed to initialize completion queue\n");
			goto cq_fini;
		}
		hdev->completion_queue[i].cq_idx = i;
	}

	/*
	 * Initialize the event queue. Must be done before hw_init,
	 * because there the address of the event queue is being
	 * passed as argument to request_irq
	 */
	rc = hl_eq_init(hdev, &hdev->event_queue);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize event queue\n");
		goto cq_fini;
	}

	/* MMU S/W must be initialized before kernel context is created */
	rc = hl_mmu_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
		goto eq_fini;
	}

	/* Allocate the kernel context */
	hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
	if (!hdev->kernel_ctx) {
		rc = -ENOMEM;
		goto mmu_fini;
	}

	hdev->is_compute_ctx_active = false;

	hdev->asic_funcs->state_dump_init(hdev);

	hl_debugfs_add_device(hdev);

	/* debugfs nodes are created in hl_ctx_init so it must be called after
	 * hl_debugfs_add_device.
	 */
	rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize kernel context\n");
		kfree(hdev->kernel_ctx);
		goto remove_device_from_debugfs;
	}

	rc = hl_cb_pool_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize CB pool\n");
		goto release_ctx;
	}

	/*
	 * From this point, override rc (=0) in case of an error to allow
	 * debugging (by adding char devices and create sysfs nodes as part of
	 * the error flow).
	 */
	add_cdev_sysfs_on_err = true;

	/* Device is now enabled as part of the initialization requires
	 * communication with the device firmware to get information that
	 * is required for the initialization itself
	 */
	hdev->disabled = false;

	rc = hdev->asic_funcs->hw_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "failed to initialize the H/W\n");
		rc = 0;
		goto out_disabled;
	}

	/* Check that the communication with the device is working */
	rc = hdev->asic_funcs->test_queues(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to detect if device is alive\n");
		rc = 0;
		goto out_disabled;
	}

	rc = device_late_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed late initialization\n");
		rc = 0;
		goto out_disabled;
	}

	dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
		hdev->asic_name,
		hdev->asic_prop.dram_size / SZ_1G);

	rc = hl_vm_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize memory module\n");
		rc = 0;
		goto out_disabled;
	}

	/*
	 * Expose devices and sysfs nodes to user.
	 * From here there is no need to add char devices and create sysfs nodes
	 * in case of an error.
	 */
	add_cdev_sysfs_on_err = false;
	rc = device_cdev_sysfs_add(hdev);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add char devices and sysfs nodes\n");
		rc = 0;
		goto out_disabled;
	}

	/* Need to call this again because the max power might change,
	 * depending on card type for certain ASICs
	 */
	if (hdev->asic_prop.set_max_power_on_device_init)
		hl_fw_set_max_power(hdev);

	/*
	 * hl_hwmon_init() must be called after device_late_init(), because only
	 * there we get the information from the device about which
	 * hwmon-related sensors the device supports.
	 * Furthermore, it must be done after adding the device to the system.
	 */
	rc = hl_hwmon_init(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize hwmon\n");
		rc = 0;
		goto out_disabled;
	}

	dev_notice(hdev->dev,
		"Successfully added device to habanalabs driver\n");

	hdev->init_done = true;

	/* After initialization is done, we are ready to receive events from
	 * the F/W. We can't do it before because we will ignore events and if
	 * those events are fatal, we won't know about it and the device will
	 * be operational although it shouldn't be
	 */
	hdev->asic_funcs->enable_events_from_fw(hdev);

	return 0;

release_ctx:
	if (hl_ctx_put(hdev->kernel_ctx) != 1)
		dev_err(hdev->dev,
			"kernel ctx is still alive on initialization failure\n");
remove_device_from_debugfs:
	hl_debugfs_remove_device(hdev);
mmu_fini:
	hl_mmu_fini(hdev);
eq_fini:
	hl_eq_fini(hdev, &hdev->event_queue);
cq_fini:
	for (i = 0 ; i < cq_ready_cnt ; i++)
		hl_cq_fini(hdev, &hdev->completion_queue[i]);
	kfree(hdev->completion_queue);
hw_queues_destroy:
	hl_hw_queues_destroy(hdev);
sw_fini:
	hdev->asic_funcs->sw_fini(hdev);
user_interrupts_fini:
	kfree(hdev->user_interrupt);
early_fini:
	device_early_fini(hdev);
free_dev_ctrl:
	put_device(hdev->dev_ctrl);
free_dev:
	put_device(hdev->dev);
out_disabled:
	hdev->disabled = true;
	if (add_cdev_sysfs_on_err)
		device_cdev_sysfs_add(hdev);
	if (hdev->pdev)
		dev_err(&hdev->pdev->dev,
			"Failed to initialize hl%d. Device is NOT usable !\n",
			hdev->id / 2);
	else
		pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
			hdev->id / 2);

	return rc;
}
1623
1624 /*
1625  * hl_device_fini - main tear-down function for habanalabs device
1626  *
1627  * @hdev: pointer to habanalabs device structure
1628  *
1629  * Destroy the device, call ASIC fini functions and release the id
1630  */
1631 void hl_device_fini(struct hl_device *hdev)
1632 {
1633         bool device_in_reset;
1634         ktime_t timeout;
1635         u64 reset_sec;
1636         int i, rc;
1637
1638         dev_info(hdev->dev, "Removing device\n");
1639
1640         hdev->device_fini_pending = 1;
1641         flush_delayed_work(&hdev->device_reset_work.reset_work);
1642
1643         if (hdev->pldm)
1644                 reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
1645         else
1646                 reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
1647
1648         /*
1649          * This function is competing with the reset function, so try to
1650          * take the reset atomic and if we are already in middle of reset,
1651          * wait until reset function is finished. Reset function is designed
1652          * to always finish. However, in Gaudi, because of all the network
1653          * ports, the hard reset could take between 10-30 seconds
1654          */
1655
1656         timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
1657
1658         spin_lock(&hdev->reset_info.lock);
1659         device_in_reset = !!hdev->reset_info.in_reset;
1660         if (!device_in_reset)
1661                 hdev->reset_info.in_reset = 1;
1662         spin_unlock(&hdev->reset_info.lock);
1663
1664         while (device_in_reset) {
1665                 usleep_range(50, 200);
1666
1667                 spin_lock(&hdev->reset_info.lock);
1668                 device_in_reset = !!hdev->reset_info.in_reset;
1669                 if (!device_in_reset)
1670                         hdev->reset_info.in_reset = 1;
1671                 spin_unlock(&hdev->reset_info.lock);
1672
1673                 if (ktime_compare(ktime_get(), timeout) > 0) {
1674                         dev_crit(hdev->dev,
1675                                 "Failed to remove device because reset function did not finish\n");
1676                         return;
1677                 }
1678         }
1679
	/* Disable PCI access from device F/W so it won't send us additional
	 * interrupts. We disable MSI/MSI-X at the halt_engines function and we
	 * can't have the F/W sending us interrupts after that. We need to
	 * disable the access here because if the device is marked disabled, the
	 * message won't be sent. Also, in case of heartbeat, the device CPU is
	 * marked as disabled so this message won't be sent.
	 */
1687         hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
1688
1689         /* Mark device as disabled */
1690         hdev->disabled = true;
1691
1692         take_release_locks(hdev);
1693
1694         hdev->reset_info.hard_reset_pending = true;
1695
1696         hl_hwmon_fini(hdev);
1697
1698         cleanup_resources(hdev, true, false, false);
1699
1700         /* Kill processes here after CS rollback. This is because the process
1701          * can't really exit until all its CSs are done, which is what we
1702          * do in cs rollback
1703          */
1704         dev_info(hdev->dev,
1705                 "Waiting for all processes to exit (timeout of %u seconds)",
1706                 HL_PENDING_RESET_LONG_SEC);
1707
1708         rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false);
1709         if (rc) {
1710                 dev_crit(hdev->dev, "Failed to kill all open processes\n");
1711                 device_disable_open_processes(hdev, false);
1712         }
1713
1714         rc = device_kill_open_processes(hdev, 0, true);
1715         if (rc) {
1716                 dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
1717                 device_disable_open_processes(hdev, true);
1718         }
1719
1720         hl_cb_pool_fini(hdev);
1721
1722         /* Reset the H/W. It will be in idle state after this returns */
1723         hdev->asic_funcs->hw_fini(hdev, true, false);
1724
1725         hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
1726
1727         /* Release kernel context */
1728         if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
1729                 dev_err(hdev->dev, "kernel ctx is still alive\n");
1730
1731         hl_debugfs_remove_device(hdev);
1732
1733         hl_vm_fini(hdev);
1734
1735         hl_mmu_fini(hdev);
1736
1737         hl_eq_fini(hdev, &hdev->event_queue);
1738
1739         for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1740                 hl_cq_fini(hdev, &hdev->completion_queue[i]);
1741         kfree(hdev->completion_queue);
1742         kfree(hdev->user_interrupt);
1743
1744         hl_hw_queues_destroy(hdev);
1745
1746         /* Call ASIC S/W finalize function */
1747         hdev->asic_funcs->sw_fini(hdev);
1748
1749         device_early_fini(hdev);
1750
1751         /* Hide devices and sysfs nodes from user */
1752         device_cdev_sysfs_del(hdev);
1753
1754         pr_info("removed device successfully\n");
1755 }
1756
1757 /*
1758  * MMIO register access helper functions.
1759  */
1760
1761 /*
1762  * hl_rreg - Read an MMIO register
1763  *
1764  * @hdev: pointer to habanalabs device structure
1765  * @reg: MMIO register offset (in bytes)
1766  *
1767  * Returns the value of the MMIO register we are asked to read
1768  *
1769  */
1770 inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
1771 {
1772         return readl(hdev->rmmio + reg);
1773 }
1774
/**
 * hl_wreg() - Write to an MMIO register.
 * @hdev: pointer to habanalabs device structure.
 * @reg: MMIO register offset (in bytes) relative to the mapped BAR.
 * @val: 32-bit value to write.
 *
 * Writes the 32-bit value into the MMIO register via the kernel mapping
 * of the device's register BAR (hdev->rmmio).
 */
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
        writel(val, hdev->rmmio + reg);
}