// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
			   __stringify(DRM_IVPU_DRIVER_MINOR) "."
#endif

static const struct drm_driver driver;

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

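/*
 * Each open file handle owns a reference-counted struct ivpu_file_priv.
 * ivpu_file_priv_get()/ivpu_file_priv_put() manage the kref; dropping the
 * last reference releases the context in file_priv_release().
 */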
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
{
	struct ivpu_file_priv *file_priv;

	xa_lock_irq(&vdev->context_xa);
	file_priv = xa_load(&vdev->context_xa, id);
	/* file_priv may still be in context_xa during file_priv_release() */
	if (file_priv && !kref_get_unless_zero(&file_priv->ref))
		file_priv = NULL;
	xa_unlock_irq(&vdev->context_xa);

	if (file_priv)
		ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
			 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);

	ivpu_cmdq_release_all(file_priv);
	ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
	ivpu_jsm_context_release(vdev, file_priv->ctx.id);
	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
	drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	drm_WARN_ON(&vdev->drm, !file_priv);

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

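/* The helpers below back the read-only DRM_IOCTL_IVPU_GET_PARAM queries. */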
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 0;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
	int ret;

	ret = ivpu_rpm_get_if_active(vdev);
	if (ret < 0)
		return ret;

	*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;

	if (ret)
		ivpu_rpm_put(vdev);

	return 0;
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		ret = ivpu_get_core_clock_rate(vdev, &args->value);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
		args->value = file_priv->priority;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
		if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
			file_priv->priority = args->value;
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

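/*
 * ivpu_open() allocates a context ID (SSID) from context_xa and binds a
 * per-process MMU user context to it; ivpu_postclose() drops the reference.
 */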
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	void *old;
	int ret;

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		return ret;
	}

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_xa_erase;
	}

	file_priv->vdev = vdev;
	file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_mutex_destroy;

	old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
		goto err_ctx_fini;
	}

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	file->driver_priv = file_priv;
	return 0;

err_ctx_fini:
	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
err_mutex_destroy:
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};

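/*
 * Poll the IPC boot channel until the firmware posts its ready message or
 * vdev->timeout.boot expires.
 */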
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ret = ivpu_ipc_irq_handler(vdev);
		if (ret)
			break;
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
	else
		ivpu_hw_diagnose_failure(vdev);

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		return ret;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);
	return 0;
}

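/* Quiesce interrupts, IPC and the MMU so the VPU can be reset or powered down. */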
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	ivpu_prepare_for_reset(vdev);

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	return ret;
}

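/*
 * The NPU is exposed as a compute accelerator (/dev/accel/accel<N>);
 * DRM_ACCEL_FOPS supplies the standard accel file operations.
 */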
static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,
	.gem_prime_import = ivpu_gem_prime_import,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRM_IVPU_DRIVER_MAJOR,
	.minor = DRM_IVPU_DRIVER_MINOR,
};

static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

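/* BAR0 exposes the VPU registers (RegV), BAR4 the buttress registers (RegB). */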
static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* VPU 37XX does not require 10ms D3hot delay */
	if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
		pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

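/*
 * Bring-up order: PCI and IRQ setup, HW info from buttress registers, early
 * power up, MMU contexts, firmware load, IPC, job-done thread, then boot.
 */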
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
		vdev->hw->ops = &ivpu_hw_40xx_ops;
		vdev->hw->dma_bits = 48;
	} else {
		vdev->hw->ops = &ivpu_hw_37xx_ops;
		vdev->hw->dma_bits = 38;
	}

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_info_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_power_down;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_power_down;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_job_done_thread_init(vdev);
	if (ret)
		goto err_ipc_fini;

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_job_done_thread_fini;

	ivpu_pm_enable(vdev);

	return 0;

err_job_done_thread_fini:
	ivpu_job_done_thread_fini(vdev);
err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_power_down:
	ivpu_hw_power_down(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_pm_disable(vdev);
	ivpu_shutdown(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
	ivpu_job_done_thread_fini(vdev);
	ivpu_pm_cancel_recovery(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

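/* Meteor Lake and Arrow Lake integrate the 37XX NPU, Lunar Lake the 40XX NPU. */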
static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);